From 1d6be8202ddbc728642995210c28b76fe7406eab Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Thu, 30 Nov 2023 22:58:18 +0530 Subject: [PATCH 01/99] Added file structure for other charts --- operations/deployment/helm/grafana/Chart.yaml | 5 +++++ .../helm/grafana/bitops.config.yaml | 14 ++++++++++++ .../helm/grafana/example.bitops.config.yaml | 15 +++++++++++++ .../helm/grafana/templates/aws-auth.yaml | 22 +++++++++++++++++++ .../deployment/helm/grafana/values.yaml | 21 ++++++++++++++++++ operations/deployment/helm/nginx/Chart.yaml | 5 +++++ .../deployment/helm/nginx/bitops.config.yaml | 14 ++++++++++++ .../helm/nginx/example.bitops.config.yaml | 15 +++++++++++++ .../helm/nginx/templates/aws-auth.yaml | 22 +++++++++++++++++++ operations/deployment/helm/nginx/values.yaml | 21 ++++++++++++++++++ .../deployment/helm/prometheus/Chart.yaml | 5 +++++ .../helm/prometheus/bitops.config.yaml | 14 ++++++++++++ .../prometheus/example.bitops.config.yaml | 15 +++++++++++++ .../helm/prometheus/templates/aws-auth.yaml | 22 +++++++++++++++++++ .../deployment/helm/prometheus/values.yaml | 21 ++++++++++++++++++ 15 files changed, 231 insertions(+) create mode 100644 operations/deployment/helm/grafana/Chart.yaml create mode 100644 operations/deployment/helm/grafana/bitops.config.yaml create mode 100644 operations/deployment/helm/grafana/example.bitops.config.yaml create mode 100644 operations/deployment/helm/grafana/templates/aws-auth.yaml create mode 100644 operations/deployment/helm/grafana/values.yaml create mode 100644 operations/deployment/helm/nginx/Chart.yaml create mode 100644 operations/deployment/helm/nginx/bitops.config.yaml create mode 100644 operations/deployment/helm/nginx/example.bitops.config.yaml create mode 100644 operations/deployment/helm/nginx/templates/aws-auth.yaml create mode 100644 operations/deployment/helm/nginx/values.yaml create mode 100644 operations/deployment/helm/prometheus/Chart.yaml create mode 100644 operations/deployment/helm/prometheus/bitops.config.yaml create mode 100644 operations/deployment/helm/prometheus/example.bitops.config.yaml create mode 100644 operations/deployment/helm/prometheus/templates/aws-auth.yaml create mode 100644 operations/deployment/helm/prometheus/values.yaml diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml new file mode 100644 index 0000000..db7b2d3 --- /dev/null +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: "1.0" +description: aws-auth configmap for EKS +name: aws-auth +version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml new file mode 100644 index 0000000..60d7548 --- /dev/null +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/grafana/example.bitops.config.yaml b/operations/deployment/helm/grafana/example.bitops.config.yaml new file mode 100644 index 0000000..eddb4b4 --- /dev/null +++ b/operations/deployment/helm/grafana/example.bitops.config.yaml @@ -0,0 +1,15 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + 
skip-deploy: false + k8s: + fetch: + kubeconfig: true + cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/aws-auth.yaml b/operations/deployment/helm/grafana/templates/aws-auth.yaml new file mode 100644 index 0000000..e41dea6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/aws-auth.yaml @@ -0,0 +1,22 @@ +{{- if .Values.configmap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.configmap.name }} +data: +{{- if .Values.data.mapAccounts }} + mapAccounts: | + {{ .Values.data.mapAccounts| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapRoles }} + mapRoles: | + {{ .Values.data.mapRoles| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapUsers }} + mapUsers: | + {{ .Values.data.mapUsers| nindent 4 | trim }} +{{- end }} + +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml new file mode 100644 index 0000000..90f836a --- /dev/null +++ b/operations/deployment/helm/grafana/values.yaml @@ -0,0 +1,21 @@ +configmap: + enabled: true + name: aws-auth +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/nginx/Chart.yaml b/operations/deployment/helm/nginx/Chart.yaml new file mode 100644 index 0000000..db7b2d3 --- /dev/null +++ b/operations/deployment/helm/nginx/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: "1.0" +description: aws-auth configmap for EKS +name: aws-auth +version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/nginx/bitops.config.yaml b/operations/deployment/helm/nginx/bitops.config.yaml new file mode 100644 index 0000000..60d7548 --- /dev/null +++ b/operations/deployment/helm/nginx/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/nginx/example.bitops.config.yaml b/operations/deployment/helm/nginx/example.bitops.config.yaml new file mode 100644 index 0000000..eddb4b4 --- /dev/null +++ b/operations/deployment/helm/nginx/example.bitops.config.yaml @@ -0,0 +1,15 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true + cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/nginx/templates/aws-auth.yaml b/operations/deployment/helm/nginx/templates/aws-auth.yaml new file mode 100644 index 0000000..e41dea6 --- /dev/null +++ b/operations/deployment/helm/nginx/templates/aws-auth.yaml @@ -0,0 +1,22 @@ +{{- if .Values.configmap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.configmap.name }} +data: +{{- if .Values.data.mapAccounts }} + mapAccounts: | + {{ 
.Values.data.mapAccounts| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapRoles }} + mapRoles: | + {{ .Values.data.mapRoles| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapUsers }} + mapUsers: | + {{ .Values.data.mapUsers| nindent 4 | trim }} +{{- end }} + +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/nginx/values.yaml b/operations/deployment/helm/nginx/values.yaml new file mode 100644 index 0000000..90f836a --- /dev/null +++ b/operations/deployment/helm/nginx/values.yaml @@ -0,0 +1,21 @@ +configmap: + enabled: true + name: aws-auth +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/Chart.yaml b/operations/deployment/helm/prometheus/Chart.yaml new file mode 100644 index 0000000..db7b2d3 --- /dev/null +++ b/operations/deployment/helm/prometheus/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: "1.0" +description: aws-auth configmap for EKS +name: aws-auth +version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/bitops.config.yaml b/operations/deployment/helm/prometheus/bitops.config.yaml new file mode 100644 index 0000000..60d7548 --- /dev/null +++ b/operations/deployment/helm/prometheus/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/example.bitops.config.yaml b/operations/deployment/helm/prometheus/example.bitops.config.yaml new file mode 100644 index 0000000..eddb4b4 --- /dev/null +++ b/operations/deployment/helm/prometheus/example.bitops.config.yaml @@ -0,0 +1,15 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true + cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/templates/aws-auth.yaml b/operations/deployment/helm/prometheus/templates/aws-auth.yaml new file mode 100644 index 0000000..e41dea6 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/aws-auth.yaml @@ -0,0 +1,22 @@ +{{- if .Values.configmap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.configmap.name }} +data: +{{- if .Values.data.mapAccounts }} + mapAccounts: | + {{ .Values.data.mapAccounts| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapRoles }} + mapRoles: | + {{ .Values.data.mapRoles| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapUsers }} + mapUsers: | + {{ .Values.data.mapUsers| nindent 4 | trim }} +{{- end }} + +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/values.yaml b/operations/deployment/helm/prometheus/values.yaml new file mode 100644 index 0000000..90f836a --- /dev/null +++ 
b/operations/deployment/helm/prometheus/values.yaml @@ -0,0 +1,21 @@ +configmap: + enabled: true + name: aws-auth +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file From 3a941349ddc276c21e5c9f561d85eb731cc04c5c Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Thu, 30 Nov 2023 23:02:49 +0530 Subject: [PATCH 02/99] changed names --- operations/deployment/helm/grafana/Chart.yaml | 4 ++-- operations/deployment/helm/grafana/bitops.config.yaml | 2 +- operations/deployment/helm/grafana/example.bitops.config.yaml | 2 +- .../helm/grafana/templates/{aws-auth.yaml => grafana.yaml} | 0 operations/deployment/helm/grafana/values.yaml | 2 +- operations/deployment/helm/nginx/Chart.yaml | 4 ++-- operations/deployment/helm/nginx/bitops.config.yaml | 2 +- operations/deployment/helm/nginx/example.bitops.config.yaml | 2 +- .../helm/nginx/templates/{aws-auth.yaml => nginx.yaml} | 0 operations/deployment/helm/nginx/values.yaml | 2 +- operations/deployment/helm/prometheus/Chart.yaml | 4 ++-- operations/deployment/helm/prometheus/bitops.config.yaml | 2 +- .../deployment/helm/prometheus/example.bitops.config.yaml | 2 +- .../prometheus/templates/{aws-auth.yaml => prometheus.yaml} | 0 operations/deployment/helm/prometheus/values.yaml | 2 +- 15 files changed, 15 insertions(+), 15 deletions(-) rename operations/deployment/helm/grafana/templates/{aws-auth.yaml => grafana.yaml} (100%) rename operations/deployment/helm/nginx/templates/{aws-auth.yaml => nginx.yaml} (100%) rename operations/deployment/helm/prometheus/templates/{aws-auth.yaml => prometheus.yaml} (100%) diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml index db7b2d3..39be235 100644 --- a/operations/deployment/helm/grafana/Chart.yaml +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth +description: grafana configmap for EKS +name: grafana version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml index 60d7548..0d5d6df 100644 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -7,7 +7,7 @@ helm: force: false dry-run: false options: - release-name: aws-auth + release-name: grafana skip-deploy: false k8s: fetch: diff --git a/operations/deployment/helm/grafana/example.bitops.config.yaml b/operations/deployment/helm/grafana/example.bitops.config.yaml index eddb4b4..4a05f9f 100644 --- a/operations/deployment/helm/grafana/example.bitops.config.yaml +++ b/operations/deployment/helm/grafana/example.bitops.config.yaml @@ -7,7 +7,7 @@ helm: force: false dry-run: false options: - release-name: aws-auth + release-name: grafana skip-deploy: false k8s: fetch: diff --git a/operations/deployment/helm/grafana/templates/aws-auth.yaml b/operations/deployment/helm/grafana/templates/grafana.yaml similarity index 100% rename from 
operations/deployment/helm/grafana/templates/aws-auth.yaml rename to operations/deployment/helm/grafana/templates/grafana.yaml diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml index 90f836a..d043012 100644 --- a/operations/deployment/helm/grafana/values.yaml +++ b/operations/deployment/helm/grafana/values.yaml @@ -1,6 +1,6 @@ configmap: enabled: true - name: aws-auth + name: grafana data: # account number diff --git a/operations/deployment/helm/nginx/Chart.yaml b/operations/deployment/helm/nginx/Chart.yaml index db7b2d3..379c5ba 100644 --- a/operations/deployment/helm/nginx/Chart.yaml +++ b/operations/deployment/helm/nginx/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth +description: nginx configmap for EKS +name: nginx version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/nginx/bitops.config.yaml b/operations/deployment/helm/nginx/bitops.config.yaml index 60d7548..3fc43df 100644 --- a/operations/deployment/helm/nginx/bitops.config.yaml +++ b/operations/deployment/helm/nginx/bitops.config.yaml @@ -7,7 +7,7 @@ helm: force: false dry-run: false options: - release-name: aws-auth + release-name: nginx skip-deploy: false k8s: fetch: diff --git a/operations/deployment/helm/nginx/example.bitops.config.yaml b/operations/deployment/helm/nginx/example.bitops.config.yaml index eddb4b4..b65e3f9 100644 --- a/operations/deployment/helm/nginx/example.bitops.config.yaml +++ b/operations/deployment/helm/nginx/example.bitops.config.yaml @@ -7,7 +7,7 @@ helm: force: false dry-run: false options: - release-name: aws-auth + release-name: anginx skip-deploy: false k8s: fetch: diff --git a/operations/deployment/helm/nginx/templates/aws-auth.yaml b/operations/deployment/helm/nginx/templates/nginx.yaml similarity index 100% rename from operations/deployment/helm/nginx/templates/aws-auth.yaml rename to operations/deployment/helm/nginx/templates/nginx.yaml diff --git a/operations/deployment/helm/nginx/values.yaml b/operations/deployment/helm/nginx/values.yaml index 90f836a..9a44d60 100644 --- a/operations/deployment/helm/nginx/values.yaml +++ b/operations/deployment/helm/nginx/values.yaml @@ -1,6 +1,6 @@ configmap: enabled: true - name: aws-auth + name: nginx data: # account number diff --git a/operations/deployment/helm/prometheus/Chart.yaml b/operations/deployment/helm/prometheus/Chart.yaml index db7b2d3..dce0507 100644 --- a/operations/deployment/helm/prometheus/Chart.yaml +++ b/operations/deployment/helm/prometheus/Chart.yaml @@ -1,5 +1,5 @@ apiVersion: v2 appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth +description: prometheus configmap for EKS +name: prometheus version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/bitops.config.yaml b/operations/deployment/helm/prometheus/bitops.config.yaml index 60d7548..733fa72 100644 --- a/operations/deployment/helm/prometheus/bitops.config.yaml +++ b/operations/deployment/helm/prometheus/bitops.config.yaml @@ -7,7 +7,7 @@ helm: force: false dry-run: false options: - release-name: aws-auth + release-name: prometheus skip-deploy: false k8s: fetch: diff --git a/operations/deployment/helm/prometheus/example.bitops.config.yaml b/operations/deployment/helm/prometheus/example.bitops.config.yaml index eddb4b4..578f706 100644 --- a/operations/deployment/helm/prometheus/example.bitops.config.yaml +++ 
b/operations/deployment/helm/prometheus/example.bitops.config.yaml @@ -7,7 +7,7 @@ helm: force: false dry-run: false options: - release-name: aws-auth + release-name: prometheus skip-deploy: false k8s: fetch: diff --git a/operations/deployment/helm/prometheus/templates/aws-auth.yaml b/operations/deployment/helm/prometheus/templates/prometheus.yaml similarity index 100% rename from operations/deployment/helm/prometheus/templates/aws-auth.yaml rename to operations/deployment/helm/prometheus/templates/prometheus.yaml diff --git a/operations/deployment/helm/prometheus/values.yaml b/operations/deployment/helm/prometheus/values.yaml index 90f836a..3e33c6c 100644 --- a/operations/deployment/helm/prometheus/values.yaml +++ b/operations/deployment/helm/prometheus/values.yaml @@ -1,6 +1,6 @@ configmap: enabled: true - name: aws-auth + name: prometheus data: # account number From 781d6e5d01de566c61c7025f8d3769b5495a7775 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 4 Dec 2023 20:56:14 +0530 Subject: [PATCH 03/99] Test --- action.yaml | 2 +- .../deployment/helm/grafana/bitops.config.yaml | 14 -------------- .../helm/grafana/example.bitops.config.yaml | 17 ++++++++++++++++- .../deployment/helm/nginx/bitops.config.yaml | 14 -------------- .../helm/nginx/example.bitops.config.yaml | 17 ++++++++++++++++- .../helm/prometheus/bitops.config.yaml | 14 -------------- .../helm/prometheus/example.bitops.config.yaml | 17 ++++++++++++++++- 7 files changed, 49 insertions(+), 46 deletions(-) delete mode 100644 operations/deployment/helm/grafana/bitops.config.yaml delete mode 100644 operations/deployment/helm/nginx/bitops.config.yaml delete mode 100644 operations/deployment/helm/prometheus/bitops.config.yaml diff --git a/action.yaml b/action.yaml index 9ab566c..3d396e8 100644 --- a/action.yaml +++ b/action.yaml @@ -1,4 +1,4 @@ -name: 'Deploy ESK to AWS' +name: 'Deploy EKS to AWS' description: 'Deploy Kubernetes Service (EKS) to AWS' branding: icon: upload-cloud diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml deleted file mode 100644 index 0d5d6df..0000000 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: grafana - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/grafana/example.bitops.config.yaml b/operations/deployment/helm/grafana/example.bitops.config.yaml index 4a05f9f..40e4319 100644 --- a/operations/deployment/helm/grafana/example.bitops.config.yaml +++ b/operations/deployment/helm/grafana/example.bitops.config.yaml @@ -12,4 +12,19 @@ helm: k8s: fetch: kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file + cluster-name: prod-ekscluster + +# helm: +# cli: +# namespace: kube-system +# timeout: 200s +# debug: true +# atomic: false +# force: false +# dry-run: false +# options: +# release-name: grafana +# skip-deploy: false +# k8s: +# fetch: +# kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/nginx/bitops.config.yaml b/operations/deployment/helm/nginx/bitops.config.yaml deleted file mode 100644 index 3fc43df..0000000 --- a/operations/deployment/helm/nginx/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - 
options: - release-name: nginx - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/nginx/example.bitops.config.yaml b/operations/deployment/helm/nginx/example.bitops.config.yaml index b65e3f9..7f67bad 100644 --- a/operations/deployment/helm/nginx/example.bitops.config.yaml +++ b/operations/deployment/helm/nginx/example.bitops.config.yaml @@ -12,4 +12,19 @@ helm: k8s: fetch: kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file + cluster-name: prod-ekscluster + +# helm: +# cli: +# namespace: kube-system +# timeout: 200s +# debug: true +# atomic: false +# force: false +# dry-run: false +# options: +# release-name: nginx +# skip-deploy: false +# k8s: +# fetch: +# kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/bitops.config.yaml b/operations/deployment/helm/prometheus/bitops.config.yaml deleted file mode 100644 index 733fa72..0000000 --- a/operations/deployment/helm/prometheus/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: prometheus - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/example.bitops.config.yaml b/operations/deployment/helm/prometheus/example.bitops.config.yaml index 578f706..980002e 100644 --- a/operations/deployment/helm/prometheus/example.bitops.config.yaml +++ b/operations/deployment/helm/prometheus/example.bitops.config.yaml @@ -12,4 +12,19 @@ helm: k8s: fetch: kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file + cluster-name: prod-ekscluster + +# helm: +# cli: +# namespace: kube-system +# timeout: 200s +# debug: true +# atomic: false +# force: false +# dry-run: false +# options: +# release-name: prometheus +# skip-deploy: false +# k8s: +# fetch: +# kubeconfig: true \ No newline at end of file From ca8a8ad2249ba5b5c9e20a7ad0ef2679dbaefb50 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Tue, 5 Dec 2023 18:54:30 +0530 Subject: [PATCH 04/99] Changed grafana version --- operations/deployment/helm/grafana/Chart.yaml | 2 +- operations/deployment/helm/nginx/Chart.yaml | 5 ---- .../helm/nginx/example.bitops.config.yaml | 30 ------------------- .../helm/nginx/templates/nginx.yaml | 22 -------------- operations/deployment/helm/nginx/values.yaml | 21 ------------- .../deployment/helm/prometheus/Chart.yaml | 5 ---- .../prometheus/example.bitops.config.yaml | 30 ------------------- .../helm/prometheus/templates/prometheus.yaml | 22 -------------- .../deployment/helm/prometheus/values.yaml | 21 ------------- 9 files changed, 1 insertion(+), 157 deletions(-) delete mode 100644 operations/deployment/helm/nginx/Chart.yaml delete mode 100644 operations/deployment/helm/nginx/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/nginx/templates/nginx.yaml delete mode 100644 operations/deployment/helm/nginx/values.yaml delete mode 100644 operations/deployment/helm/prometheus/Chart.yaml delete mode 100644 operations/deployment/helm/prometheus/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/prometheus.yaml delete mode 100644 operations/deployment/helm/prometheus/values.yaml diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml index 39be235..e49b34c 100644 --- 
a/operations/deployment/helm/grafana/Chart.yaml +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -2,4 +2,4 @@ apiVersion: v2 appVersion: "1.0" description: grafana configmap for EKS name: grafana -version: 0.1.0 \ No newline at end of file +version: 9.1.7 \ No newline at end of file diff --git a/operations/deployment/helm/nginx/Chart.yaml b/operations/deployment/helm/nginx/Chart.yaml deleted file mode 100644 index 379c5ba..0000000 --- a/operations/deployment/helm/nginx/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: "1.0" -description: nginx configmap for EKS -name: nginx -version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/nginx/example.bitops.config.yaml b/operations/deployment/helm/nginx/example.bitops.config.yaml deleted file mode 100644 index 7f67bad..0000000 --- a/operations/deployment/helm/nginx/example.bitops.config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: anginx - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster - -# helm: -# cli: -# namespace: kube-system -# timeout: 200s -# debug: true -# atomic: false -# force: false -# dry-run: false -# options: -# release-name: nginx -# skip-deploy: false -# k8s: -# fetch: -# kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/nginx/templates/nginx.yaml b/operations/deployment/helm/nginx/templates/nginx.yaml deleted file mode 100644 index e41dea6..0000000 --- a/operations/deployment/helm/nginx/templates/nginx.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.configmap.name }} -data: -{{- if .Values.data.mapAccounts }} - mapAccounts: | - {{ .Values.data.mapAccounts| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapRoles }} - mapRoles: | - {{ .Values.data.mapRoles| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapUsers }} - mapUsers: | - {{ .Values.data.mapUsers| nindent 4 | trim }} -{{- end }} - -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/nginx/values.yaml b/operations/deployment/helm/nginx/values.yaml deleted file mode 100644 index 9a44d60..0000000 --- a/operations/deployment/helm/nginx/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -configmap: - enabled: true - name: nginx -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/Chart.yaml b/operations/deployment/helm/prometheus/Chart.yaml deleted file mode 100644 index dce0507..0000000 --- a/operations/deployment/helm/prometheus/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: "1.0" -description: prometheus configmap for EKS -name: prometheus -version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/example.bitops.config.yaml b/operations/deployment/helm/prometheus/example.bitops.config.yaml deleted file mode 100644 index 
980002e..0000000 --- a/operations/deployment/helm/prometheus/example.bitops.config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: prometheus - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster - -# helm: -# cli: -# namespace: kube-system -# timeout: 200s -# debug: true -# atomic: false -# force: false -# dry-run: false -# options: -# release-name: prometheus -# skip-deploy: false -# k8s: -# fetch: -# kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/templates/prometheus.yaml b/operations/deployment/helm/prometheus/templates/prometheus.yaml deleted file mode 100644 index e41dea6..0000000 --- a/operations/deployment/helm/prometheus/templates/prometheus.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.configmap.name }} -data: -{{- if .Values.data.mapAccounts }} - mapAccounts: | - {{ .Values.data.mapAccounts| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapRoles }} - mapRoles: | - {{ .Values.data.mapRoles| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapUsers }} - mapUsers: | - {{ .Values.data.mapUsers| nindent 4 | trim }} -{{- end }} - -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/values.yaml b/operations/deployment/helm/prometheus/values.yaml deleted file mode 100644 index 3e33c6c..0000000 --- a/operations/deployment/helm/prometheus/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -configmap: - enabled: true - name: prometheus -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file From c0694f65b115ad87579a511e0e90ccf9d36a7cf0 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Tue, 5 Dec 2023 21:46:00 +0530 Subject: [PATCH 05/99] values.yaml edit --- operations/deployment/helm/grafana/Chart.yaml | 19 +- .../deployment/helm/grafana/values.yaml | 1285 ++++++++++++++++- 2 files changed, 1300 insertions(+), 4 deletions(-) diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml index e49b34c..b929aff 100644 --- a/operations/deployment/helm/grafana/Chart.yaml +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -1,5 +1,18 @@ apiVersion: v2 -appVersion: "1.0" -description: grafana configmap for EKS name: grafana -version: 9.1.7 \ No newline at end of file +version: 7.0.13 +appVersion: 10.2.2 +kubeVersion: "^1.8.0-0" +description: The leading tool for querying and visualizing time series and metrics. 
+home: https://grafana.com +icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 +sources: + - https://github.com/grafana/grafana + - https://github.com/grafana/helm-charts +annotations: + "artifacthub.io/license": AGPL-3.0-only + "artifacthub.io/links": | + - name: Chart Source + url: https://github.com/grafana/helm-charts + - name: Upstream Project + url: https://github.com/grafana/grafana \ No newline at end of file diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml index d043012..6278369 100644 --- a/operations/deployment/helm/grafana/values.yaml +++ b/operations/deployment/helm/grafana/values.yaml @@ -1,6 +1,735 @@ +global: + # -- Overrides the Docker registry globally for all images + imageRegistry: null + + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # Can be tempalted. + # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-role + # useExistingClusterRole: name-of-some-clusterRole + pspEnabled: false + pspUseAppArmor: false + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: + ## ServiceAccount labels. + labels: {} +## Service account annotations. Can be templated. +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + autoMount: true + +replicas: 1 + +## Create a headless service for the deployment +headlessService: false + +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# apiVersion: "" +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/grafana + # Overrides the Grafana image tag whose default is the chart appVersion + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Can be templated. 
+ ## + pullSecrets: [] + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: + # -- The Docker registry + registry: docker.io + repository: bats/bats + tag: "v1.4.1" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + +# Enable creating the grafana configmap +createConfigmap: true + +# Extra configmaps to mount in grafana pods +# Values are templated. +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. +extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + # -- The Docker registry + registry: docker.io + repository: curlimages/curl + tag: 7.85.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana +gossipPortName: gossip +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + enabled: true + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + ## Service annotations. Can be templated. + annotations: {} + labels: {} + portName: service + # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 30s + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + metricRelabelings: [] + targetLabels: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s >= 1.1= + pathType: Prefix + + hosts: + - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Topology Spread Constraints +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] + +## Additional init containers (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: "" +# extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + ## Sub-directory of the PV to mount. Can be templated. + # subPath: "" + ## Name of an existing PVC. Can be templated. + # existingClaim: + ## Extra labels to apply to a PVC. 
+ extraPvcLabels: {} + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the grafana-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + # -- The Docker registry + registry: docker.io + repository: library/busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + securityContext: + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + capabilities: + add: + - CHOWN + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + ## Name of the secret. Can be templated. + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Optionally define args if command is used +## Needed if using `hashicorp/envconsul` to manage secrets +## By default no arguments are set +# args: +# - "-secret" +# - "secret/grafana" +# - "./grafana" + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... 
+## - name: +## valueFrom: +## +envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc. +## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm +## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function +envRenderSecret: {} + +## The names of secrets in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. +## Name is templated. +envFromSecrets: [] +## - name: secret-name +## optional: true + +## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. +## Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core +envFromConfigMaps: [] +## - name: configmap-name +## optional: true + +# Inject Kubernetes services as environment variables. +# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables +enableServiceLinks: true + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ + # - name: grafana-secrets + # mountPath: /mnt/volume2 + # csi: true + # data: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "grafana-env-spc" + +## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request +lifecycleHooks: {} + # postStart: + # exec: + # command: [] + +## Pass the plugins you want installed as a list. 
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + ## You can also use other plugin download URL, as long as they are valid zip files, + ## and specify the name of the plugin after the semicolon. Like this: + # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 +# deleteDatasources: [] +# - name: Prometheus + +## Configure grafana alerting (can be templated) +## ref: http://docs.grafana.org/administration/provisioning/#alerting +## +alerting: {} + # rules.yaml: + # apiVersion: 1 + # groups: + # - orgId: 1 + # name: '{{ .Chart.Name }}_my_rule_group' + # folder: my_first_folder + # interval: 60s + # rules: + # - uid: my_id_1 + # title: my_first_rule + # condition: A + # data: + # - refId: A + # datasourceUid: '-100' + # model: + # conditions: + # - evaluator: + # params: + # - 3 + # type: gt + # operator: + # type: and + # query: + # params: + # - A + # reducer: + # type: last + # type: query + # datasource: + # type: __expr__ + # uid: '-100' + # expression: 1==0 + # intervalMs: 1000 + # maxDataPoints: 43200 + # refId: A + # type: math + # dashboardUid: my_dashboard + # panelId: 123 + # noDataState: Alerting + # for: 60s + # annotations: + # some_key: some_value + # labels: + # team: sre_team_1 + # contactpoints.yaml: + # secret: + # apiVersion: 1 + # contactPoints: + # - orgId: 1 + # name: cp_1 + # receivers: + # - uid: first_uid + # type: pagerduty + # settings: + # integrationKey: XXX + # severity: critical + # class: ping failure + # component: Grafana + # group: app-stack + # summary: | + # {{ `{{ include "default.message" . }}` }} + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + # local-dashboard-gitlab: + # url: https://example.com/repository/test-gitlab.json + # gitlabToken: '' + # local-dashboard-bitbucket: + # url: https://example.com/repository/test-bitbucket.json + # bearerToken: '' + # local-dashboard-azure: + # url: https://example.com/repository/test-azure.json + # basic: '' + # acceptHeader: '*/*' + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON configmap: enabled: true name: grafana + data: # account number @@ -18,4 +747,558 @@ data: "username": "system:node:{{EC2PrivateDNSName}}" - "groups": - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + server: + domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. 
+ existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + # -- The Docker registry + registry: quay.io + repository: kiwigrid/k8s-sidecar + tag: 1.25.2 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + readinessProbe: {} + livenessProbe: {} + # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO + # logLevel: INFO + alerts: + enabled: false + # Additional environment variables for the alerts sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with alert are marked with + label: grafana_alert + # value of label that the configmaps with alert are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for alert config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" + # Absolute path to shell script to execute after a alert got reloaded + script: null + skipReload: false + # This is needed if skipReload is true, to load any alerts defined at startup time. + # Deploy the alert sidecar as an initContainer. 
+ initAlerts: false + # Additional alert sidecar volume mounts + extraMounts: [] + # Sets the size limit of the alert sidecar emptyDir volume + sizeLimit: {} + dashboards: + enabled: false + # Additional environment variables for the dashboards sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces. + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. + folderAnnotation: null + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" + # Absolute path to shell script to execute after a configmap got reloaded + script: null + skipReload: false + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + # Additional dashboard sidecar volume mounts + extraMounts: [] + # Sets the size limit of the dashboard sidecar emptyDir volume + sizeLimit: {} + datasources: + enabled: false + # Additional environment variables for the datasourcessidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. 
+ # ignoreAlreadyProcessed: true + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload datasources + reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" + # Absolute path to shell script to execute after a datasource got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any datasources defined at startup time. + initDatasources: false + # Sets the size limit of the datasource sidecar emptyDir volume + sizeLimit: {} + plugins: + enabled: false + # Additional environment variables for the plugins sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with plugins are marked with + label: grafana_plugin + # value of label that the configmaps with plugins are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for plugin config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) 
+ # watchClientTimeout: 60 + # + # Endpoint to send request to reload plugins + reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" + # Absolute path to shell script to execute after a plugin got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any plugins defined at startup time. + initPlugins: false + # Sets the size limit of the plugin sidecar emptyDir volume + sizeLimit: {} + notifiers: + enabled: false + # Additional environment variables for the notifierssidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # value of label that the configmaps with notifiers are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload notifiers + reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" + # Absolute path to shell script to execute after a notifier got reloaded + script: null + skipReload: false + # Deploy the notifier sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any notifiers defined at startup time. + initNotifiers: false + # Sets the size limit of the notifier sidecar emptyDir volume + sizeLimit: {} + +## Override the deployment namespace +## +namespaceOverride: "" + +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + deploymentStrategy: {} + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + image: + # -- The Docker registry + registry: docker.io + # image-renderer Image repository + repository: grafana/grafana-image-renderer + # image-renderer Image tag + tag: latest + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # IGNORE_HTTPS_ERRORS: true + + ## "valueFrom" environment variable references that will be added to deployment pods. 
Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment container securityContext + containerSecurityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: ['ALL'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ## image-renderer pod annotation + podAnnotations: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # Enable the image-renderer service + enabled: true + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels + targetLabels: [] + # - targetLabel1 + # - targetLabel2 + # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana + grafanaProtocol: http + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) + extraIngressSelectors: [] + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + ## Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## Affinity for pod assignment (evaluated as template) + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: "default-scheduler" + +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
+ ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to grafana port defined. + ## When true, grafana will accept connections from any source + ## (with the correct destination port). + ## + ingress: true + ## @param networkPolicy.ingress When true enables the creation + ## an ingress network policy + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the grafana. + ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## + ## + ## + ## + ## + ## + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing grafana to connect to external data sources from kubernetes cluster. + enabled: false + ## + ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked + ## for all pods in the grafana namespace. + blockDNSResolution: false + ## + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ports: [] + ## Add ports to the egress by specifying - port: + ## E.X. + ## - port: 80 + ## - port: 443 + ## + ## @param networkPolicy.egress.to Allow egress traffic to specific destinations + to: [] + ## Add destinations to the egress by specifying - ipBlock: + ## E.X. + ## to: + ## - namespaceSelector: + ## matchExpressions: + ## - {key: role, operator: In, values: [grafana]} + ## + ## + ## + ## + ## + +# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option +enableKubeBackwardCompatibility: false +useStatefulSet: false +# Create a dynamic manifests via values: +extraObjects: [] + # - apiVersion: "kubernetes-client.io/v1" + # kind: ExternalSecret + # metadata: + # name: grafana-secrets + # spec: + # backendType: gcpSecretsManager + # data: + # - key: grafana-admin-password + # name: adminPassword \ No newline at end of file From a49fc989b786a72ff2ab968c9a28b0cba73a2191 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Tue, 5 Dec 2023 23:00:58 +0530 Subject: [PATCH 06/99] destroy tf stack --- action.yaml | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/action.yaml b/action.yaml index 3d396e8..e99fded 100644 --- a/action.yaml +++ b/action.yaml @@ -21,6 +21,9 @@ inputs: tf_state_bucket_destroy: description: 'Force purge and deletion of S3 bucket defined. Any file contained there will be destroyed. Will only run if aws_eks_create is set to false.' required: false + tf_stack_destroy: + description: 'Set to "true" to Destroy the stack through Terraform.' 
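+    # Minimal, illustrative caller snippet for this input (the action reference and step layout
+    # below are placeholders, not part of this repository):
+    #   - uses: your-org/your-eks-action@main
+    #     with:
+    #       tf_stack_destroy: true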
+ required: false # AWS aws_access_key_id: @@ -121,6 +124,7 @@ runs: bitops_code_store: ${{ inputs.bitops_code_store }} tf_state_bucket: ${{ inputs.tf_state_bucket }} tf_state_bucket_destroy: ${{ inputs.tf_state_bucket_destroy }} + tf_stack_destroy: ${{ inputs.tf_stack_destroy }} # Current repo vars gh_action_repo: ${{ github.action_path }} From 5b08313e234fdfe6d4a9cace4571b411fb7b3ac8 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 18:31:01 +0530 Subject: [PATCH 07/99] Added env file --- operations/deployment/helm/grafana/.env | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 operations/deployment/helm/grafana/.env diff --git a/operations/deployment/helm/grafana/.env b/operations/deployment/helm/grafana/.env new file mode 100644 index 0000000..8ce79ba --- /dev/null +++ b/operations/deployment/helm/grafana/.env @@ -0,0 +1,2 @@ +GF_SECURITY_ADMIN_USER=admin +GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file From babb4dcd31638e9743125bc6452b130807d8f2c3 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 18:44:24 +0530 Subject: [PATCH 08/99] env file rename --- operations/deployment/helm/grafana/{.env => ENV_FILE} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename operations/deployment/helm/grafana/{.env => ENV_FILE} (100%) diff --git a/operations/deployment/helm/grafana/.env b/operations/deployment/helm/grafana/ENV_FILE similarity index 100% rename from operations/deployment/helm/grafana/.env rename to operations/deployment/helm/grafana/ENV_FILE From 73babd68ef90cb54ed3d50f20dd26e708abd9b49 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 19:15:59 +0530 Subject: [PATCH 09/99] Added bitOps file --- .../deployment/helm/grafana/bitops.config.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 operations/deployment/helm/grafana/bitops.config.yaml diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml new file mode 100644 index 0000000..0d5d6df --- /dev/null +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: grafana + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file From 43f25354ea292a9cf0ac31bc5ab431e247d88dd3 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 20:40:04 +0530 Subject: [PATCH 10/99] Changed namespace --- operations/deployment/helm/grafana/bitops.config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml index 0d5d6df..5ed1eb3 100644 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -1,6 +1,6 @@ helm: cli: - namespace: kube-system + namespace: grafana timeout: 200s debug: true atomic: false From e03758b91524f72743de6f7e63b4ef4a31a60510 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 21:39:20 +0530 Subject: [PATCH 11/99] Removed aws-auth config folder --- .../deployment/helm/aws-auth/Chart.yaml | 5 ----- .../helm/aws-auth/bitops.config.yaml | 14 ------------ .../helm/aws-auth/example.bitops.config.yaml | 15 ------------- .../helm/aws-auth/templates/aws-auth.yaml | 22 ------------------- .../deployment/helm/aws-auth/values.yaml | 21 ------------------ 5 files changed, 77 deletions(-) delete 
mode 100644 operations/deployment/helm/aws-auth/Chart.yaml delete mode 100644 operations/deployment/helm/aws-auth/bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth/templates/aws-auth.yaml delete mode 100644 operations/deployment/helm/aws-auth/values.yaml diff --git a/operations/deployment/helm/aws-auth/Chart.yaml b/operations/deployment/helm/aws-auth/Chart.yaml deleted file mode 100644 index db7b2d3..0000000 --- a/operations/deployment/helm/aws-auth/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth -version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/bitops.config.yaml b/operations/deployment/helm/aws-auth/bitops.config.yaml deleted file mode 100644 index 60d7548..0000000 --- a/operations/deployment/helm/aws-auth/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/example.bitops.config.yaml b/operations/deployment/helm/aws-auth/example.bitops.config.yaml deleted file mode 100644 index eddb4b4..0000000 --- a/operations/deployment/helm/aws-auth/example.bitops.config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml deleted file mode 100644 index e41dea6..0000000 --- a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.configmap.name }} -data: -{{- if .Values.data.mapAccounts }} - mapAccounts: | - {{ .Values.data.mapAccounts| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapRoles }} - mapRoles: | - {{ .Values.data.mapRoles| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapUsers }} - mapUsers: | - {{ .Values.data.mapUsers| nindent 4 | trim }} -{{- end }} - -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/values.yaml b/operations/deployment/helm/aws-auth/values.yaml deleted file mode 100644 index 90f836a..0000000 --- a/operations/deployment/helm/aws-auth/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -configmap: - enabled: true - name: aws-auth -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file From 0dabfc1b53e5ec8bf02a7daf5336ce2836e340c6 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 22:51:56 +0530 Subject: [PATCH 12/99] Added two 
charts --- .../deployment/helm/aws-auth/Chart.yaml | 5 + .../{grafana => aws-auth}/bitops.config.yaml | 4 +- .../helm/aws-auth/example.bitops.config.yaml | 15 + .../templates/aws-auth.yaml} | 0 .../deployment/helm/aws-auth/values.yaml | 21 + .../deployment/helm/aws-auth2/Chart.yaml | 5 + .../helm/aws-auth2/bitops.config.yaml | 14 + .../helm/aws-auth2/example.bitops.config.yaml | 15 + .../helm/aws-auth2/templates/aws-auth.yaml | 22 + .../deployment/helm/aws-auth2/values.yaml | 21 + operations/deployment/helm/grafana/Chart.yaml | 18 - operations/deployment/helm/grafana/ENV_FILE | 2 - .../helm/grafana/example.bitops.config.yaml | 30 - .../deployment/helm/grafana/values.yaml | 1304 ----------------- 14 files changed, 120 insertions(+), 1356 deletions(-) create mode 100644 operations/deployment/helm/aws-auth/Chart.yaml rename operations/deployment/helm/{grafana => aws-auth}/bitops.config.yaml (77%) create mode 100644 operations/deployment/helm/aws-auth/example.bitops.config.yaml rename operations/deployment/helm/{grafana/templates/grafana.yaml => aws-auth/templates/aws-auth.yaml} (100%) create mode 100644 operations/deployment/helm/aws-auth/values.yaml create mode 100644 operations/deployment/helm/aws-auth2/Chart.yaml create mode 100644 operations/deployment/helm/aws-auth2/bitops.config.yaml create mode 100644 operations/deployment/helm/aws-auth2/example.bitops.config.yaml create mode 100644 operations/deployment/helm/aws-auth2/templates/aws-auth.yaml create mode 100644 operations/deployment/helm/aws-auth2/values.yaml delete mode 100644 operations/deployment/helm/grafana/Chart.yaml delete mode 100644 operations/deployment/helm/grafana/ENV_FILE delete mode 100644 operations/deployment/helm/grafana/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/grafana/values.yaml diff --git a/operations/deployment/helm/aws-auth/Chart.yaml b/operations/deployment/helm/aws-auth/Chart.yaml new file mode 100644 index 0000000..db7b2d3 --- /dev/null +++ b/operations/deployment/helm/aws-auth/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: "1.0" +description: aws-auth configmap for EKS +name: aws-auth +version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/aws-auth/bitops.config.yaml similarity index 77% rename from operations/deployment/helm/grafana/bitops.config.yaml rename to operations/deployment/helm/aws-auth/bitops.config.yaml index 5ed1eb3..60d7548 100644 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ b/operations/deployment/helm/aws-auth/bitops.config.yaml @@ -1,13 +1,13 @@ helm: cli: - namespace: grafana + namespace: kube-system timeout: 200s debug: true atomic: false force: false dry-run: false options: - release-name: grafana + release-name: aws-auth skip-deploy: false k8s: fetch: diff --git a/operations/deployment/helm/aws-auth/example.bitops.config.yaml b/operations/deployment/helm/aws-auth/example.bitops.config.yaml new file mode 100644 index 0000000..eddb4b4 --- /dev/null +++ b/operations/deployment/helm/aws-auth/example.bitops.config.yaml @@ -0,0 +1,15 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true + cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/grafana.yaml b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml similarity index 100% 
rename from operations/deployment/helm/grafana/templates/grafana.yaml rename to operations/deployment/helm/aws-auth/templates/aws-auth.yaml diff --git a/operations/deployment/helm/aws-auth/values.yaml b/operations/deployment/helm/aws-auth/values.yaml new file mode 100644 index 0000000..90f836a --- /dev/null +++ b/operations/deployment/helm/aws-auth/values.yaml @@ -0,0 +1,21 @@ +configmap: + enabled: true + name: aws-auth +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/Chart.yaml b/operations/deployment/helm/aws-auth2/Chart.yaml new file mode 100644 index 0000000..5ea3261 --- /dev/null +++ b/operations/deployment/helm/aws-auth2/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: "1.0" +description: aws-auth configmap for EKS +name: aws-auth2 +version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/bitops.config.yaml b/operations/deployment/helm/aws-auth2/bitops.config.yaml new file mode 100644 index 0000000..cdcd9a5 --- /dev/null +++ b/operations/deployment/helm/aws-auth2/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth2 + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/example.bitops.config.yaml b/operations/deployment/helm/aws-auth2/example.bitops.config.yaml new file mode 100644 index 0000000..3a88b77 --- /dev/null +++ b/operations/deployment/helm/aws-auth2/example.bitops.config.yaml @@ -0,0 +1,15 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth2 + skip-deploy: false + k8s: + fetch: + kubeconfig: true + cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth2/templates/aws-auth.yaml new file mode 100644 index 0000000..e41dea6 --- /dev/null +++ b/operations/deployment/helm/aws-auth2/templates/aws-auth.yaml @@ -0,0 +1,22 @@ +{{- if .Values.configmap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.configmap.name }} +data: +{{- if .Values.data.mapAccounts }} + mapAccounts: | + {{ .Values.data.mapAccounts| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapRoles }} + mapRoles: | + {{ .Values.data.mapRoles| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapUsers }} + mapUsers: | + {{ .Values.data.mapUsers| nindent 4 | trim }} +{{- end }} + +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/values.yaml b/operations/deployment/helm/aws-auth2/values.yaml new file mode 100644 index 0000000..11d37a7 --- /dev/null +++ b/operations/deployment/helm/aws-auth2/values.yaml @@ -0,0 +1,21 @@ +configmap: + enabled: true + name: aws-auth2 +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and 
bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml deleted file mode 100644 index b929aff..0000000 --- a/operations/deployment/helm/grafana/Chart.yaml +++ /dev/null @@ -1,18 +0,0 @@ -apiVersion: v2 -name: grafana -version: 7.0.13 -appVersion: 10.2.2 -kubeVersion: "^1.8.0-0" -description: The leading tool for querying and visualizing time series and metrics. -home: https://grafana.com -icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 -sources: - - https://github.com/grafana/grafana - - https://github.com/grafana/helm-charts -annotations: - "artifacthub.io/license": AGPL-3.0-only - "artifacthub.io/links": | - - name: Chart Source - url: https://github.com/grafana/helm-charts - - name: Upstream Project - url: https://github.com/grafana/grafana \ No newline at end of file diff --git a/operations/deployment/helm/grafana/ENV_FILE b/operations/deployment/helm/grafana/ENV_FILE deleted file mode 100644 index 8ce79ba..0000000 --- a/operations/deployment/helm/grafana/ENV_FILE +++ /dev/null @@ -1,2 +0,0 @@ -GF_SECURITY_ADMIN_USER=admin -GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file diff --git a/operations/deployment/helm/grafana/example.bitops.config.yaml b/operations/deployment/helm/grafana/example.bitops.config.yaml deleted file mode 100644 index 40e4319..0000000 --- a/operations/deployment/helm/grafana/example.bitops.config.yaml +++ /dev/null @@ -1,30 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: grafana - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster - -# helm: -# cli: -# namespace: kube-system -# timeout: 200s -# debug: true -# atomic: false -# force: false -# dry-run: false -# options: -# release-name: grafana -# skip-deploy: false -# k8s: -# fetch: -# kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml deleted file mode 100644 index 6278369..0000000 --- a/operations/deployment/helm/grafana/values.yaml +++ /dev/null @@ -1,1304 +0,0 @@ -global: - # -- Overrides the Docker registry globally for all images - imageRegistry: null - - # To help compatibility with other charts which use global.imagePullSecrets. - # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). - # Can be tempalted. 
- # global: - # imagePullSecrets: - # - name: pullSecret1 - # - name: pullSecret2 - # or - # global: - # imagePullSecrets: - # - pullSecret1 - # - pullSecret2 - imagePullSecrets: [] - -rbac: - create: true - ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) - # useExistingRole: name-of-some-role - # useExistingClusterRole: name-of-some-clusterRole - pspEnabled: false - pspUseAppArmor: false - namespaced: false - extraRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] - extraClusterRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] -serviceAccount: - create: true - name: - nameTest: - ## ServiceAccount labels. - labels: {} -## Service account annotations. Can be templated. -# annotations: -# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here - autoMount: true - -replicas: 1 - -## Create a headless service for the deployment -headlessService: false - -## Create HorizontalPodAutoscaler object for deployment type -# -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - -## See `kubectl explain poddisruptionbudget.spec` for more -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} -# apiVersion: "" -# minAvailable: 1 -# maxUnavailable: 1 - -## See `kubectl explain deployment.spec.strategy` for more -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -deploymentStrategy: - type: RollingUpdate - -readinessProbe: - httpGet: - path: /api/health - port: 3000 - -livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 60 - timeoutSeconds: 30 - failureThreshold: 10 - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: "default-scheduler" - -image: - # -- The Docker registry - registry: docker.io - # -- Docker image repository - repository: grafana/grafana - # Overrides the Grafana image tag whose default is the chart appVersion - tag: "" - sha: "" - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Can be templated. - ## - pullSecrets: [] - # - myRegistrKeySecretName - -testFramework: - enabled: true - image: - # -- The Docker registry - registry: docker.io - repository: bats/bats - tag: "v1.4.1" - imagePullPolicy: IfNotPresent - securityContext: {} - -securityContext: - runAsNonRoot: true - runAsUser: 472 - runAsGroup: 472 - fsGroup: 472 - -containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - -# Enable creating the grafana configmap -createConfigmap: true - -# Extra configmaps to mount in grafana pods -# Values are templated. -extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # subPath: certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - -extraEmptyDirMounts: [] - # - name: provisioning-notifiers - # mountPath: /etc/grafana/provisioning/notifiers - - -# Apply extra labels to common labels. 
-extraLabels: {} - -## Assign a PriorityClassName to pods if set -# priorityClassName: - -downloadDashboardsImage: - # -- The Docker registry - registry: docker.io - repository: curlimages/curl - tag: 7.85.0 - sha: "" - pullPolicy: IfNotPresent - -downloadDashboards: - env: {} - envFromSecret: "" - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## Pod Annotations -# podAnnotations: {} - -## Pod Labels -# podLabels: {} - -podPortName: grafana -gossipPortName: gossip -## Deployment annotations -# annotations: {} - -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - enabled: true - type: ClusterIP - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - ## Service annotations. Can be templated. - annotations: {} - labels: {} - portName: service - # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" - appProtocol: "" - -serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - labels: {} - interval: 30s - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - metricRelabelings: [] - targetLabels: [] - -extraExposePorts: [] - # - name: keycloak - # port: 8080 - # targetPort: 8080 - -# overrides pod.spec.hostAliases in the grafana deployment's pods -hostAliases: [] - # - ip: "1.2.3.4" - # hostnames: - # - "my.host.com" - -ingress: - enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - # Values can be templated - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - - # pathType is only for k8s >= 1.1= - pathType: Prefix - - hosts: - - chart-example.local - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## Or for k8s > 1.19 - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: use-annotation - - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Topology Spread Constraints -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -## -topologySpreadConstraints: [] - -## Additional init containers (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ -## -extraInitContainers: [] - -## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod -extraContainers: "" -# extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - -provider=github -# - -client-id= -# - -client-secret= -# - -github-org= -# - -email-domain=* -# - -cookie-secret= -# - -http-address=http://0.0.0.0:4181 -# - -upstream-url=http://127.0.0.1:3000 -# ports: -# - name: proxy-web -# containerPort: 4181 - -## Volumes that can be used in init containers that will not be mounted to deployment pods -extraContainerVolumes: [] -# - name: volume-from-secret -# secret: -# secretName: secret-to-mount -# - name: empty-dir-volume -# emptyDir: {} - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - type: pvc - enabled: false - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - # annotations: {} - finalizers: - - kubernetes.io/pvc-protection - # selectorLabels: {} - ## Sub-directory of the PV to mount. Can be templated. - # subPath: "" - ## Name of an existing PVC. Can be templated. - # existingClaim: - ## Extra labels to apply to a PVC. 
- extraPvcLabels: {} - - ## If persistence is not enabled, this allows to mount the - ## local storage in-memory to improve performance - ## - inMemory: - enabled: false - ## The maximum usage on memory medium EmptyDir would be - ## the minimum value between the SizeLimit specified - ## here and the sum of memory limits of all containers in a pod - ## - # sizeLimit: 300Mi - -initChownData: - ## If false, data ownership will not be reset at startup - ## This allows the grafana-server to be run with an arbitrary user - ## - enabled: true - - ## initChownData container image - ## - image: - # -- The Docker registry - registry: docker.io - repository: library/busybox - tag: "1.31.1" - sha: "" - pullPolicy: IfNotPresent - - ## initChownData resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - securityContext: - runAsNonRoot: false - runAsUser: 0 - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - CHOWN - -# Administrator credentials when not using an existing secret (see below) -adminUser: admin -# adminPassword: strongpassword - -# Use an existing secret for the admin user. -admin: - ## Name of the secret. Can be templated. - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - -## Define command to be executed at startup by grafana container -## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) -## Default is "run.sh" as defined in grafana's Dockerfile -# command: -# - "sh" -# - "/run.sh" - -## Optionally define args if command is used -## Needed if using `hashicorp/envconsul` to manage secrets -## By default no arguments are set -# args: -# - "-secret" -# - "secret/grafana" -# - "./grafana" - -## Extra environment variables that will be pass onto deployment pods -## -## to provide grafana with access to CloudWatch on AWS EKS: -## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) -## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the -## same oidc eks provider as noted before (same as the existing line) -## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name -## -## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", -## -## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess -## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) -## -## env: -## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here -## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token -## AWS_REGION: us-east-1 -## -## 5. uncomment the EKS section in extraSecretMounts: below -## 6. uncomment the annotation section in the serviceAccount: above -## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn - -env: {} - -## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core -## Renders in container spec as: -## env: -## ... 
-## - name: -## valueFrom: -## -envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc. Value is templated. -envFromSecret: "" - -## Sensible environment variables that will be rendered as new secret object -## This can be useful for auth tokens, etc. -## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm -## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function -envRenderSecret: {} - -## The names of secrets in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. -## Name is templated. -envFromSecrets: [] -## - name: secret-name -## optional: true - -## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. -## Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core -envFromConfigMaps: [] -## - name: configmap-name -## optional: true - -# Inject Kubernetes services as environment variables. -# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables -enableServiceLinks: true - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - # subPath: "" - # - # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) - # - name: aws-iam-token - # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount - # readOnly: true - # projected: - # defaultMode: 420 - # sources: - # - serviceAccountToken: - # audience: sts.amazonaws.com - # expirationSeconds: 86400 - # path: token - # - # for CSI e.g. Azure Key Vault use the following - # - name: secrets-store-inline - # mountPath: /run/secrets - # readOnly: true - # csi: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "akv-grafana-spc" - # nodePublishSecretRef: # Only required when using service principal mode - # name: grafana-akv-creds # Only required when using service principal mode - -## Additional grafana server volume mounts -# Defines additional volume mounts. -extraVolumeMounts: [] - # - name: extra-volume-0 - # mountPath: /mnt/volume0 - # readOnly: true - # existingClaim: volume-claim - # - name: extra-volume-1 - # mountPath: /mnt/volume1 - # readOnly: true - # hostPath: /usr/shared/ - # - name: grafana-secrets - # mountPath: /mnt/volume2 - # csi: true - # data: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "grafana-env-spc" - -## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request -lifecycleHooks: {} - # postStart: - # exec: - # command: [] - -## Pass the plugins you want installed as a list. 
-## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - ## You can also use other plugin download URL, as long as they are valid zip files, - ## and specify the name of the plugin after the semicolon. Like this: - # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource - -## Configure grafana datasources -## ref: http://docs.grafana.org/administration/provisioning/#datasources -## -datasources: {} -# datasources.yaml: -# apiVersion: 1 -# datasources: -# - name: Prometheus -# type: prometheus -# url: http://prometheus-prometheus-server -# access: proxy -# isDefault: true -# - name: CloudWatch -# type: cloudwatch -# access: proxy -# uid: cloudwatch -# editable: false -# jsonData: -# authType: default -# defaultRegion: us-east-1 -# deleteDatasources: [] -# - name: Prometheus - -## Configure grafana alerting (can be templated) -## ref: http://docs.grafana.org/administration/provisioning/#alerting -## -alerting: {} - # rules.yaml: - # apiVersion: 1 - # groups: - # - orgId: 1 - # name: '{{ .Chart.Name }}_my_rule_group' - # folder: my_first_folder - # interval: 60s - # rules: - # - uid: my_id_1 - # title: my_first_rule - # condition: A - # data: - # - refId: A - # datasourceUid: '-100' - # model: - # conditions: - # - evaluator: - # params: - # - 3 - # type: gt - # operator: - # type: and - # query: - # params: - # - A - # reducer: - # type: last - # type: query - # datasource: - # type: __expr__ - # uid: '-100' - # expression: 1==0 - # intervalMs: 1000 - # maxDataPoints: 43200 - # refId: A - # type: math - # dashboardUid: my_dashboard - # panelId: 123 - # noDataState: Alerting - # for: 60s - # annotations: - # some_key: some_value - # labels: - # team: sre_team_1 - # contactpoints.yaml: - # secret: - # apiVersion: 1 - # contactPoints: - # - orgId: 1 - # name: cp_1 - # receivers: - # - uid: first_uid - # type: pagerduty - # settings: - # integrationKey: XXX - # severity: critical - # class: ping failure - # component: Grafana - # group: app-stack - # summary: | - # {{ `{{ include "default.message" . }}` }} - -## Configure notifiers -## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels -## -notifiers: {} -# notifiers.yaml: -# notifiers: -# - name: email-notifier -# type: email -# uid: email1 -# # either: -# org_id: 1 -# # or -# org_name: Main Org. -# is_default: true -# settings: -# addresses: an_email_address@example.com -# delete_notifiers: - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: {} -# dashboardproviders.yaml: -# apiVersion: 1 -# providers: -# - name: 'default' -# orgId: 1 -# folder: '' -# type: file -# disableDeletion: false -# editable: true -# options: -# path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. 
-## -dashboards: {} - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # token: '' - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # token: '' - # b64content: true - # local-dashboard-gitlab: - # url: https://example.com/repository/test-gitlab.json - # gitlabToken: '' - # local-dashboard-bitbucket: - # url: https://example.com/repository/test-bitbucket.json - # bearerToken: '' - # local-dashboard-azure: - # url: https://example.com/repository/test-azure.json - # basic: '' - # acceptHeader: '*/*' - -## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. -## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. -## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -configmap: - enabled: true - name: grafana - -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" -## -dashboardsConfigMaps: {} -# default: "" - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/ - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net - server: - domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" -## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://api.github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. 
- existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. - existingSecret: "" - userKey: "user" - passwordKey: "password" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: - # -- The Docker registry - registry: quay.io - repository: kiwigrid/k8s-sidecar - tag: 1.25.2 - sha: "" - imagePullPolicy: IfNotPresent - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - # skipTlsVerify Set to true to skip tls verification for kube api calls - # skipTlsVerify: true - enableUniqueFilenames: false - readinessProbe: {} - livenessProbe: {} - # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO - # logLevel: INFO - alerts: - enabled: false - # Additional environment variables for the alerts sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with alert are marked with - label: grafana_alert - # value of label that the configmaps with alert are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for alert config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" - # Absolute path to shell script to execute after a alert got reloaded - script: null - skipReload: false - # This is needed if skipReload is true, to load any alerts defined at startup time. - # Deploy the alert sidecar as an initContainer. 
- initAlerts: false - # Additional alert sidecar volume mounts - extraMounts: [] - # Sets the size limit of the alert sidecar emptyDir volume - sizeLimit: {} - dashboards: - enabled: false - # Additional environment variables for the dashboards sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - SCProvider: true - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # value of label that the configmaps with dashboards are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) - folder: /tmp/dashboards - # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead - defaultFolderName: null - # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces. - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # If specified, the sidecar will look for annotation with this name to create folder and put graph here. - # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. - folderAnnotation: null - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" - # Absolute path to shell script to execute after a configmap got reloaded - script: null - skipReload: false - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # provider configuration that lets grafana manage the dashboards - provider: - # name of the provider, should be unique - name: sidecarProvider - # orgid as configured in grafana - orgid: 1 - # folder in which the dashboards should be imported in grafana - folder: '' - # type of the provider - type: file - # disableDelete to activate a import-only behaviour - disableDelete: false - # allow updating provisioned dashboards from the UI - allowUiUpdates: false - # allow Grafana to replicate dashboard structure from filesystem - foldersFromFilesStructure: false - # Additional dashboard sidecar volume mounts - extraMounts: [] - # Sets the size limit of the dashboard sidecar emptyDir volume - sizeLimit: {} - datasources: - enabled: false - # Additional environment variables for the datasourcessidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. 
- # ignoreAlreadyProcessed: true - # label that the configmaps with datasources are marked with - label: grafana_datasource - # value of label that the configmaps with datasources are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for datasource config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload datasources - reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" - # Absolute path to shell script to execute after a datasource got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any datasources defined at startup time. - initDatasources: false - # Sets the size limit of the datasource sidecar emptyDir volume - sizeLimit: {} - plugins: - enabled: false - # Additional environment variables for the plugins sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with plugins are marked with - label: grafana_plugin - # value of label that the configmaps with plugins are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for plugin config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) 
- # watchClientTimeout: 60 - # - # Endpoint to send request to reload plugins - reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" - # Absolute path to shell script to execute after a plugin got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any plugins defined at startup time. - initPlugins: false - # Sets the size limit of the plugin sidecar emptyDir volume - sizeLimit: {} - notifiers: - enabled: false - # Additional environment variables for the notifierssidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with notifiers are marked with - label: grafana_notifier - # value of label that the configmaps with notifiers are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for notifier config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload notifiers - reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" - # Absolute path to shell script to execute after a notifier got reloaded - script: null - skipReload: false - # Deploy the notifier sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any notifiers defined at startup time. - initNotifiers: false - # Sets the size limit of the notifier sidecar emptyDir volume - sizeLimit: {} - -## Override the deployment namespace -## -namespaceOverride: "" - -## Number of old ReplicaSets to retain -## -revisionHistoryLimit: 10 - -## Add a seperate remote image renderer deployment/service -imageRenderer: - deploymentStrategy: {} - # Enable the image-renderer deployment & service - enabled: false - replicas: 1 - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - image: - # -- The Docker registry - registry: docker.io - # image-renderer Image repository - repository: grafana/grafana-image-renderer - # image-renderer Image tag - tag: latest - # image-renderer Image sha (optional) - sha: "" - # image-renderer ImagePullPolicy - pullPolicy: Always - # extra environment variables - env: - HTTP_HOST: "0.0.0.0" - # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 - # RENDERING_MODE: clustered - # IGNORE_HTTPS_ERRORS: true - - ## "valueFrom" environment variable references that will be added to deployment pods. 
Name is templated. - ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core - ## Renders in container spec as: - ## env: - ## ... - ## - name: - ## valueFrom: - ## - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - - # image-renderer deployment serviceAccount - serviceAccountName: "" - # image-renderer deployment securityContext - securityContext: {} - # image-renderer deployment container securityContext - containerSecurityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - drop: ['ALL'] - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - ## image-renderer pod annotation - podAnnotations: {} - # image-renderer deployment Host Aliases - hostAliases: [] - # image-renderer deployment priority class - priorityClassName: '' - service: - # Enable the image-renderer service - enabled: true - # image-renderer service port name - portName: 'http' - # image-renderer service port used by both service and deployment - port: 8081 - targetPort: 8081 - # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" - appProtocol: "" - serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - labels: {} - interval: 1m - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels - targetLabels: [] - # - targetLabel1 - # - targetLabel2 - # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana - grafanaProtocol: http - # In case a sub_path is used this needs to be added to the image renderer callback - grafanaSubPath: "" - # name of the image-renderer port on the pod - podPortName: http - # number of image-renderer replica sets to keep - revisionHistoryLimit: 10 - networkPolicy: - # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods - limitIngress: true - # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods - limitEgress: false - # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) - extraIngressSelectors: [] - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - ## Node labels for pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - # - nodeSelector: {} - - ## Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## Affinity for pod assignment (evaluated as template) - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: "default-scheduler" - -networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
- ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to grafana port defined. - ## When true, grafana will accept connections from any source - ## (with the correct destination port). - ## - ingress: true - ## @param networkPolicy.ingress When true enables the creation - ## an ingress network policy - ## - allowExternal: true - ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed - ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the grafana. - ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. - ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} - ## - explicitNamespacesSelector: {} - ## - ## - ## - ## - ## - ## - egress: - ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be - ## created allowing grafana to connect to external data sources from kubernetes cluster. - enabled: false - ## - ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked - ## for all pods in the grafana namespace. - blockDNSResolution: false - ## - ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress - ports: [] - ## Add ports to the egress by specifying - port: - ## E.X. - ## - port: 80 - ## - port: 443 - ## - ## @param networkPolicy.egress.to Allow egress traffic to specific destinations - to: [] - ## Add destinations to the egress by specifying - ipBlock: - ## E.X. 
- ## to: - ## - namespaceSelector: - ## matchExpressions: - ## - {key: role, operator: In, values: [grafana]} - ## - ## - ## - ## - ## - -# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option -enableKubeBackwardCompatibility: false -useStatefulSet: false -# Create a dynamic manifests via values: -extraObjects: [] - # - apiVersion: "kubernetes-client.io/v1" - # kind: ExternalSecret - # metadata: - # name: grafana-secrets - # spec: - # backendType: gcpSecretsManager - # data: - # - key: grafana-admin-password - # name: adminPassword \ No newline at end of file From 411eb4bddfc7d979a6ee922bee645cd17e81c149 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 22:59:30 +0530 Subject: [PATCH 13/99] Test new chart --- .../deployment/helm/aws-auth2/Chart.yaml | 5 - .../helm/aws-auth2/bitops.config.yaml | 14 - .../helm/aws-auth2/example.bitops.config.yaml | 15 - .../helm/aws-auth2/templates/aws-auth.yaml | 22 - .../deployment/helm/aws-auth2/values.yaml | 21 - .../deployment/helm/grafana/.helmignore | 23 + operations/deployment/helm/grafana/Chart.yaml | 33 + operations/deployment/helm/grafana/README.md | 757 ++++++++++ .../helm/grafana/ci/default-values.yaml | 1 + .../helm/grafana/ci/with-affinity-values.yaml | 16 + .../ci/with-dashboard-json-values.yaml | 53 + .../grafana/ci/with-dashboard-values.yaml | 19 + .../ci/with-extraconfigmapmounts-values.yaml | 7 + .../ci/with-image-renderer-values.yaml | 19 + .../helm/grafana/ci/with-persistence.yaml | 3 + .../grafana/dashboards/custom-dashboard.json | 1 + .../helm/grafana/templates/NOTES.txt | 55 + .../helm/grafana/templates/_helpers.tpl | 227 +++ .../helm/grafana/templates/_pod.tpl | 1276 ++++++++++++++++ .../helm/grafana/templates/clusterrole.yaml | 25 + .../grafana/templates/clusterrolebinding.yaml | 24 + .../helm/grafana/templates/configSecret.yaml | 43 + .../configmap-dashboard-provider.yaml | 29 + .../helm/grafana/templates/configmap.yaml | 144 ++ .../templates/dashboards-json-configmap.yaml | 38 + .../helm/grafana/templates/deployment.yaml | 51 + .../grafana/templates/extra-manifests.yaml | 4 + .../grafana/templates/headless-service.yaml | 22 + .../helm/grafana/templates/hpa.yaml | 52 + .../templates/image-renderer-deployment.yaml | 131 ++ .../grafana/templates/image-renderer-hpa.yaml | 47 + .../image-renderer-network-policy.yaml | 79 + .../templates/image-renderer-service.yaml | 31 + .../image-renderer-servicemonitor.yaml | 48 + .../helm/grafana/templates/ingress.yaml | 78 + .../helm/grafana/templates/networkpolicy.yaml | 61 + .../templates/poddisruptionbudget.yaml | 22 + .../grafana/templates/podsecuritypolicy.yaml | 49 + .../helm/grafana/templates/pvc.yaml | 36 + .../helm/grafana/templates/role.yaml | 32 + .../helm/grafana/templates/rolebinding.yaml | 25 + .../helm/grafana/templates/secret-env.yaml | 14 + .../helm/grafana/templates/secret.yaml | 26 + .../helm/grafana/templates/service.yaml | 58 + .../grafana/templates/serviceaccount.yaml | 17 + .../grafana/templates/servicemonitor.yaml | 52 + .../helm/grafana/templates/statefulset.yaml | 56 + .../templates/tests/test-configmap.yaml | 20 + .../tests/test-podsecuritypolicy.yaml | 32 + .../grafana/templates/tests/test-role.yaml | 17 + .../templates/tests/test-rolebinding.yaml | 20 + .../templates/tests/test-serviceaccount.yaml | 12 + .../helm/grafana/templates/tests/test.yaml | 49 + .../deployment/helm/grafana/values.yaml | 1282 +++++++++++++++++ 54 files changed, 5216 insertions(+), 77 deletions(-) delete mode 100644 
operations/deployment/helm/aws-auth2/Chart.yaml delete mode 100644 operations/deployment/helm/aws-auth2/bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth2/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth2/templates/aws-auth.yaml delete mode 100644 operations/deployment/helm/aws-auth2/values.yaml create mode 100644 operations/deployment/helm/grafana/.helmignore create mode 100644 operations/deployment/helm/grafana/Chart.yaml create mode 100644 operations/deployment/helm/grafana/README.md create mode 100644 operations/deployment/helm/grafana/ci/default-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-affinity-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-persistence.yaml create mode 100644 operations/deployment/helm/grafana/dashboards/custom-dashboard.json create mode 100644 operations/deployment/helm/grafana/templates/NOTES.txt create mode 100644 operations/deployment/helm/grafana/templates/_helpers.tpl create mode 100644 operations/deployment/helm/grafana/templates/_pod.tpl create mode 100644 operations/deployment/helm/grafana/templates/clusterrole.yaml create mode 100644 operations/deployment/helm/grafana/templates/clusterrolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/configSecret.yaml create mode 100644 operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml create mode 100644 operations/deployment/helm/grafana/templates/configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/deployment.yaml create mode 100644 operations/deployment/helm/grafana/templates/extra-manifests.yaml create mode 100644 operations/deployment/helm/grafana/templates/headless-service.yaml create mode 100644 operations/deployment/helm/grafana/templates/hpa.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-service.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml create mode 100644 operations/deployment/helm/grafana/templates/ingress.yaml create mode 100644 operations/deployment/helm/grafana/templates/networkpolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml create mode 100644 operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/pvc.yaml create mode 100644 operations/deployment/helm/grafana/templates/role.yaml create mode 100644 operations/deployment/helm/grafana/templates/rolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/secret-env.yaml create mode 100644 operations/deployment/helm/grafana/templates/secret.yaml create mode 100644 
operations/deployment/helm/grafana/templates/service.yaml create mode 100644 operations/deployment/helm/grafana/templates/serviceaccount.yaml create mode 100644 operations/deployment/helm/grafana/templates/servicemonitor.yaml create mode 100644 operations/deployment/helm/grafana/templates/statefulset.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-role.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test.yaml create mode 100644 operations/deployment/helm/grafana/values.yaml diff --git a/operations/deployment/helm/aws-auth2/Chart.yaml b/operations/deployment/helm/aws-auth2/Chart.yaml deleted file mode 100644 index 5ea3261..0000000 --- a/operations/deployment/helm/aws-auth2/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth2 -version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/bitops.config.yaml b/operations/deployment/helm/aws-auth2/bitops.config.yaml deleted file mode 100644 index cdcd9a5..0000000 --- a/operations/deployment/helm/aws-auth2/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth2 - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/example.bitops.config.yaml b/operations/deployment/helm/aws-auth2/example.bitops.config.yaml deleted file mode 100644 index 3a88b77..0000000 --- a/operations/deployment/helm/aws-auth2/example.bitops.config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth2 - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth2/templates/aws-auth.yaml deleted file mode 100644 index e41dea6..0000000 --- a/operations/deployment/helm/aws-auth2/templates/aws-auth.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.configmap.name }} -data: -{{- if .Values.data.mapAccounts }} - mapAccounts: | - {{ .Values.data.mapAccounts| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapRoles }} - mapRoles: | - {{ .Values.data.mapRoles| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapUsers }} - mapUsers: | - {{ .Values.data.mapUsers| nindent 4 | trim }} -{{- end }} - -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth2/values.yaml b/operations/deployment/helm/aws-auth2/values.yaml deleted file mode 100644 index 11d37a7..0000000 --- a/operations/deployment/helm/aws-auth2/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -configmap: - enabled: true - name: aws-auth2 -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # 
ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/grafana/.helmignore b/operations/deployment/helm/grafana/.helmignore new file mode 100644 index 0000000..8cade13 --- /dev/null +++ b/operations/deployment/helm/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml new file mode 100644 index 0000000..387f2ab --- /dev/null +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -0,0 +1,33 @@ +apiVersion: v2 +name: grafana +version: 7.0.13 +appVersion: 10.2.2 +kubeVersion: "^1.8.0-0" +description: The leading tool for querying and visualizing time series and metrics. +home: https://grafana.com +icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 +sources: + - https://github.com/grafana/grafana + - https://github.com/grafana/helm-charts +annotations: + "artifacthub.io/license": AGPL-3.0-only + "artifacthub.io/links": | + - name: Chart Source + url: https://github.com/grafana/helm-charts + - name: Upstream Project + url: https://github.com/grafana/grafana +maintainers: + - name: zanhsieh + email: zanhsieh@gmail.com + - name: rtluckie + email: rluckie@cisco.com + - name: maorfr + email: maor.friedman@redhat.com + - name: Xtigyro + email: miroslav.hadzhiev@gmail.com + - name: torstenwalter + email: mail@torstenwalter.de +type: application +keywords: + - monitoring + - metric diff --git a/operations/deployment/helm/grafana/README.md b/operations/deployment/helm/grafana/README.md new file mode 100644 index 0000000..5420545 --- /dev/null +++ b/operations/deployment/helm/grafana/README.md @@ -0,0 +1,757 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. 
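+
+As a sketch of what the upgrade itself looks like (the release name `my-release` and namespace `monitoring` are placeholders; the `--force` flag is the one called out in the 5.0.0 note above):
+
+```console
+helm repo update
+helm upgrade my-release grafana/grafana --namespace monitoring --force
+```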
+ +### To 7.0.0 + +For consistency with other Helm charts, the `global.image.registry` parameter was renamed +to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action +is required on upgrade. If you were previously setting `global.image.registry`, you will +need to instead set `global.imageRegistry`. + +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.registry` | Image registry | `docker.io` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` | +| `image.sha` | Image sha (optional) | `` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` | +| `service.enabled` | Enable grafana service | `true` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.appProtocol` | Adds the appProtocol field to the service | `` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations (can be templated) | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `service.externalTrafficPolicy` | change the default externalTrafficPolicy | `nil` | +| `headlessService` | Create a headless service | `false` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. 
Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `ingress.ingressClassName` | Ingress Class Name. MAY be required for Kubernetes versions >= 1.18 | `""` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` | +| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.registry` | init-chown-data container image registry | `docker.io` | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | +| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. 
Can be templated | `[]` | +| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` | +| `enableServiceLinks` | Inject Kubernetes services as environment variables. | `true` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `createConfigmap` | Enable creating the grafana configmap | `true` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `alerting` | Configure grafana alerting (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `global.imageRegistry` | Global image pull registry for all images. | `null` | +| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | +| `sidecar.image.registry` | Sidecar image registry | `quay.io` | +| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.24.6` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.securityContext` | Sidecar securityContext | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. 
| `false` | +| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` | +| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` | +| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` | +| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` | +| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` | +| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | +| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | +| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. 
| `/tmp/dashboards` | +| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` | +| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` | +| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` | +| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | +| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` | +| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | +| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | +| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` | +| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. 
| `nil` | +| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` | +| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | +| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.labels` | ServiceAccount labels | `{}` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `args` | Define additional args if command is used | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image.registry` | `test-framework` image registry. | `docker.io` | +| `testFramework.image.repository` | `test-framework` image repository. | `bats/bats` | +| `testFramework.image.tag` | `test-framework` image tag. | `v1.4.1` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. 
| `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.registry` | Curl docker image registry | `docker.io` | +| `downloadDashboardsImage.repository` | Curl docker image repository | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.registry` | image-renderer Image registry | `docker.io` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. 
Can be templated | `{}` | +| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | +| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | +| `imageRenderer.podAnnotations ` | image-renderer image-renderer pod annotation | `{}` | +| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | +| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | +| `imageRenderer.service.portName` | image-renderer service port name | `http` | +| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` | +| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` | +| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` | +| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | +| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | +| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | +| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | +| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | +| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` | +| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` | +| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` | +| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` | +| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` | +| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` | +| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's defintion version below 1.13 doesn't have the enableServiceLinks option | `false` | + +### Example ingress with path + +With grafana 6.3 and above + +```yaml +grafana.ini: + server: + domain: monitoring.example.com + root_url: "%(protocol)s://%(domain)s/grafana" + serve_from_sub_path: true +ingress: + enabled: true + hosts: + - "monitoring.example.com" + path: "/grafana" +``` + +### Example of extraVolumeMounts + +Volume can be type persistentVolumeClaim or hostPath but not both at same time. +If neither existingClaim or hostPath argument is given then type is emptyDir. + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false + - name: dashboards + mountPath: /var/lib/grafana/dashboards + hostPath: /usr/shared/grafana/dashboards + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. 
Below are some examples and explanations as to how to use each method:
+
+```yaml
+dashboards:
+  default:
+    some-dashboard:
+      json: |
+        {
+          "annotations":
+
+          ...
+          # Complete json file here
+          ...
+
+          "title": "Some Dashboard",
+          "uid": "abcd1234",
+          "version": 1
+        }
+    custom-dashboard:
+      # This is a path to a file inside the dashboards directory inside the chart directory
+      file: dashboards/custom-dashboard.json
+    prometheus-stats:
+      # Ref: https://grafana.com/dashboards/2
+      gnetId: 2
+      revision: 2
+      datasource: Prometheus
+    loki-dashboard-quick-search:
+      gnetId: 12019
+      revision: 2
+      datasource:
+        - name: DS_PROMETHEUS
+          value: Prometheus
+        - name: DS_LOKI
+          value: Loki
+    local-dashboard:
+      url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
+```
+
+## BASE64 dashboards
+
+Dashboards can be stored on a server that does not return JSON directly and instead returns a Base64-encoded file (e.g. Gerrit).
+A parameter has been added to the url use case: if you set a b64content value equal to true after the url entry, Base64 decoding is applied before the file is saved to disk.
+If this entry is not set or is equal to false, no decoding is applied to the file before saving it to disk.
+
+### Gerrit use case
+
+The Gerrit API for downloading files has the following schema: where {project-name} and
+{file-id} usually contain '/' in their values, so they MUST be replaced by %2F. If project-name is user/repo, branch-id is master and file-id is dir1/dir2/dashboard,
+the url value is
+
+## Sidecar for dashboards
+
+If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
+pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
+a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
+to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
+dashboards are deleted/updated.
+
+A recommendation is to use one configmap per dashboard, as reducing the number of dashboards inside
+a single configmap is currently not properly mirrored in grafana.
+
+Example dashboard config:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sample-grafana-dashboard
+  labels:
+    grafana_dashboard: "1"
+data:
+  k8s-dashboard.json: |-
+    [...]
+```
+
+## Sidecar for datasources
+
+If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
+pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
+filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
+those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
+the data sources in grafana can be imported.
+
+If you want datasources to be reloaded in Grafana each time the config changes, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://..svc.cluster.local/api/admin/provisioning/datasources/reload`.
+
+Secrets are recommended over configmaps for this use case because datasources usually contain private
+data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
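+A minimal values sketch for the datasources sidecar described above. The release service name (`my-release-grafana`) and namespace (`monitoring`) in `reloadURL` are placeholders for illustration; the other keys use the defaults documented in the configuration table:
+
+```yaml
+sidecar:
+  datasources:
+    enabled: true
+    label: grafana_datasource   # label the sidecar watches for on secrets/configmaps
+    labelValue: ""              # accept any value of that label
+    resource: both              # look at both secrets and configmaps
+    skipReload: false           # let the sidecar call the reload endpoint after changes
+    # Placeholder URL: substitute your release's service name and namespace
+    reloadURL: "http://my-release-grafana.monitoring.svc.cluster.local/api/admin/provisioning/datasources/reload"
+```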
+ +Example values to add a postgres datasource as a kubernetes secret: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: grafana-datasources + labels: + grafana_datasource: 'true' # default value for: sidecar.datasources.label +stringData: + pg-db.yaml: |- + apiVersion: 1 + datasources: + - name: My pg db datasource + type: postgres + url: my-postgresql-db:5432 + user: db-readonly-user + secureJsonData: + password: 'SUperSEcretPa$$word' + jsonData: + database: my_datase + sslmode: 'disable' # disable/require/verify-ca/verify-full + maxOpenConns: 0 # Grafana v5.4+ + maxIdleConns: 2 # Grafana v5.4+ + connMaxLifetime: 14400 # Grafana v5.4+ + postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10 + timescaledb: false + # allow users to edit datasources from the UI. + editable: false +``` + +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. + is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. 
+ settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## Sidecar for alerting resources + +If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with +a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated, however, deletions are a little more complicated (see below). + +This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). + +To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)). +You can use either JSON or YAML format. + +Example config for an alert rule: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-alert + labels: + grafana_alert: "1" +data: + k8s-alert.yml: |- + apiVersion: 1 + groups: + - orgId: 1 + name: k8s-alert + [...] +``` + +To delete provisioned alert rules is a two step process, you need to delete the configmap which defined the alert rule +and then create a configuration which deletes the alert rule. + +Example deletion configuration: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: delete-sample-grafana-alert + namespace: monitoring + labels: + grafana_alert: "1" +data: + delete-k8s-alert.yml: |- + apiVersion: 1 + deleteRules: + - orgId: 1 + uid: 16624780-6564-45dc-825c-8bded4ad92d3 +``` + +## Statically provision alerting resources +If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly you could use the `alerting` config option instead of the sidecar option above. +This will grab the alerting config and apply it statically at build time for the helm file. + +There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +alerting: + team1-alert-rules.yaml: + file: alerting/team1/rules.yaml + team2-alert-rules.yaml: + file: alerting/team2/rules.yaml + team3-alert-rules.yaml: + file: alerting/team3/rules.yaml + notification-policies.yaml: + file: alerting/shared/notification-policies.yaml + notification-templates.yaml: + file: alerting/shared/notification-templates.yaml + contactpoints.yaml: + apiVersion: 1 + contactPoints: + - orgId: 1 + name: Slack channel + receivers: + - uid: default-receiver + type: slack + settings: + # Webhook URL to be filled in + url: "" + # We need to escape double curly braces for the tpl function. + text: '{{ `{{ template "default.message" . }}` }}' + title: '{{ `{{ template "default.title" . }}` }}' +``` + +The two possibilities for static alerting resource provisioning are: + +* Inlining the file contents as shown for contact points in the above example. 
+* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example. + +### Important notes on file provisioning + +* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning. +* The chart supports importing YAML and JSON files. +* The filename must be unique, otherwise one volume mount will overwrite the other. +* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. +* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. +* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. + +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. 
retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance + +### High Availability for unified alerting + +If you want to run Grafana in a high availability cluster you need to enable +the headless service by setting `headlessService: true` in your `values.yaml` +file. + +As next step you have to setup the `grafana.ini` in your `values.yaml` in a way +that it will make use of the headless service to obtain all the IPs of the +cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. + +```yaml +grafana.ini: + ... + unified_alerting: + enabled: true + ha_peers: {{ Name }}-headless:9094 + ha_listen_address: ${POD_IP}:9094 + ha_advertise_address: ${POD_IP}:9094 + + alerting: + enabled: false +``` diff --git a/operations/deployment/helm/grafana/ci/default-values.yaml b/operations/deployment/helm/grafana/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml new file mode 100644 index 0000000..f5b9b53 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml @@ -0,0 +1,16 @@ +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: kubernetes.io/hostname diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml new file mode 100644 index 0000000..e0c4e41 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml @@ -0,0 +1,53 @@ +dashboards: + my-provider: + my-awesome-dashboard: + # An empty but valid dashboard + json: | + { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.3.5" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [], + "schemaVersion": 19, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["5s"] + }, + "timezone": "", + "title": "Dummy Dashboard", + "uid": "IdcYQooWk", + "version": 1 + } + datasource: Prometheus diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml new file mode 100644 index 0000000..7b662c5 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml @@ -0,0 +1,19 @@ +dashboards: + my-provider: + my-awesome-dashboard: + gnetId: 10000 + revision: 1 + datasource: Prometheus +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'my-provider' + orgId: 1 + folder: '' + type: file + updateIntervalSeconds: 10 + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards/my-provider diff --git a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml new file mode 100644 index 0000000..5cc44a0 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml @@ -0,0 +1,7 @@ +extraConfigmapMounts: + - name: '{{ include "grafana.fullname" . }}' + configMap: '{{ include "grafana.fullname" . 
}}' + mountPath: /var/lib/grafana/dashboards/test-dashboard.json + # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap + subPath: grafana.ini + readOnly: true diff --git a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml new file mode 100644 index 0000000..32f3074 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml @@ -0,0 +1,19 @@ +podLabels: + customLableA: Aaaaa +imageRenderer: + enabled: true + env: + RENDERING_ARGS: --disable-gpu,--window-size=1280x758 + RENDERING_MODE: clustered + podLabels: + customLableB: Bbbbb + networkPolicy: + limitIngress: true + limitEgress: true + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 500m + memory: 50Mi diff --git a/operations/deployment/helm/grafana/ci/with-persistence.yaml b/operations/deployment/helm/grafana/ci/with-persistence.yaml new file mode 100644 index 0000000..b92ca02 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-persistence.yaml @@ -0,0 +1,3 @@ +persistence: + type: pvc + enabled: true diff --git a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/NOTES.txt b/operations/deployment/helm/grafana/templates/NOTES.txt new file mode 100644 index 0000000..d86419f --- /dev/null +++ b/operations/deployment/helm/grafana/templates/NOTES.txt @@ -0,0 +1,55 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo + + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + If you bind grafana to 80, please update values in values.yaml and reinstall: + ``` + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + + command: + - "setcap" + - "'cap_net_bind_service=+ep'" + - "/usr/sbin/grafana-server &&" + - "sh" + - "/run.sh" + ``` + Details refer to https://grafana.com/docs/installation/configuration/#http-port. + Or grafana would always crash. + + From outside the cluster, the server URL(s) are: + {{- range .Values.ingress.hosts }} + http://{{ . }} + {{- end }} +{{- else }} + Get the Grafana URL to visit by running these commands in the same shell: + {{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + {{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} + {{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000 + {{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/_helpers.tpl b/operations/deployment/helm/grafana/templates/_helpers.tpl new file mode 100644 index 0000000..ead2449 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/_helpers.tpl @@ -0,0 +1,227 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create }} +{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else }} +{{- default "default" .Values.serviceAccount.nameTest }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.extraLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} +{{- if $secret }} +{{- index $secret "data" "admin-password" }} +{{- else }} +{{- (randAlphaNum 40) | b64enc | quote }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "grafana.rbac.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" }} +{{- else }} +{{- print "rbac.authorization.k8s.io/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "grafana.ingress.apiVersion" -}} +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} +{{- print "networking.k8s.io/v1" }} +{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +{{- print "networking.k8s.io/v1beta1" }} +{{- else }} +{{- print "extensions/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "grafana.hpa.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2" }} +{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2beta2" }} +{{- else }} +{{- print "autoscaling/v2beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. +*/}} +{{- define "grafana.podDisruptionBudget.apiVersion" -}} +{{- if $.Values.podDisruptionBudget.apiVersion }} +{{- print $.Values.podDisruptionBudget.apiVersion }} +{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +{{- print "policy/v1" }} +{{- else }} +{{- print "policy/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return if ingress is stable. +*/}} +{{- define "grafana.ingress.isStable" -}} +{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "grafana.ingress.supportsIngressClassName" -}} +{{- or (eq (include "grafana.ingress.isStable" .) 
"true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "grafana.ingress.supportsPathType" -}} +{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "grafana.imagePullSecrets" -}} +{{- $root := .root }} +{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} +{{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml (dict "name" (tpl .name $root)) | trim }} +{{- else }} +- name: {{ tpl . $root }} +{{- end }} +{{- end }} +{{- end }} + + +{{/* + Checks whether or not the configSecret secret has to be created + */}} +{{- define "grafana.shouldCreateConfigSecret" -}} +{{- $secretFound := false -}} +{{- range $key, $value := .Values.datasources }} + {{- if hasKey $value "secret" }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.notifiers }} + {{- if hasKey $value "secret" }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.alerting }} + {{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- $secretFound}} +{{- end -}} diff --git a/operations/deployment/helm/grafana/templates/_pod.tpl b/operations/deployment/helm/grafana/templates/_pod.tpl new file mode 100644 index 0000000..f4b9fd3 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/_pod.tpl @@ -0,0 +1,1276 @@ +{{- define "grafana.pod" -}} +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- $root := . -}} +{{- with .Values.schedulerName }} +schedulerName: "{{ . }}" +{{- end }} +serviceAccountName: {{ include "grafana.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} +{{- with .Values.securityContext }} +securityContext: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.hostAliases }} +hostAliases: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.priorityClassName }} +priorityClassName: {{ . }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts) (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + {{- $registry := .Values.global.imageRegistry | default .Values.initChownData.image.registry -}} + {{- if .Values.initChownData.image.sha }} + image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + {{- with .Values.initChownData.securityContext }} + securityContext: + {{- toYaml . 
| nindent 6 }} + {{- end }} + command: + - chown + - -R + - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} + - /var/lib/grafana + {{- with .Values.initChownData.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- $registry := .Values.global.imageRegistry | default .Values.downloadDashboardsImage.registry -}} + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] + {{- with .Values.downloadDashboards.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + env: + {{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- with .Values.downloadDashboards.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl . $root }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts }} + - name: {{ include "grafana.name" . }}-init-sc-alerts + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . 
}} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- with .Values.sidecar.alerts.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} + - name: {{ include "grafana.name" . }}-init-sc-datasources + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . 
| nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end }} +{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} + - name: {{ include "grafana.name" . }}-init-sc-notifiers + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- with .Values.extraInitContainers }} + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} +{{- end }} +{{- if not .Values.enableKubeBackwardCompatibility }} +enableServiceLinks: {{ .Values.enableServiceLinks }} +{{- end }} +containers: +{{- if and .Values.sidecar.alerts.enabled (not .Values.sidecar.alerts.initAlerts) }} + - name: {{ include "grafana.name" . 
}}-sc-alerts + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.alerts.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.alerts.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.alerts.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.alerts.watchServerTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.alerts.watchClientTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- with .Values.sidecar.alerts.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ include "grafana.name" . }}-sc-dashboard + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.dashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- with .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: {{ quote .Values.sidecar.dashboards.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- if not .Values.sidecar.dashboards.skipReload }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + - name: REQ_URL + value: {{ .Values.sidecar.dashboards.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.dashboards.watchServerTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.dashboards.watchClientTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- with .Values.sidecar.dashboards.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if and .Values.sidecar.datasources.enabled (not .Values.sidecar.datasources.initDatasources) }} + - name: {{ include "grafana.name" . }}-sc-datasources + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.datasources.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. 
| join ",") $root }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.datasources.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.datasources.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.datasources.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.datasources.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.datasources.watchServerTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.datasources.watchClientTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ include "grafana.name" . 
}}-sc-notifiers + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.notifiers.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.notifiers.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.notifiers.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.notifiers.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.notifiers.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.notifiers.watchServerTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.notifiers.watchClientTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.sidecar.plugins.enabled }} + - name: {{ include "grafana.name" . }}-sc-plugins + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.plugins.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.plugins.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.plugins.label }}" + {{- if .Values.sidecar.plugins.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.plugins.labelValue }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/plugins" + - name: RESOURCE + value: {{ quote .Values.sidecar.plugins.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.plugins.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.plugins.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . 
}}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.plugins.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.plugins.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.plugins.watchServerTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.plugins.watchClientTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" +{{- end}} + - name: {{ .Chart.Name }} + {{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}} + {{- if .Values.image.sha }} + image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.args }} + args: + {{- range .Values.args }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + mountPath: {{ tpl .mountPath $root }} + subPath: {{ tpl (.subPath | default "") $root }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . 
$root }}
+ {{- end }}
+ {{- with .Values.dashboards }}
+ {{- range $provider, $dashboards := . }}
+ {{- range $key, $value := $dashboards }}
+ {{- if (or (hasKey $value "json") (hasKey $value "file")) }}
+ - name: dashboards-{{ $provider }}
+ mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json"
+ subPath: "{{ $key }}.json"
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.dashboardsConfigMaps }}
+ {{- range (keys . | sortAlpha) }}
+ - name: dashboards-{{ . }}
+ mountPath: "/var/lib/grafana/dashboards/{{ . }}"
+ {{- end }}
+ {{- end }}
+ {{- with .Values.datasources }}
+ {{- $datasources := . }}
+ {{- range (keys . | sortAlpha) }}
+ {{- if (or (hasKey (index $datasources .) "secret")) }} {{/* check if current datasource should be handled as secret */}}
+ - name: config-secret
+ mountPath: "/etc/grafana/provisioning/datasources/{{ . }}"
+ subPath: {{ . | quote }}
+ {{- else }}
+ - name: config
+ mountPath: "/etc/grafana/provisioning/datasources/{{ . }}"
+ subPath: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.notifiers }}
+ {{- $notifiers := . }}
+ {{- range (keys . | sortAlpha) }}
+ {{- if (or (hasKey (index $notifiers .) "secret")) }} {{/* check if current notifier should be handled as secret */}}
+ - name: config-secret
+ mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}"
+ subPath: {{ . | quote }}
+ {{- else }}
+ - name: config
+ mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}"
+ subPath: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.alerting }}
+ {{- $alertingmap := .}}
+ {{- range (keys . | sortAlpha) }}
+ {{- if (or (hasKey (index $.Values.alerting .) "secret") (hasKey (index $.Values.alerting .) "secretFile")) }} {{/* check if current alerting entry should be handled as secret */}}
+ - name: config-secret
+ mountPath: "/etc/grafana/provisioning/alerting/{{ . }}"
+ subPath: {{ . | quote }}
+ {{- else }}
+ - name: config
+ mountPath: "/etc/grafana/provisioning/alerting/{{ . }}"
+ subPath: {{ . | quote }}
+ {{- end }}
+ {{- end }}
+ {{- end }}
+ {{- with .Values.dashboardProviders }}
+ {{- range (keys . | sortAlpha) }}
+ - name: config
+ mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}"
+ subPath: {{ . 
| quote }} + {{- end }} + {{- end }} + {{- with .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- end}} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml + {{- end}} + {{- end}} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" + {{- end}} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" + {{- end}} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" + {{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.podPortName }} + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.gossipPortName }}-tcp + containerPort: 9094 + protocol: TCP + - name: {{ .Values.gossipPortName }}-udp + containerPort: 9094 + protocol: UDP + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ include "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{- if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{- end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" + {{- end }} + {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + - secretRef: + name: {{ include "grafana.fullname" . }}-env + {{- end }} + {{- range .Values.envFromSecrets }} + - secretRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- range .Values.envFromConfigMaps }} + - configMapRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.lifecycleHooks }} + lifecycle: + {{- tpl (toYaml .) $root | nindent 6 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- with .Values.extraContainers }} + {{- tpl . $ | nindent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ include "grafana.fullname" . }} + {{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} + {{- if and .Values.createConfigmap $createConfigSecret }} + - name: config-secret + secret: + secretName: {{ include "grafana.fullname" . }}-config-secret + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + configMap: + name: {{ tpl .configMap $root }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ include "grafana.fullname" . 
}} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} + {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }} + {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} + {{/* nothing */}} + {{- else }} + - name: storage + {{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory + {{- with .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ . }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + emptyDir: + {{- with .Values.sidecar.alerts.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: + {{- with .Values.sidecar.dashboards.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ include "grafana.fullname" . }}-config-dashboards + {{- end }} + {{- end }} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: + {{- with .Values.sidecar.datasources.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + emptyDir: + {{- with .Values.sidecar.plugins.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: + {{- with .Values.sidecar.notifiers.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- range .Values.extraSecretMounts }} + {{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- else if .projected }} + - name: {{ .name }} + projected: + {{- toYaml .projected | nindent 6 }} + {{- else if .csi }} + - name: {{ .name }} + csi: + {{- toYaml .csi | nindent 6 }} + {{- end }} + {{- end }} + {{- range .Values.extraVolumes }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + {{ toYaml .hostPath | nindent 6 }} + {{- else if .csi }} + csi: + {{- toYaml .data | nindent 6 }} + {{- else if .configMap }} + configMap: + {{- toYaml .configMap | nindent 6 }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} + {{- end }} + {{- with .Values.extraContainerVolumes }} + {{- tpl (toYaml .) $root | nindent 2 }} + {{- end }} +{{- end }} + diff --git a/operations/deployment/helm/grafana/templates/clusterrole.yaml b/operations/deployment/helm/grafana/templates/clusterrole.yaml new file mode 100644 index 0000000..3af4b62 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} +rules: + {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end}} + {{- with .Values.rbac.extraClusterRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..bda9431 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +roleRef: + kind: ClusterRole + {{- if .Values.rbac.useExistingClusterRole }} + name: {{ .Values.rbac.useExistingClusterRole }} + {{- else }} + name: {{ include "grafana.fullname" . }}-clusterrole + {{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configSecret.yaml b/operations/deployment/helm/grafana/templates/configSecret.yaml new file mode 100644 index 0000000..f8937cc --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configSecret.yaml @@ -0,0 +1,43 @@ +{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} +{{- if and .Values.createConfigmap $createConfigSecret }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: Secret +metadata: + name: "{{ include "grafana.fullname" . }}-config-secret" + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: +{{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "secretFile") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}} + {{/* as of https://helm.sh/docs/chart_template_guide/accessing_files/ this will only work if you fork this chart and add files to it*/}} + {{- end }} +{{- end }} +stringData: +{{- range $key, $value := .Values.datasources }} +{{- if (hasKey $value "secret") }} +{{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} +{{- end }} +{{- end }} +{{- range $key, $value := .Values.notifiers }} +{{- if (hasKey $value "secret") }} +{{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} +{{- end }} +{{- end }} +{{- range $key, $value := .Values.alerting }} +{{ if (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value.secret | nindent 4) $root }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 0000000..1f706a8 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-config-dashboards + namespace: {{ include "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end }} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configmap.yaml b/operations/deployment/helm/grafana/templates/configmap.yaml new file mode 100644 index 0000000..7b837d9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configmap.yaml @@ -0,0 +1,144 @@ +{{- if .Values.createConfigmap }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + {{- with .Values.plugins }} + plugins: {{ join "," . 
}} + {{- end }} + grafana.ini: | + {{- range $elem, $elemVal := index .Values "grafana.ini" }} + {{- if not (kindIs "map" $elemVal) }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := index .Values "grafana.ini" }} + {{- if kindIs "map" $value }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.datasources }} + {{- if not (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.notifiers }} + {{- if not (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "file") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}} + {{/* will be stored inside secret generated by "configSecret.yaml"*/}} + {{- else }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.dashboardProviders }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + {{ $dashboardProviders := .Values.dashboardProviders }} + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + {{- if not $value.acceptHeader }} + -H "Accept: application/json" \ + {{- else }} + -H "Accept: {{ $value.acceptHeader }}" \ + {{- end }} + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + {{- if $value.bearerToken }} + -H "Authorization: Bearer {{ $value.bearerToken }}" \ + {{- end }} + {{- if $value.basic }} + -H "Authorization: Basic {{ $value.basic }}" \ + {{- end }} + {{- if $value.gitlabToken }} + -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{- end }} + {{- $dpPath := "" -}} + {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} + {{- if eq $kd.name $provider }} + {{- $dpPath = $kd.options.path }} + {{- end }} + {{- end }} + {{- if $value.url }} + "{{ $value.url }}" \ + {{- else }} + "https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ + {{- end }} + {{- if $value.datasource }} + {{- if kindIs "string" $value.datasource }} + | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ + {{- end }} + {{- if kindIs "slice" $value.datasource }} + {{- range $value.datasource }} + | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ + {{- end }} + {{- end }} + {{- end }} + {{- if $value.b64content }} + | base64 -d \ + {{- end }} + > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" + {{ end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 0000000..b96ce72 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,38 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ include "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} + {{- if $.Values.sidecar.dashboards.enabled }} + {{ $.Values.sidecar.dashboards.label }}: {{ $.Values.sidecar.dashboards.labelValue | quote }} + {{- end }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} + {{- print $key | nindent 2 }}.json: + {{- if hasKey $value "json" }} + |- + {{- $value.json | nindent 6 }} + {{- end }} + {{- if hasKey $value "file" }} + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/deployment.yaml b/operations/deployment/helm/grafana/templates/deployment.yaml new file mode 100644 index 0000000..bfa26bb --- /dev/null +++ b/operations/deployment/helm/grafana/templates/deployment.yaml @@ -0,0 +1,51 @@ +{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} + replicas: {{ .Values.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + {{- with .Values.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . 
| sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/extra-manifests.yaml b/operations/deployment/helm/grafana/templates/extra-manifests.yaml new file mode 100644 index 0000000..a9bb3b6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraObjects }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/operations/deployment/helm/grafana/templates/headless-service.yaml b/operations/deployment/helm/grafana/templates/headless-service.yaml new file mode 100644 index 0000000..3028589 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/headless-service.yaml @@ -0,0 +1,22 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-headless + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP + ports: + - name: {{ .Values.gossipPortName }}-tcp + port: 9094 +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/hpa.yaml b/operations/deployment/helm/grafana/templates/hpa.yaml new file mode 100644 index 0000000..46bbcb4 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/hpa.yaml @@ -0,0 +1,52 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if .Values.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }} + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + {{- if has .Values.persistence.type $sts }} + kind: StatefulSet + {{- else }} + kind: Deployment + {{- end }} + name: {{ include "grafana.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.behavior }} + behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml new file mode 100644 index 0000000..ea97969 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,131 @@ +{{ if .Values.imageRenderer.enabled }} +{{- $root := . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} + replicas: {{ .Values.imageRenderer.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + + {{- with .Values.imageRenderer.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.imageRenderer.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imageRenderer.schedulerName }} + schedulerName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range . }} + - name: {{ tpl . 
$root }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- $registry := .Values.global.imageRegistry | default .Values.imageRenderer.image.registry -}} + {{- if .Values.imageRenderer.image.sha }} + image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . }} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + livenessProbe: + httpGet: + path: / + port: {{ .Values.imageRenderer.service.portName }} + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.targetPort | quote }} + {{- if .Values.imageRenderer.serviceMonitor.enabled }} + - name: ENABLE_METRICS + value: "true" + {{- end }} + {{- range $key, $value := .Values.imageRenderer.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 16 }} + {{- end }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + {{- with .Values.imageRenderer.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.imageRenderer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml new file mode 100644 index 0000000..b0f0059 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "grafana.fullname" . }}-image-renderer + minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} + maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} + metrics: + {{- if .Values.imageRenderer.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.behavior }} + behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml new file mode 100644 index 0000000..d1a0eb3 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 14 }} + {{- end }} + {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} + {{ toYaml . | nindent 8 }} + {{- end }} +{{- end }} + +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-egress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.targetPort }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . 
| nindent 14 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml new file mode 100644 index 0000000..f8da127 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + {{- with .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + {{- with .Values.imageRenderer.appProtocol }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml new file mode 100644 index 0000000..5d9f09d --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if .Values.imageRenderer.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + {{- if .Values.imageRenderer.serviceMonitor.namespace }} + namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.imageRenderer.service.portName }} + {{- with .Values.imageRenderer.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.imageRenderer.serviceMonitor.path }} + scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} + {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}-image-renderer" + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/ingress.yaml b/operations/deployment/helm/grafana/templates/ingress.yaml new file mode 100644 index 0000000..063cdfa --- /dev/null +++ b/operations/deployment/helm/grafana/templates/ingress.yaml @@ -0,0 +1,78 @@ +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: {{ include "grafana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} + {{- with .Values.ingress.tls }} + tls: + {{- tpl (toYaml .) $ | nindent 4 }} + {{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . $ }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- with $ingressPath }} + path: {{ . }} + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/networkpolicy.yaml b/operations/deployment/helm/grafana/templates/networkpolicy.yaml new file mode 100644 index 0000000..4cd3ed6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/networkpolicy.yaml @@ -0,0 +1,61 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + policyTypes: + {{- if .Values.networkPolicy.ingress }} + - Ingress + {{- end }} + {{- if .Values.networkPolicy.egress.enabled }} + - Egress + {{- end }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 6 }} + + {{- if .Values.networkPolicy.egress.enabled }} + egress: + {{- if not .Values.networkPolicy.egress.blockDNSResolution }} + - ports: + - port: 53 + protocol: UDP + {{- end }} + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- with .Values.networkPolicy.egress.to }} + to: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.ingress }} + ingress: + - ports: + - port: {{ .Values.service.targetPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "grafana.fullname" . }}-client: "true" + {{- with .Values.networkPolicy.explicitNamespacesSelector }} + - namespaceSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "grafana.labels" . | nindent 14 }} + role: read + {{- end }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..0525121 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} + {{- with .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..eed7af9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/pvc.yaml b/operations/deployment/helm/grafana/templates/pvc.yaml new file mode 100644 index 0000000..eb8f87f --- /dev/null +++ b/operations/deployment/helm/grafana/templates/pvc.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.extraPvcLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- with .Values.persistence.storageClassName }} + storageClassName: {{ . }} + {{- end }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/role.yaml b/operations/deployment/helm/grafana/templates/role.yaml new file mode 100644 index 0000000..4b5edd9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/role.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} +rules: + {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}] + {{- end }} + {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end }} + {{- with .Values.rbac.extraRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/rolebinding.yaml b/operations/deployment/helm/grafana/templates/rolebinding.yaml new file mode 100644 index 0000000..58f77c6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + {{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} + {{- else }} + name: {{ include "grafana.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret-env.yaml b/operations/deployment/helm/grafana/templates/secret-env.yaml new file mode 100644 index 0000000..eb14aac --- /dev/null +++ b/operations/deployment/helm/grafana/templates/secret-env.yaml @@ -0,0 +1,14 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }}-env + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret.yaml b/operations/deployment/helm/grafana/templates/secret.yaml new file mode 100644 index 0000000..5cbd527 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/secret.yaml @@ -0,0 +1,26 @@ +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ include "grafana.password" . }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/service.yaml b/operations/deployment/helm/grafana/templates/service.yaml new file mode 100644 index 0000000..9102c1e --- /dev/null +++ b/operations/deployment/helm/grafana/templates/service.yaml @@ -0,0 +1,58 @@ +{{- if .Values.service.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- with .Values.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + {{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . 
}} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- with .Values.service.externalIPs }} + externalIPs: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ . }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} + {{- with .Values.service.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- with .Values.extraExposePorts }} + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/serviceaccount.yaml b/operations/deployment/helm/grafana/templates/serviceaccount.yaml new file mode 100644 index 0000000..784e71b --- /dev/null +++ b/operations/deployment/helm/grafana/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create }} +{{- $root := . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/servicemonitor.yaml b/operations/deployment/helm/grafana/templates/servicemonitor.yaml new file mode 100644 index 0000000..7239682 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ tpl .Values.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- with .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/statefulset.yaml b/operations/deployment/helm/grafana/templates/statefulset.yaml new file mode 100644 index 0000000..e6c944a --- /dev/null +++ b/operations/deployment/helm/grafana/templates/statefulset.yaml @@ -0,0 +1,56 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ include "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + {{- if .Values.persistence.enabled}} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml new file mode 100644 index 0000000..01c96c9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + run.sh: |- + @test "Test Health" { + url="http://{{ include "grafana.fullname" . 
}}/api/health" + + code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') + [ "$code" == "200" ] + } +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml new file mode 100644 index 0000000..1821772 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,32 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }}-test + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - csi + - secret +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-role.yaml b/operations/deployment/helm/grafana/templates/tests/test-role.yaml new file mode 100644 index 0000000..cb4c782 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-role.yaml @@ -0,0 +1,17 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}-test] +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml new file mode 100644 index 0000000..f40d791 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "grafana.fullname" . }}-test +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . 
}} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml new file mode 100644 index 0000000..38fba35 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test.yaml b/operations/deployment/helm/grafana/templates/tests/test.yaml new file mode 100644 index 0000000..15067ae --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test.yaml @@ -0,0 +1,49 @@ +{{- if .Values.testFramework.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "grafana.fullname" . }}-test + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + namespace: {{ include "grafana.namespace" . }} +spec: + serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }} + {{- with .Values.testFramework.securityContext }} + securityContext: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 4 }} + {{- end }} + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.global.imageRegistry | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}" + imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" + command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + volumes: + - name: tests + configMap: + name: {{ include "grafana.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml new file mode 100644 index 0000000..07502cc --- /dev/null +++ b/operations/deployment/helm/grafana/values.yaml @@ -0,0 +1,1282 @@ +global: + # -- Overrides the Docker registry globally for all images + imageRegistry: null + + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # Can be tempalted. 
+ # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-role + # useExistingClusterRole: name-of-some-clusterRole + pspEnabled: false + pspUseAppArmor: false + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: + ## ServiceAccount labels. + labels: {} +## Service account annotations. Can be templated. +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + autoMount: true + +replicas: 1 + +## Create a headless service for the deployment +headlessService: false + +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# apiVersion: "" +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/grafana + # Overrides the Grafana image tag whose default is the chart appVersion + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Can be templated. + ## + pullSecrets: [] + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: + # -- The Docker registry + registry: docker.io + repository: bats/bats + tag: "v1.4.1" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + +# Enable creating the grafana configmap +createConfigmap: true + +# Extra configmaps to mount in grafana pods +# Values are templated. +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. 
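For reference, the image and serviceAccount blocks above are the usual first overrides when this chart is deployed. A minimal sketch of a values override follows; the tag and pull-secret name are illustrative assumptions, and the role ARN is the same placeholder used in the comments above.

```yaml
image:
  registry: docker.io
  repository: grafana/grafana
  tag: "10.2.2"            # assumed example tag; by default the chart follows its appVersion
  pullPolicy: IfNotPresent
  pullSecrets:
    - my-registry-secret   # assumed name of a pre-created docker-registry Secret

serviceAccount:
  create: true
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here  # placeholder from the comments above
```

Such an override could be applied with something like `helm upgrade --install grafana operations/deployment/helm/grafana -f override-values.yaml`, or folded into this values.yaml directly when the chart is deployed through BitOps.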
+extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + # -- The Docker registry + registry: docker.io + repository: curlimages/curl + tag: 7.85.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana +gossipPortName: gossip +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + enabled: true + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + ## Service annotations. Can be templated. + annotations: {} + labels: {} + portName: service + # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 30s + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + metricRelabelings: [] + targetLabels: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s >= 1.1= + pathType: Prefix + + hosts: + - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
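To publish the UI through the ingress defined above, a sketch of an override could look like the following; the host, controller class, and TLS secret names are the chart's own example placeholders, not working values.

```yaml
ingress:
  enabled: true
  ingressClassName: nginx          # assumed controller class, as hinted in the comment above
  annotations:
    kubernetes.io/tls-acme: "true"
  path: /
  pathType: Prefix
  hosts:
    - chart-example.local
  tls:
    - secretName: chart-example-tls
      hosts:
        - chart-example.local
```

Note that `server.domain` in the grafana.ini section further down this file is templated from the first entry in `ingress.hosts` whenever the ingress is enabled.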
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Topology Spread Constraints +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] + +## Additional init containers (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: "" +# extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + ## Sub-directory of the PV to mount. Can be templated. + # subPath: "" + ## Name of an existing PVC. Can be templated. + # existingClaim: + ## Extra labels to apply to a PVC. 
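The resources and persistence settings in this part of the file are typically tuned together for anything beyond a throwaway install. A hedged sketch, where the storage class and the request/limit numbers are assumptions rather than chart defaults:

```yaml
resources:
  requests:
    cpu: 100m
    memory: 128Mi
  limits:
    cpu: 200m
    memory: 256Mi

persistence:
  enabled: true
  type: pvc
  storageClassName: gp2    # assumed EBS-backed class on EKS; cluster-specific
  accessModes:
    - ReadWriteOnce
  size: 10Gi
```

Setting `persistence.type` to `sts`/`statefulset` (with no `existingClaim`) switches rendering to the StatefulSet template shown earlier in this patch instead of the Deployment.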
+ extraPvcLabels: {} + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the grafana-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + # -- The Docker registry + registry: docker.io + repository: library/busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + securityContext: + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + capabilities: + add: + - CHOWN + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + ## Name of the secret. Can be templated. + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Optionally define args if command is used +## Needed if using `hashicorp/envconsul` to manage secrets +## By default no arguments are set +# args: +# - "-secret" +# - "secret/grafana" +# - "./grafana" + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... 
+## - name: +## valueFrom: +## +envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc. +## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm +## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function +envRenderSecret: {} + +## The names of secrets in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. +## Name is templated. +envFromSecrets: [] +## - name: secret-name +## optional: true + +## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. +## Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core +envFromConfigMaps: [] +## - name: configmap-name +## optional: true + +# Inject Kubernetes services as environment variables. +# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables +enableServiceLinks: true + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ + # - name: grafana-secrets + # mountPath: /mnt/volume2 + # csi: true + # data: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "grafana-env-spc" + +## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request +lifecycleHooks: {} + # postStart: + # exec: + # command: [] + +## Pass the plugins you want installed as a list. 
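The numbered CloudWatch/EKS walkthrough above boils down to three overrides working together: the IRSA annotation on the service account, the `AWS_*` environment variables, and the projected token mount from `extraSecretMounts`. Pulled together as one sketch, with the account id and role name taken from the placeholder in the comments and the region being an assumption:

```yaml
serviceAccount:
  annotations:
    eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here

env:
  AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here
  AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token
  AWS_REGION: us-east-1

extraSecretMounts:
  - name: aws-iam-token
    mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount
    readOnly: true
    projected:
      defaultMode: 420
      sources:
        - serviceAccountToken:
            audience: sts.amazonaws.com
            expirationSeconds: 86400
            path: token
```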
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + ## You can also use other plugin download URL, as long as they are valid zip files, + ## and specify the name of the plugin after the semicolon. Like this: + # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 +# deleteDatasources: [] +# - name: Prometheus + +## Configure grafana alerting (can be templated) +## ref: http://docs.grafana.org/administration/provisioning/#alerting +## +alerting: {} + # rules.yaml: + # apiVersion: 1 + # groups: + # - orgId: 1 + # name: '{{ .Chart.Name }}_my_rule_group' + # folder: my_first_folder + # interval: 60s + # rules: + # - uid: my_id_1 + # title: my_first_rule + # condition: A + # data: + # - refId: A + # datasourceUid: '-100' + # model: + # conditions: + # - evaluator: + # params: + # - 3 + # type: gt + # operator: + # type: and + # query: + # params: + # - A + # reducer: + # type: last + # type: query + # datasource: + # type: __expr__ + # uid: '-100' + # expression: 1==0 + # intervalMs: 1000 + # maxDataPoints: 43200 + # refId: A + # type: math + # dashboardUid: my_dashboard + # panelId: 123 + # noDataState: Alerting + # for: 60s + # annotations: + # some_key: some_value + # labels: + # team: sre_team_1 + # contactpoints.yaml: + # secret: + # apiVersion: 1 + # contactPoints: + # - orgId: 1 + # name: cp_1 + # receivers: + # - uid: first_uid + # type: pagerduty + # settings: + # integrationKey: XXX + # severity: critical + # class: ping failure + # component: Grafana + # group: app-stack + # summary: | + # {{ `{{ include "default.message" . }}` }} + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
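The `datasources`, `dashboardProviders`, and `dashboards` maps in this area are designed to be used together for static provisioning. A minimal sketch based on the commented examples; the Prometheus service URL is an assumption that depends on how the sibling prometheus chart in this repository is released:

```yaml
datasources:
  datasources.yaml:
    apiVersion: 1
    datasources:
      - name: Prometheus
        type: prometheus
        access: proxy
        isDefault: true
        url: http://prometheus-server.kube-system.svc.cluster.local  # assumed service name/namespace

dashboardProviders:
  dashboardproviders.yaml:
    apiVersion: 1
    providers:
      - name: 'default'
        orgId: 1
        folder: ''
        type: file
        disableDeletion: false
        editable: true
        options:
          path: /var/lib/grafana/dashboards/default

dashboards:
  default:
    prometheus-stats:
      gnetId: 2        # imports dashboard 2 from grafana.com, as in the chart's commented example
      revision: 2
      datasource: Prometheus
```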
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + # local-dashboard-gitlab: + # url: https://example.com/repository/test-gitlab.json + # gitlabToken: '' + # local-dashboard-bitbucket: + # url: https://example.com/repository/test-bitbucket.json + # bearerToken: '' + # local-dashboard-azure: + # url: https://example.com/repository/test-azure.json + # basic: '' + # acceptHeader: '*/*' + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + server: + domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. 
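Enabling LDAP involves both the grafana.ini block above and the ldap map here. A sketch using the commented sample server settings; the hostname and bind DN are placeholders that would need to match a real directory:

```yaml
grafana.ini:
  auth.ldap:
    enabled: true
    allow_sign_up: true
    config_file: /etc/grafana/ldap.toml

ldap:
  enabled: true
  config: |-
    verbose_logging = true

    [[servers]]
    host = "my-ldap-server"      # placeholder hostname
    port = 636
    use_ssl = true
    start_tls = false
    ssl_skip_verify = false
    bind_dn = "uid=%s,ou=users,dc=myorg,dc=com"
```

When `ldap.existingSecret` is left empty, the secret.yaml template shown earlier in this patch stores this config under the `ldap-toml` key.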
+ existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + # -- The Docker registry + registry: quay.io + repository: kiwigrid/k8s-sidecar + tag: 1.25.2 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + readinessProbe: {} + livenessProbe: {} + # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO + # logLevel: INFO + alerts: + enabled: false + # Additional environment variables for the alerts sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with alert are marked with + label: grafana_alert + # value of label that the configmaps with alert are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for alert config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" + # Absolute path to shell script to execute after a alert got reloaded + script: null + skipReload: false + # This is needed if skipReload is true, to load any alerts defined at startup time. + # Deploy the alert sidecar as an initContainer. 
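To have the alerts sidecar described above provision alert rules, it only needs to be switched on; rules are then supplied as ConfigMaps or Secrets carrying the `grafana_alert` label configured above. A values-side sketch using the chart's own defaults:

```yaml
sidecar:
  alerts:
    enabled: true
    label: grafana_alert           # chart default, shown above
    searchNamespace: null          # only the release namespace is watched unless set (or ALL)
    watchMethod: WATCH
    resource: both                 # look at both ConfigMaps and Secrets
```

Each matching resource is expected to contain an alerting provisioning file in the same format as the `alerting:` example earlier in this values file.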
+ initAlerts: false + # Additional alert sidecar volume mounts + extraMounts: [] + # Sets the size limit of the alert sidecar emptyDir volume + sizeLimit: {} + dashboards: + enabled: false + # Additional environment variables for the dashboards sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces. + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. + folderAnnotation: null + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" + # Absolute path to shell script to execute after a configmap got reloaded + script: null + skipReload: false + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + # Additional dashboard sidecar volume mounts + extraMounts: [] + # Sets the size limit of the dashboard sidecar emptyDir volume + sizeLimit: {} + datasources: + enabled: false + # Additional environment variables for the datasourcessidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. 
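The dashboards sidecar above enables the common "dashboards as ConfigMaps" pattern: labelled ConfigMaps are collected into `/tmp/dashboards` and imported through the built-in `sidecarProvider`. A sketch, where the ConfigMap name and the dashboard JSON are purely illustrative (any exported dashboard JSON works):

```yaml
# values override
sidecar:
  dashboards:
    enabled: true
    label: grafana_dashboard
    folder: /tmp/dashboards
---
# a dashboard shipped as a ConfigMap in the watched namespace
apiVersion: v1
kind: ConfigMap
metadata:
  name: team-overview-dashboard    # assumed name
  labels:
    grafana_dashboard: "1"         # label key must match sidecar.dashboards.label above
data:
  team-overview.json: |
    { "title": "Team overview", "panels": [] }
```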
+ # ignoreAlreadyProcessed: true + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload datasources + reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" + # Absolute path to shell script to execute after a datasource got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any datasources defined at startup time. + initDatasources: false + # Sets the size limit of the datasource sidecar emptyDir volume + sizeLimit: {} + plugins: + enabled: false + # Additional environment variables for the plugins sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with plugins are marked with + label: grafana_plugin + # value of label that the configmaps with plugins are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for plugin config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) 
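The datasources sidecar above is the dynamic alternative to the static `datasources:` map earlier in this file: data sources are mounted from labelled ConfigMaps and reloaded through the `reloadURL`. With `sidecar.datasources.enabled: true` set, a matching ConfigMap might look like this (the name and Prometheus URL are assumptions):

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: prometheus-datasource      # assumed name
  labels:
    grafana_datasource: "1"        # label key matches sidecar.datasources.label above
data:
  prometheus.yaml: |
    apiVersion: 1
    datasources:
      - name: Prometheus
        type: prometheus
        access: proxy
        url: http://prometheus-server.kube-system.svc.cluster.local  # assumed address
```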
+ # watchClientTimeout: 60 + # + # Endpoint to send request to reload plugins + reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" + # Absolute path to shell script to execute after a plugin got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any plugins defined at startup time. + initPlugins: false + # Sets the size limit of the plugin sidecar emptyDir volume + sizeLimit: {} + notifiers: + enabled: false + # Additional environment variables for the notifierssidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # value of label that the configmaps with notifiers are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload notifiers + reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" + # Absolute path to shell script to execute after a notifier got reloaded + script: null + skipReload: false + # Deploy the notifier sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any notifiers defined at startup time. + initNotifiers: false + # Sets the size limit of the notifier sidecar emptyDir volume + sizeLimit: {} + +## Override the deployment namespace +## +namespaceOverride: "" + +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + deploymentStrategy: {} + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + image: + # -- The Docker registry + registry: docker.io + # image-renderer Image repository + repository: grafana/grafana-image-renderer + # image-renderer Image tag + tag: latest + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # IGNORE_HTTPS_ERRORS: true + + ## "valueFrom" environment variable references that will be added to deployment pods. 
Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment container securityContext + containerSecurityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: ['ALL'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ## image-renderer pod annotation + podAnnotations: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # Enable the image-renderer service + enabled: true + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels + targetLabels: [] + # - targetLabel1 + # - targetLabel2 + # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana + grafanaProtocol: http + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) + extraIngressSelectors: [] + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + ## Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## Affinity for pod assignment (evaluated as template) + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: "default-scheduler" + +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
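Stepping back to the imageRenderer block above: rendering panel PNGs for alert notifications and share/export features is handled by this optional renderer deployment. A hedged enablement sketch; the pinned tag and the resource numbers are assumptions rather than chart defaults:

```yaml
imageRenderer:
  enabled: true
  replicas: 1
  image:
    registry: docker.io
    repository: grafana/grafana-image-renderer
    tag: "3.8.4"                 # assumed pin; the chart default above is "latest"
  service:
    enabled: true
    port: 8081
    targetPort: 8081
  grafanaProtocol: http          # set to https if Grafana itself serves TLS
  networkPolicy:
    limitIngress: true
  resources:
    requests:
      cpu: 50m
      memory: 50Mi
    limits:
      cpu: 100m
      memory: 100Mi
```

When enabled, the pod template (not reproduced in this patch excerpt) is expected to point Grafana at the renderer Service for its rendering and callback URLs.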
+ ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to grafana port defined. + ## When true, grafana will accept connections from any source + ## (with the correct destination port). + ## + ingress: true + ## @param networkPolicy.ingress When true enables the creation + ## an ingress network policy + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the grafana. + ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## + ## + ## + ## + ## + ## + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing grafana to connect to external data sources from kubernetes cluster. + enabled: false + ## + ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked + ## for all pods in the grafana namespace. + blockDNSResolution: false + ## + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ports: [] + ## Add ports to the egress by specifying - port: + ## E.X. + ## - port: 80 + ## - port: 443 + ## + ## @param networkPolicy.egress.to Allow egress traffic to specific destinations + to: [] + ## Add destinations to the egress by specifying - ipBlock: + ## E.X. 
+ ## to: + ## - namespaceSelector: + ## matchExpressions: + ## - {key: role, operator: In, values: [grafana]} + ## + ## + ## + ## + ## + +# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option +enableKubeBackwardCompatibility: false +useStatefulSet: false +# Create a dynamic manifests via values: +extraObjects: [] + # - apiVersion: "kubernetes-client.io/v1" + # kind: ExternalSecret + # metadata: + # name: grafana-secrets + # spec: + # backendType: gcpSecretsManager + # data: + # - key: grafana-admin-password + # name: adminPassword From ac79a20bedd224fc987d8257aba118ac6cc1e24b Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 23:04:06 +0530 Subject: [PATCH 14/99] test with env file --- .../deployment/helm/aws-auth/Chart.yaml | 5 ----- .../helm/aws-auth/bitops.config.yaml | 14 ------------ .../helm/aws-auth/example.bitops.config.yaml | 15 ------------- .../helm/aws-auth/templates/aws-auth.yaml | 22 ------------------- .../deployment/helm/aws-auth/values.yaml | 21 ------------------ operations/deployment/helm/grafana/ENV_FILE | 2 ++ 6 files changed, 2 insertions(+), 77 deletions(-) delete mode 100644 operations/deployment/helm/aws-auth/Chart.yaml delete mode 100644 operations/deployment/helm/aws-auth/bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth/templates/aws-auth.yaml delete mode 100644 operations/deployment/helm/aws-auth/values.yaml create mode 100644 operations/deployment/helm/grafana/ENV_FILE diff --git a/operations/deployment/helm/aws-auth/Chart.yaml b/operations/deployment/helm/aws-auth/Chart.yaml deleted file mode 100644 index db7b2d3..0000000 --- a/operations/deployment/helm/aws-auth/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth -version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/bitops.config.yaml b/operations/deployment/helm/aws-auth/bitops.config.yaml deleted file mode 100644 index 60d7548..0000000 --- a/operations/deployment/helm/aws-auth/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/example.bitops.config.yaml b/operations/deployment/helm/aws-auth/example.bitops.config.yaml deleted file mode 100644 index eddb4b4..0000000 --- a/operations/deployment/helm/aws-auth/example.bitops.config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml deleted file mode 100644 index e41dea6..0000000 --- a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.configmap.name }} -data: -{{- if .Values.data.mapAccounts }} - mapAccounts: | - {{ .Values.data.mapAccounts| nindent 4 | trim }} -{{- 
end }} - -{{- if .Values.data.mapRoles }} - mapRoles: | - {{ .Values.data.mapRoles| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapUsers }} - mapUsers: | - {{ .Values.data.mapUsers| nindent 4 | trim }} -{{- end }} - -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/values.yaml b/operations/deployment/helm/aws-auth/values.yaml deleted file mode 100644 index 90f836a..0000000 --- a/operations/deployment/helm/aws-auth/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -configmap: - enabled: true - name: aws-auth -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/grafana/ENV_FILE b/operations/deployment/helm/grafana/ENV_FILE new file mode 100644 index 0000000..8ce79ba --- /dev/null +++ b/operations/deployment/helm/grafana/ENV_FILE @@ -0,0 +1,2 @@ +GF_SECURITY_ADMIN_USER=admin +GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file From 0577cb3a5c6bf5a3beacf98775252765cc7238dc Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 23:11:17 +0530 Subject: [PATCH 15/99] Added bitops files --- .../deployment/helm/grafana/bitops.config.yaml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 operations/deployment/helm/grafana/bitops.config.yaml diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml new file mode 100644 index 0000000..0d5d6df --- /dev/null +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: grafana + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file From edd0c33dd3edfd4260a5cc268c4e4244570c0111 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 23:20:41 +0530 Subject: [PATCH 16/99] Added prometheus chart --- operations/deployment/helm/grafana/Chart.yaml | 33 - operations/deployment/helm/grafana/ENV_FILE | 2 - operations/deployment/helm/grafana/README.md | 757 ---------- .../helm/grafana/bitops.config.yaml | 14 - .../helm/grafana/ci/default-values.yaml | 1 - .../helm/grafana/ci/with-affinity-values.yaml | 16 - .../ci/with-dashboard-json-values.yaml | 53 - .../grafana/ci/with-dashboard-values.yaml | 19 - .../ci/with-extraconfigmapmounts-values.yaml | 7 - .../ci/with-image-renderer-values.yaml | 19 - .../helm/grafana/ci/with-persistence.yaml | 3 - .../grafana/dashboards/custom-dashboard.json | 1 - .../helm/grafana/templates/NOTES.txt | 55 - .../helm/grafana/templates/_helpers.tpl | 227 --- .../helm/grafana/templates/_pod.tpl | 1276 ---------------- .../helm/grafana/templates/clusterrole.yaml | 25 - .../grafana/templates/clusterrolebinding.yaml | 24 - .../helm/grafana/templates/configSecret.yaml | 43 - .../configmap-dashboard-provider.yaml | 29 - .../helm/grafana/templates/configmap.yaml | 144 -- .../templates/dashboards-json-configmap.yaml | 38 - .../helm/grafana/templates/deployment.yaml | 51 - 
.../grafana/templates/extra-manifests.yaml | 4 - .../grafana/templates/headless-service.yaml | 22 - .../helm/grafana/templates/hpa.yaml | 52 - .../templates/image-renderer-deployment.yaml | 131 -- .../grafana/templates/image-renderer-hpa.yaml | 47 - .../image-renderer-network-policy.yaml | 79 - .../templates/image-renderer-service.yaml | 31 - .../image-renderer-servicemonitor.yaml | 48 - .../helm/grafana/templates/ingress.yaml | 78 - .../helm/grafana/templates/networkpolicy.yaml | 61 - .../templates/poddisruptionbudget.yaml | 22 - .../grafana/templates/podsecuritypolicy.yaml | 49 - .../helm/grafana/templates/pvc.yaml | 36 - .../helm/grafana/templates/role.yaml | 32 - .../helm/grafana/templates/rolebinding.yaml | 25 - .../helm/grafana/templates/secret-env.yaml | 14 - .../helm/grafana/templates/secret.yaml | 26 - .../helm/grafana/templates/service.yaml | 58 - .../grafana/templates/serviceaccount.yaml | 17 - .../grafana/templates/servicemonitor.yaml | 52 - .../helm/grafana/templates/statefulset.yaml | 56 - .../templates/tests/test-configmap.yaml | 20 - .../tests/test-podsecuritypolicy.yaml | 32 - .../grafana/templates/tests/test-role.yaml | 17 - .../templates/tests/test-rolebinding.yaml | 20 - .../templates/tests/test-serviceaccount.yaml | 12 - .../helm/grafana/templates/tests/test.yaml | 49 - .../deployment/helm/grafana/values.yaml | 1282 ----------------- .../helm/{grafana => prometheus}/.helmignore | 2 +- .../deployment/helm/prometheus/Chart.lock | 15 + .../deployment/helm/prometheus/Chart.yaml | 53 + .../deployment/helm/prometheus/README.md | 382 +++++ .../helm/prometheus/templates/NOTES.txt | 113 ++ .../helm/prometheus/templates/_helpers.tpl | 234 +++ .../prometheus/templates/clusterrole.yaml | 56 + .../templates/clusterrolebinding.yaml | 16 + .../helm/prometheus/templates/cm.yaml | 99 ++ .../helm/prometheus/templates/deploy.yaml | 363 +++++ .../prometheus/templates/extra-manifests.yaml | 4 + .../prometheus/templates/headless-svc.yaml | 35 + .../helm/prometheus/templates/ingress.yaml | 57 + .../prometheus/templates/network-policy.yaml | 16 + .../helm/prometheus/templates/pdb.yaml | 15 + .../helm/prometheus/templates/psp.yaml | 53 + .../helm/prometheus/templates/pvc.yaml | 43 + .../prometheus/templates/rolebinding.yaml | 18 + .../helm/prometheus/templates/service.yaml | 63 + .../prometheus/templates/serviceaccount.yaml | 16 + .../helm/prometheus/templates/sts.yaml | 385 +++++ .../helm/prometheus/templates/vpa.yaml | 26 + .../helm/prometheus/values.schema.json | 738 ++++++++++ .../deployment/helm/prometheus/values.yaml | 1253 ++++++++++++++++ 74 files changed, 4054 insertions(+), 5210 deletions(-) delete mode 100644 operations/deployment/helm/grafana/Chart.yaml delete mode 100644 operations/deployment/helm/grafana/ENV_FILE delete mode 100644 operations/deployment/helm/grafana/README.md delete mode 100644 operations/deployment/helm/grafana/bitops.config.yaml delete mode 100644 operations/deployment/helm/grafana/ci/default-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-affinity-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-persistence.yaml delete mode 100644 
operations/deployment/helm/grafana/dashboards/custom-dashboard.json delete mode 100644 operations/deployment/helm/grafana/templates/NOTES.txt delete mode 100644 operations/deployment/helm/grafana/templates/_helpers.tpl delete mode 100644 operations/deployment/helm/grafana/templates/_pod.tpl delete mode 100644 operations/deployment/helm/grafana/templates/clusterrole.yaml delete mode 100644 operations/deployment/helm/grafana/templates/clusterrolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configSecret.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/deployment.yaml delete mode 100644 operations/deployment/helm/grafana/templates/extra-manifests.yaml delete mode 100644 operations/deployment/helm/grafana/templates/headless-service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/hpa.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml delete mode 100644 operations/deployment/helm/grafana/templates/ingress.yaml delete mode 100644 operations/deployment/helm/grafana/templates/networkpolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml delete mode 100644 operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/pvc.yaml delete mode 100644 operations/deployment/helm/grafana/templates/role.yaml delete mode 100644 operations/deployment/helm/grafana/templates/rolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/secret-env.yaml delete mode 100644 operations/deployment/helm/grafana/templates/secret.yaml delete mode 100644 operations/deployment/helm/grafana/templates/service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/serviceaccount.yaml delete mode 100644 operations/deployment/helm/grafana/templates/servicemonitor.yaml delete mode 100644 operations/deployment/helm/grafana/templates/statefulset.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-role.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test.yaml delete mode 100644 operations/deployment/helm/grafana/values.yaml rename operations/deployment/helm/{grafana => prometheus}/.helmignore (97%) create mode 100644 operations/deployment/helm/prometheus/Chart.lock create mode 100644 operations/deployment/helm/prometheus/Chart.yaml create mode 100644 operations/deployment/helm/prometheus/README.md create mode 
100644 operations/deployment/helm/prometheus/templates/NOTES.txt create mode 100644 operations/deployment/helm/prometheus/templates/_helpers.tpl create mode 100644 operations/deployment/helm/prometheus/templates/clusterrole.yaml create mode 100644 operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml create mode 100644 operations/deployment/helm/prometheus/templates/cm.yaml create mode 100644 operations/deployment/helm/prometheus/templates/deploy.yaml create mode 100644 operations/deployment/helm/prometheus/templates/extra-manifests.yaml create mode 100644 operations/deployment/helm/prometheus/templates/headless-svc.yaml create mode 100644 operations/deployment/helm/prometheus/templates/ingress.yaml create mode 100644 operations/deployment/helm/prometheus/templates/network-policy.yaml create mode 100644 operations/deployment/helm/prometheus/templates/pdb.yaml create mode 100644 operations/deployment/helm/prometheus/templates/psp.yaml create mode 100644 operations/deployment/helm/prometheus/templates/pvc.yaml create mode 100644 operations/deployment/helm/prometheus/templates/rolebinding.yaml create mode 100644 operations/deployment/helm/prometheus/templates/service.yaml create mode 100644 operations/deployment/helm/prometheus/templates/serviceaccount.yaml create mode 100644 operations/deployment/helm/prometheus/templates/sts.yaml create mode 100644 operations/deployment/helm/prometheus/templates/vpa.yaml create mode 100644 operations/deployment/helm/prometheus/values.schema.json create mode 100644 operations/deployment/helm/prometheus/values.yaml diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml deleted file mode 100644 index 387f2ab..0000000 --- a/operations/deployment/helm/grafana/Chart.yaml +++ /dev/null @@ -1,33 +0,0 @@ -apiVersion: v2 -name: grafana -version: 7.0.13 -appVersion: 10.2.2 -kubeVersion: "^1.8.0-0" -description: The leading tool for querying and visualizing time series and metrics. 
-home: https://grafana.com -icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 -sources: - - https://github.com/grafana/grafana - - https://github.com/grafana/helm-charts -annotations: - "artifacthub.io/license": AGPL-3.0-only - "artifacthub.io/links": | - - name: Chart Source - url: https://github.com/grafana/helm-charts - - name: Upstream Project - url: https://github.com/grafana/grafana -maintainers: - - name: zanhsieh - email: zanhsieh@gmail.com - - name: rtluckie - email: rluckie@cisco.com - - name: maorfr - email: maor.friedman@redhat.com - - name: Xtigyro - email: miroslav.hadzhiev@gmail.com - - name: torstenwalter - email: mail@torstenwalter.de -type: application -keywords: - - monitoring - - metric diff --git a/operations/deployment/helm/grafana/ENV_FILE b/operations/deployment/helm/grafana/ENV_FILE deleted file mode 100644 index 8ce79ba..0000000 --- a/operations/deployment/helm/grafana/ENV_FILE +++ /dev/null @@ -1,2 +0,0 @@ -GF_SECURITY_ADMIN_USER=admin -GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file diff --git a/operations/deployment/helm/grafana/README.md b/operations/deployment/helm/grafana/README.md deleted file mode 100644 index 5420545..0000000 --- a/operations/deployment/helm/grafana/README.md +++ /dev/null @@ -1,757 +0,0 @@ -# Grafana Helm Chart - -* Installs the web dashboarding system [Grafana](http://grafana.org/) - -## Get Repo Info - -```console -helm repo add grafana https://grafana.github.io/helm-charts -helm repo update -``` - -_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```console -helm install my-release grafana/grafana -``` - -## Uninstalling the Chart - -To uninstall/delete the my-release deployment: - -```console -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Upgrading an existing Release to a new major version - -A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an -incompatible breaking change needing manual actions. - -### To 4.0.0 (And 3.12.1) - -This version requires Helm >= 2.12.0. - -### To 5.0.0 - -You have to add --force to your helm upgrade command as the labels of the chart have changed. - -### To 6.0.0 - -This version requires Helm >= 3.1.0. - -### To 7.0.0 - -For consistency with other Helm charts, the `global.image.registry` parameter was renamed -to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action -is required on upgrade. If you were previously setting `global.image.registry`, you will -need to instead set `global.imageRegistry`. 
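The `global.image.registry` to `global.imageRegistry` rename described above flattens the registry setting by one level. A short sketch with an assumed registry name:

```yaml
# Assumed illustrative registry value.
# Chart versions before 7.0.0:
global:
  image:
    registry: registry.example.com

# Chart versions 7.0.0 and later:
global:
  imageRegistry: registry.example.com
```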
- -## Configuration - -| Parameter | Description | Default | -|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| -| `replicas` | Number of nodes | `1` | -| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | -| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | -| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` | -| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | -| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | -| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| -| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | -| `priorityClassName` | Name of Priority Class to assign pods | `nil` | -| `image.registry` | Image registry | `docker.io` | -| `image.repository` | Image repository | `grafana/grafana` | -| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` | -| `image.sha` | Image sha (optional) | `` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` | -| `service.enabled` | Enable grafana service | `true` | -| `service.type` | Kubernetes service type | `ClusterIP` | -| `service.port` | Kubernetes port where service is exposed | `80` | -| `service.portName` | Name of the port on the service | `service` | -| `service.appProtocol` | Adds the appProtocol field to the service | `` | -| `service.targetPort` | Internal service is port | `3000` | -| `service.nodePort` | Kubernetes service nodePort | `nil` | -| `service.annotations` | Service annotations (can be templated) | `{}` | -| `service.labels` | Custom labels | `{}` | -| `service.clusterIP` | internal cluster service IP | `nil` | -| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | -| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | -| `service.externalIPs` | service external IP addresses | `[]` | -| `service.externalTrafficPolicy` | change the default externalTrafficPolicy | `nil` | -| `headlessService` | Create a headless service | `false` | -| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | -| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | -| `ingress.enabled` | Enables Ingress | `false` | -| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | -| `ingress.labels` | Custom labels | `{}` | -| `ingress.path` | Ingress accepted path | `/` | -| `ingress.pathType` | Ingress type of path | `Prefix` | -| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | -| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | -| `ingress.tls` | Ingress TLS configuration | `[]` | -| `ingress.ingressClassName` | Ingress Class Name. 
MAY be required for Kubernetes versions >= 1.18 | `""` | -| `resources` | CPU/Memory resource requests/limits | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Toleration labels for pod assignment | `[]` | -| `affinity` | Affinity settings for pod assignment | `{}` | -| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | -| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | -| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | -| `extraLabels` | Custom labels for all manifests | `{}` | -| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | -| `persistence.enabled` | Use persistent volume to store data | `false` | -| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | -| `persistence.size` | Size of persistent volume claim | `10Gi` | -| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` | -| `persistence.storageClassName` | Type of persistent volume claim | `nil` | -| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | -| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | -| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | -| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` | -| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` | -| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | -| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | -| `initChownData.enabled` | If false, don't reset data ownership at startup | true | -| `initChownData.image.registry` | init-chown-data container image registry | `docker.io` | -| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | -| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | -| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | -| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | -| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | -| `schedulerName` | Alternate scheduler name | `nil` | -| `env` | Extra environment variables passed to pods | `{}` | -| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` | -| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | -| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | -| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | -| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` | -| `enableServiceLinks` | Inject Kubernetes services as environment variables. 
| `true` | -| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | -| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | -| `createConfigmap` | Enable creating the grafana configmap | `true` | -| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` | -| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | -| `plugins` | Plugins to be loaded along with Grafana | `[]` | -| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | -| `alerting` | Configure grafana alerting (passed through tpl) | `{}` | -| `notifiers` | Configure grafana notifiers | `{}` | -| `dashboardProviders` | Configure grafana dashboard providers | `{}` | -| `dashboards` | Dashboards to import | `{}` | -| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | -| `grafana.ini` | Grafana's primary configuration | `{}` | -| `global.imageRegistry` | Global image pull registry for all images. | `null` | -| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` | -| `ldap.enabled` | Enable LDAP authentication | `false` | -| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | -| `ldap.config` | Grafana's LDAP configuration | `""` | -| `annotations` | Deployment annotations | `{}` | -| `labels` | Deployment labels | `{}` | -| `podAnnotations` | Pod annotations | `{}` | -| `podLabels` | Pod labels | `{}` | -| `podPortName` | Name of the grafana port on the pod | `grafana` | -| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | -| `sidecar.image.registry` | Sidecar image registry | `quay.io` | -| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` | -| `sidecar.image.tag` | Sidecar image tag | `1.24.6` | -| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | -| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | -| `sidecar.resources` | Sidecar resources | `{}` | -| `sidecar.securityContext` | Sidecar securityContext | `{}` | -| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | -| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` | -| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` | -| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` | -| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. 
| `WATCH` | -| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` | -| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` | -| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` | -| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | -| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | -| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | -| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | -| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | -| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | -| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | -| `sidecar.dashboards.provider.type` | Provider type | `file` | -| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | -| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | -| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | -| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | -| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` | -| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | -| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | -| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | -| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. 
| `nil` | -| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` | -| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` | -| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | -| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | -| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` | -| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | -| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | -| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` | -| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | -| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | -| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` | -| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | -| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` | -| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. 
This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` | -| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | -| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | -| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | -| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` | -| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | -| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | -| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | -| `serviceAccount.annotations` | ServiceAccount annotations | | -| `serviceAccount.create` | Create service account | `true` | -| `serviceAccount.labels` | ServiceAccount labels | `{}` | -| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | -| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | -| `rbac.create` | Create and use RBAC resources | `true` | -| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | -| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | -| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` | -| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` | -| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | -| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | -| `command` | Define command to be executed by grafana container at startup | `nil` | -| `args` | Define additional args if command is used | `nil` | -| `testFramework.enabled` | Whether to create test-related resources | `true` | -| `testFramework.image.registry` | `test-framework` image registry. | `docker.io` | -| `testFramework.image.repository` | `test-framework` image repository. | `bats/bats` | -| `testFramework.image.tag` | `test-framework` image tag. | `v1.4.1` | -| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | -| `testFramework.securityContext` | `test-framework` securityContext | `{}` | -| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | -| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. 
Can be templated | `""` | -| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | -| `downloadDashboardsImage.registry` | Curl docker image registry | `docker.io` | -| `downloadDashboardsImage.repository` | Curl docker image repository | `curlimages/curl` | -| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | -| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | -| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | -| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | -| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | -| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | -| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | -| `serviceMonitor.path` | Path to scrape | `/metrics` | -| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | -| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | -| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | -| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | -| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | -| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` | -| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | -| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | -| `imageRenderer.image.registry` | image-renderer Image registry | `docker.io` | -| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | -| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | -| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | -| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | -| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | -| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. 
Can be templated | `{}` | -| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | -| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | -| `imageRenderer.podAnnotations ` | image-renderer image-renderer pod annotation | `{}` | -| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | -| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | -| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | -| `imageRenderer.service.portName` | image-renderer service port name | `http` | -| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` | -| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` | -| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` | -| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | -| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | -| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | -| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | -| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | -| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` | -| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` | -| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` | -| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` | -| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | -| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` | -| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` | -| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` | -| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's defintion version below 1.13 doesn't have the enableServiceLinks option | `false` | - -### Example ingress with path - -With grafana 6.3 and above - -```yaml -grafana.ini: - server: - domain: monitoring.example.com - root_url: "%(protocol)s://%(domain)s/grafana" - serve_from_sub_path: true -ingress: - enabled: true - hosts: - - "monitoring.example.com" - path: "/grafana" -``` - -### Example of extraVolumeMounts - -Volume can be type persistentVolumeClaim or hostPath but not both at same time. -If neither existingClaim or hostPath argument is given then type is emptyDir. - -```yaml -- extraVolumeMounts: - - name: plugins - mountPath: /var/lib/grafana/plugins - subPath: configs/grafana/plugins - existingClaim: existing-grafana-claim - readOnly: false - - name: dashboards - mountPath: /var/lib/grafana/dashboards - hostPath: /usr/shared/grafana/dashboards - readOnly: false -``` - -## Import dashboards - -There are a few methods to import dashboards to Grafana. 
Below are some examples and explanations as to how to use each method: - -```yaml -dashboards: - default: - some-dashboard: - json: | - { - "annotations": - - ... - # Complete json file here - ... - - "title": "Some Dashboard", - "uid": "abcd1234", - "version": 1 - } - custom-dashboard: - # This is a path to a file inside the dashboards directory inside the chart directory - file: dashboards/custom-dashboard.json - prometheus-stats: - # Ref: https://grafana.com/dashboards/2 - gnetId: 2 - revision: 2 - datasource: Prometheus - loki-dashboard-quick-search: - gnetId: 12019 - revision: 2 - datasource: - - name: DS_PROMETHEUS - value: Prometheus - - name: DS_LOKI - value: Loki - local-dashboard: - url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json -``` - -## BASE64 dashboards - -Dashboards could be stored on a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) -A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. -If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. - -### Gerrit use case - -Gerrit API for download files has the following schema: where {project-name} and -{file-id} usually has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard -the url value is - -## Sidecar for dashboards - -If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana -pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with -a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written -to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported -dashboards are deleted/updated. - -A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside -one configmap is currently not properly mirrored in grafana. - -Example dashboard config: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: sample-grafana-dashboard - labels: - grafana_dashboard: "1" -data: - k8s-dashboard.json: |- - [...] -``` - -## Sidecar for datasources - -If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana -pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and -filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in -those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, -the data sources in grafana can be imported. - -Should you aim for reloading datasources in Grafana each time the config is changed, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://..svc.cluster.local/api/admin/provisioning/datasources/reload`. - -Secrets are recommended over configmaps for this usecase because datasources usually contain private -data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. 
- -Example values to add a postgres datasource as a kubernetes secret: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: grafana-datasources - labels: - grafana_datasource: 'true' # default value for: sidecar.datasources.label -stringData: - pg-db.yaml: |- - apiVersion: 1 - datasources: - - name: My pg db datasource - type: postgres - url: my-postgresql-db:5432 - user: db-readonly-user - secureJsonData: - password: 'SUperSEcretPa$$word' - jsonData: - database: my_datase - sslmode: 'disable' # disable/require/verify-ca/verify-full - maxOpenConns: 0 # Grafana v5.4+ - maxIdleConns: 2 # Grafana v5.4+ - connMaxLifetime: 14400 # Grafana v5.4+ - postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10 - timescaledb: false - # allow users to edit datasources from the UI. - editable: false -``` - -Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): - -```yaml -datasources: - datasources.yaml: - apiVersion: 1 - datasources: - # name of the datasource. Required - - name: Graphite - # datasource type. Required - type: graphite - # access mode. proxy or direct (Server or Browser in the UI). Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://localhost:8080 - # database password, if used - password: - # database user, if used - user: - # database name, if used - database: - # enable/disable basic auth - basicAuth: - # basic auth username - basicAuthUser: - # basic auth password - basicAuthPassword: - # enable/disable with credentials headers - withCredentials: - # mark as default datasource. Max one per org - isDefault: - # fields that will be converted to json and stored in json_data - jsonData: - graphiteVersion: "1.1" - tlsAuth: true - tlsAuthWithCACert: true - # json object of data that will be encrypted. - secureJsonData: - tlsCACert: "..." - tlsClientCert: "..." - tlsClientKey: "..." - version: 1 - # allow users to edit datasources from the UI. - editable: false -``` - -## Sidecar for notifiers - -If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana -pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and -filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in -those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, -the notification channels in grafana can be imported. The secrets must be created before -`helm install` so that the notifiers init container can list the secrets. - -Secrets are recommended over configmaps for this usecase because alert notification channels usually contain -private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. - -Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): - -```yaml -notifiers: - - name: notification-channel-1 - type: slack - uid: notifier1 - # either - org_id: 2 - # or - org_name: Main Org. - is_default: true - send_reminder: true - frequency: 1h - disable_resolve_message: false - # See `Supported Settings` section for settings supporter for each - # alert notification type. 
- settings: - recipient: 'XXX' - token: 'xoxb' - uploadImage: true - url: https://slack.com - -delete_notifiers: - - name: notification-channel-1 - uid: notifier1 - org_id: 2 - - name: notification-channel-2 - # default org_id: 1 -``` - -## Sidecar for alerting resources - -If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana -pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with -a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written -to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated, however, deletions are a little more complicated (see below). - -This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). - -To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)). -You can use either JSON or YAML format. - -Example config for an alert rule: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: sample-grafana-alert - labels: - grafana_alert: "1" -data: - k8s-alert.yml: |- - apiVersion: 1 - groups: - - orgId: 1 - name: k8s-alert - [...] -``` - -To delete provisioned alert rules is a two step process, you need to delete the configmap which defined the alert rule -and then create a configuration which deletes the alert rule. - -Example deletion configuration: -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: delete-sample-grafana-alert - namespace: monitoring - labels: - grafana_alert: "1" -data: - delete-k8s-alert.yml: |- - apiVersion: 1 - deleteRules: - - orgId: 1 - uid: 16624780-6564-45dc-825c-8bded4ad92d3 -``` - -## Statically provision alerting resources -If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly you could use the `alerting` config option instead of the sidecar option above. -This will grab the alerting config and apply it statically at build time for the helm file. - -There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: - -```yaml -alerting: - team1-alert-rules.yaml: - file: alerting/team1/rules.yaml - team2-alert-rules.yaml: - file: alerting/team2/rules.yaml - team3-alert-rules.yaml: - file: alerting/team3/rules.yaml - notification-policies.yaml: - file: alerting/shared/notification-policies.yaml - notification-templates.yaml: - file: alerting/shared/notification-templates.yaml - contactpoints.yaml: - apiVersion: 1 - contactPoints: - - orgId: 1 - name: Slack channel - receivers: - - uid: default-receiver - type: slack - settings: - # Webhook URL to be filled in - url: "" - # We need to escape double curly braces for the tpl function. - text: '{{ `{{ template "default.message" . }}` }}' - title: '{{ `{{ template "default.title" . }}` }}' -``` - -The two possibilities for static alerting resource provisioning are: - -* Inlining the file contents as shown for contact points in the above example. 
-* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example. - -### Important notes on file provisioning - -* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning. -* The chart supports importing YAML and JSON files. -* The filename must be unique, otherwise one volume mount will overwrite the other. -* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. -* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. -* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. - -## How to serve Grafana with a path prefix (/grafana) - -In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. - -```yaml -ingress: - enabled: true - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/use-regex: "true" - - path: /grafana/?(.*) - hosts: - - k8s.example.dev - -grafana.ini: - server: - root_url: http://localhost:3000/grafana # this host can be localhost -``` - -## How to securely reference secrets in grafana.ini - -This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. - -In grafana.ini: - -```yaml -grafana.ini: - [auth.generic_oauth] - enabled = true - client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} - client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} -``` - -Existing secret, or created along with helm: - -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: auth-generic-oauth-secret -type: Opaque -stringData: - client_id: - client_secret: -``` - -Include in the `extraSecretMounts` configuration flag: - -```yaml -- extraSecretMounts: - - name: auth-generic-oauth-secret-mount - secretName: auth-generic-oauth-secret - defaultMode: 0440 - mountPath: /etc/secrets/auth_generic_oauth - readOnly: true -``` - -### extraSecretMounts using a Container Storage Interface (CSI) provider - -This example uses a CSI driver e.g. 
retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) - -```yaml -- extraSecretMounts: - - name: secrets-store-inline - mountPath: /run/secrets - readOnly: true - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: "my-provider" - nodePublishSecretRef: - name: akv-creds -``` - -## Image Renderer Plug-In - -This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker) - -```yaml -imageRenderer: - enabled: true -``` - -### Image Renderer NetworkPolicy - -By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance - -### High Availability for unified alerting - -If you want to run Grafana in a high availability cluster you need to enable -the headless service by setting `headlessService: true` in your `values.yaml` -file. - -As next step you have to setup the `grafana.ini` in your `values.yaml` in a way -that it will make use of the headless service to obtain all the IPs of the -cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. - -```yaml -grafana.ini: - ... - unified_alerting: - enabled: true - ha_peers: {{ Name }}-headless:9094 - ha_listen_address: ${POD_IP}:9094 - ha_advertise_address: ${POD_IP}:9094 - - alerting: - enabled: false -``` diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml deleted file mode 100644 index 0d5d6df..0000000 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: grafana - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/grafana/ci/default-values.yaml b/operations/deployment/helm/grafana/ci/default-values.yaml deleted file mode 100644 index fc2ba60..0000000 --- a/operations/deployment/helm/grafana/ci/default-values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
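The commit's diffstat lists no bitops.config.yaml for the new prometheus chart. By analogy with the grafana bitops.config.yaml removed just above, a minimal sketch for prometheus might look like the following (release name, namespace, and timeout are assumptions carried over from the grafana file):

```yaml
# Hypothetical bitops.config.yaml for the prometheus chart, modeled on the
# grafana configuration deleted above; all values are assumed, not from the patch.
helm:
  cli:
    namespace: kube-system
    timeout: 200s
    debug: true
    atomic: false
    force: false
    dry-run: false
  options:
    release-name: prometheus
    skip-deploy: false
    k8s:
      fetch:
        kubeconfig: true
```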
diff --git a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml deleted file mode 100644 index f5b9b53..0000000 --- a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/instance: grafana-test - app.kubernetes.io/name: grafana - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/instance: grafana-test - app.kubernetes.io/name: grafana - topologyKey: kubernetes.io/hostname diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml deleted file mode 100644 index e0c4e41..0000000 --- a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml +++ /dev/null @@ -1,53 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - # An empty but valid dashboard - json: | - { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "6.3.5" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [], - "schemaVersion": 19, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": ["5s"] - }, - "timezone": "", - "title": "Dummy Dashboard", - "uid": "IdcYQooWk", - "version": 1 - } - datasource: Prometheus diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml deleted file mode 100644 index 7b662c5..0000000 --- a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - gnetId: 10000 - revision: 1 - datasource: Prometheus -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'my-provider' - orgId: 1 - folder: '' - type: file - updateIntervalSeconds: 10 - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards/my-provider diff --git a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml deleted file mode 100644 index 5cc44a0..0000000 --- a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -extraConfigmapMounts: - - name: '{{ include "grafana.fullname" . }}' - configMap: '{{ include "grafana.fullname" . 
}}' - mountPath: /var/lib/grafana/dashboards/test-dashboard.json - # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap - subPath: grafana.ini - readOnly: true diff --git a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml deleted file mode 100644 index 32f3074..0000000 --- a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -podLabels: - customLableA: Aaaaa -imageRenderer: - enabled: true - env: - RENDERING_ARGS: --disable-gpu,--window-size=1280x758 - RENDERING_MODE: clustered - podLabels: - customLableB: Bbbbb - networkPolicy: - limitIngress: true - limitEgress: true - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 500m - memory: 50Mi diff --git a/operations/deployment/helm/grafana/ci/with-persistence.yaml b/operations/deployment/helm/grafana/ci/with-persistence.yaml deleted file mode 100644 index b92ca02..0000000 --- a/operations/deployment/helm/grafana/ci/with-persistence.yaml +++ /dev/null @@ -1,3 +0,0 @@ -persistence: - type: pvc - enabled: true diff --git a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json deleted file mode 100644 index 9e26dfe..0000000 --- a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/NOTES.txt b/operations/deployment/helm/grafana/templates/NOTES.txt deleted file mode 100644 index d86419f..0000000 --- a/operations/deployment/helm/grafana/templates/NOTES.txt +++ /dev/null @@ -1,55 +0,0 @@ -1. Get your '{{ .Values.adminUser }}' user password by running: - - kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo - - -2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: - - {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local -{{ if .Values.ingress.enabled }} - If you bind grafana to 80, please update values in values.yaml and reinstall: - ``` - securityContext: - runAsUser: 0 - runAsGroup: 0 - fsGroup: 0 - - command: - - "setcap" - - "'cap_net_bind_service=+ep'" - - "/usr/sbin/grafana-server &&" - - "sh" - - "/run.sh" - ``` - Details refer to https://grafana.com/docs/installation/configuration/#http-port. - Or grafana would always crash. - - From outside the cluster, the server URL(s) are: - {{- range .Values.ingress.hosts }} - http://{{ . }} - {{- end }} -{{- else }} - Get the Grafana URL to visit by running these commands in the same shell: - {{- if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT - {{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
- You can watch the status of by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - http://$SERVICE_IP:{{ .Values.service.port -}} - {{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000 - {{- end }} -{{- end }} - -3. Login with the password from step 1 and the username: {{ .Values.adminUser }} - -{{- if not .Values.persistence.enabled }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the Grafana pod is terminated. ##### -################################################################################# -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/_helpers.tpl b/operations/deployment/helm/grafana/templates/_helpers.tpl deleted file mode 100644 index ead2449..0000000 --- a/operations/deployment/helm/grafana/templates/_helpers.tpl +++ /dev/null @@ -1,227 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "grafana.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "grafana.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "grafana.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create the name of the service account -*/}} -{{- define "grafana.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} - -{{- define "grafana.serviceAccountNameTest" -}} -{{- if .Values.serviceAccount.create }} -{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} -{{- else }} -{{- default "default" .Values.serviceAccount.nameTest }} -{{- end }} -{{- end }} - -{{/* -Allow the release namespace to be overridden for multi-namespace deployments in combined charts -*/}} -{{- define "grafana.namespace" -}} -{{- if .Values.namespaceOverride }} -{{- .Values.namespaceOverride }} -{{- else }} -{{- .Release.Namespace }} -{{- end }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "grafana.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . 
}} -{{ include "grafana.selectorLabels" . }} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- with .Values.extraLabels }} -{{ toYaml . }} -{{- end }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "grafana.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "grafana.imageRenderer.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . }} -{{ include "grafana.imageRenderer.selectorLabels" . }} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels ImageRenderer -*/}} -{{- define "grafana.imageRenderer.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Looks if there's an existing secret and reuse its password. If not it generates -new password and use it. -*/}} -{{- define "grafana.password" -}} -{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} -{{- if $secret }} -{{- index $secret "data" "admin-password" }} -{{- else }} -{{- (randAlphaNum 40) | b64enc | quote }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for rbac. -*/}} -{{- define "grafana.rbac.apiVersion" -}} -{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} -{{- print "rbac.authorization.k8s.io/v1" }} -{{- else }} -{{- print "rbac.authorization.k8s.io/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "grafana.ingress.apiVersion" -}} -{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} -{{- print "networking.k8s.io/v1" }} -{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} -{{- print "networking.k8s.io/v1beta1" }} -{{- else }} -{{- print "extensions/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for Horizontal Pod Autoscaler. -*/}} -{{- define "grafana.hpa.apiVersion" -}} -{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} -{{- print "autoscaling/v2" }} -{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} -{{- print "autoscaling/v2beta2" }} -{{- else }} -{{- print "autoscaling/v2beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for podDisruptionBudget. -*/}} -{{- define "grafana.podDisruptionBudget.apiVersion" -}} -{{- if $.Values.podDisruptionBudget.apiVersion }} -{{- print $.Values.podDisruptionBudget.apiVersion }} -{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} -{{- print "policy/v1" }} -{{- else }} -{{- print "policy/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return if ingress is stable. -*/}} -{{- define "grafana.ingress.isStable" -}} -{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} -{{- end }} - -{{/* -Return if ingress supports ingressClassName. 
-*/}} -{{- define "grafana.ingress.supportsIngressClassName" -}} -{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} -{{- end }} - -{{/* -Return if ingress supports pathType. -*/}} -{{- define "grafana.ingress.supportsPathType" -}} -{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} -{{- end }} - -{{/* -Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets}) -*/}} -{{- define "grafana.imagePullSecrets" -}} -{{- $root := .root }} -{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} -{{- if eq (typeOf .) "map[string]interface {}" }} -- {{ toYaml (dict "name" (tpl .name $root)) | trim }} -{{- else }} -- name: {{ tpl . $root }} -{{- end }} -{{- end }} -{{- end }} - - -{{/* - Checks whether or not the configSecret secret has to be created - */}} -{{- define "grafana.shouldCreateConfigSecret" -}} -{{- $secretFound := false -}} -{{- range $key, $value := .Values.datasources }} - {{- if hasKey $value "secret" }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- range $key, $value := .Values.notifiers }} - {{- if hasKey $value "secret" }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- range $key, $value := .Values.alerting }} - {{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- $secretFound}} -{{- end -}} diff --git a/operations/deployment/helm/grafana/templates/_pod.tpl b/operations/deployment/helm/grafana/templates/_pod.tpl deleted file mode 100644 index f4b9fd3..0000000 --- a/operations/deployment/helm/grafana/templates/_pod.tpl +++ /dev/null @@ -1,1276 +0,0 @@ -{{- define "grafana.pod" -}} -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- $root := . -}} -{{- with .Values.schedulerName }} -schedulerName: "{{ . }}" -{{- end }} -serviceAccountName: {{ include "grafana.serviceAccountName" . }} -automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} -{{- with .Values.securityContext }} -securityContext: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.hostAliases }} -hostAliases: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.priorityClassName }} -priorityClassName: {{ . 
}} -{{- end }} -{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts) (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} -initContainers: -{{- end }} -{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} - - name: init-chown-data - {{- $registry := .Values.global.imageRegistry | default .Values.initChownData.image.registry -}} - {{- if .Values.initChownData.image.sha }} - image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} - {{- with .Values.initChownData.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - command: - - chown - - -R - - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} - - /var/lib/grafana - {{- with .Values.initChownData.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . $root }} - {{- end }} -{{- end }} -{{- if .Values.dashboards }} - - name: download-dashboards - {{- $registry := .Values.global.imageRegistry | default .Values.downloadDashboardsImage.registry -}} - {{- if .Values.downloadDashboardsImage.sha }} - image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} - command: ["/bin/sh"] - args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] - {{- with .Values.downloadDashboards.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - env: - {{- range $key, $value := .Values.downloadDashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 10 }} - {{- end }} - {{- with .Values.downloadDashboards.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.downloadDashboards.envFromSecret }} - envFrom: - - secretRef: - name: {{ tpl . $root }} - {{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/download_dashboards.sh" - subPath: download_dashboards.sh - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . $root }} - {{- end }} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} -{{- end }} -{{- if and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts }} - - name: {{ include "grafana.name" . 
}}-init-sc-alerts - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.alerts.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: "LIST" - - name: LABEL - value: "{{ .Values.sidecar.alerts.label }}" - {{- with .Values.sidecar.alerts.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/alerting" - - name: RESOURCE - value: {{ quote .Values.sidecar.alerts.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.alerts.searchNamespace }} - - name: NAMESPACE - value: {{ . | join "," | quote }} - {{- end }} - {{- with .Values.sidecar.alerts.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.alerts.script }} - - name: SCRIPT - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- with .Values.sidecar.alerts.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end }} -{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} - - name: {{ include "grafana.name" . }}-init-sc-datasources - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.datasources.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: "LIST" - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - {{- with .Values.sidecar.datasources.labelValue }} - - name: LABEL_VALUE - value: {{ quote . 
}} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: {{ quote .Values.sidecar.datasources.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- if .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end }} -{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} - - name: {{ include "grafana.name" . }}-init-sc-notifiers - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.notifiers.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: LIST - - name: LABEL - value: "{{ .Values.sidecar.notifiers.label }}" - {{- with .Values.sidecar.notifiers.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/notifiers" - - name: RESOURCE - value: {{ quote .Values.sidecar.notifiers.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.notifiers.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} -{{- with .Values.extraInitContainers }} - {{- tpl (toYaml .) 
$root | nindent 2 }} -{{- end }} -{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} -imagePullSecrets: - {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} -{{- end }} -{{- if not .Values.enableKubeBackwardCompatibility }} -enableServiceLinks: {{ .Values.enableServiceLinks }} -{{- end }} -containers: -{{- if and .Values.sidecar.alerts.enabled (not .Values.sidecar.alerts.initAlerts) }} - - name: {{ include "grafana.name" . }}-sc-alerts - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.alerts.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.alerts.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.alerts.label }}" - {{- with .Values.sidecar.alerts.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/alerting" - - name: RESOURCE - value: {{ quote .Values.sidecar.alerts.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.alerts.searchNamespace }} - - name: NAMESPACE - value: {{ . | join "," | quote }} - {{- end }} - {{- with .Values.sidecar.alerts.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.alerts.script }} - - name: SCRIPT - value: {{ quote . }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.alerts.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.alerts.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.alerts.watchServerTimeout }} - {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.alerts.watchClientTimeout }} - {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- with .Values.sidecar.alerts.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end}} -{{- if .Values.sidecar.dashboards.enabled }} - - name: {{ include "grafana.name" . }}-sc-dashboard - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.dashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.dashboards.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.dashboards.label }}" - {{- with .Values.sidecar.dashboards.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} - {{- end }} - - name: FOLDER - value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" - - name: RESOURCE - value: {{ quote .Values.sidecar.dashboards.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.dashboards.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.dashboards.folderAnnotation }} - - name: FOLDER_ANNOTATION - value: "{{ . 
}}" - {{- end }} - {{- with .Values.sidecar.dashboards.script }} - - name: SCRIPT - value: "{{ . }}" - {{- end }} - {{- if not .Values.sidecar.dashboards.skipReload }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - - name: REQ_URL - value: {{ .Values.sidecar.dashboards.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.dashboards.watchServerTimeout }} - {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.dashboards.watchClientTimeout }} - {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - {{- with .Values.sidecar.dashboards.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end}} -{{- if and .Values.sidecar.datasources.enabled (not .Values.sidecar.datasources.initDatasources) }} - - name: {{ include "grafana.name" . 
}}-sc-datasources - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.datasources.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.datasources.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - {{- with .Values.sidecar.datasources.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: {{ quote .Values.sidecar.datasources.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - {{- if .Values.sidecar.datasources.script }} - - name: SCRIPT - value: "{{ .Values.sidecar.datasources.script }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.datasources.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.datasources.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.datasources.watchServerTimeout }} - {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.datasources.watchClientTimeout }} - {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} -{{- if .Values.sidecar.notifiers.enabled }} - - name: {{ include "grafana.name" . }}-sc-notifiers - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.notifiers.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.notifiers.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.notifiers.label }}" - {{- with .Values.sidecar.notifiers.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/notifiers" - - name: RESOURCE - value: {{ quote .Values.sidecar.notifiers.resource }} - {{- if .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ .Values.sidecar.enableUniqueFilenames }}" - {{- end }} - {{- with .Values.sidecar.notifiers.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . 
}}" - {{- end }} - {{- if .Values.sidecar.notifiers.script }} - - name: SCRIPT - value: "{{ .Values.sidecar.notifiers.script }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.notifiers.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.notifiers.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.notifiers.watchServerTimeout }} - {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.notifiers.watchClientTimeout }} - {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} -{{- if .Values.sidecar.plugins.enabled }} - - name: {{ include "grafana.name" . 
}}-sc-plugins - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.plugins.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.plugins.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.plugins.label }}" - {{- if .Values.sidecar.plugins.labelValue }} - - name: LABEL_VALUE - value: {{ quote .Values.sidecar.plugins.labelValue }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/plugins" - - name: RESOURCE - value: {{ quote .Values.sidecar.plugins.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.plugins.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.plugins.script }} - - name: SCRIPT - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.plugins.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.plugins.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.plugins.watchServerTimeout }} - {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.plugins.watchClientTimeout }} - {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . 
| nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-plugins-volume - mountPath: "/etc/grafana/provisioning/plugins" -{{- end}} - - name: {{ .Chart.Name }} - {{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}} - {{- if .Values.image.sha }} - image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - {{- end }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.command }} - command: - {{- range .Values.command }} - - {{ . | quote }} - {{- end }} - {{- end }} - {{- if .Values.args }} - args: - {{- range .Values.args }} - - {{ . | quote }} - {{- end }} - {{- end }} - {{- with .Values.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/grafana.ini" - subPath: grafana.ini - {{- if .Values.ldap.enabled }} - - name: ldap - mountPath: "/etc/grafana/ldap.toml" - subPath: ldap.toml - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ tpl .name $root }} - mountPath: {{ tpl .mountPath $root }} - subPath: {{ tpl (.subPath | default "") $root }} - readOnly: {{ .readOnly }} - {{- end }} - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . $root }} - {{- end }} - {{- with .Values.dashboards }} - {{- range $provider, $dashboards := . }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "json") (hasKey $value "file")) }} - - name: dashboards-{{ $provider }} - mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - subPath: "{{ $key }}.json" - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.dashboardsConfigMaps }} - {{- range (keys . | sortAlpha) }} - - name: dashboards-{{ . }} - mountPath: "/var/lib/grafana/dashboards/{{ . }}" - {{- end }} - {{- end }} - {{- with .Values.datasources }} - {{- $datasources := . }} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $datasources .) "secret")) }} {{/*check if current datasource should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.notifiers }} - {{- $notifiers := . }} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $notifiers .) "secret")) }} {{/*check if current notifier should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.alerting }} - {{- $alertingmap := .}} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $.Values.alerting .) "secret") (hasKey (index $.Values.alerting .) 
"secretFile")) }} {{/*check if current alerting entry should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.dashboardProviders }} - {{- range (keys . | sortAlpha) }} - - name: config - mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- with .Values.sidecar.alerts.enabled }} - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- end}} - {{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - {{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" - subPath: provider.yaml - {{- end}} - {{- end}} - {{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" - {{- end}} - {{- if .Values.sidecar.plugins.enabled }} - - name: sc-plugins-volume - mountPath: "/etc/grafana/provisioning/plugins" - {{- end}} - {{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" - {{- end}} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - subPath: {{ .subPath | default "" }} - {{- end }} - {{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - {{- end }} - ports: - - name: {{ .Values.podPortName }} - containerPort: {{ .Values.service.targetPort }} - protocol: TCP - - name: {{ .Values.gossipPortName }}-tcp - containerPort: 9094 - protocol: TCP - - name: {{ .Values.gossipPortName }}-udp - containerPort: 9094 - protocol: UDP - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if .Values.plugins }} - - name: GF_INSTALL_PLUGINS - valueFrom: - configMapKeyRef: - name: {{ include "grafana.fullname" . 
}} - key: plugins - {{- end }} - {{- if .Values.smtp.existingSecret }} - - name: GF_SMTP_USER - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.userKey | default "user" }} - - name: GF_SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.passwordKey | default "password" }} - {{- end }} - {{- if .Values.imageRenderer.enabled }} - - name: GF_RENDERING_SERVER_URL - value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render - - name: GF_RENDERING_CALLBACK_URL - value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} - {{- end }} - - name: GF_PATHS_DATA - value: {{ (get .Values "grafana.ini").paths.data }} - - name: GF_PATHS_LOGS - value: {{ (get .Values "grafana.ini").paths.logs }} - - name: GF_PATHS_PLUGINS - value: {{ (get .Values "grafana.ini").paths.plugins }} - - name: GF_PATHS_PROVISIONING - value: {{ (get .Values "grafana.ini").paths.provisioning }} - {{- range $key, $value := .Values.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 10 }} - {{- end }} - {{- range $key, $value := .Values.env }} - - name: "{{ tpl $key $ }}" - value: "{{ tpl (print $value) $ }}" - {{- end }} - {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} - envFrom: - {{- if .Values.envFromSecret }} - - secretRef: - name: {{ tpl .Values.envFromSecret . }} - {{- end }} - {{- if .Values.envRenderSecret }} - - secretRef: - name: {{ include "grafana.fullname" . }}-env - {{- end }} - {{- range .Values.envFromSecrets }} - - secretRef: - name: {{ tpl .name $ }} - optional: {{ .optional | default false }} - {{- end }} - {{- range .Values.envFromConfigMaps }} - - configMapRef: - name: {{ tpl .name $ }} - optional: {{ .optional | default false }} - {{- end }} - {{- end }} - {{- with .Values.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.lifecycleHooks }} - lifecycle: - {{- tpl (toYaml .) $root | nindent 6 }} - {{- end }} - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} -{{- with .Values.extraContainers }} - {{- tpl . $ | nindent 2 }} -{{- end }} -{{- with .Values.nodeSelector }} -nodeSelector: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.affinity }} -affinity: - {{- tpl (toYaml .) $root | nindent 2 }} -{{- end }} -{{- with .Values.topologySpreadConstraints }} -topologySpreadConstraints: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.tolerations }} -tolerations: - {{- toYaml . | nindent 2 }} -{{- end }} -volumes: - - name: config - configMap: - name: {{ include "grafana.fullname" . }} - {{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} - {{- if and .Values.createConfigmap $createConfigSecret }} - - name: config-secret - secret: - secretName: {{ include "grafana.fullname" . }}-config-secret - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ tpl .name $root }} - configMap: - name: {{ tpl .configMap $root }} - {{- with .items }} - items: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- end }} - {{- if .Values.dashboards }} - {{- range (keys .Values.dashboards | sortAlpha) }} - - name: dashboards-{{ . }} - configMap: - name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} - {{- end }} - {{- end }} - {{- if .Values.dashboardsConfigMaps }} - {{- range $provider, $name := .Values.dashboardsConfigMaps }} - - name: dashboards-{{ $provider }} - configMap: - name: {{ tpl $name $root }} - {{- end }} - {{- end }} - {{- if .Values.ldap.enabled }} - - name: ldap - secret: - {{- if .Values.ldap.existingSecret }} - secretName: {{ .Values.ldap.existingSecret }} - {{- else }} - secretName: {{ include "grafana.fullname" . }} - {{- end }} - items: - - key: ldap-toml - path: ldap.toml - {{- end }} - {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} - - name: storage - persistentVolumeClaim: - claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }} - {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} - {{/* nothing */}} - {{- else }} - - name: storage - {{- if .Values.persistence.inMemory.enabled }} - emptyDir: - medium: Memory - {{- with .Values.persistence.inMemory.sizeLimit }} - sizeLimit: {{ . }} - {{- end }} - {{- else }} - emptyDir: {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.alerts.enabled }} - - name: sc-alerts-volume - emptyDir: - {{- with .Values.sidecar.alerts.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - emptyDir: - {{- with .Values.sidecar.dashboards.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - configMap: - name: {{ include "grafana.fullname" . }}-config-dashboards - {{- end }} - {{- end }} - {{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - emptyDir: - {{- with .Values.sidecar.datasources.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.plugins.enabled }} - - name: sc-plugins-volume - emptyDir: - {{- with .Values.sidecar.plugins.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - emptyDir: - {{- with .Values.sidecar.notifiers.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- range .Values.extraSecretMounts }} - {{- if .secretName }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - defaultMode: {{ .defaultMode }} - {{- with .items }} - items: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- else if .projected }} - - name: {{ .name }} - projected: - {{- toYaml .projected | nindent 6 }} - {{- else if .csi }} - - name: {{ .name }} - csi: - {{- toYaml .csi | nindent 6 }} - {{- end }} - {{- end }} - {{- range .Values.extraVolumes }} - - name: {{ .name }} - {{- if .existingClaim }} - persistentVolumeClaim: - claimName: {{ .existingClaim }} - {{- else if .hostPath }} - hostPath: - {{ toYaml .hostPath | nindent 6 }} - {{- else if .csi }} - csi: - {{- toYaml .data | nindent 6 }} - {{- else if .configMap }} - configMap: - {{- toYaml .configMap | nindent 6 }} - {{- else }} - emptyDir: {} - {{- end }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - emptyDir: {} - {{- end }} - {{- with .Values.extraContainerVolumes }} - {{- tpl (toYaml .) 
$root | nindent 2 }} - {{- end }} -{{- end }} - diff --git a/operations/deployment/helm/grafana/templates/clusterrole.yaml b/operations/deployment/helm/grafana/templates/clusterrole.yaml deleted file mode 100644 index 3af4b62..0000000 --- a/operations/deployment/helm/grafana/templates/clusterrole.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ include "grafana.fullname" . }}-clusterrole -{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} -rules: - {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} - - apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] - {{- end}} - {{- with .Values.rbac.extraClusterRoleRules }} - {{- toYaml . | nindent 2 }} - {{- end}} -{{- else }} -rules: [] -{{- end}} -{{- end}} diff --git a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml deleted file mode 100644 index bda9431..0000000 --- a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "grafana.fullname" . }}-clusterrolebinding - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -subjects: - - kind: ServiceAccount - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -roleRef: - kind: ClusterRole - {{- if .Values.rbac.useExistingClusterRole }} - name: {{ .Values.rbac.useExistingClusterRole }} - {{- else }} - name: {{ include "grafana.fullname" . }}-clusterrole - {{- end }} - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configSecret.yaml b/operations/deployment/helm/grafana/templates/configSecret.yaml deleted file mode 100644 index f8937cc..0000000 --- a/operations/deployment/helm/grafana/templates/configSecret.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} -{{- if and .Values.createConfigmap $createConfigSecret }} -{{- $files := .Files }} -{{- $root := . -}} -apiVersion: v1 -kind: Secret -metadata: - name: "{{ include "grafana.fullname" . }}-config-secret" - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -data: -{{- range $key, $value := .Values.alerting }} - {{- if (hasKey $value "secretFile") }} - {{- $key | nindent 2 }}: - {{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}} - {{/* as of https://helm.sh/docs/chart_template_guide/accessing_files/ this will only work if you fork this chart and add files to it*/}} - {{- end }} -{{- end }} -stringData: -{{- range $key, $value := .Values.datasources }} -{{- if (hasKey $value "secret") }} -{{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} -{{- end }} -{{- end }} -{{- range $key, $value := .Values.notifiers }} -{{- if (hasKey $value "secret") }} -{{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} -{{- end }} -{{- end }} -{{- range $key, $value := .Values.alerting }} -{{ if (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value.secret | nindent 4) $root }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml deleted file mode 100644 index 1f706a8..0000000 --- a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ include "grafana.fullname" . }}-config-dashboards - namespace: {{ include "grafana.namespace" . }} -data: - provider.yaml: |- - apiVersion: 1 - providers: - - name: '{{ .Values.sidecar.dashboards.provider.name }}' - orgId: {{ .Values.sidecar.dashboards.provider.orgid }} - {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - folder: '{{ .Values.sidecar.dashboards.provider.folder }}' - {{- end }} - type: {{ .Values.sidecar.dashboards.provider.type }} - disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} - allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} - updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} - options: - foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configmap.yaml b/operations/deployment/helm/grafana/templates/configmap.yaml deleted file mode 100644 index 7b837d9..0000000 --- a/operations/deployment/helm/grafana/templates/configmap.yaml +++ /dev/null @@ -1,144 +0,0 @@ -{{- if .Values.createConfigmap }} -{{- $files := .Files }} -{{- $root := . -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -data: - {{- with .Values.plugins }} - plugins: {{ join "," . 
}} - {{- end }} - grafana.ini: | - {{- range $elem, $elemVal := index .Values "grafana.ini" }} - {{- if not (kindIs "map" $elemVal) }} - {{- if kindIs "invalid" $elemVal }} - {{ $elem }} = - {{- else if kindIs "string" $elemVal }} - {{ $elem }} = {{ tpl $elemVal $ }} - {{- else }} - {{ $elem }} = {{ $elemVal }} - {{- end }} - {{- end }} - {{- end }} - {{- range $key, $value := index .Values "grafana.ini" }} - {{- if kindIs "map" $value }} - [{{ $key }}] - {{- range $elem, $elemVal := $value }} - {{- if kindIs "invalid" $elemVal }} - {{ $elem }} = - {{- else if kindIs "string" $elemVal }} - {{ $elem }} = {{ tpl $elemVal $ }} - {{- else }} - {{ $elem }} = {{ $elemVal }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.datasources }} - {{- if not (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.notifiers }} - {{- if not (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- toYaml $value | nindent 4 }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.alerting }} - {{- if (hasKey $value "file") }} - {{- $key | nindent 2 }}: - {{- toYaml ( $files.Get $value.file ) | nindent 4}} - {{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}} - {{/* will be stored inside secret generated by "configSecret.yaml"*/}} - {{- else }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.dashboardProviders }} - {{- $key | nindent 2 }}: | - {{- toYaml $value | nindent 4 }} - {{- end }} - -{{- if .Values.dashboards }} - download_dashboards.sh: | - #!/usr/bin/env sh - set -euf - {{- if .Values.dashboardProviders }} - {{- range $key, $value := .Values.dashboardProviders }} - {{- range $value.providers }} - mkdir -p {{ .options.path }} - {{- end }} - {{- end }} - {{- end }} - {{ $dashboardProviders := .Values.dashboardProviders }} - {{- range $provider, $dashboards := .Values.dashboards }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} - curl -skf \ - --connect-timeout 60 \ - --max-time 60 \ - {{- if not $value.b64content }} - {{- if not $value.acceptHeader }} - -H "Accept: application/json" \ - {{- else }} - -H "Accept: {{ $value.acceptHeader }}" \ - {{- end }} - {{- if $value.token }} - -H "Authorization: token {{ $value.token }}" \ - {{- end }} - {{- if $value.bearerToken }} - -H "Authorization: Bearer {{ $value.bearerToken }}" \ - {{- end }} - {{- if $value.basic }} - -H "Authorization: Basic {{ $value.basic }}" \ - {{- end }} - {{- if $value.gitlabToken }} - -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ - {{- end }} - -H "Content-Type: application/json;charset=UTF-8" \ - {{- end }} - {{- $dpPath := "" -}} - {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} - {{- if eq $kd.name $provider }} - {{- $dpPath = $kd.options.path }} - {{- end }} - {{- end }} - {{- if $value.url }} - "{{ $value.url }}" \ - {{- else }} - "https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ - {{- end }} - {{- if $value.datasource }} - {{- if kindIs "string" $value.datasource }} - | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ - {{- end }} - {{- if kindIs "slice" $value.datasource }} - {{- range $value.datasource }} - | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ - {{- end }} - {{- end }} - {{- end }} - {{- if $value.b64content }} - | base64 -d \ - {{- end }} - > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" - {{ end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml deleted file mode 100644 index b96ce72..0000000 --- a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if .Values.dashboards }} -{{ $files := .Files }} -{{- range $provider, $dashboards := .Values.dashboards }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} - namespace: {{ include "grafana.namespace" $ }} - labels: - {{- include "grafana.labels" $ | nindent 4 }} - dashboard-provider: {{ $provider }} - {{- if $.Values.sidecar.dashboards.enabled }} - {{ $.Values.sidecar.dashboards.label }}: {{ $.Values.sidecar.dashboards.labelValue | quote }} - {{- end }} -{{- if $dashboards }} -data: -{{- $dashboardFound := false }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} -{{- $dashboardFound = true }} - {{- print $key | nindent 2 }}.json: - {{- if hasKey $value "json" }} - |- - {{- $value.json | nindent 6 }} - {{- end }} - {{- if hasKey $value "file" }} - {{- toYaml ( $files.Get $value.file ) | nindent 4}} - {{- end }} -{{- end }} -{{- end }} -{{- if not $dashboardFound }} - {} -{{- end }} -{{- end }} ---- -{{- end }} - -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/deployment.yaml b/operations/deployment/helm/grafana/templates/deployment.yaml deleted file mode 100644 index bfa26bb..0000000 --- a/operations/deployment/helm/grafana/templates/deployment.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} - replicas: {{ .Values.replicas }} - {{- end }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - {{- with .Values.deploymentStrategy }} - strategy: - {{- toYaml . | trim | nindent 4 }} - {{- end }} - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . 
| sha256sum }} - {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- end }} - {{- if .Values.envRenderSecret }} - checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} - {{- end }} - kubectl.kubernetes.io/default-container: {{ .Chart.Name }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- include "grafana.pod" . | nindent 6 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/extra-manifests.yaml b/operations/deployment/helm/grafana/templates/extra-manifests.yaml deleted file mode 100644 index a9bb3b6..0000000 --- a/operations/deployment/helm/grafana/templates/extra-manifests.yaml +++ /dev/null @@ -1,4 +0,0 @@ -{{ range .Values.extraObjects }} ---- -{{ tpl (toYaml .) $ }} -{{ end }} diff --git a/operations/deployment/helm/grafana/templates/headless-service.yaml b/operations/deployment/helm/grafana/templates/headless-service.yaml deleted file mode 100644 index 3028589..0000000 --- a/operations/deployment/helm/grafana/templates/headless-service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }}-headless - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - clusterIP: None - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} - type: ClusterIP - ports: - - name: {{ .Values.gossipPortName }}-tcp - port: 9094 -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/hpa.yaml b/operations/deployment/helm/grafana/templates/hpa.yaml deleted file mode 100644 index 46bbcb4..0000000 --- a/operations/deployment/helm/grafana/templates/hpa.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if .Values.autoscaling.enabled }} -apiVersion: {{ include "grafana.hpa.apiVersion" . }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - app.kubernetes.io/name: {{ include "grafana.name" . }} - helm.sh/chart: {{ include "grafana.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - {{- if has .Values.persistence.type $sts }} - kind: StatefulSet - {{- else }} - kind: Deployment - {{- end }} - name: {{ include "grafana.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetMemory }} - - type: Resource - resource: - name: memory - {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemory }} - {{- end }} - {{- end }} - {{- if .Values.autoscaling.targetCPU }} - - type: Resource - resource: - name: cpu - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPU }} - {{- end }} - {{- end }} - {{- if .Values.autoscaling.behavior }} - behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml deleted file mode 100644 index ea97969..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml +++ /dev/null @@ -1,131 +0,0 @@ -{{ if .Values.imageRenderer.enabled }} -{{- $root := . -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.imageRenderer.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} - replicas: {{ .Values.imageRenderer.replicas }} - {{- end }} - revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - - {{- with .Values.imageRenderer.deploymentStrategy }} - strategy: - {{- toYaml . | trim | nindent 4 }} - {{- end }} - template: - metadata: - labels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - {{- with .Values.imageRenderer.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.imageRenderer.schedulerName }} - schedulerName: "{{ . }}" - {{- end }} - {{- with .Values.imageRenderer.serviceAccountName }} - serviceAccountName: "{{ . }}" - {{- end }} - {{- with .Values.imageRenderer.securityContext }} - securityContext: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.hostAliases }} - hostAliases: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.priorityClassName }} - priorityClassName: {{ . }} - {{- end }} - {{- with .Values.imageRenderer.image.pullSecrets }} - imagePullSecrets: - {{- range . }} - - name: {{ tpl . 
$root }} - {{- end}} - {{- end }} - containers: - - name: {{ .Chart.Name }}-image-renderer - {{- $registry := .Values.global.imageRegistry | default .Values.imageRenderer.image.registry -}} - {{- if .Values.imageRenderer.image.sha }} - image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} - {{- if .Values.imageRenderer.command }} - command: - {{- range .Values.imageRenderer.command }} - - {{ . }} - {{- end }} - {{- end}} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - containerPort: {{ .Values.imageRenderer.service.targetPort }} - protocol: TCP - livenessProbe: - httpGet: - path: / - port: {{ .Values.imageRenderer.service.portName }} - env: - - name: HTTP_PORT - value: {{ .Values.imageRenderer.service.targetPort | quote }} - {{- if .Values.imageRenderer.serviceMonitor.enabled }} - - name: ENABLE_METRICS - value: "true" - {{- end }} - {{- range $key, $value := .Values.imageRenderer.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 16 }} - {{- end }} - {{- range $key, $value := .Values.imageRenderer.env }} - - name: {{ $key | quote }} - value: {{ $value | quote }} - {{- end }} - {{- with .Values.imageRenderer.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - mountPath: /tmp - name: image-renderer-tmpfs - {{- with .Values.imageRenderer.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.imageRenderer.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.affinity }} - affinity: - {{- tpl (toYaml .) $root | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - volumes: - - name: image-renderer-tmpfs - emptyDir: {} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml deleted file mode 100644 index b0f0059..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} -apiVersion: {{ include "grafana.hpa.apiVersion" . }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer - helm.sh/chart: {{ include "grafana.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "grafana.fullname" . }}-image-renderer - minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} - maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} - metrics: - {{- if .Values.imageRenderer.autoscaling.targetMemory }} - - type: Resource - resource: - name: memory - {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} - {{- end }} - {{- end }} - {{- if .Values.imageRenderer.autoscaling.targetCPU }} - - type: Resource - resource: - name: cpu - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} - {{- end }} - {{- end }} - {{- if .Values.imageRenderer.autoscaling.behavior }} - behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml deleted file mode 100644 index d1a0eb3..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml +++ /dev/null @@ -1,79 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer-ingress - namespace: {{ include "grafana.namespace" . }} - annotations: - comment: Limit image-renderer ingress traffic from grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 6 }} - {{- end }} - - policyTypes: - - Ingress - ingress: - - ports: - - port: {{ .Values.imageRenderer.service.targetPort }} - protocol: TCP - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 14 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 14 }} - {{- end }} - {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} - {{ toYaml . | nindent 8 }} - {{- end }} -{{- end }} - -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer-egress - namespace: {{ include "grafana.namespace" . }} - annotations: - comment: Limit image-renderer egress traffic to grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 6 }} - {{- end }} - - policyTypes: - - Egress - egress: - # allow dns resolution - - ports: - - port: 53 - protocol: UDP - - port: 53 - protocol: TCP - # talk only to grafana - - ports: - - port: {{ .Values.service.targetPort }} - protocol: TCP - to: - - namespaceSelector: - matchLabels: - name: {{ include "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 14 }} - {{- with .Values.podLabels }} - {{- toYaml . 
| nindent 14 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml deleted file mode 100644 index f8da127..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.service.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.imageRenderer.service.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - type: ClusterIP - {{- with .Values.imageRenderer.service.clusterIP }} - clusterIP: {{ . }} - {{- end }} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - port: {{ .Values.imageRenderer.service.port }} - protocol: TCP - targetPort: {{ .Values.imageRenderer.service.targetPort }} - {{- with .Values.imageRenderer.appProtocol }} - appProtocol: {{ . }} - {{- end }} - selector: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml deleted file mode 100644 index 5d9f09d..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.imageRenderer.serviceMonitor.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - {{- if .Values.imageRenderer.serviceMonitor.namespace }} - namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} - {{- else }} - namespace: {{ include "grafana.namespace" . }} - {{- end }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.serviceMonitor.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - endpoints: - - port: {{ .Values.imageRenderer.service.portName }} - {{- with .Values.imageRenderer.serviceMonitor.interval }} - interval: {{ . }} - {{- end }} - {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ . }} - {{- end }} - honorLabels: true - path: {{ .Values.imageRenderer.serviceMonitor.path }} - scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} - {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} - tlsConfig: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.imageRenderer.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - jobLabel: "{{ .Release.Name }}-image-renderer" - selector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - namespaceSelector: - matchNames: - - {{ include "grafana.namespace" . }} - {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} - targetLabels: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/ingress.yaml b/operations/deployment/helm/grafana/templates/ingress.yaml deleted file mode 100644 index 063cdfa..0000000 --- a/operations/deployment/helm/grafana/templates/ingress.yaml +++ /dev/null @@ -1,78 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} -{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} -{{- $fullName := include "grafana.fullname" . -}} -{{- $servicePort := .Values.service.port -}} -{{- $ingressPath := .Values.ingress.path -}} -{{- $ingressPathType := .Values.ingress.pathType -}} -{{- $extraPaths := .Values.ingress.extraPaths -}} -apiVersion: {{ include "grafana.ingress.apiVersion" . }} -kind: Ingress -metadata: - name: {{ $fullName }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.ingress.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.ingress.annotations }} - annotations: - {{- range $key, $value := . }} - {{ $key }}: {{ tpl $value $ | quote }} - {{- end }} - {{- end }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} - ingressClassName: {{ .Values.ingress.ingressClassName }} - {{- end -}} - {{- with .Values.ingress.tls }} - tls: - {{- tpl (toYaml .) $ | nindent 4 }} - {{- end }} - rules: - {{- if .Values.ingress.hosts }} - {{- range .Values.ingress.hosts }} - - host: {{ tpl . $ }} - http: - paths: - {{- with $extraPaths }} - {{- toYaml . | nindent 10 }} - {{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end }} - {{- else }} - - http: - paths: - - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- with $ingressPath }} - path: {{ . }} - {{- end }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - {{- end -}} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/networkpolicy.yaml b/operations/deployment/helm/grafana/templates/networkpolicy.yaml deleted file mode 100644 index 4cd3ed6..0000000 --- a/operations/deployment/helm/grafana/templates/networkpolicy.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - policyTypes: - {{- if .Values.networkPolicy.ingress }} - - Ingress - {{- end }} - {{- if .Values.networkPolicy.egress.enabled }} - - Egress - {{- end }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . 
| nindent 6 }} - - {{- if .Values.networkPolicy.egress.enabled }} - egress: - {{- if not .Values.networkPolicy.egress.blockDNSResolution }} - - ports: - - port: 53 - protocol: UDP - {{- end }} - - ports: - {{ .Values.networkPolicy.egress.ports | toJson }} - {{- with .Values.networkPolicy.egress.to }} - to: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.networkPolicy.ingress }} - ingress: - - ports: - - port: {{ .Values.service.targetPort }} - {{- if not .Values.networkPolicy.allowExternal }} - from: - - podSelector: - matchLabels: - {{ include "grafana.fullname" . }}-client: "true" - {{- with .Values.networkPolicy.explicitNamespacesSelector }} - - namespaceSelector: - {{- toYaml . | nindent 12 }} - {{- end }} - - podSelector: - matchLabels: - {{- include "grafana.labels" . | nindent 14 }} - role: read - {{- end }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml deleted file mode 100644 index 0525121..0000000 --- a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.podDisruptionBudget }} -apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} -kind: PodDisruptionBudget -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- with .Values.podDisruptionBudget.minAvailable }} - minAvailable: {{ . }} - {{- end }} - {{- with .Values.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ . }} - {{- end }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml deleted file mode 100644 index eed7af9..0000000 --- a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "grafana.fullname" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - {{- if .Values.rbac.pspUseAppArmor }} - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - {{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - # Default set from Docker, with DAC_OVERRIDE and CHOWN - - ALL - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'csi' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. 
- - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/pvc.yaml b/operations/deployment/helm/grafana/templates/pvc.yaml deleted file mode 100644 index eb8f87f..0000000 --- a/operations/deployment/helm/grafana/templates/pvc.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.persistence.extraPvcLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.persistence.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.persistence.finalizers }} - finalizers: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - accessModes: - {{- range .Values.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.persistence.size | quote }} - {{- with .Values.persistence.storageClassName }} - storageClassName: {{ . }} - {{- end }} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: - {{- toYaml . | nindent 6 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/role.yaml b/operations/deployment/helm/grafana/templates/role.yaml deleted file mode 100644 index 4b5edd9..0000000 --- a/operations/deployment/helm/grafana/templates/role.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} -rules: - {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} - - apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "grafana.fullname" . }}] - {{- end }} - {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} - - apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] - {{- end }} - {{- with .Values.rbac.extraRoleRules }} - {{- toYaml . | nindent 2 }} - {{- end}} -{{- else }} -rules: [] -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/rolebinding.yaml b/operations/deployment/helm/grafana/templates/rolebinding.yaml deleted file mode 100644 index 58f77c6..0000000 --- a/operations/deployment/helm/grafana/templates/rolebinding.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - {{- if .Values.rbac.useExistingRole }} - name: {{ .Values.rbac.useExistingRole }} - {{- else }} - name: {{ include "grafana.fullname" . }} - {{- end }} -subjects: -- kind: ServiceAccount - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret-env.yaml b/operations/deployment/helm/grafana/templates/secret-env.yaml deleted file mode 100644 index eb14aac..0000000 --- a/operations/deployment/helm/grafana/templates/secret-env.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.envRenderSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "grafana.fullname" . }}-env - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -type: Opaque -data: -{{- range $key, $val := .Values.envRenderSecret }} - {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret.yaml b/operations/deployment/helm/grafana/templates/secret.yaml deleted file mode 100644 index 5cbd527..0000000 --- a/operations/deployment/helm/grafana/templates/secret.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -type: Opaque -data: - {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} - admin-user: {{ .Values.adminUser | b64enc | quote }} - {{- if .Values.adminPassword }} - admin-password: {{ .Values.adminPassword | b64enc | quote }} - {{- else }} - admin-password: {{ include "grafana.password" . }} - {{- end }} - {{- end }} - {{- if not .Values.ldap.existingSecret }} - ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/service.yaml b/operations/deployment/helm/grafana/templates/service.yaml deleted file mode 100644 index 9102c1e..0000000 --- a/operations/deployment/helm/grafana/templates/service.yaml +++ /dev/null @@ -1,58 +0,0 @@ -{{- if .Values.service.enabled }} -{{- $root := . }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.service.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.service.annotations }} - annotations: - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} -spec: - {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} - type: ClusterIP - {{- with .Values.service.clusterIP }} - clusterIP: {{ . }} - {{- end }} - {{- else if eq .Values.service.type "LoadBalancer" }} - type: {{ .Values.service.type }} - {{- with .Values.service.loadBalancerIP }} - loadBalancerIP: {{ . 
}} - {{- end }} - {{- with .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- else }} - type: {{ .Values.service.type }} - {{- end }} - {{- with .Values.service.externalIPs }} - externalIPs: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ . }} - {{- end }} - ports: - - name: {{ .Values.service.portName }} - port: {{ .Values.service.port }} - protocol: TCP - targetPort: {{ .Values.service.targetPort }} - {{- with .Values.service.appProtocol }} - appProtocol: {{ . }} - {{- end }} - {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} - {{- with .Values.extraExposePorts }} - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/serviceaccount.yaml b/operations/deployment/helm/grafana/templates/serviceaccount.yaml deleted file mode 100644 index 784e71b..0000000 --- a/operations/deployment/helm/grafana/templates/serviceaccount.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.serviceAccount.create }} -{{- $root := . -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/servicemonitor.yaml b/operations/deployment/helm/grafana/templates/servicemonitor.yaml deleted file mode 100644 index 7239682..0000000 --- a/operations/deployment/helm/grafana/templates/servicemonitor.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- if .Values.serviceMonitor.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "grafana.fullname" . }} - {{- if .Values.serviceMonitor.namespace }} - namespace: {{ tpl .Values.serviceMonitor.namespace . }} - {{- else }} - namespace: {{ include "grafana.namespace" . }} - {{- end }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.serviceMonitor.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - endpoints: - - port: {{ .Values.service.portName }} - {{- with .Values.serviceMonitor.interval }} - interval: {{ . }} - {{- end }} - {{- with .Values.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ . }} - {{- end }} - honorLabels: true - path: {{ .Values.serviceMonitor.path }} - scheme: {{ .Values.serviceMonitor.scheme }} - {{- with .Values.serviceMonitor.tlsConfig }} - tlsConfig: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.serviceMonitor.metricRelabelings }} - metricRelabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - jobLabel: "{{ .Release.Name }}" - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - namespaceSelector: - matchNames: - - {{ include "grafana.namespace" . }} - {{- with .Values.serviceMonitor.targetLabels }} - targetLabels: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/statefulset.yaml b/operations/deployment/helm/grafana/templates/statefulset.yaml deleted file mode 100644 index e6c944a..0000000 --- a/operations/deployment/helm/grafana/templates/statefulset.yaml +++ /dev/null @@ -1,56 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - serviceName: {{ include "grafana.fullname" . }}-headless - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} - {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- end }} - kubectl.kubernetes.io/default-container: {{ .Chart.Name }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- include "grafana.pod" . | nindent 6 }} - {{- if .Values.persistence.enabled}} - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: {{ .Values.persistence.accessModes }} - storageClassName: {{ .Values.persistence.storageClassName }} - resources: - requests: - storage: {{ .Values.persistence.size }} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: - {{- toYaml . | nindent 10 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml deleted file mode 100644 index 01c96c9..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.testFramework.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -data: - run.sh: |- - @test "Test Health" { - url="http://{{ include "grafana.fullname" . 
}}/api/health" - - code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') - [ "$code" == "200" ] - } -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml deleted file mode 100644 index 1821772..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "grafana.fullname" . }}-test - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -spec: - allowPrivilegeEscalation: true - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - fsGroup: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - runAsUser: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - projected - - csi - - secret -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-role.yaml b/operations/deployment/helm/grafana/templates/tests/test-role.yaml deleted file mode 100644 index cb4c782..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -rules: - - apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "grafana.fullname" . }}-test] -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml deleted file mode 100644 index f40d791..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ include "grafana.fullname" . }}-test -subjects: - - kind: ServiceAccount - name: {{ include "grafana.serviceAccountNameTest" . }} - namespace: {{ include "grafana.namespace" . 
}} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml deleted file mode 100644 index 38fba35..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - name: {{ include "grafana.serviceAccountNameTest" . }} - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test.yaml b/operations/deployment/helm/grafana/templates/tests/test.yaml deleted file mode 100644 index 15067ae..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if .Values.testFramework.enabled }} -{{- $root := . }} -apiVersion: v1 -kind: Pod -metadata: - name: {{ include "grafana.fullname" . }}-test - labels: - {{- include "grafana.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - namespace: {{ include "grafana.namespace" . }} -spec: - serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }} - {{- with .Values.testFramework.securityContext }} - securityContext: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- tpl (toYaml .) $root | nindent 4 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 4 }} - {{- end }} - containers: - - name: {{ .Release.Name }}-test - image: "{{ .Values.global.imageRegistry | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}" - imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" - command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] - volumeMounts: - - mountPath: /tests - name: tests - readOnly: true - volumes: - - name: tests - configMap: - name: {{ include "grafana.fullname" . }}-test - restartPolicy: Never -{{- end }} diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml deleted file mode 100644 index 07502cc..0000000 --- a/operations/deployment/helm/grafana/values.yaml +++ /dev/null @@ -1,1282 +0,0 @@ -global: - # -- Overrides the Docker registry globally for all images - imageRegistry: null - - # To help compatibility with other charts which use global.imagePullSecrets. - # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). - # Can be tempalted. 
- # global: - # imagePullSecrets: - # - name: pullSecret1 - # - name: pullSecret2 - # or - # global: - # imagePullSecrets: - # - pullSecret1 - # - pullSecret2 - imagePullSecrets: [] - -rbac: - create: true - ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) - # useExistingRole: name-of-some-role - # useExistingClusterRole: name-of-some-clusterRole - pspEnabled: false - pspUseAppArmor: false - namespaced: false - extraRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] - extraClusterRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] -serviceAccount: - create: true - name: - nameTest: - ## ServiceAccount labels. - labels: {} -## Service account annotations. Can be templated. -# annotations: -# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here - autoMount: true - -replicas: 1 - -## Create a headless service for the deployment -headlessService: false - -## Create HorizontalPodAutoscaler object for deployment type -# -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - -## See `kubectl explain poddisruptionbudget.spec` for more -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} -# apiVersion: "" -# minAvailable: 1 -# maxUnavailable: 1 - -## See `kubectl explain deployment.spec.strategy` for more -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -deploymentStrategy: - type: RollingUpdate - -readinessProbe: - httpGet: - path: /api/health - port: 3000 - -livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 60 - timeoutSeconds: 30 - failureThreshold: 10 - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: "default-scheduler" - -image: - # -- The Docker registry - registry: docker.io - # -- Docker image repository - repository: grafana/grafana - # Overrides the Grafana image tag whose default is the chart appVersion - tag: "" - sha: "" - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Can be templated. - ## - pullSecrets: [] - # - myRegistrKeySecretName - -testFramework: - enabled: true - image: - # -- The Docker registry - registry: docker.io - repository: bats/bats - tag: "v1.4.1" - imagePullPolicy: IfNotPresent - securityContext: {} - -securityContext: - runAsNonRoot: true - runAsUser: 472 - runAsGroup: 472 - fsGroup: 472 - -containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - -# Enable creating the grafana configmap -createConfigmap: true - -# Extra configmaps to mount in grafana pods -# Values are templated. -extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # subPath: certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - -extraEmptyDirMounts: [] - # - name: provisioning-notifiers - # mountPath: /etc/grafana/provisioning/notifiers - - -# Apply extra labels to common labels. 
-extraLabels: {} - -## Assign a PriorityClassName to pods if set -# priorityClassName: - -downloadDashboardsImage: - # -- The Docker registry - registry: docker.io - repository: curlimages/curl - tag: 7.85.0 - sha: "" - pullPolicy: IfNotPresent - -downloadDashboards: - env: {} - envFromSecret: "" - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## Pod Annotations -# podAnnotations: {} - -## Pod Labels -# podLabels: {} - -podPortName: grafana -gossipPortName: gossip -## Deployment annotations -# annotations: {} - -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - enabled: true - type: ClusterIP - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - ## Service annotations. Can be templated. - annotations: {} - labels: {} - portName: service - # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" - appProtocol: "" - -serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - labels: {} - interval: 30s - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - metricRelabelings: [] - targetLabels: [] - -extraExposePorts: [] - # - name: keycloak - # port: 8080 - # targetPort: 8080 - -# overrides pod.spec.hostAliases in the grafana deployment's pods -hostAliases: [] - # - ip: "1.2.3.4" - # hostnames: - # - "my.host.com" - -ingress: - enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - # Values can be templated - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - - # pathType is only for k8s >= 1.1= - pathType: Prefix - - hosts: - - chart-example.local - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## Or for k8s > 1.19 - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: use-annotation - - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Topology Spread Constraints -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -## -topologySpreadConstraints: [] - -## Additional init containers (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ -## -extraInitContainers: [] - -## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod -extraContainers: "" -# extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - -provider=github -# - -client-id= -# - -client-secret= -# - -github-org= -# - -email-domain=* -# - -cookie-secret= -# - -http-address=http://0.0.0.0:4181 -# - -upstream-url=http://127.0.0.1:3000 -# ports: -# - name: proxy-web -# containerPort: 4181 - -## Volumes that can be used in init containers that will not be mounted to deployment pods -extraContainerVolumes: [] -# - name: volume-from-secret -# secret: -# secretName: secret-to-mount -# - name: empty-dir-volume -# emptyDir: {} - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - type: pvc - enabled: false - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - # annotations: {} - finalizers: - - kubernetes.io/pvc-protection - # selectorLabels: {} - ## Sub-directory of the PV to mount. Can be templated. - # subPath: "" - ## Name of an existing PVC. Can be templated. - # existingClaim: - ## Extra labels to apply to a PVC. 
- extraPvcLabels: {} - - ## If persistence is not enabled, this allows to mount the - ## local storage in-memory to improve performance - ## - inMemory: - enabled: false - ## The maximum usage on memory medium EmptyDir would be - ## the minimum value between the SizeLimit specified - ## here and the sum of memory limits of all containers in a pod - ## - # sizeLimit: 300Mi - -initChownData: - ## If false, data ownership will not be reset at startup - ## This allows the grafana-server to be run with an arbitrary user - ## - enabled: true - - ## initChownData container image - ## - image: - # -- The Docker registry - registry: docker.io - repository: library/busybox - tag: "1.31.1" - sha: "" - pullPolicy: IfNotPresent - - ## initChownData resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - securityContext: - runAsNonRoot: false - runAsUser: 0 - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - CHOWN - -# Administrator credentials when not using an existing secret (see below) -adminUser: admin -# adminPassword: strongpassword - -# Use an existing secret for the admin user. -admin: - ## Name of the secret. Can be templated. - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - -## Define command to be executed at startup by grafana container -## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) -## Default is "run.sh" as defined in grafana's Dockerfile -# command: -# - "sh" -# - "/run.sh" - -## Optionally define args if command is used -## Needed if using `hashicorp/envconsul` to manage secrets -## By default no arguments are set -# args: -# - "-secret" -# - "secret/grafana" -# - "./grafana" - -## Extra environment variables that will be pass onto deployment pods -## -## to provide grafana with access to CloudWatch on AWS EKS: -## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) -## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the -## same oidc eks provider as noted before (same as the existing line) -## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name -## -## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", -## -## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess -## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) -## -## env: -## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here -## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token -## AWS_REGION: us-east-1 -## -## 5. uncomment the EKS section in extraSecretMounts: below -## 6. uncomment the annotation section in the serviceAccount: above -## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn - -env: {} - -## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core -## Renders in container spec as: -## env: -## ... 
-## - name: -## valueFrom: -## -envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc. Value is templated. -envFromSecret: "" - -## Sensible environment variables that will be rendered as new secret object -## This can be useful for auth tokens, etc. -## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm -## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function -envRenderSecret: {} - -## The names of secrets in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. -## Name is templated. -envFromSecrets: [] -## - name: secret-name -## optional: true - -## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. -## Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core -envFromConfigMaps: [] -## - name: configmap-name -## optional: true - -# Inject Kubernetes services as environment variables. -# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables -enableServiceLinks: true - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - # subPath: "" - # - # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) - # - name: aws-iam-token - # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount - # readOnly: true - # projected: - # defaultMode: 420 - # sources: - # - serviceAccountToken: - # audience: sts.amazonaws.com - # expirationSeconds: 86400 - # path: token - # - # for CSI e.g. Azure Key Vault use the following - # - name: secrets-store-inline - # mountPath: /run/secrets - # readOnly: true - # csi: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "akv-grafana-spc" - # nodePublishSecretRef: # Only required when using service principal mode - # name: grafana-akv-creds # Only required when using service principal mode - -## Additional grafana server volume mounts -# Defines additional volume mounts. -extraVolumeMounts: [] - # - name: extra-volume-0 - # mountPath: /mnt/volume0 - # readOnly: true - # existingClaim: volume-claim - # - name: extra-volume-1 - # mountPath: /mnt/volume1 - # readOnly: true - # hostPath: /usr/shared/ - # - name: grafana-secrets - # mountPath: /mnt/volume2 - # csi: true - # data: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "grafana-env-spc" - -## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request -lifecycleHooks: {} - # postStart: - # exec: - # command: [] - -## Pass the plugins you want installed as a list. 
-## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - ## You can also use other plugin download URL, as long as they are valid zip files, - ## and specify the name of the plugin after the semicolon. Like this: - # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource - -## Configure grafana datasources -## ref: http://docs.grafana.org/administration/provisioning/#datasources -## -datasources: {} -# datasources.yaml: -# apiVersion: 1 -# datasources: -# - name: Prometheus -# type: prometheus -# url: http://prometheus-prometheus-server -# access: proxy -# isDefault: true -# - name: CloudWatch -# type: cloudwatch -# access: proxy -# uid: cloudwatch -# editable: false -# jsonData: -# authType: default -# defaultRegion: us-east-1 -# deleteDatasources: [] -# - name: Prometheus - -## Configure grafana alerting (can be templated) -## ref: http://docs.grafana.org/administration/provisioning/#alerting -## -alerting: {} - # rules.yaml: - # apiVersion: 1 - # groups: - # - orgId: 1 - # name: '{{ .Chart.Name }}_my_rule_group' - # folder: my_first_folder - # interval: 60s - # rules: - # - uid: my_id_1 - # title: my_first_rule - # condition: A - # data: - # - refId: A - # datasourceUid: '-100' - # model: - # conditions: - # - evaluator: - # params: - # - 3 - # type: gt - # operator: - # type: and - # query: - # params: - # - A - # reducer: - # type: last - # type: query - # datasource: - # type: __expr__ - # uid: '-100' - # expression: 1==0 - # intervalMs: 1000 - # maxDataPoints: 43200 - # refId: A - # type: math - # dashboardUid: my_dashboard - # panelId: 123 - # noDataState: Alerting - # for: 60s - # annotations: - # some_key: some_value - # labels: - # team: sre_team_1 - # contactpoints.yaml: - # secret: - # apiVersion: 1 - # contactPoints: - # - orgId: 1 - # name: cp_1 - # receivers: - # - uid: first_uid - # type: pagerduty - # settings: - # integrationKey: XXX - # severity: critical - # class: ping failure - # component: Grafana - # group: app-stack - # summary: | - # {{ `{{ include "default.message" . }}` }} - -## Configure notifiers -## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels -## -notifiers: {} -# notifiers.yaml: -# notifiers: -# - name: email-notifier -# type: email -# uid: email1 -# # either: -# org_id: 1 -# # or -# org_name: Main Org. -# is_default: true -# settings: -# addresses: an_email_address@example.com -# delete_notifiers: - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: {} -# dashboardproviders.yaml: -# apiVersion: 1 -# providers: -# - name: 'default' -# orgId: 1 -# folder: '' -# type: file -# disableDeletion: false -# editable: true -# options: -# path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. 
-## -dashboards: {} - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # token: '' - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # token: '' - # b64content: true - # local-dashboard-gitlab: - # url: https://example.com/repository/test-gitlab.json - # gitlabToken: '' - # local-dashboard-bitbucket: - # url: https://example.com/repository/test-bitbucket.json - # bearerToken: '' - # local-dashboard-azure: - # url: https://example.com/repository/test-azure.json - # basic: '' - # acceptHeader: '*/*' - -## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. -## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. -## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -## -dashboardsConfigMaps: {} -# default: "" - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/ - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net - server: - domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" -## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://api.github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. 
- existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. - existingSecret: "" - userKey: "user" - passwordKey: "password" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: - # -- The Docker registry - registry: quay.io - repository: kiwigrid/k8s-sidecar - tag: 1.25.2 - sha: "" - imagePullPolicy: IfNotPresent - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - # skipTlsVerify Set to true to skip tls verification for kube api calls - # skipTlsVerify: true - enableUniqueFilenames: false - readinessProbe: {} - livenessProbe: {} - # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO - # logLevel: INFO - alerts: - enabled: false - # Additional environment variables for the alerts sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with alert are marked with - label: grafana_alert - # value of label that the configmaps with alert are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for alert config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" - # Absolute path to shell script to execute after a alert got reloaded - script: null - skipReload: false - # This is needed if skipReload is true, to load any alerts defined at startup time. - # Deploy the alert sidecar as an initContainer. 
- initAlerts: false - # Additional alert sidecar volume mounts - extraMounts: [] - # Sets the size limit of the alert sidecar emptyDir volume - sizeLimit: {} - dashboards: - enabled: false - # Additional environment variables for the dashboards sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - SCProvider: true - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # value of label that the configmaps with dashboards are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) - folder: /tmp/dashboards - # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead - defaultFolderName: null - # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces. - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # If specified, the sidecar will look for annotation with this name to create folder and put graph here. - # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. - folderAnnotation: null - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" - # Absolute path to shell script to execute after a configmap got reloaded - script: null - skipReload: false - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # provider configuration that lets grafana manage the dashboards - provider: - # name of the provider, should be unique - name: sidecarProvider - # orgid as configured in grafana - orgid: 1 - # folder in which the dashboards should be imported in grafana - folder: '' - # type of the provider - type: file - # disableDelete to activate a import-only behaviour - disableDelete: false - # allow updating provisioned dashboards from the UI - allowUiUpdates: false - # allow Grafana to replicate dashboard structure from filesystem - foldersFromFilesStructure: false - # Additional dashboard sidecar volume mounts - extraMounts: [] - # Sets the size limit of the dashboard sidecar emptyDir volume - sizeLimit: {} - datasources: - enabled: false - # Additional environment variables for the datasourcessidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. 
- # ignoreAlreadyProcessed: true - # label that the configmaps with datasources are marked with - label: grafana_datasource - # value of label that the configmaps with datasources are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for datasource config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload datasources - reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" - # Absolute path to shell script to execute after a datasource got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any datasources defined at startup time. - initDatasources: false - # Sets the size limit of the datasource sidecar emptyDir volume - sizeLimit: {} - plugins: - enabled: false - # Additional environment variables for the plugins sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with plugins are marked with - label: grafana_plugin - # value of label that the configmaps with plugins are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for plugin config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) 
- # watchClientTimeout: 60 - # - # Endpoint to send request to reload plugins - reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" - # Absolute path to shell script to execute after a plugin got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any plugins defined at startup time. - initPlugins: false - # Sets the size limit of the plugin sidecar emptyDir volume - sizeLimit: {} - notifiers: - enabled: false - # Additional environment variables for the notifierssidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with notifiers are marked with - label: grafana_notifier - # value of label that the configmaps with notifiers are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for notifier config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload notifiers - reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" - # Absolute path to shell script to execute after a notifier got reloaded - script: null - skipReload: false - # Deploy the notifier sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any notifiers defined at startup time. - initNotifiers: false - # Sets the size limit of the notifier sidecar emptyDir volume - sizeLimit: {} - -## Override the deployment namespace -## -namespaceOverride: "" - -## Number of old ReplicaSets to retain -## -revisionHistoryLimit: 10 - -## Add a seperate remote image renderer deployment/service -imageRenderer: - deploymentStrategy: {} - # Enable the image-renderer deployment & service - enabled: false - replicas: 1 - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - image: - # -- The Docker registry - registry: docker.io - # image-renderer Image repository - repository: grafana/grafana-image-renderer - # image-renderer Image tag - tag: latest - # image-renderer Image sha (optional) - sha: "" - # image-renderer ImagePullPolicy - pullPolicy: Always - # extra environment variables - env: - HTTP_HOST: "0.0.0.0" - # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 - # RENDERING_MODE: clustered - # IGNORE_HTTPS_ERRORS: true - - ## "valueFrom" environment variable references that will be added to deployment pods. 
Name is templated. - ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core - ## Renders in container spec as: - ## env: - ## ... - ## - name: - ## valueFrom: - ## - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - - # image-renderer deployment serviceAccount - serviceAccountName: "" - # image-renderer deployment securityContext - securityContext: {} - # image-renderer deployment container securityContext - containerSecurityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - drop: ['ALL'] - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - ## image-renderer pod annotation - podAnnotations: {} - # image-renderer deployment Host Aliases - hostAliases: [] - # image-renderer deployment priority class - priorityClassName: '' - service: - # Enable the image-renderer service - enabled: true - # image-renderer service port name - portName: 'http' - # image-renderer service port used by both service and deployment - port: 8081 - targetPort: 8081 - # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" - appProtocol: "" - serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - labels: {} - interval: 1m - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels - targetLabels: [] - # - targetLabel1 - # - targetLabel2 - # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana - grafanaProtocol: http - # In case a sub_path is used this needs to be added to the image renderer callback - grafanaSubPath: "" - # name of the image-renderer port on the pod - podPortName: http - # number of image-renderer replica sets to keep - revisionHistoryLimit: 10 - networkPolicy: - # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods - limitIngress: true - # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods - limitEgress: false - # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) - extraIngressSelectors: [] - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - ## Node labels for pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - # - nodeSelector: {} - - ## Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## Affinity for pod assignment (evaluated as template) - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: "default-scheduler" - -networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
- ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to grafana port defined. - ## When true, grafana will accept connections from any source - ## (with the correct destination port). - ## - ingress: true - ## @param networkPolicy.ingress When true enables the creation - ## an ingress network policy - ## - allowExternal: true - ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed - ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the grafana. - ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. - ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} - ## - explicitNamespacesSelector: {} - ## - ## - ## - ## - ## - ## - egress: - ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be - ## created allowing grafana to connect to external data sources from kubernetes cluster. - enabled: false - ## - ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked - ## for all pods in the grafana namespace. - blockDNSResolution: false - ## - ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress - ports: [] - ## Add ports to the egress by specifying - port: - ## E.X. - ## - port: 80 - ## - port: 443 - ## - ## @param networkPolicy.egress.to Allow egress traffic to specific destinations - to: [] - ## Add destinations to the egress by specifying - ipBlock: - ## E.X. 
- ## to: - ## - namespaceSelector: - ## matchExpressions: - ## - {key: role, operator: In, values: [grafana]} - ## - ## - ## - ## - ## - -# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option -enableKubeBackwardCompatibility: false -useStatefulSet: false -# Create a dynamic manifests via values: -extraObjects: [] - # - apiVersion: "kubernetes-client.io/v1" - # kind: ExternalSecret - # metadata: - # name: grafana-secrets - # spec: - # backendType: gcpSecretsManager - # data: - # - key: grafana-admin-password - # name: adminPassword diff --git a/operations/deployment/helm/grafana/.helmignore b/operations/deployment/helm/prometheus/.helmignore similarity index 97% rename from operations/deployment/helm/grafana/.helmignore rename to operations/deployment/helm/prometheus/.helmignore index 8cade13..825c007 100644 --- a/operations/deployment/helm/grafana/.helmignore +++ b/operations/deployment/helm/prometheus/.helmignore @@ -16,8 +16,8 @@ *.tmp *~ # Various IDEs -.vscode .project .idea/ *.tmproj + OWNERS diff --git a/operations/deployment/helm/prometheus/Chart.lock b/operations/deployment/helm/prometheus/Chart.lock new file mode 100644 index 0000000..83e5660 --- /dev/null +++ b/operations/deployment/helm/prometheus/Chart.lock @@ -0,0 +1,15 @@ +dependencies: +- name: alertmanager + repository: https://prometheus-community.github.io/helm-charts + version: 1.7.0 +- name: kube-state-metrics + repository: https://prometheus-community.github.io/helm-charts + version: 5.15.2 +- name: prometheus-node-exporter + repository: https://prometheus-community.github.io/helm-charts + version: 4.24.0 +- name: prometheus-pushgateway + repository: https://prometheus-community.github.io/helm-charts + version: 2.4.2 +digest: sha256:83aa9d98623ddd76cceaf5a17b69372ae34ab715db5ccb84dbc472d5fb78a01e +generated: "2023-11-17T18:47:14.928659+02:00" diff --git a/operations/deployment/helm/prometheus/Chart.yaml b/operations/deployment/helm/prometheus/Chart.yaml new file mode 100644 index 0000000..f176a65 --- /dev/null +++ b/operations/deployment/helm/prometheus/Chart.yaml @@ -0,0 +1,53 @@ +apiVersion: v2 +name: prometheus +appVersion: v2.48.0 +version: 25.8.0 +kubeVersion: ">=1.19.0-0" +description: Prometheus is a monitoring system and time series database. 
+home: https://prometheus.io/ +icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png +sources: + - https://github.com/prometheus/alertmanager + - https://github.com/prometheus/prometheus + - https://github.com/prometheus/pushgateway + - https://github.com/prometheus/node_exporter + - https://github.com/kubernetes/kube-state-metrics +maintainers: + - name: gianrubio + email: gianrubio@gmail.com + - name: zanhsieh + email: zanhsieh@gmail.com + - name: Xtigyro + email: miroslav.hadzhiev@gmail.com + - name: naseemkullah + email: naseem@transit.app + - name: zeritti + email: rootsandtrees@posteo.de +type: application +dependencies: + - name: alertmanager + version: "1.7.*" + repository: https://prometheus-community.github.io/helm-charts + condition: alertmanager.enabled + - name: kube-state-metrics + version: "5.15.*" + repository: https://prometheus-community.github.io/helm-charts + condition: kube-state-metrics.enabled + - name: prometheus-node-exporter + version: "4.24.*" + repository: https://prometheus-community.github.io/helm-charts + condition: prometheus-node-exporter.enabled + - name: prometheus-pushgateway + version: "2.4.*" + repository: https://prometheus-community.github.io/helm-charts + condition: prometheus-pushgateway.enabled +keywords: + - monitoring + - prometheus +annotations: + "artifacthub.io/license": Apache-2.0 + "artifacthub.io/links": | + - name: Chart Source + url: https://github.com/prometheus-community/helm-charts + - name: Upstream Project + url: https://github.com/prometheus/prometheus diff --git a/operations/deployment/helm/prometheus/README.md b/operations/deployment/helm/prometheus/README.md new file mode 100644 index 0000000..2cb744c --- /dev/null +++ b/operations/deployment/helm/prometheus/README.md @@ -0,0 +1,382 @@ +# Prometheus + +[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. + +This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.7+ + +## Get Repository Info + +```console +helm repo add prometheus-community https://prometheus-community.github.io/helm-charts +helm repo update +``` + +_See [helm repository](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Install Chart + +Starting with version 16.0, the Prometheus chart requires Helm 3.7+ in order to install successfully. Please check your `helm` release before installation. 
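+
+For example, you can verify the installed client version first (a minimal check, assuming the `helm` binary is on your `PATH`):
+
+```console
+helm version --short
+# expect v3.7.0 or newer, e.g. v3.13.x
+```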
+
+```console
+helm install [RELEASE_NAME] prometheus-community/prometheus
+```
+
+_See [configuration](#configuration) below._
+
+_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._
+
+## Dependencies
+
+By default this chart installs additional, dependent charts:
+
+- [alertmanager](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager)
+- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics)
+- [prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter)
+- [prometheus-pushgateway](https://github.com/walker-tom/helm-charts/tree/main/charts/prometheus-pushgateway)
+
+To disable a dependency during installation, set `alertmanager.enabled`, `kube-state-metrics.enabled`, `prometheus-node-exporter.enabled` and `prometheus-pushgateway.enabled` to `false`.
+
+_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._
+
+## Uninstall Chart
+
+```console
+helm uninstall [RELEASE_NAME]
+```
+
+This removes all the Kubernetes components associated with the chart and deletes the release.
+
+_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._
+
+## Updating values.schema.json
+
+A [`values.schema.json`](https://helm.sh/docs/topics/charts/#schema-files) file has been added to validate chart values. When the `values.yaml` file has a structure change (i.e. a new field is added, a value type changes, etc.), modify the `values.schema.json` file manually or run `helm schema-gen values.yaml > values.schema.json` to ensure the schema is aligned with the latest values. Refer to the [helm plugin `helm-schema-gen`](https://github.com/karuppiah7890/helm-schema-gen) documentation for plugin installation instructions.
+
+## Upgrading Chart
+
+```console
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --install
+```
+
+_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._
+
+### To 25.0
+
+The `server.remoteRead[].url` and `server.remoteWrite[].url` fields now support templating, allowing for `url` values such as `https://{{ .Release.Name }}.example.com`.
+
+Any entries in these fields which previously included `{{` or `}}` must be escaped with `{{ "{{" }}` and `{{ "}}" }}` respectively. Entries which did not previously include the template-like syntax will not be affected.
+
+### To 24.0
+
+Requires Kubernetes 1.19+.
+
+Release 1.0.0 of the _alertmanager_ chart replaced [configmap-reload](https://github.com/jimmidyson/configmap-reload) with [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
+Extra command-line arguments specified via `configmapReload.prometheus.extraArgs` are not compatible and will break with the new prometheus-config-reloader. Please refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustments to the extra command-line arguments.
+
+### To 23.0
+
+Release 5.0.0 of the _kube-state-metrics_ chart introduced a separation of the `image.repository` value into two distinct values:
+
+```console
+  image:
+    registry: registry.k8s.io
+    repository: kube-state-metrics/kube-state-metrics
+```
+
+If a custom values file or CLI flags set `kube-state-metrics.image.repository`, please set the new values accordingly.
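+
+If you had previously overridden the old combined value, a minimal sketch of an equivalent override under the new split values might look like the following (the `kube-state-metrics` key is the dependency name declared in this chart's Chart.yaml; `registry.example.com` is a placeholder for your own registry):
+
+```yaml
+kube-state-metrics:
+  image:
+    # the registry was previously embedded in image.repository
+    registry: registry.example.com
+    repository: kube-state-metrics/kube-state-metrics
+```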
+
+If you are upgrading _prometheus-pushgateway_ with the chart and _prometheus-pushgateway_ has been deployed as a statefulset with a persistent volume, the statefulset must be deleted before upgrading the chart, e.g.:
+
+```bash
+kubectl delete sts -l app.kubernetes.io/name=prometheus-pushgateway -n monitoring --cascade=orphan
+```
+
+Users are advised to review changes in the corresponding chart releases before upgrading.
+
+### To 22.0
+
+The `app.kubernetes.io/version` label has been removed from the pod selector.
+
+Therefore, you must delete the previous StatefulSet or Deployment before upgrading. Performing this operation will cause **Prometheus to stop functioning** until the upgrade is complete.
+
+```console
+kubectl delete deploy,sts -l app.kubernetes.io/name=prometheus
+```
+
+### To 21.0
+
+The Kubernetes labels have been updated to follow [Helm 3 label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/).
+Specifically, the label mapping is listed below:
+
+| OLD                 | NEW                          |
+|---------------------|------------------------------|
+| heritage            | app.kubernetes.io/managed-by |
+| chart               | helm.sh/chart                |
+| [container version] | app.kubernetes.io/version    |
+| app                 | app.kubernetes.io/name       |
+| release             | app.kubernetes.io/instance   |
+
+Therefore, depending on the way you've configured the chart, the previous StatefulSet or Deployment needs to be deleted before the upgrade.
+
+If `runAsStatefulSet: false` (this is the default):
+
+```console
+kubectl delete deploy -l app=prometheus
+```
+
+If `runAsStatefulSet: true`:
+
+```console
+kubectl delete sts -l app=prometheus
+```
+
+After that, do the actual upgrade:
+
+```console
+helm upgrade -i prometheus prometheus-community/prometheus
+```
+
+### To 20.0
+
+The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader).
+Extra command-line arguments specified via `configmapReload.prometheus.extraArgs` are not compatible and will break with the new prometheus-config-reloader; refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustments to the extra command-line arguments.
+
+### To 19.0
+
+Prometheus has been updated to version v2.40.5.
+
+Prometheus-pushgateway was updated to version 2.0.0, which adopted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/).
+See the [upgrade docs of the prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway#to-200) to see what to do before you upgrade Prometheus!
+
+The condition in Chart.yaml to disable kube-state-metrics has been changed from `kubeStateMetrics.enabled` to `kube-state-metrics.enabled`.
+
+The Docker image tag is taken from the `appVersion` field in Chart.yaml by default.
+
+Unused subchart configs have been removed, and the subchart config is now at the bottom of the config file.
+
+If Prometheus is used as a deployment, the update strategy has been changed to "Recreate" by default, so Helm updates work out of the box.
+
+`.Values.server.extraTemplates` & `.Values.server.extraObjects` have been removed in favour of `.Values.extraManifests`, which can do the same.
+
+`.Values.server.enabled` has been removed as it's useless now that all components are created by subcharts.
+
+All files in the `templates/server` directory have been moved to the `templates` directory.
+
+```bash
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 19.0.0
+```
+
+### To 18.0
+
+Version 18.0.0 uses the alertmanager service from the [alertmanager chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager). If you've made some config changes, please check the old `alertmanager` and the new `alertmanager` configuration section in values.yaml for differences.
+
+Note that the `configmapReload` section for `alertmanager` was moved out of its dedicated section (`configmapReload.alertmanager`) and is now embedded in the alertmanager section (`alertmanager.configmapReload`).
+
+Before you update, please scale down the `prometheus-server` deployment to `0`, then perform the upgrade:
+
+```bash
+# In 17.x
+kubectl scale deploy prometheus-server --replicas=0
+# Upgrade
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 18.0.0
+```
+
+### To 17.0
+
+Version 17.0.0 uses the pushgateway service from the [prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway). If you've made some config changes, please check the old `pushgateway` and the new `prometheus-pushgateway` configuration section in values.yaml for differences.
+
+Before you update, please scale down the `prometheus-server` deployment to `0`, then perform the upgrade:
+
+```bash
+# In 16.x
+kubectl scale deploy prometheus-server --replicas=0
+# Upgrade
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 17.0.0
+```
+
+### To 16.0
+
+Starting from version 16.0, embedded services (like alertmanager, node-exporter, etc.) are moved out of the Prometheus chart and the respective charts from this repository are used as dependencies. Version 16.0.0 moves the node-exporter service to the [prometheus-node-exporter chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter). If you've made some config changes, please check the old `nodeExporter` and the new `prometheus-node-exporter` configuration section in values.yaml for differences.
+
+Before you update, please scale down the `prometheus-server` deployment to `0`, then perform the upgrade:
+
+```bash
+# In 15.x
+kubectl scale deploy prometheus-server --replicas=0
+# Upgrade
+helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 16.0.0
+```
+
+### To 15.0
+
+Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes.
+
+Before you update, please execute the following command to be able to update kube-state-metrics:
+
+```bash
+kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan
+```
+
+### To 9.0
+
+Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server, `server.enabled` must be set to `true`.
+
+### To 5.0
+
+As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work.
See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. + +Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). + +Users of this chart will need to update their alerting rules to the new format before they can upgrade. + +### Example Migration + +Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: + +1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: + + ```yaml + alertmanager: + enabled: false + alertmanagerFiles: + alertmanager.yml: "" + kubeStateMetrics: + enabled: false + nodeExporter: + enabled: false + pushgateway: + enabled: false + server: + extraArgs: + storage.local.retention: 720h + serverFiles: + alerts: "" + prometheus.yml: "" + rules: "" + ``` + +1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. + + ```yaml + prometheus.yml: + ... + remote_read: + - url: http://prometheus-old/api/v1/read + ... + ``` + + Old data will be available when you query the new prometheus instance. + +## Configuration + +See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: + +```console +helm show values prometheus-community/prometheus +``` + +You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see its configurations. + +### Scraping Pod Metrics via Annotations + +This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). + +In order to get prometheus to scrape pods, you must add annotations to the pods as below: + +```yaml +metadata: + annotations: + prometheus.io/scrape: "true" + prometheus.io/path: /metrics + prometheus.io/port: "8080" +``` + +You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes. + +### Sharing Alerts Between Services + +Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, + +```yaml +# values.yaml +# ... 
+ +# service1-alert.yaml +serverFiles: + alerts: + service1: + - alert: anAlert + # ... + +# service2-alert.yaml +serverFiles: + alerts: + service2: + - alert: anAlert + # ... +``` + +```console +helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml +``` + +### RBAC Configuration + +Roles and RoleBindings resources will be created automatically for `server` service. + +To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. + +> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. + +### ConfigMap Files + +AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. + +Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. + +### Ingress TLS + +If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. + +To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: + +```console +kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key +``` + +Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: + +```yaml +server: + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: true + + ## Prometheus server Ingress hostnames + ## Must be provided if Ingress is enabled + ## + hosts: + - prometheus.domain.com + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: + - secretName: prometheus-server-tls + hosts: + - prometheus.domain.com +``` + +### NetworkPolicy + +Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. + +To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. + +If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. diff --git a/operations/deployment/helm/prometheus/templates/NOTES.txt b/operations/deployment/helm/prometheus/templates/NOTES.txt new file mode 100644 index 0000000..fc03c2a --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/NOTES.txt @@ -0,0 +1,113 @@ +The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: +{{ template "prometheus.server.fullname" . 
}}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.server.ingress.enabled -}}
+From outside the cluster, the server URL(s) are:
+{{- range .Values.server.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the Prometheus server URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.server.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.server.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }}
+{{- else if contains "ClusterIP" .Values.server.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prometheus.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090
+{{- end }}
+
+
+{{- if .Values.server.persistentVolume.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the Server pod is terminated.                             #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{ if .Values.alertmanager.enabled }}
+The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.port }} on the following DNS name from within your cluster:
+{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if .Values.alertmanager.ingress.enabled -}}
+From outside the cluster, the alertmanager URL(s) are:
+{{- range .Values.alertmanager.ingress.hosts }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the Alertmanager URL by running these commands in the same shell:
+{{- if contains "NodePort" .Values.alertmanager.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+        You can watch the status by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}'
+
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . 
}} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+  echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }}
+{{- else if contains "ClusterIP" .Values.alertmanager.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" .Subcharts.alertmanager }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093
+{{- end }}
+{{- end }}
+
+{{- if .Values.alertmanager.persistence.enabled }}
+{{- else }}
+#################################################################################
+######   WARNING: Persistence is disabled!!! You will lose your data when   #####
+######            the AlertManager pod is terminated.                       #####
+#################################################################################
+{{- end }}
+{{- end }}
+
+{{- if (index .Values "prometheus-node-exporter" "enabled") }}
+#################################################################################
+######   WARNING: Pod Security Policy has been disabled by default since    #####
+######            it is deprecated after k8s 1.25+. Use                     #####
+######            (index .Values "prometheus-node-exporter" "rbac"          #####
+######            . "pspEnabled") with (index .Values                       #####
+######            "prometheus-node-exporter" "rbac" "pspAnnotations")       #####
+######            in case you still need it.                                #####
+#################################################################################
+{{- end }}
+
+{{ if (index .Values "prometheus-pushgateway" "enabled") }}
+The Prometheus PushGateway can be accessed via port {{ index .Values "prometheus-pushgateway" "service" "port" }} on the following DNS name from within your cluster:
+{{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}.{{ .Release.Namespace }}.svc.cluster.local
+
+{{ if (index .Values "prometheus-pushgateway" "ingress" "enabled") -}}
+From outside the cluster, the pushgateway URL(s) are:
+{{- range (index .Values "prometheus-pushgateway" "ingress" "hosts") }}
+http://{{ . }}
+{{- end }}
+{{- else }}
+Get the PushGateway URL by running these commands in the same shell:
+{{- $pushgateway_svc_type := index .Values "prometheus-pushgateway" "service" "type" -}}
+{{- if contains "NodePort" $pushgateway_svc_type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" $pushgateway_svc_type }}
+  NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + echo http://$SERVICE_IP:{{ index .Values "prometheus-pushgateway" "service" "port" }} +{{- else if contains "ClusterIP" $pushgateway_svc_type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "prometheus.name" (index .Subcharts "prometheus-pushgateway") }},component=pushgateway" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 +{{- end }} +{{- end }} +{{- end }} + +For more information on running Prometheus, visit: +https://prometheus.io/ diff --git a/operations/deployment/helm/prometheus/templates/_helpers.tpl b/operations/deployment/helm/prometheus/templates/_helpers.tpl new file mode 100644 index 0000000..0810e3c --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/_helpers.tpl @@ -0,0 +1,234 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "prometheus.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "prometheus.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create labels for prometheus +*/}} +{{- define "prometheus.common.matchLabels" -}} +app.kubernetes.io/name: {{ include "prometheus.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Create unified labels for prometheus components +*/}} +{{- define "prometheus.common.metaLabels" -}} +app.kubernetes.io/version: {{ .Chart.AppVersion }} +helm.sh/chart: {{ include "prometheus.chart" . }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +app.kubernetes.io/part-of: {{ include "prometheus.name" . }} +{{- with .Values.commonMetaLabels}} +{{ toYaml . }} +{{- end }} +{{- end -}} + +{{- define "prometheus.server.labels" -}} +{{ include "prometheus.server.matchLabels" . }} +{{ include "prometheus.common.metaLabels" . }} +{{- end -}} + +{{- define "prometheus.server.matchLabels" -}} +app.kubernetes.io/component: {{ .Values.server.name }} +{{ include "prometheus.common.matchLabels" . }} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified ClusterRole name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "prometheus.clusterRoleName" -}} +{{- if .Values.server.clusterRoleNameOverride -}} +{{ .Values.server.clusterRoleNameOverride | trunc 63 | trimSuffix "-" }} +{{- else -}} +{{ include "prometheus.server.fullname" . }} +{{- end -}} +{{- end -}} + +{{/* +Create a fully qualified alertmanager name for communicating with the user via NOTES.txt +*/}} +{{- define "prometheus.alertmanager.fullname" -}} +{{- template "alertmanager.fullname" .Subcharts.alertmanager -}} +{{- end -}} + +{{/* +Create a fully qualified Prometheus server name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "prometheus.server.fullname" -}} +{{- if .Values.server.fullnameOverride -}} +{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Get KubeVersion removing pre-release information. +*/}} +{{- define "prometheus.kubeVersion" -}} + {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "prometheus.deployment.apiVersion" -}} +{{- print "apps/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "prometheus.networkPolicy.apiVersion" -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for poddisruptionbudget. +*/}} +{{- define "prometheus.podDisruptionBudget.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "policy/v1" }} +{{- print "policy/v1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "rbac.apiVersion" -}} +{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "ingress.apiVersion" -}} + {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} + {{- print "networking.k8s.io/v1" -}} + {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} + {{- print "networking.k8s.io/v1beta1" -}} + {{- else -}} + {{- print "extensions/v1beta1" -}} + {{- end -}} +{{- end -}} + +{{/* +Return if ingress is stable. +*/}} +{{- define "ingress.isStable" -}} + {{- eq (include "ingress.apiVersion" .) "networking.k8s.io/v1" -}} +{{- end -}} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "ingress.supportsIngressClassName" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "ingress.supportsPathType" -}} + {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) 
"networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} +{{- end -}} + +{{/* +Create the name of the service account to use for the server component +*/}} +{{- define "prometheus.serviceAccountName.server" -}} +{{- if .Values.serviceAccounts.server.create -}} + {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }} +{{- else -}} + {{ default "default" .Values.serviceAccounts.server.name }} +{{- end -}} +{{- end -}} + +{{/* +Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set +*/}} +{{- define "prometheus.namespace" -}} + {{- default .Release.Namespace .Values.forceNamespace -}} +{{- end }} + +{{/* +Define template prometheus.namespaces producing a list of namespaces to monitor +*/}} +{{- define "prometheus.namespaces" -}} +{{- $namespaces := list }} +{{- if and .Values.rbac.create .Values.server.useExistingClusterRoleName }} + {{- if .Values.server.namespaces -}} + {{- range $ns := join "," .Values.server.namespaces | split "," }} + {{- $namespaces = append $namespaces (tpl $ns $) }} + {{- end -}} + {{- end -}} + {{- if .Values.server.releaseNamespace -}} + {{- $namespaces = append $namespaces (include "prometheus.namespace" .) }} + {{- end -}} +{{- end -}} +{{ mustToJson $namespaces }} +{{- end -}} + +{{/* +Define prometheus.server.remoteWrite producing a list of remoteWrite configurations with URL templating +*/}} +{{- define "prometheus.server.remoteWrite" -}} +{{- $remoteWrites := list }} +{{- range $remoteWrite := .Values.server.remoteWrite }} + {{- $remoteWrites = tpl $remoteWrite.url $ | set $remoteWrite "url" | append $remoteWrites }} +{{- end -}} +{{ toYaml $remoteWrites }} +{{- end -}} + +{{/* +Define prometheus.server.remoteRead producing a list of remoteRead configurations with URL templating +*/}} +{{- define "prometheus.server.remoteRead" -}} +{{- $remoteReads := list }} +{{- range $remoteRead := .Values.server.remoteRead }} + {{- $remoteReads = tpl $remoteRead.url $ | set $remoteRead "url" | append $remoteReads }} +{{- end -}} +{{ toYaml $remoteReads }} +{{- end -}} + diff --git a/operations/deployment/helm/prometheus/templates/clusterrole.yaml b/operations/deployment/helm/prometheus/templates/clusterrole.yaml new file mode 100644 index 0000000..25e3cec --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/clusterrole.yaml @@ -0,0 +1,56 @@ +{{- if and .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRole +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ include "prometheus.clusterRoleName" . }} +rules: +{{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} + - apiGroups: + - extensions + resources: + - podsecuritypolicies + verbs: + - use + resourceNames: + - {{ template "prometheus.server.fullname" . 
}} +{{- end }} + - apiGroups: + - "" + resources: + - nodes + - nodes/proxy + - nodes/metrics + - services + - endpoints + - pods + - ingresses + - configmaps + verbs: + - get + - list + - watch + - apiGroups: + - "extensions" + - "networking.k8s.io" + resources: + - ingresses/status + - ingresses + verbs: + - get + - list + - watch + - apiGroups: + - "discovery.k8s.io" + resources: + - endpointslices + verbs: + - get + - list + - watch + - nonResourceURLs: + - "/metrics" + verbs: + - get +{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml b/operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..28f4bda --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml @@ -0,0 +1,16 @@ +{{- if and .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} +apiVersion: {{ template "rbac.apiVersion" . }} +kind: ClusterRoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ include "prometheus.clusterRoleName" . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" . }} + namespace: {{ include "prometheus.namespace" . }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ include "prometheus.clusterRoleName" . }} +{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/cm.yaml b/operations/deployment/helm/prometheus/templates/cm.yaml new file mode 100644 index 0000000..c670666 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/cm.yaml @@ -0,0 +1,99 @@ +{{- if (empty .Values.server.configMapOverrideName) -}} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.extraConfigmapLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +data: + allow-snippet-annotations: "false" +{{- $root := . 
-}} +{{- range $key, $value := .Values.ruleFiles }} + {{ $key }}: {{- toYaml $value | indent 2 }} +{{- end }} +{{- range $key, $value := .Values.serverFiles }} + {{ $key }}: | +{{- if eq $key "prometheus.yml" }} + global: +{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} +{{- if $root.Values.server.remoteWrite }} + remote_write: +{{- include "prometheus.server.remoteWrite" $root | nindent 4 }} +{{- end }} +{{- if $root.Values.server.remoteRead }} + remote_read: +{{- include "prometheus.server.remoteRead" $root | nindent 4 }} +{{- end }} +{{- if or $root.Values.server.tsdb $root.Values.server.exemplars }} + storage: +{{- if $root.Values.server.tsdb }} + tsdb: +{{ $root.Values.server.tsdb | toYaml | indent 8 }} +{{- end }} +{{- if $root.Values.server.exemplars }} + exemplars: +{{ $root.Values.server.exemplars | toYaml | indent 8 }} +{{- end }} +{{- end }} +{{- if $root.Values.scrapeConfigFiles }} + scrape_config_files: +{{ toYaml $root.Values.scrapeConfigFiles | indent 4 }} +{{- end }} +{{- end }} +{{- if eq $key "alerts" }} +{{- if and (not (empty $value)) (empty $value.groups) }} + groups: +{{- range $ruleKey, $ruleValue := $value }} + - name: {{ $ruleKey -}}.rules + rules: +{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} +{{- else }} +{{ toYaml $value | indent 4 }} +{{- end }} +{{- else }} +{{ toYaml $value | default "{}" | indent 4 }} +{{- end }} +{{- if eq $key "prometheus.yml" -}} +{{- if $root.Values.extraScrapeConfigs }} +{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} +{{- end -}} +{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} + alerting: +{{- if $root.Values.alertRelabelConfigs }} +{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} +{{- end }} + alertmanagers: +{{- if $root.Values.server.alertmanagers }} +{{ toYaml $root.Values.server.alertmanagers | indent 8 }} +{{- else }} + - kubernetes_sd_configs: + - role: pod + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + {{- if $root.Values.alertmanager.prefixURL }} + path_prefix: {{ $root.Values.alertmanager.prefixURL }} + {{- end }} + relabel_configs: + - source_labels: [__meta_kubernetes_namespace] + regex: {{ $root.Release.Namespace }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] + regex: {{ $root.Release.Name }} + action: keep + - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name] + regex: {{ default "alertmanager" $root.Values.alertmanager.nameOverride | trunc 63 | trimSuffix "-" }} + action: keep + - source_labels: [__meta_kubernetes_pod_container_port_number] + regex: "9093" + action: keep +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/deploy.yaml b/operations/deployment/helm/prometheus/templates/deploy.yaml new file mode 100644 index 0000000..93f93c4 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/deploy.yaml @@ -0,0 +1,363 @@ +{{- if not .Values.server.statefulSet.enabled -}} +apiVersion: {{ template "prometheus.deployment.apiVersion" . }} +kind: Deployment +metadata: +{{- if .Values.server.deploymentAnnotations }} + annotations: + {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.server.fullname" . 
}} + namespace: {{ include "prometheus.namespace" . }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }} + {{- if .Values.server.strategy }} + strategy: +{{ toYaml .Values.server.strategy | trim | indent 4 }} + {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} +{{- end }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} +{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} + automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} +{{- end }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + {{- if .Values.configmapReload.prometheus.image.digest }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}@{{ .Values.configmapReload.prometheus.image.digest }}" + {{- else }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + {{- end }} + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + {{- with .Values.configmapReload.prometheus.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + - --watched-dir=/etc/config + {{- $default_url := "http://127.0.0.1:9090/-/reload" }} + {{- with .Values.server.prefixURL }} + {{- $default_url = printf "http://127.0.0.1:9090%s/-/reload" . }} + {{- end }} + - --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }} + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --watched-dir={{ . }} + {{- end }} + {{- with .Values.configmapReload.env }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + {{- with .Values.configmapReload.prometheus.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- with .Values.configmapReload.prometheus.extraVolumeMounts }} + {{ toYaml . | nindent 12 }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + {{- if .Values.server.image.digest }} + image: "{{ .Values.server.image.repository }}@{{ .Values.server.image.digest }}" + {{- else }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion}}" + {{- end }} + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- with .Values.server.command }} + command: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + {{- if .Values.server.retentionSize }} + - --storage.tsdb.retention.size={{ .Values.server.retentionSize }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.portName }} + name: {{ .Values.server.portName }} + {{- end }} + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . | nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . 
| nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + {{- if .Values.server.startupProbe.enabled }} + startupProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} + {{- end }} + {{- with .Values.server.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: storage-volume + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- with .Values.server.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.server.hostNetwork }} + hostNetwork: true + dnsPolicy: ClusterFirstWithHostNet + {{- else }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- with .Values.server.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + {{- with .Values.server.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . 
| nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} + - name: storage-volume + {{- if .Values.server.persistentVolume.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} + {{- end -}} +{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/extra-manifests.yaml b/operations/deployment/helm/prometheus/templates/extra-manifests.yaml new file mode 100644 index 0000000..2b21b71 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraManifests }} +--- +{{ tpl . $ }} +{{ end }} diff --git a/operations/deployment/helm/prometheus/templates/headless-svc.yaml b/operations/deployment/helm/prometheus/templates/headless-svc.yaml new file mode 100644 index 0000000..df9db99 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/headless-svc.yaml @@ -0,0 +1,35 @@ +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.statefulSet.headless.annotations }} + annotations: +{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.statefulSet.headless.labels }} +{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }}-headless + namespace: {{ include "prometheus.namespace" . 
}} +spec: + clusterIP: None + ports: + - name: http + port: {{ .Values.server.statefulSet.headless.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.statefulSet.headless.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} + nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} + {{- end }} + {{- end }} + + selector: + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/ingress.yaml b/operations/deployment/helm/prometheus/templates/ingress.yaml new file mode 100644 index 0000000..84341a9 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/ingress.yaml @@ -0,0 +1,57 @@ +{{- if .Values.server.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} +{{- $releaseName := .Release.Name -}} +{{- $serviceName := include "prometheus.server.fullname" . }} +{{- $servicePort := .Values.server.ingress.servicePort | default .Values.server.service.servicePort -}} +{{- $ingressPath := .Values.server.ingress.path -}} +{{- $ingressPathType := .Values.server.ingress.pathType -}} +{{- $extraPaths := .Values.server.ingress.extraPaths -}} +apiVersion: {{ template "ingress.apiVersion" . }} +kind: Ingress +metadata: +{{- if .Values.server.ingress.annotations }} + annotations: +{{ toYaml .Values.server.ingress.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- range $key, $value := .Values.server.ingress.extraLabels }} + {{ $key }}: {{ $value }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} + ingressClassName: {{ .Values.server.ingress.ingressClassName }} + {{- end }} + rules: + {{- range .Values.server.ingress.hosts }} + {{- $url := splitList "/" . }} + - host: {{ first $url }} + http: + paths: +{{ if $extraPaths }} +{{ toYaml $extraPaths | indent 10 }} +{{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $serviceName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $serviceName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end -}} +{{- if .Values.server.ingress.tls }} + tls: +{{ toYaml .Values.server.ingress.tls | indent 4 }} + {{- end -}} +{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/network-policy.yaml b/operations/deployment/helm/prometheus/templates/network-policy.yaml new file mode 100644 index 0000000..3254ffc --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/network-policy.yaml @@ -0,0 +1,16 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} +kind: NetworkPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . 
| nindent 4 }} +spec: + podSelector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + ingress: + - ports: + - port: 9090 +{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/pdb.yaml b/operations/deployment/helm/prometheus/templates/pdb.yaml new file mode 100644 index 0000000..7ffe673 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/pdb.yaml @@ -0,0 +1,15 @@ +{{- if .Values.server.podDisruptionBudget.enabled }} +{{- $pdbSpec := omit .Values.server.podDisruptionBudget "enabled" }} +apiVersion: {{ template "prometheus.podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + {{- toYaml $pdbSpec | nindent 2 }} +{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/psp.yaml b/operations/deployment/helm/prometheus/templates/psp.yaml new file mode 100644 index 0000000..5776e25 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/psp.yaml @@ -0,0 +1,53 @@ +{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} +{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ template "prometheus.server.fullname" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.podSecurityPolicy.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + allowedCapabilities: + - 'CHOWN' + volumes: + - 'configMap' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'secret' + - 'hostPath' + allowedHostPaths: + - pathPrefix: /etc + readOnly: true + - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} + {{- range .Values.server.extraHostPathMounts }} + - pathPrefix: {{ .hostPath }} + readOnly: {{ .readOnly }} + {{- end }} + hostNetwork: false + hostPID: false + hostIPC: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/pvc.yaml b/operations/deployment/helm/prometheus/templates/pvc.yaml new file mode 100644 index 0000000..a9dc4fc --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/pvc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.server.statefulSet.enabled -}} +{{- if .Values.server.persistentVolume.enabled -}} +{{- if not .Values.server.persistentVolume.existingClaim -}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- with .Values.server.persistentVolume.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . 
}} +spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} +{{- if .Values.server.persistentVolume.storageClass }} +{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" +{{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" +{{- end }} +{{- end }} +{{- if .Values.server.persistentVolume.volumeBindingMode }} + volumeBindingMode: "{{ .Values.server.persistentVolume.volumeBindingMode }}" +{{- end }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" +{{- if .Values.server.persistentVolume.selector }} + selector: + {{- toYaml .Values.server.persistentVolume.selector | nindent 4 }} +{{- end -}} +{{- if .Values.server.persistentVolume.volumeName }} + volumeName: "{{ .Values.server.persistentVolume.volumeName }}" +{{- end -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/rolebinding.yaml b/operations/deployment/helm/prometheus/templates/rolebinding.yaml new file mode 100644 index 0000000..721b388 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/rolebinding.yaml @@ -0,0 +1,18 @@ +{{- range include "prometheus.namespaces" . | fromJsonArray }} +--- +apiVersion: {{ template "rbac.apiVersion" $ }} +kind: RoleBinding +metadata: + labels: + {{- include "prometheus.server.labels" $ | nindent 4 }} + name: {{ template "prometheus.server.fullname" $ }} + namespace: {{ . }} +subjects: + - kind: ServiceAccount + name: {{ template "prometheus.serviceAccountName.server" $ }} + namespace: {{ include "prometheus.namespace" $ }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: ClusterRole + name: {{ $.Values.server.useExistingClusterRoleName }} +{{ end -}} diff --git a/operations/deployment/helm/prometheus/templates/service.yaml b/operations/deployment/helm/prometheus/templates/service.yaml new file mode 100644 index 0000000..069f327 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/service.yaml @@ -0,0 +1,63 @@ +{{- if .Values.server.service.enabled -}} +apiVersion: v1 +kind: Service +metadata: +{{- if .Values.server.service.annotations }} + annotations: +{{ toYaml .Values.server.service.annotations | indent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +{{- if .Values.server.service.labels }} +{{ toYaml .Values.server.service.labels | indent 4 }} +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . 
}} +spec: +{{- if .Values.server.service.clusterIP }} + clusterIP: {{ .Values.server.service.clusterIP }} +{{- end }} +{{- if .Values.server.service.externalIPs }} + externalIPs: +{{ toYaml .Values.server.service.externalIPs | indent 4 }} +{{- end }} +{{- if .Values.server.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} +{{- end }} +{{- if .Values.server.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} + - {{ $cidr }} + {{- end }} +{{- end }} + ports: + - name: http + port: {{ .Values.server.service.servicePort }} + protocol: TCP + targetPort: 9090 + {{- if .Values.server.service.nodePort }} + nodePort: {{ .Values.server.service.nodePort }} + {{- end }} + {{- if .Values.server.service.gRPC.enabled }} + - name: grpc + port: {{ .Values.server.service.gRPC.servicePort }} + protocol: TCP + targetPort: 10901 + {{- if .Values.server.service.gRPC.nodePort }} + nodePort: {{ .Values.server.service.gRPC.nodePort }} + {{- end }} + {{- end }} +{{- if .Values.server.service.additionalPorts }} +{{ toYaml .Values.server.service.additionalPorts | indent 4 }} +{{- end }} + selector: + {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} + statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} + {{- else -}} + {{- include "prometheus.server.matchLabels" . | nindent 4 }} +{{- if .Values.server.service.sessionAffinity }} + sessionAffinity: {{ .Values.server.service.sessionAffinity }} +{{- end }} + {{- end }} + type: "{{ .Values.server.service.type }}" +{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/serviceaccount.yaml b/operations/deployment/helm/prometheus/templates/serviceaccount.yaml new file mode 100644 index 0000000..6d5ab0c --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/serviceaccount.yaml @@ -0,0 +1,16 @@ +{{- if .Values.serviceAccounts.server.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + name: {{ template "prometheus.serviceAccountName.server" . }} + namespace: {{ include "prometheus.namespace" . }} + annotations: +{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} +{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} +automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} +{{- else if kindIs "bool" .Values.serviceAccounts.server.automountServiceAccountToken }} +automountServiceAccountToken: {{ .Values.serviceAccounts.server.automountServiceAccountToken }} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/sts.yaml b/operations/deployment/helm/prometheus/templates/sts.yaml new file mode 100644 index 0000000..63851c4 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/sts.yaml @@ -0,0 +1,385 @@ +{{- if .Values.server.statefulSet.enabled -}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: +{{- if .Values.server.statefulSet.annotations }} + annotations: + {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} +{{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} + {{- if .Values.server.statefulSet.labels}} + {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} + {{- end}} + name: {{ template "prometheus.server.fullname" . }} + namespace: {{ include "prometheus.namespace" . 
}} +spec: + {{- if semverCompare ">= 1.27.x" (include "prometheus.kubeVersion" .) }} + persistentVolumeClaimRetentionPolicy: + whenDeleted: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsDelete }} + whenScaled: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsScale }} + {{- end }} + serviceName: {{ template "prometheus.server.fullname" . }}-headless + selector: + matchLabels: + {{- include "prometheus.server.matchLabels" . | nindent 6 }} + replicas: {{ .Values.server.replicaCount }} + revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }} + podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} + template: + metadata: + {{- if .Values.server.podAnnotations }} + annotations: + {{ toYaml .Values.server.podAnnotations | nindent 8 }} + {{- end }} + labels: + {{- include "prometheus.server.labels" . | nindent 8 }} + {{- if .Values.server.podLabels}} + {{ toYaml .Values.server.podLabels | nindent 8 }} + {{- end}} + spec: +{{- if .Values.server.priorityClassName }} + priorityClassName: "{{ .Values.server.priorityClassName }}" +{{- end }} +{{- if .Values.server.schedulerName }} + schedulerName: "{{ .Values.server.schedulerName }}" +{{- end }} +{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} + {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} + enableServiceLinks: true + {{- else }} + enableServiceLinks: false + {{- end }} +{{- end }} + serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} +{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} + automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} +{{- end }} + {{- if .Values.server.extraInitContainers }} + initContainers: +{{ toYaml .Values.server.extraInitContainers | indent 8 }} + {{- end }} + containers: + {{- if .Values.configmapReload.prometheus.enabled }} + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} + {{- if .Values.configmapReload.prometheus.image.digest }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}@{{ .Values.configmapReload.prometheus.image.digest }}" + {{- else }} + image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" + {{- end }} + imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" + {{- with .Values.configmapReload.prometheus.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + args: + - --watched-dir=/etc/config + {{- $default_url := "http://127.0.0.1:9090/-/reload" }} + {{- with .Values.server.prefixURL }} + {{- $default_url = printf "http://127.0.0.1:9090%s/-/reload" . }} + {{- end }} + - --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }} + {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} + - --watched-dir={{ . }} + {{- end }} + {{- with .Values.configmapReload.env }} + env: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.configmapReload.prometheus.containerPort }} + ports: + - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} + {{- end }} + {{- with .Values.configmapReload.prometheus.resources }} + resources: + {{- toYaml . 
| nindent 12 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + readOnly: true + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- end }} + + - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} + {{- if .Values.server.image.digest }} + image: "{{ .Values.server.image.repository }}@{{ .Values.server.image.digest }}" + {{- else }} + image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" + {{- with .Values.server.command }} + command: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.env }} + env: +{{ toYaml .Values.server.env | indent 12}} + {{- end }} + args: + {{- if .Values.server.defaultFlagsOverride }} + {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} + {{- else }} + {{- if .Values.server.prefixURL }} + - --web.route-prefix={{ .Values.server.prefixURL }} + {{- end }} + {{- if .Values.server.retention }} + - --storage.tsdb.retention.time={{ .Values.server.retention }} + {{- end }} + {{- if .Values.server.retentionSize }} + - --storage.tsdb.retention.size={{ .Values.server.retentionSize }} + {{- end }} + - --config.file={{ .Values.server.configPath }} + {{- if .Values.server.storagePath }} + - --storage.tsdb.path={{ .Values.server.storagePath }} + {{- else }} + - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} + {{- end }} + - --web.console.libraries=/etc/prometheus/console_libraries + - --web.console.templates=/etc/prometheus/consoles + {{- range .Values.server.extraFlags }} + - --{{ . }} + {{- end }} + {{- range $key, $value := .Values.server.extraArgs }} + - --{{ $key }}={{ $value }} + {{- end }} + {{- if .Values.server.baseURL }} + - --web.external-url={{ .Values.server.baseURL }} + {{- end }} + {{- end }} + ports: + - containerPort: 9090 + {{- if .Values.server.portName }} + name: {{ .Values.server.portName }} + {{- end }} + {{- if .Values.server.hostPort }} + hostPort: {{ .Values.server.hostPort }} + {{- end }} + readinessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/ready + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . | nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} + periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} + failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} + successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} + livenessProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- with .Values.server.probeHeaders }} + httpHeaders: +{{- toYaml . 
| nindent 14 }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} + periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} + timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} + failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} + successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} + {{- if .Values.server.startupProbe.enabled }} + startupProbe: + {{- if not .Values.server.tcpSocketProbeEnabled }} + httpGet: + path: {{ .Values.server.prefixURL }}/-/healthy + port: 9090 + scheme: {{ .Values.server.probeScheme }} + {{- if .Values.server.probeHeaders }} + httpHeaders: + {{- range .Values.server.probeHeaders}} + - name: {{ .name }} + value: {{ .value }} + {{- end }} + {{- end }} + {{- else }} + tcpSocket: + port: 9090 + {{- end }} + failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} + periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} + timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} + {{- end }} + {{- with .Values.server.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - name: config-volume + mountPath: /etc/config + - name: {{ ternary .Values.server.persistentVolume.statefulSetNameOverride "storage-volume" (and .Values.server.persistentVolume.enabled (not (empty .Values.server.persistentVolume.statefulSetNameOverride))) }} + mountPath: {{ .Values.server.persistentVolume.mountPath }} + subPath: "{{ .Values.server.persistentVolume.subPath }}" + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath }} + readOnly: {{ .readOnly }} + {{- end }} + {{- if .Values.server.extraVolumeMounts }} + {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- with .Values.server.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- if .Values.server.sidecarContainers }} + {{- range $name, $spec := .Values.server.sidecarContainers }} + - name: {{ $name }} + {{- if kindIs "string" $spec }} + {{- tpl $spec $ | nindent 10 }} + {{- else }} + {{- toYaml $spec | nindent 10 }} + {{- end }} + {{- end }} + {{- end }} + hostNetwork: {{ .Values.server.hostNetwork }} + {{- if .Values.server.dnsPolicy }} + dnsPolicy: {{ .Values.server.dnsPolicy }} + {{- end }} + {{- if .Values.imagePullSecrets }} + imagePullSecrets: +{{ toYaml .Values.imagePullSecrets | indent 8 }} + {{- end }} + {{- if .Values.server.nodeSelector }} + nodeSelector: +{{ toYaml .Values.server.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.server.hostAliases }} + hostAliases: +{{ toYaml .Values.server.hostAliases | indent 8 }} + {{- end }} + {{- if .Values.server.dnsConfig }} + dnsConfig: +{{ toYaml .Values.server.dnsConfig | indent 8 }} + {{- end }} + {{- with .Values.server.securityContext }} + securityContext: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- if .Values.server.tolerations }} + tolerations: +{{ toYaml .Values.server.tolerations | indent 8 }} + {{- end }} + {{- if .Values.server.affinity }} + affinity: +{{ toYaml .Values.server.affinity | indent 8 }} + {{- end }} + {{- with .Values.server.topologySpreadConstraints }} + topologySpreadConstraints: + {{- toYaml . | nindent 8 }} + {{- end }} + terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} + volumes: + - name: config-volume + {{- if empty .Values.server.configFromSecret }} + configMap: + name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} + {{- else }} + secret: + secretName: {{ .Values.server.configFromSecret }} + {{- end }} + {{- range .Values.server.extraHostPathMounts }} + - name: {{ .name }} + hostPath: + path: {{ .hostPath }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraConfigmapMounts }} + - name: {{ $.Values.server.name }}-{{ .name }} + configMap: + name: {{ .configMap }} + {{- end }} + {{- range .Values.server.extraSecretMounts }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} + {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} + - name: {{ .name }} + configMap: + name: {{ .configMap }} + {{- with .optional }} + optional: {{ . }} + {{- end }} + {{- end }} +{{- if .Values.server.extraVolumes }} +{{ toYaml .Values.server.extraVolumes | indent 8}} +{{- end }} +{{- if .Values.server.persistentVolume.enabled }} + volumeClaimTemplates: + - metadata: + name: {{ .Values.server.persistentVolume.statefulSetNameOverride | default "storage-volume" }} + {{- if .Values.server.persistentVolume.annotations }} + annotations: +{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} + {{- end }} + {{- if .Values.server.persistentVolume.labels }} + labels: +{{ toYaml .Values.server.persistentVolume.labels | indent 10 }} + {{- end }} + spec: + accessModes: +{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} + resources: + requests: + storage: "{{ .Values.server.persistentVolume.size }}" + {{- if .Values.server.persistentVolume.storageClass }} + {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} + storageClassName: "" + {{- else }} + storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" + {{- end }} + {{- end }} +{{- else }} + - name: storage-volume + emptyDir: + {{- if .Values.server.emptyDir.sizeLimit }} + sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} + {{- else }} + {} + {{- end -}} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/vpa.yaml b/operations/deployment/helm/prometheus/templates/vpa.yaml new file mode 100644 index 0000000..cd07ad8 --- /dev/null +++ b/operations/deployment/helm/prometheus/templates/vpa.yaml @@ -0,0 +1,26 @@ +{{- if .Values.server.verticalAutoscaler.enabled -}} +{{- if .Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler" }} +apiVersion: autoscaling.k8s.io/v1 +{{- else }} +apiVersion: autoscaling.k8s.io/v1beta2 +{{- end }} +kind: VerticalPodAutoscaler +metadata: + name: {{ template "prometheus.server.fullname" . 
}}-vpa + namespace: {{ include "prometheus.namespace" . }} + labels: + {{- include "prometheus.server.labels" . | nindent 4 }} +spec: + targetRef: + apiVersion: "apps/v1" +{{- if .Values.server.statefulSet.enabled }} + kind: StatefulSet +{{- else }} + kind: Deployment +{{- end }} + name: {{ template "prometheus.server.fullname" . }} + updatePolicy: + updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} + resourcePolicy: + containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} +{{- end -}} diff --git a/operations/deployment/helm/prometheus/values.schema.json b/operations/deployment/helm/prometheus/values.schema.json new file mode 100644 index 0000000..1828064 --- /dev/null +++ b/operations/deployment/helm/prometheus/values.schema.json @@ -0,0 +1,738 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "alertRelabelConfigs": { + "type": "object" + }, + "alertmanager": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string" + } + } + }, + "podSecurityContext": { + "type": "object", + "properties": { + "fsGroup": { + "type": "integer" + }, + "runAsGroup": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "type": "integer" + } + } + } + } + }, + "configmapReload": { + "type": "object", + "properties": { + "env": { + "type": "array" + }, + "prometheus": { + "type": "object", + "properties": { + "containerSecurityContext": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "extraArgs": { + "type": "object" + }, + "extraConfigmapMounts": { + "type": "array" + }, + "extraVolumeDirs": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, + "image": { + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "name": { + "type": "string" + }, + "resources": { + "type": "object" + } + } + }, + "reloadUrl": { + "type": "string" + } + } + }, + "extraManifests": { + "type": "array" + }, + "extraScrapeConfigs": { + "type": "string" + }, + "forceNamespace": { + "type": "string" + }, + "imagePullSecrets": { + "type": "array" + }, + "kube-state-metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "networkPolicy": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "podSecurityPolicy": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + }, + "prometheus-node-exporter": { + "type": "object", + "properties": { + "containerSecurityContext": { + "type": "object", + "properties": { + "allowPrivilegeEscalation": { + "type": "boolean" + } + } + }, + "enabled": { + "type": "boolean" + }, + "rbac": { + "type": "object", + "properties": { + "pspEnabled": { + "type": "boolean" + } + } + } + } + }, + "prometheus-pushgateway": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "serviceAnnotations": { + "type": "object", + "properties": { + "prometheus.io/probe": { + "type": "string" + } + } + } + } + }, + "rbac": { + "type": "object", + "properties": { + "create": { + "type": "boolean" + } + } + }, + "ruleFiles": { + "type": "object" + }, + "server": { + "type": "object", + "properties": { + "affinity": { + 
"type": "object" + }, + "alertmanagers": { + "type": "array" + }, + "baseURL": { + "type": "string" + }, + "clusterRoleNameOverride": { + "type": "string" + }, + "command": { + "type": "array" + }, + "configMapOverrideName": { + "type": "string" + }, + "configPath": { + "type": "string" + }, + "containerSecurityContext": { + "type": "object" + }, + "defaultFlagsOverride": { + "type": "array" + }, + "deploymentAnnotations": { + "type": "object" + }, + "dnsConfig": { + "type": "object" + }, + "dnsPolicy": { + "type": "string" + }, + "emptyDir": { + "type": "object", + "properties": { + "sizeLimit": { + "type": "string" + } + } + }, + "enableServiceLinks": { + "type": "boolean" + }, + "env": { + "type": "array" + }, + "exemplars": { + "type": "object" + }, + "extraArgs": { + "type": "object" + }, + "extraConfigmapLabels": { + "type": "object" + }, + "extraConfigmapMounts": { + "type": "array" + }, + "extraFlags": { + "type": "array", + "items": { + "type": "string" + } + }, + "extraHostPathMounts": { + "type": "array" + }, + "extraInitContainers": { + "type": "array" + }, + "extraSecretMounts": { + "type": "array" + }, + "extraVolumeMounts": { + "type": "array" + }, + "extraVolumes": { + "type": "array" + }, + "global": { + "type": "object", + "properties": { + "evaluation_interval": { + "type": "string" + }, + "scrape_interval": { + "type": "string" + }, + "scrape_timeout": { + "type": "string" + } + } + }, + "hostAliases": { + "type": "array" + }, + "hostNetwork": { + "type": "boolean" + }, + "image": { + "type": "object", + "properties": { + "digest": { + "type": "string" + }, + "pullPolicy": { + "type": "string" + }, + "repository": { + "type": "string" + }, + "tag": { + "type": "string" + } + } + }, + "ingress": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "extraLabels": { + "type": "object" + }, + "extraPaths": { + "type": "array" + }, + "hosts": { + "type": "array" + }, + "path": { + "type": "string" + }, + "pathType": { + "type": "string" + }, + "tls": { + "type": "array" + } + } + }, + "livenessProbeFailureThreshold": { + "type": "integer" + }, + "livenessProbeInitialDelay": { + "type": "integer" + }, + "livenessProbePeriodSeconds": { + "type": "integer" + }, + "livenessProbeSuccessThreshold": { + "type": "integer" + }, + "livenessProbeTimeout": { + "type": "integer" + }, + "name": { + "type": "string" + }, + "nodeSelector": { + "type": "object" + }, + "persistentVolume": { + "type": "object", + "properties": { + "accessModes": { + "type": "array", + "items": { + "type": "string" + } + }, + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "existingClaim": { + "type": "string" + }, + "labels": { + "type": "object" + }, + "mountPath": { + "type": "string" + }, + "size": { + "type": "string" + }, + "statefulSetNameOverride": { + "type": "string" + }, + "subPath": { + "type": "string" + } + } + }, + "podAnnotations": { + "type": "object" + }, + "podDisruptionBudget": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "maxUnavailable": { + "type": [ + "string", + "integer" + ] + } + } + }, + "podLabels": { + "type": "object" + }, + "podSecurityPolicy": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + } + } + }, + "portName": { + "type": "string" + }, + "prefixURL": { + "type": "string" + }, + "priorityClassName": { + "type": "string" + }, + "probeHeaders": { + "type": "array" + }, + "probeScheme": { + "type": 
"string" + }, + "readinessProbeFailureThreshold": { + "type": "integer" + }, + "readinessProbeInitialDelay": { + "type": "integer" + }, + "readinessProbePeriodSeconds": { + "type": "integer" + }, + "readinessProbeSuccessThreshold": { + "type": "integer" + }, + "readinessProbeTimeout": { + "type": "integer" + }, + "releaseNamespace": { + "type": "boolean" + }, + "remoteRead": { + "type": "array" + }, + "remoteWrite": { + "type": "array" + }, + "replicaCount": { + "type": "integer" + }, + "resources": { + "type": "object" + }, + "retention": { + "type": "string" + }, + "retentionSize": { + "type": "string" + }, + "revisionHistoryLimit": { + "type": "integer" + }, + "securityContext": { + "type": "object", + "properties": { + "fsGroup": { + "type": "integer" + }, + "runAsGroup": { + "type": "integer" + }, + "runAsNonRoot": { + "type": "boolean" + }, + "runAsUser": { + "type": "integer" + } + } + }, + "service": { + "type": "object", + "properties": { + "additionalPorts": { + "type": "array" + }, + "annotations": { + "type": "object" + }, + "clusterIP": { + "type": "string" + }, + "enabled": { + "type": "boolean" + }, + "externalIPs": { + "type": "array" + }, + "gRPC": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "servicePort": { + "type": "integer" + } + } + }, + "labels": { + "type": "object" + }, + "loadBalancerIP": { + "type": "string" + }, + "loadBalancerSourceRanges": { + "type": "array" + }, + "servicePort": { + "type": "integer" + }, + "sessionAffinity": { + "type": "string" + }, + "statefulsetReplica": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "replica": { + "type": "integer" + } + } + }, + "type": { + "type": "string" + } + } + }, + "sidecarContainers": { + "type": "object" + }, + "sidecarTemplateValues": { + "type": "object" + }, + "startupProbe": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "failureThreshold": { + "type": "integer" + }, + "periodSeconds": { + "type": "integer" + }, + "timeoutSeconds": { + "type": "integer" + } + } + }, + "statefulSet": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "enabled": { + "type": "boolean" + }, + "headless": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "gRPC": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + }, + "servicePort": { + "type": "integer" + } + } + }, + "labels": { + "type": "object" + }, + "servicePort": { + "type": "integer" + } + } + }, + "labels": { + "type": "object" + }, + "podManagementPolicy": { + "type": "string" + }, + "pvcDeleteOnStsDelete": { + "type": "boolean" + }, + "pvcDeleteOnStsScale": { + "type": "boolean" + } + } + }, + "storagePath": { + "type": "string" + }, + "strategy": { + "type": "object", + "properties": { + "type": { + "type": "string" + } + } + }, + "tcpSocketProbeEnabled": { + "type": "boolean" + }, + "terminationGracePeriodSeconds": { + "type": "integer" + }, + "tolerations": { + "type": "array" + }, + "topologySpreadConstraints": { + "type": "array" + }, + "tsdb": { + "type": "object" + }, + "verticalAutoscaler": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean" + } + } + } + } + }, + "scrapeConfigFiles": { + "type": "array" + }, + "serverFiles": { + "type": "object", + "properties": { + "alerting_rules.yml": { + "type": "object" + }, + "alerts": { + "type": "object" + }, + "prometheus.yml": { + "type": "object", + "properties": { + "rule_files": { + "type": 
"array", + "items": { + "type": "string" + } + }, + "scrape_configs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "job_name": { + "type": "string" + }, + "static_configs": { + "type": "array", + "items": { + "type": "object", + "properties": { + "targets": { + "type": "array", + "items": { + "type": "string" + } + } + } + } + } + } + } + } + } + }, + "recording_rules.yml": { + "type": "object" + }, + "rules": { + "type": "object" + } + } + }, + "serviceAccounts": { + "type": "object", + "properties": { + "server": { + "type": "object", + "properties": { + "annotations": { + "type": "object" + }, + "create": { + "type": "boolean" + }, + "name": { + "type": "string" + }, + "automountServiceAccountToken": { + "type": "boolean" + } + } + } + } + } + } +} diff --git a/operations/deployment/helm/prometheus/values.yaml b/operations/deployment/helm/prometheus/values.yaml new file mode 100644 index 0000000..43d0f6a --- /dev/null +++ b/operations/deployment/helm/prometheus/values.yaml @@ -0,0 +1,1253 @@ +# yaml-language-server: $schema=values.schema.json +# Default values for prometheus. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +rbac: + create: true + +podSecurityPolicy: + enabled: false + +imagePullSecrets: [] +# - name: "image-pull-secret" + +## Define serviceAccount names for components. Defaults to component's fully qualified name. +## +serviceAccounts: + server: + create: true + name: "" + annotations: {} + + ## Opt out of automounting Kubernetes API credentials. + ## It will be overriden by server.automountServiceAccountToken value, if set. + # automountServiceAccountToken: false + +## Additional labels to attach to all resources +commonMetaLabels: {} + +## Monitors ConfigMap changes and POSTs to a URL +## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader +## +configmapReload: + ## URL for configmap-reload to use for reloads + ## + reloadUrl: "" + + ## env sets environment variables to pass to the container. Can be set as name/value pairs, + ## read from secrets or configmaps. + env: [] + # - name: SOMEVAR + # value: somevalue + # - name: PASSWORD + # valueFrom: + # secretKeyRef: + # name: mysecret + # key: password + # optional: false + + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true + + ## configmap-reload container name + ## + name: configmap-reload + + ## configmap-reload container image + ## + image: + repository: quay.io/prometheus-operator/prometheus-config-reloader + tag: v0.67.0 + # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). 
+ digest: "" + pullPolicy: IfNotPresent + + # containerPort: 9533 + + ## Additional configmap-reload container arguments + ## + extraArgs: {} + + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] + + ## Additional configmap-reload volume mounts + ## + extraVolumeMounts: [] + + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + ## Security context to be added to configmap-reload container + containerSecurityContext: {} + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + +server: + ## Prometheus server container name + ## + name: server + + ## Opt out of automounting Kubernetes API credentials. + ## If set it will override serviceAccounts.server.automountServiceAccountToken value for ServiceAccount. + # automountServiceAccountToken: false + + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. + ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. + ## + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. + ## + # useExistingClusterRoleName: nameofclusterrole + + ## If set it will override prometheus.server.fullname value for ClusterRole and ClusterRoleBinding + ## + clusterRoleNameOverride: "" + + # Enable only the release namespace for monitoring. By default all namespaces are monitored. + # If releaseNamespace and namespaces are both set a merged list will be monitored. + releaseNamespace: false + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + # OR for adding OAuth authentication to Prometheus + # sidecarContainers: + # oauth-proxy: + # image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.2 + # args: + # - --upstream=http://127.0.0.1:9090 + # - --http-address=0.0.0.0:8081 + # - ... + # ports: + # - containerPort: 8081 + # name: oauth-proxy + # protocol: TCP + # resources: {} + sidecarContainers: {} + + # sidecarTemplateValues - context to be used in template for sidecarContainers + # Example: + # sidecarTemplateValues: *your-custom-globals + # sidecarContainers: + # webserver: |- + # {{ include "webserver-container-template" . }} + # Template for `webserver-container-template` might looks like this: + # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" + # ... + # + sidecarTemplateValues: {} + + ## Prometheus server container image + ## + image: + repository: quay.io/prometheus/prometheus + # if not set appVersion field from Chart.yaml is used + tag: "" + # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). 
+ digest: "" + pullPolicy: IfNotPresent + + ## Prometheus server command + ## + command: [] + + ## prometheus server priorityClassName + ## + priorityClassName: "" + + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. + ## + enableServiceLinks: true + + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. + ## + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + # List of flags to override default parameters, e.g: + # - --enable-feature=agent + # - --storage.agent.retention.max-time=30m + # - --config.file=/etc/config/prometheus.yml + defaultFlagsOverride: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. + # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls BD locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression + + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + ### The data directory used by prometheus to set --storage.tsdb.path + ### When empty server.persistentVolume.mountPath is used instead + storagePath: "" + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read + ## + remoteRead: [] + + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb + ## + tsdb: {} + # out_of_order_time_window: 0s + + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#exemplars + ## Must be enabled via --enable-feature=exemplar-storage + ## + exemplars: {} + # max_exemplars: 100000 + + ## Custom HTTP headers for Liveness/Readiness/Startup Probe + ## + ## Useful for providing HTTP Basic Auth to healthchecks + probeHeaders: [] + # - name: "Authorization" + # value: "Bearer ABCDEabcde12345" + + ## Additional Prometheus server container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] + + ## Additional Prometheus server Volume mounts + ## + extraVolumeMounts: [] + + ## Additional Prometheus server Volumes + ## + extraVolumes: [] + + ## Additional Prometheus server hostPath 
mounts + ## + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true + + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## Extra labels for Prometheus server ConfigMap (ConfigMap that holds serverFiles) + extraConfigmapLabels: {} + + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false + + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## Prometheus server Ingress additional labels + ## + extraLabels: {} + + ## Redirect ingress to an additional defined port on the service + # servicePort: 8081 + + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + path: / + + # pathType is only for k8s >= 1.18 + pathType: Prefix + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + strategy: + type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## Pod topology spread constraints + ## ref. 
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ + topologySpreadConstraints: [] + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + # minAvailable: 1 + ## unhealthyPodEvictionPolicy is available since 1.27.0 (beta) + ## https://kubernetes.io/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy + # unhealthyPodEvictionPolicy: IfHealthyBudget + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## If set it will override the name of the created persistent volume claim + ## generated by the stateful set. + ## + statefulSetNameOverride: "" + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume labels + ## + labels: {} + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. 
+ ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + ## Persistent Volume Claim Selector + ## Useful if Persistent Volumes have been provisioned in advance + ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + # selector: + # matchLabels: + # release: "stable" + # matchExpressions: + # - { key: environment, operator: In, values: [ dev ] } + + ## Persistent Volume Name + ## Useful if Persistent Volumes have been provisioned in advance and you want to use a specific one + ## + # volumeName: "" + + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods + ## + podAnnotations: {} + # iam.amazonaws.com/role: prometheus + + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Number of old history to retain to allow rollback + ## Default Kubernetes value is set to 10 + ## + revisionHistoryLimit: 10 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. + ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## Statefulset's persistent volume claim retention policy + ## pvcDeleteOnStsDelete and pvcDeleteOnStsScale determine whether + ## statefulset's PVCs are deleted (true) or retained (false) on scaling down + ## and deleting statefulset, respectively. Requires 1.27.0+. 
+ ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## + pvcDeleteOnStsDelete: false + pvcDeleteOnStsScale: false + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + tcpSocketProbeEnabled: false + probeScheme: HTTP + readinessProbeInitialDelay: 30 + readinessProbePeriodSeconds: 5 + readinessProbeTimeout: 4 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbePeriodSeconds: 15 + livenessProbeTimeout: 10 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + startupProbe: + enabled: false + periodSeconds: 5 + failureThreshold: 30 + timeoutSeconds: 10 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + # When hostNetwork is enabled, this will set to ClusterFirstWithHostNet automatically + dnsPolicy: ClusterFirst + + # Use hostPort + # hostPort: 9090 + + # Use portName + portName: "" + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + # Custom DNS configuration to be added to prometheus server pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + ## Security context to be added to server container + ## + containerSecurityContext: {} + + service: + ## If false, no Service will be created for the Prometheus server + ## + enabled: true + + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + sessionAffinity: None + type: ClusterIP + + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. 
+ statefulsetReplica: + enabled: false + replica: 0 + + ## Additional port to define in the Service + additionalPorts: [] + # additionalPorts: + # - name: authenticated + # port: 8081 + # targetPort: 8081 + + ## Prometheus server pod termination grace period + ## + terminationGracePeriodSeconds: 300 + + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" + + ## Prometheus' data retention size. Supported units: B, KB, MB, GB, TB, PB, EB. + ## + retentionSize: "" + +## Prometheus server ConfigMap entries for rule files (allow prometheus labels interpolation) +ruleFiles: {} + +## Prometheus server ConfigMap entries for scrape_config_files +## (allows scrape configs defined in additional files) +## +scrapeConfigFiles: [] + +## Prometheus server ConfigMap entries +## +serverFiles: + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' + # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. 
Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. 
+ # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Metric relabel configs to apply to samples before ingestion. + # [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) + # metric_relabel_configs: + # - action: labeldrop + # regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of + # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints' + honor_labels: true + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) 
+ - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + # * `prometheus.io/param_`: If the metrics endpoint uses parameters + # then you can set any parameter + - job_name: 'kubernetes-service-endpoints-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: (.+?)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: service + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + honor_labels: true + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: service + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`, + # except if `prometheus.io/scrape-slow` is set to `true` as well. + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + honor_labels: true + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: drop + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + + # Example Scrape config for pods which should be scraped slower. 
An useful example + # would be stackriver-exporter which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods-slow' + honor_labels: true + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) + replacement: '[$2]:$1' + target_label: __address__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] + action: replace + regex: (\d+);((([0-9]+?)(\.|$)){4}) + replacement: $2:$1 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) + replacement: __param_$1 + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: pod + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed|Completed + action: drop + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: node + +# adds additional scrape configs to prometheus.yml +# must be a string so you have to add a | after extraScrapeConfigs: +# example adds prometheus-blackbox-exporter scrape config +extraScrapeConfigs: "" + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + +# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager +# useful in H/A prometheus with different external labels but the same alerts +alertRelabelConfigs: {} + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + +networkPolicy: + ## Enable creation of NetworkPolicy resources. 
+ ## + enabled: false + +# Force namespace of namespaced resources +forceNamespace: "" + +# Extra manifests to deploy as an array +extraManifests: [] + # - | + # apiVersion: v1 + # kind: ConfigMap + # metadata: + # labels: + # name: prometheus-extra + # data: + # extra-data: "value" + +# Configuration of subcharts defined in Chart.yaml + +## alertmanager sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager +## +alertmanager: + ## If false, alertmanager will not be installed + ## + enabled: true + + persistence: + size: 2Gi + + podSecurityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + +## kube-state-metrics sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics +## +kube-state-metrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + +## prometheus-node-exporter sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter +## +prometheus-node-exporter: + ## If false, node-exporter will not be installed + ## + enabled: true + + rbac: + pspEnabled: false + + containerSecurityContext: + allowPrivilegeEscalation: false + +## prometheus-pushgateway sub-chart configurable values +## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway +## +prometheus-pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true + + # Optional service annotations + serviceAnnotations: + prometheus.io/probe: pushgateway From 98b2bc52845a74a9786a189dface0c375ab0b26e Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Wed, 6 Dec 2023 23:26:20 +0530 Subject: [PATCH 17/99] Added env file --- operations/deployment/helm/prometheus/ENV_FILE | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 operations/deployment/helm/prometheus/ENV_FILE diff --git a/operations/deployment/helm/prometheus/ENV_FILE b/operations/deployment/helm/prometheus/ENV_FILE new file mode 100644 index 0000000..8ce79ba --- /dev/null +++ b/operations/deployment/helm/prometheus/ENV_FILE @@ -0,0 +1,2 @@ +GF_SECURITY_ADMIN_USER=admin +GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file From 7274e7fddbf4113b8ebaa0edb386ea5d4151e3f8 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Thu, 7 Dec 2023 18:33:43 +0530 Subject: [PATCH 18/99] Test with new format of prometheus --- .../deployment/helm/prometheus/Chart.yaml | 58 +- .../helm/prometheus/bitops.config.yaml | 14 + .../deployment/helm/prometheus/values.yaml | 2648 ++++++++++------- 3 files changed, 1564 insertions(+), 1156 deletions(-) create mode 100644 operations/deployment/helm/prometheus/bitops.config.yaml diff --git a/operations/deployment/helm/prometheus/Chart.yaml b/operations/deployment/helm/prometheus/Chart.yaml index f176a65..4d0fbc2 100644 --- a/operations/deployment/helm/prometheus/Chart.yaml +++ b/operations/deployment/helm/prometheus/Chart.yaml @@ -1,53 +1,9 @@ apiVersion: v2 -name: prometheus -appVersion: v2.48.0 -version: 25.8.0 -kubeVersion: ">=1.19.0-0" -description: Prometheus is a monitoring system and time series database. 
-home: https://prometheus.io/ -icon: https://raw.githubusercontent.com/prometheus/prometheus.github.io/master/assets/prometheus_logo-cb55bb5c346.png -sources: - - https://github.com/prometheus/alertmanager - - https://github.com/prometheus/prometheus - - https://github.com/prometheus/pushgateway - - https://github.com/prometheus/node_exporter - - https://github.com/kubernetes/kube-state-metrics -maintainers: - - name: gianrubio - email: gianrubio@gmail.com - - name: zanhsieh - email: zanhsieh@gmail.com - - name: Xtigyro - email: miroslav.hadzhiev@gmail.com - - name: naseemkullah - email: naseem@transit.app - - name: zeritti - email: rootsandtrees@posteo.de -type: application +name: prometheus chart wrapper +description: A Helm chart wrapper for prometheus +version: 0.1.0 + dependencies: - - name: alertmanager - version: "1.7.*" - repository: https://prometheus-community.github.io/helm-charts - condition: alertmanager.enabled - - name: kube-state-metrics - version: "5.15.*" - repository: https://prometheus-community.github.io/helm-charts - condition: kube-state-metrics.enabled - - name: prometheus-node-exporter - version: "4.24.*" - repository: https://prometheus-community.github.io/helm-charts - condition: prometheus-node-exporter.enabled - - name: prometheus-pushgateway - version: "2.4.*" - repository: https://prometheus-community.github.io/helm-charts - condition: prometheus-pushgateway.enabled -keywords: - - monitoring - - prometheus -annotations: - "artifacthub.io/license": Apache-2.0 - "artifacthub.io/links": | - - name: Chart Source - url: https://github.com/prometheus-community/helm-charts - - name: Upstream Project - url: https://github.com/prometheus/prometheus +- name: prometheus + version: 13.8.0 + repository: https://prometheus-community.github.io/helm-charts \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/bitops.config.yaml b/operations/deployment/helm/prometheus/bitops.config.yaml new file mode 100644 index 0000000..26d9981 --- /dev/null +++ b/operations/deployment/helm/prometheus/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: prometheus + timeout: 500s + debug: true + atomic: true + force: false + dry-run: false + options: + release-name: prometheus + skip-deploy: false + kubeconfig: + fetch: + enabled: true \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/values.yaml b/operations/deployment/helm/prometheus/values.yaml index 43d0f6a..528b068 100644 --- a/operations/deployment/helm/prometheus/values.yaml +++ b/operations/deployment/helm/prometheus/values.yaml @@ -1,1253 +1,1691 @@ -# yaml-language-server: $schema=values.schema.json -# Default values for prometheus. -# This is a YAML-formatted file. -# Declare variables to be passed into your templates. - -rbac: - create: true - -podSecurityPolicy: - enabled: false - -imagePullSecrets: [] -# - name: "image-pull-secret" - -## Define serviceAccount names for components. Defaults to component's fully qualified name. -## -serviceAccounts: - server: +prometheus: + rbac: create: true - name: "" - annotations: {} - ## Opt out of automounting Kubernetes API credentials. - ## It will be overriden by server.automountServiceAccountToken value, if set. 
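The rewritten Chart.yaml above turns this directory into a thin wrapper chart: the upstream prometheus chart (13.8.0 from the prometheus-community repository) is declared as a dependency, so it has to be fetched with `helm dependency update` before the wrapper can be rendered. Note that Helm expects chart names to be lower-case letters, numbers and dashes, so a spelling such as `prometheus-wrapper` is the safer form for the `name:` field. Because the upstream chart is now a sub-chart, every value intended for it must sit under a top-level key matching the dependency name, which is why the new values.yaml nests everything under `prometheus:`. A minimal sketch of an additional override file passed with `-f` (file name and retention value are illustrative):

# my-overrides.yaml (hypothetical)
prometheus:
  server:
    retention: "30d"       # forwarded to the upstream chart as server.retention
  alertmanager:
    enabled: false         # forwarded as alertmanager.enabled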
- # automountServiceAccountToken: false + podSecurityPolicy: + enabled: false -## Additional labels to attach to all resources -commonMetaLabels: {} + imagePullSecrets: + # - name: "image-pull-secret" -## Monitors ConfigMap changes and POSTs to a URL -## Ref: https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader -## -configmapReload: - ## URL for configmap-reload to use for reloads + ## Define serviceAccount names for components. Defaults to component's fully qualified name. ## - reloadUrl: "" - - ## env sets environment variables to pass to the container. Can be set as name/value pairs, - ## read from secrets or configmaps. - env: [] - # - name: SOMEVAR - # value: somevalue - # - name: PASSWORD - # valueFrom: - # secretKeyRef: - # name: mysecret - # key: password - # optional: false - - prometheus: - ## If false, the configmap-reload container will not be deployed + serviceAccounts: + alertmanager: + create: true + name: + annotations: {} + nodeExporter: + create: true + name: + annotations: {} + pushgateway: + create: true + name: + annotations: {} + server: + create: true + name: + annotations: {} + + alertmanager: + ## If false, alertmanager will not be installed ## enabled: true - ## configmap-reload container name + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY + ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but wants alertmanager to operate on their own namespaces, instead of clusterwide. + useClusterRole: true + + ## Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. + useExistingRole: false + + ## alertmanager container name ## - name: configmap-reload + name: alertmanager - ## configmap-reload container image + ## alertmanager container image ## image: - repository: quay.io/prometheus-operator/prometheus-config-reloader - tag: v0.67.0 - # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). - digest: "" + repository: quay.io/prometheus/alertmanager + tag: v0.21.0 pullPolicy: IfNotPresent - # containerPort: 9533 + ## alertmanager priorityClassName + ## + priorityClassName: "" - ## Additional configmap-reload container arguments + ## Additional alertmanager container arguments ## extraArgs: {} - ## Additional configmap-reload volume directories + ## Additional InitContainers to initialize the pod ## - extraVolumeDirs: [] + extraInitContainers: [] - ## Additional configmap-reload volume mounts - ## - extraVolumeMounts: [] + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" + + ## External URL which can access alertmanager + baseURL: "http://localhost:9093" - ## Additional configmap-reload mounts + ## Additional alertmanager container environment variable + ## For instance to add a http_proxy ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d + extraEnv: {} + + ## Additional alertmanager Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
+ extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets # subPath: "" - # configMap: prometheus-alerts + # secretName: alertmanager-secret-files # readOnly: true - ## Security context to be added to configmap-reload container - containerSecurityContext: {} + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configMapOverrideName: "" + + ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config + ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml + ## to NOT generate a ConfigMap resource + ## + configFromSecret: "" + + ## The configuration file name to be loaded to alertmanager + ## Must match the key within configuration loaded from ConfigMap/Secret + ## + configFileName: alertmanager.yml + + ingress: + ## If true, alertmanager Ingress will be created + ## + enabled: false + + ## alertmanager Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## alertmanager Ingress additional labels + ## + extraLabels: {} + + ## alertmanager Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - alertmanager.domain.com + # - domain.com/alertmanager + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## alertmanager Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - alertmanager.domain.com + + ## Alertmanager Deployment Strategy type + # strategy: + # type: Recreate + + ## Node tolerations for alertmanager scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for alertmanager pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + + ## Pod affinity + ## + affinity: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 + + ## Use an alternate scheduler, e.g. "stork". 
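The configMapOverrideName, configFromSecret and configFileName settings above only decide where the Alertmanager configuration is loaded from; the payload itself is an ordinary alertmanager.yml. A minimal sketch of such a file, with a purely illustrative webhook receiver:

route:
  group_by: ['alertname']
  receiver: default-webhook
receivers:
  - name: default-webhook
    webhook_configs:
      - url: http://alert-receiver.example.svc:8080/alerts   # placeholder endpoint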
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, alertmanager will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## alertmanager data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## alertmanager data Persistent Volume Claim annotations + ## + annotations: {} + + ## alertmanager data Persistent Volume existing claim name + ## Requires alertmanager.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## alertmanager data Persistent Volume mount root path + ## + mountPath: /data + + ## alertmanager data Persistent Volume size + ## + size: 2Gi + + ## alertmanager data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## alertmanager data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of alertmanager data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" + + emptyDir: + ## alertmanager emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to alertmanager pods + ## + podAnnotations: {} + ## Tell prometheus to use a specific set of alertmanager pods + ## instead of all alertmanager pods found in the same namespace + ## Useful if you deploy multiple releases within the same namespace + ## + ## prometheus.io/probe: alertmanager-teamA + + ## Labels to be added to Prometheus AlertManager pods + ## + podLabels: {} + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. 
+ ## This allows to scale replicas to more than 1 pod + ## + enabled: false + + annotations: {} + labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + enableMeshPeer: false + + servicePort: 80 - ## configmap-reload resource requests and limits + ## alertmanager resource requests and limits ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ ## resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to alertmanager pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to alertmanager pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 -server: - ## Prometheus server container name - ## - name: server + service: + annotations: {} + labels: {} + clusterIP: "" - ## Opt out of automounting Kubernetes API credentials. - ## If set it will override serviceAccounts.server.automountServiceAccountToken value for ServiceAccount. - # automountServiceAccountToken: false + ## Enabling peer mesh service end points for enabling the HA alert manager + ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md + # enableMeshPeer : true - ## Use a ClusterRole (and ClusterRoleBinding) - ## - If set to false - we define a RoleBinding in the defined namespaces ONLY - ## - ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. - ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. - ## - ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. - ## - # useExistingClusterRoleName: nameofclusterrole + ## List of IP addresses at which the alertmanager service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] - ## If set it will override prometheus.server.fullname value for ClusterRole and ClusterRoleBinding - ## - clusterRoleNameOverride: "" - - # Enable only the release namespace for monitoring. By default all namespaces are monitored. - # If releaseNamespace and namespaces are both set a merged list will be monitored. - releaseNamespace: false - - ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. - # namespaces: - # - yournamespace - - # sidecarContainers - add more containers to prometheus server - # Key/Value where Key is the sidecar `- name: ` - # Example: - # sidecarContainers: - # webserver: - # image: nginx - # OR for adding OAuth authentication to Prometheus - # sidecarContainers: - # oauth-proxy: - # image: quay.io/oauth2-proxy/oauth2-proxy:v7.1.2 - # args: - # - --upstream=http://127.0.0.1:9090 - # - --http-address=0.0.0.0:8081 - # - ... 
- # ports: - # - containerPort: 8081 - # name: oauth-proxy - # protocol: TCP - # resources: {} - sidecarContainers: {} - - # sidecarTemplateValues - context to be used in template for sidecarContainers - # Example: - # sidecarTemplateValues: *your-custom-globals - # sidecarContainers: - # webserver: |- - # {{ include "webserver-container-template" . }} - # Template for `webserver-container-template` might looks like this: - # image: "{{ .Values.server.sidecarTemplateValues.repository }}:{{ .Values.server.sidecarTemplateValues.tag }}" - # ... - # - sidecarTemplateValues: {} - - ## Prometheus server container image - ## - image: - repository: quay.io/prometheus/prometheus - # if not set appVersion field from Chart.yaml is used - tag: "" - # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value). - digest: "" - pullPolicy: IfNotPresent - - ## Prometheus server command - ## - command: [] + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 80 + # nodePort: 30000 + sessionAffinity: None + type: ClusterIP - ## prometheus server priorityClassName + ## Monitors ConfigMap changes and POSTs to a URL + ## Ref: https://github.com/jimmidyson/configmap-reload ## - priorityClassName: "" + configmapReload: + prometheus: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true - ## EnableServiceLinks indicates whether information about services should be injected - ## into pod's environment variables, matching the syntax of Docker links. - ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. - ## - enableServiceLinks: true + ## configmap-reload container name + ## + name: configmap-reload - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent - ## External URL which can access prometheus - ## Maybe same with Ingress host name - baseURL: "" + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] - ## Additional server container environment variables - ## - ## You specify this manually like you would a raw deployment manifest. - ## This means you can bind in environment variables from secrets. - ## - ## e.g. static environment variable: - ## - name: DEMO_GREETING - ## value: "Hello from the environment" - ## - ## e.g. secret environment variable: - ## - name: USERNAME - ## valueFrom: - ## secretKeyRef: - ## name: mysecret - ## key: username - env: [] - - # List of flags to override default parameters, e.g: - # - --enable-feature=agent - # - --storage.agent.retention.max-time=30m - # - --config.file=/etc/config/prometheus.yml - defaultFlagsOverride: [] - - extraFlags: - - web.enable-lifecycle - ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as - ## deleting time series. This is disabled by default. 
- # - web.enable-admin-api - ## - ## storage.tsdb.no-lockfile flag controls BD locking - # - storage.tsdb.no-lockfile - ## - ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) - # - storage.tsdb.wal-compression - - ## Path to a configuration file on prometheus server container FS - configPath: /etc/config/prometheus.yml - - ### The data directory used by prometheus to set --storage.tsdb.path - ### When empty server.persistentVolume.mountPath is used instead - storagePath: "" - - global: - ## How frequently to scrape targets by default - ## - scrape_interval: 1m - ## How long until a scrape request times out - ## - scrape_timeout: 10s - ## How frequently to evaluate rules - ## - evaluation_interval: 1m - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write - ## - remoteWrite: [] - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read - ## - remoteRead: [] - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb - ## - tsdb: {} - # out_of_order_time_window: 0s + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#exemplars - ## Must be enabled via --enable-feature=exemplar-storage - ## - exemplars: {} - # max_exemplars: 100000 - ## Custom HTTP headers for Liveness/Readiness/Startup Probe - ## - ## Useful for providing HTTP Basic Auth to healthchecks - probeHeaders: [] - # - name: "Authorization" - # value: "Bearer ABCDEabcde12345" + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + alertmanager: + ## If false, the configmap-reload container will not be deployed + ## + enabled: true - ## Additional Prometheus server container arguments - ## - extraArgs: {} + ## configmap-reload container name + ## + name: configmap-reload - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] + ## configmap-reload container image + ## + image: + repository: jimmidyson/configmap-reload + tag: v0.5.0 + pullPolicy: IfNotPresent - ## Additional Prometheus server Volume mounts - ## - extraVolumeMounts: [] + ## Additional configmap-reload container arguments + ## + extraArgs: {} + ## Additional configmap-reload volume directories + ## + extraVolumeDirs: [] - ## Additional Prometheus server Volumes - ## - extraVolumes: [] - ## Additional Prometheus server hostPath mounts - ## - extraHostPathMounts: [] - # - name: certs-dir - # mountPath: /etc/kubernetes/certs - # subPath: "" - # hostPath: /etc/kubernetes/certs - # readOnly: true - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # subPath: "" - # configMap: certs-configmap - # readOnly: true - - ## Additional Prometheus server Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: prom-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/server-configmap.yaml - ## to NOT generate a ConfigMap resource + ## Additional configmap-reload mounts + ## + extraConfigmapMounts: [] + # - name: prometheus-alerts + # mountPath: /etc/alerts.d + # subPath: "" + # configMap: prometheus-alerts + # readOnly: true + + + ## configmap-reload resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + + kubeStateMetrics: + ## If false, kube-state-metrics sub-chart will not be installed + ## + enabled: true + + ## kube-state-metrics sub-chart configurable values + ## Please see https://github.com/kubernetes/kube-state-metrics/tree/master/charts/kube-state-metrics ## - configMapOverrideName: "" + # kube-state-metrics: - ## Extra labels for Prometheus server ConfigMap (ConfigMap that holds serverFiles) - extraConfigmapLabels: {} + nodeExporter: + ## If false, node-exporter will not be installed + ## + enabled: true - ingress: - ## If true, Prometheus server Ingress will be created + ## If true, node-exporter pods share the host network namespace ## - enabled: false + hostNetwork: true - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx + ## If true, node-exporter pods share the host PID namespace + ## + hostPID: true - ## Prometheus server Ingress annotations + ## If true, node-exporter pods mounts host / at /host/root ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' + hostRootfs: true - ## Prometheus server Ingress additional labels + ## node-exporter container name ## - extraLabels: {} + name: node-exporter - ## Redirect ingress to an additional defined port on the service - # servicePort: 8081 + ## node-exporter container image + ## + image: + repository: quay.io/prometheus/node-exporter + tag: v1.1.2 + pullPolicy: IfNotPresent + + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## node-exporter priorityClassName + ## + priorityClassName: "" - ## Prometheus server Ingress hostnames with optional path - ## Must be provided if Ingress is enabled + ## Custom Update Strategy ## - hosts: [] - # - prometheus.domain.com - # - domain.com/prometheus + updateStrategy: + type: RollingUpdate - path: / + ## Additional node-exporter container arguments + ## + extraArgs: {} + + ## Additional InitContainers to initialize the pod + ## + extraInitContainers: [] - # pathType is only for k8s >= 1.18 - pathType: Prefix + ## 
Additional node-exporter hostPath mounts + ## + extraHostPathMounts: [] + # - name: textfile-dir + # mountPath: /srv/txt_collector + # hostPath: /var/lib/node-exporter + # readOnly: true + # mountPropagation: HostToContainer - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # configMap: certs-configmap + # readOnly: true - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace + ## Node tolerations for node-exporter scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for node-exporter pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ ## - tls: [] - # - secretName: prometheus-server-tls - # hosts: - # - prometheus.domain.com + nodeSelector: {} - ## Server Deployment Strategy type - strategy: - type: Recreate + ## Annotations to be added to node-exporter pods + ## + podAnnotations: {} - ## hostAliases allows adding entries to /etc/hosts inside the containers - hostAliases: [] - # - ip: "127.0.0.1" - # hostnames: - # - "example.com" + ## Labels to be added to node-exporter pods + ## + pod: + labels: {} - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for Prometheus server pod assignment - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ - ## - nodeSelector: {} + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 - ## Pod affinity - ## - affinity: {} + ## node-exporter resource limits & requests + ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 200m + # memory: 50Mi + # requests: + # cpu: 100m + # memory: 30Mi + + # Custom DNS configuration to be added to node-exporter pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to node-exporter pods + ## + securityContext: + fsGroup: 65534 + runAsGroup: 65534 + runAsNonRoot: true + runAsUser: 65534 + + service: + annotations: + prometheus.io/scrape: "true" + labels: {} - ## Pod topology spread constraints - ## ref. 
https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/ - topologySpreadConstraints: [] + # Exposed as a headless service: + # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services + clusterIP: None - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - # minAvailable: 1 - ## unhealthyPodEvictionPolicy is available since 1.27.0 (beta) - ## https://kubernetes.io/docs/tasks/run-application/configure-pdb/#unhealthy-pod-eviction-policy - # unhealthyPodEvictionPolicy: IfHealthyBudget - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: + ## List of IP addresses at which the node-exporter service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] - persistentVolume: - ## If true, Prometheus server will create/use a Persistent Volume Claim - ## If false, use emptyDir + hostPort: 9100 + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9100 + type: ClusterIP + + server: + ## Prometheus server container name ## enabled: true - ## If set it will override the name of the created persistent volume claim - ## generated by the stateful set. + ## Use a ClusterRole (and ClusterRoleBinding) + ## - If set to false - we define a RoleBinding in the defined namespaces ONLY + ## + ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. + ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. ## - statefulSetNameOverride: "" + ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. + ## + # useExistingClusterRoleName: nameofclusterrole + + ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. + # namespaces: + # - yournamespace + + name: server - ## Prometheus server data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + # sidecarContainers - add more containers to prometheus server + # Key/Value where Key is the sidecar `- name: ` + # Example: + # sidecarContainers: + # webserver: + # image: nginx + sidecarContainers: {} + + ## Prometheus server container image ## - accessModes: - - ReadWriteOnce + image: + repository: quay.io/prometheus/prometheus + tag: v2.26.0 + pullPolicy: IfNotPresent - ## Prometheus server data Persistent Volume labels + ## prometheus server priorityClassName ## - labels: {} + priorityClassName: "" - ## Prometheus server data Persistent Volume annotations + ## EnableServiceLinks indicates whether information about services should be injected + ## into pod's environment variables, matching the syntax of Docker links. + ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. 
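The storageClass comments in these persistentVolume blocks all follow the same convention: omit the key to use the cluster's default provisioner (gp2 on AWS, standard on GKE), set it to "-" to disable dynamic provisioning, or name a class explicitly. A sketch of the explicit form for the server volume as it would sit in this wrapper's values.yaml (the class name depends on the target cluster):

prometheus:
  server:
    persistentVolume:
      enabled: true
      size: 8Gi
      storageClass: gp2    # example only; use whatever StorageClass the cluster provides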
## - annotations: {} + enableServiceLinks: true - ## Prometheus server data Persistent Volume existing claim name - ## Requires server.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" + ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug + ## so that the various internal URLs are still able to access as they are in the default case. + ## (Optional) + prefixURL: "" - ## Prometheus server data Persistent Volume mount root path + ## External URL which can access prometheus + ## Maybe same with Ingress host name + baseURL: "" + + ## Additional server container environment variables + ## + ## You specify this manually like you would a raw deployment manifest. + ## This means you can bind in environment variables from secrets. ## - mountPath: /data + ## e.g. static environment variable: + ## - name: DEMO_GREETING + ## value: "Hello from the environment" + ## + ## e.g. secret environment variable: + ## - name: USERNAME + ## valueFrom: + ## secretKeyRef: + ## name: mysecret + ## key: username + env: [] + + extraFlags: + - web.enable-lifecycle + ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as + ## deleting time series. This is disabled by default. + # - web.enable-admin-api + ## + ## storage.tsdb.no-lockfile flag controls BD locking + # - storage.tsdb.no-lockfile + ## + ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) + # - storage.tsdb.wal-compression - ## Prometheus server data Persistent Volume size + ## Path to a configuration file on prometheus server container FS + configPath: /etc/config/prometheus.yml + + global: + ## How frequently to scrape targets by default + ## + scrape_interval: 1m + ## How long until a scrape request times out + ## + scrape_timeout: 10s + ## How frequently to evaluate rules + ## + evaluation_interval: 1m + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write + ## + remoteWrite: [] + ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read ## - size: 8Gi + remoteRead: [] - ## Prometheus server data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) + ## Additional Prometheus server container arguments ## - # storageClass: "-" + extraArgs: {} - ## Prometheus server data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
+ ## Additional InitContainers to initialize the pod ## - # volumeBindingMode: "" + extraInitContainers: [] - ## Subdirectory of Prometheus server data Persistent Volume to mount - ## Useful if the volume's root directory is not empty + ## Additional Prometheus server Volume mounts ## - subPath: "" + extraVolumeMounts: [] - ## Persistent Volume Claim Selector - ## Useful if Persistent Volumes have been provisioned in advance - ## Ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## Additional Prometheus server Volumes ## - # selector: - # matchLabels: - # release: "stable" - # matchExpressions: - # - { key: environment, operator: In, values: [ dev ] } + extraVolumes: [] - ## Persistent Volume Name - ## Useful if Persistent Volumes have been provisioned in advance and you want to use a specific one + ## Additional Prometheus server hostPath mounts ## - # volumeName: "" + extraHostPathMounts: [] + # - name: certs-dir + # mountPath: /etc/kubernetes/certs + # subPath: "" + # hostPath: /etc/kubernetes/certs + # readOnly: true + + extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /prometheus + # subPath: "" + # configMap: certs-configmap + # readOnly: true - emptyDir: - ## Prometheus server emptyDir volume size limit + ## Additional Prometheus server Secret mounts + # Defines additional mounts with secrets. Secrets must be manually created in the namespace. + extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # subPath: "" + # secretName: prom-secret-files + # readOnly: true + + ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} + ## Defining configMapOverrideName will cause templates/server-configmap.yaml + ## to NOT generate a ConfigMap resource ## - sizeLimit: "" + configMapOverrideName: "" - ## Annotations to be added to Prometheus server pods - ## - podAnnotations: {} - # iam.amazonaws.com/role: prometheus + ingress: + ## If true, Prometheus server Ingress will be created + ## + enabled: false - ## Labels to be added to Prometheus server pods - ## - podLabels: {} + ## Prometheus server Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' - ## Prometheus AlertManager configuration - ## - alertmanagers: [] + ## Prometheus server Ingress additional labels + ## + extraLabels: {} - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## Prometheus server Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - prometheus.domain.com + # - domain.com/prometheus + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## Prometheus server Ingress TLS configuration + ## Secrets must be manually created in the namespace ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + tls: [] + # - secretName: prometheus-server-tls + # hosts: + # - prometheus.domain.com + + ## Server Deployment Strategy type + # strategy: + # type: Recreate + + ## hostAliases allows adding entries to /etc/hosts inside the containers + hostAliases: [] + # - ip: "127.0.0.1" + # hostnames: + # - "example.com" + + ## Node tolerations for server scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for Prometheus server pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 + ## Pod affinity + ## + affinity: {} - ## Number of old history to retain to allow rollback - ## Default Kubernetes value is set to 10 - ## - revisionHistoryLimit: 10 + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: + enabled: false + maxUnavailable: 1 - ## Annotations to be added to deployment - ## - deploymentAnnotations: {} + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + persistentVolume: + ## If true, Prometheus server will create/use a Persistent Volume Claim + ## If false, use emptyDir + ## + enabled: true + + ## Prometheus server data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce + + ## Prometheus server data Persistent Volume annotations + ## + annotations: {} + + ## Prometheus server data Persistent Volume existing claim name + ## Requires server.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" + + ## Prometheus server data Persistent Volume mount root path + ## + mountPath: /data + + ## Prometheus server data Persistent Volume size + ## + size: 8Gi + + ## Prometheus server data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Prometheus server data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of Prometheus server data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod + emptyDir: + ## Prometheus server emptyDir volume size limit + ## + sizeLimit: "" + + ## Annotations to be added to Prometheus server pods ## - enabled: false + podAnnotations: {} + # iam.amazonaws.com/role: prometheus - annotations: {} - labels: {} - podManagementPolicy: OrderedReady + ## Labels to be added to Prometheus server pods + ## + podLabels: {} + + ## Prometheus AlertManager configuration + ## + alertmanagers: [] - ## Alertmanager headless service to use for the statefulset + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ ## - headless: + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) + ## + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + statefulSet: + ## If true, use a statefulset instead of a deployment for pod management. + ## This allows to scale replicas to more than 1 pod + ## + enabled: false + annotations: {} labels: {} + podManagementPolicy: OrderedReady + + ## Alertmanager headless service to use for the statefulset + ## + headless: + annotations: {} + labels: {} + servicePort: 80 + ## Enable gRPC port on service to allow auto discovery with thanos-querier + gRPC: + enabled: false + servicePort: 10901 + # nodePort: 10901 + + ## Prometheus server readiness and liveness probe initial delay and timeout + ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ + ## + readinessProbeInitialDelay: 30 + readinessProbePeriodSeconds: 5 + readinessProbeTimeout: 4 + readinessProbeFailureThreshold: 3 + readinessProbeSuccessThreshold: 1 + livenessProbeInitialDelay: 30 + livenessProbePeriodSeconds: 15 + livenessProbeTimeout: 10 + livenessProbeFailureThreshold: 3 + livenessProbeSuccessThreshold: 1 + + ## Prometheus server resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 500m + # memory: 512Mi + # requests: + # cpu: 500m + # memory: 512Mi + + # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), + # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working + ## + hostNetwork: false + + # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet + dnsPolicy: ClusterFirst + + ## Vertical Pod Autoscaler config + ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler + verticalAutoscaler: + ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) + enabled: false + # updateMode: "Auto" + # containerPolicies: + # - containerName: 'prometheus-server' + + # Custom DNS configuration to be added to prometheus server pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # 
searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + ## Security context to be added to server pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + runAsGroup: 65534 + fsGroup: 65534 + + service: + annotations: {} + labels: {} + clusterIP: "" + + ## List of IP addresses at which the Prometheus server service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] servicePort: 80 + sessionAffinity: None + type: ClusterIP + ## Enable gRPC port on service to allow auto discovery with thanos-querier gRPC: enabled: false servicePort: 10901 # nodePort: 10901 - ## Statefulset's persistent volume claim retention policy - ## pvcDeleteOnStsDelete and pvcDeleteOnStsScale determine whether - ## statefulset's PVCs are deleted (true) or retained (false) on scaling down - ## and deleting statefulset, respectively. Requires 1.27.0+. - ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention + ## If using a statefulSet (statefulSet.enabled=true), configure the + ## service to connect to a specific replica to have a consistent view + ## of the data. + statefulsetReplica: + enabled: false + replica: 0 + + ## Prometheus server pod termination grace period ## - pvcDeleteOnStsDelete: false - pvcDeleteOnStsScale: false + terminationGracePeriodSeconds: 300 - ## Prometheus server readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - tcpSocketProbeEnabled: false - probeScheme: HTTP - readinessProbeInitialDelay: 30 - readinessProbePeriodSeconds: 5 - readinessProbeTimeout: 4 - readinessProbeFailureThreshold: 3 - readinessProbeSuccessThreshold: 1 - livenessProbeInitialDelay: 30 - livenessProbePeriodSeconds: 15 - livenessProbeTimeout: 10 - livenessProbeFailureThreshold: 3 - livenessProbeSuccessThreshold: 1 - startupProbe: - enabled: false - periodSeconds: 5 - failureThreshold: 30 - timeoutSeconds: 10 + ## Prometheus data retention period (default if not specified is 15 days) + ## + retention: "15d" - ## Prometheus server resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 500m - # memory: 512Mi - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - ## - hostNetwork: false + pushgateway: + ## If false, pushgateway will not be installed + ## + enabled: true - # When hostNetwork is enabled, this will set to ClusterFirstWithHostNet automatically - dnsPolicy: ClusterFirst + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: - # Use hostPort - # hostPort: 9090 + ## pushgateway container name + ## + name: pushgateway - # Use portName - portName: "" + ## pushgateway container image + ## + image: + repository: prom/pushgateway + tag: v1.3.1 + pullPolicy: IfNotPresent - ## Vertical Pod Autoscaler config - ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler - verticalAutoscaler: - ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) - enabled: false - # updateMode: "Auto" - # containerPolicies: - # - containerName: 'prometheus-server' - - # Custom DNS configuration to be added to prometheus server pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - - ## Security context to be added to server pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 + ## pushgateway priorityClassName + ## + priorityClassName: "" - ## Security context to be added to server container - ## - containerSecurityContext: {} + ## Additional pushgateway container arguments + ## + ## for example: persistence.file: /data/pushgateway.data + extraArgs: {} - service: - ## If false, no Service will be created for the Prometheus server + ## Additional InitContainers to initialize the pod ## - enabled: true + extraInitContainers: [] + + ingress: + ## If true, pushgateway Ingress will be created + ## + enabled: false + + ## pushgateway Ingress annotations + ## + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: 'true' + + ## pushgateway Ingress hostnames with optional path + ## Must be provided if Ingress is enabled + ## + hosts: [] + # - pushgateway.domain.com + # - domain.com/pushgateway + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + + ## pushgateway Ingress TLS configuration + ## Secrets must be manually created in the namespace + ## + tls: [] + # - secretName: prometheus-alerts-tls + # hosts: + # - pushgateway.domain.com - annotations: {} - labels: {} - clusterIP: "" + ## Node tolerations for pushgateway scheduling to nodes with taints + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ + ## + tolerations: [] + # - key: "key" + # operator: "Equal|Exists" + # value: "value" + # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" + + ## Node labels for pushgateway pod assignment + ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips + ## Annotations to be added to pushgateway pods ## - externalIPs: [] + podAnnotations: {} - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - sessionAffinity: None - type: ClusterIP + ## Labels to be added to pushgateway pods + ## + podLabels: {} - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: + ## Specify if a Pod Security Policy for node-exporter must be created + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ + ## + podSecurityPolicy: + annotations: {} + ## Specify pod annotations + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp + ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl + ## + # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' + # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + + replicaCount: 1 + + ## Annotations to be added to deployment + ## + deploymentAnnotations: {} + + ## PodDisruptionBudget settings + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ + ## + podDisruptionBudget: enabled: false - servicePort: 10901 - # nodePort: 10901 + maxUnavailable: 1 + + ## pushgateway resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 10m + # memory: 32Mi + # requests: + # cpu: 10m + # memory: 32Mi + + # Custom DNS configuration to be added to push-gateway pods + dnsConfig: {} + # nameservers: + # - 1.2.3.4 + # searches: + # - ns1.svc.cluster-domain.example + # - my.dns.search.suffix + # options: + # - name: ndots + # value: "2" + # - name: edns0 + + ## Security context to be added to push-gateway pods + ## + securityContext: + runAsUser: 65534 + runAsNonRoot: true + + service: + annotations: + prometheus.io/probe: pushgateway + labels: {} + clusterIP: "" + + ## List of IP addresses at which the pushgateway service is available + ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips + ## + externalIPs: [] + + loadBalancerIP: "" + loadBalancerSourceRanges: [] + servicePort: 9091 + type: ClusterIP + + ## pushgateway Deployment Strategy type + # strategy: + # type: Recreate - ## If using a statefulSet (statefulSet.enabled=true), configure the - ## service to connect to a specific replica to have a consistent view - ## of the data. 
- statefulsetReplica: + persistentVolume: + ## If true, pushgateway will create/use a Persistent Volume Claim + ## enabled: false - replica: 0 - ## Additional port to define in the Service - additionalPorts: [] - # additionalPorts: - # - name: authenticated - # port: 8081 - # targetPort: 8081 + ## pushgateway data Persistent Volume access modes + ## Must match those of existing PV or dynamic provisioner + ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + accessModes: + - ReadWriteOnce - ## Prometheus server pod termination grace period - ## - terminationGracePeriodSeconds: 300 + ## pushgateway data Persistent Volume Claim annotations + ## + annotations: {} - ## Prometheus data retention period (default if not specified is 15 days) - ## - retention: "15d" + ## pushgateway data Persistent Volume existing claim name + ## Requires pushgateway.persistentVolume.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: "" - ## Prometheus' data retention size. Supported units: B, KB, MB, GB, TB, PB, EB. - ## - retentionSize: "" - -## Prometheus server ConfigMap entries for rule files (allow prometheus labels interpolation) -ruleFiles: {} - -## Prometheus server ConfigMap entries for scrape_config_files -## (allows scrape configs defined in additional files) -## -scrapeConfigFiles: [] - -## Prometheus server ConfigMap entries -## -serverFiles: - ## Alerts configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ - alerting_rules.yml: {} - # groups: - # - name: Instances - # rules: - # - alert: InstanceDown - # expr: up == 0 - # for: 5m - # labels: - # severity: page - # annotations: - # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' - # summary: 'Instance {{ $labels.instance }} down' - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml - alerts: {} - - ## Records configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ - recording_rules.yml: {} - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml - rules: {} - - prometheus.yml: - rule_files: - - /etc/config/recording_rules.yml - - /etc/config/alerting_rules.yml - ## Below two files are DEPRECATED will be removed from this default values file - - /etc/config/rules - - /etc/config/alerts - - scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - # A scrape configuration for running Prometheus on a Kubernetes cluster. - # This uses separate scrape configs for cluster components (i.e. API server, node) - # and services to allow each to use different authentication configs. - # - # Kubernetes labels will be added as Prometheus labels on metrics via the - # `labelmap` relabeling action. - - # Scrape config for API servers. - # - # Kubernetes exposes API servers as endpoints to the default/kubernetes - # service so this uses `endpoints` role and uses relabelling to only keep - # the endpoints associated with the default/kubernetes service using the - # default named port `https`. This works for single API server deployments as - # well as HA API server deployments. - - job_name: 'kubernetes-apiservers' - - kubernetes_sd_configs: - - role: endpoints - - # Default to scraping over https. If required, just disable this or change to - # `http`. 
- scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - # Keep only the default/kubernetes service endpoints for the https port. This - # will add targets for each API server which Kubernetes adds an endpoint to - # the default/kubernetes service. - relabel_configs: - - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] - action: keep - regex: default;kubernetes;https - - - job_name: 'kubernetes-nodes' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics - - - - job_name: 'kubernetes-nodes-cadvisor' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. 
Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - # This configuration will work only on kubelet 1.7.3+ - # As the scrape endpoints for cAdvisor have changed - # if you are using older version you need to change the replacement to - # replacement: /api/v1/nodes/$1:4194/proxy/metrics - # more info here https://github.com/coreos/prometheus-operator/issues/633 - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - - # Metric relabel configs to apply to samples before ingestion. - # [Metric Relabeling](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#metric_relabel_configs) - # metric_relabel_configs: - # - action: labeldrop - # regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone) - - # Scrape config for service endpoints. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape`: Only scrape services that have a value of - # `true`, except if `prometheus.io/scrape-slow` is set to `true` as well. - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - # * `prometheus.io/param_`: If the metrics endpoint uses parameters - # then you can set any parameter - - job_name: 'kubernetes-service-endpoints' - honor_labels: true - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] - action: drop - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) 
- - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: (.+?)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: service - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: node - - # Scrape config for slow service endpoints; same as above, but with a larger - # timeout and a larger interval - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - # * `prometheus.io/param_`: If the metrics endpoint uses parameters - # then you can set any parameter - - job_name: 'kubernetes-service-endpoints-slow' - honor_labels: true - - scrape_interval: 5m - scrape_timeout: 30s - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: (.+?)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: service - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: node - - - job_name: 'prometheus-pushgateway' - honor_labels: true - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: pushgateway - - # Example scrape config for probing services via the Blackbox Exporter. 
- # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/probe`: Only probe services that have a value of `true` - - job_name: 'kubernetes-services' - honor_labels: true - - metrics_path: /probe - params: - module: [http_2xx] - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: true - - source_labels: [__address__] - target_label: __param_target - - target_label: __address__ - replacement: blackbox - - source_labels: [__param_target] - target_label: instance - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - target_label: namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: service - - # Example scrape config for pods - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape`: Only scrape pods that have a value of `true`, - # except if `prometheus.io/scrape-slow` is set to `true` as well. - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. - - job_name: 'kubernetes-pods' - honor_labels: true - - kubernetes_sd_configs: - - role: pod - - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] - action: drop - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] - action: replace - regex: (https?) - target_label: __scheme__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] - action: replace - regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) - replacement: '[$2]:$1' - target_label: __address__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] - action: replace - regex: (\d+);((([0-9]+?)(\.|$)){4}) - replacement: $2:$1 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - - source_labels: [__meta_kubernetes_pod_phase] - regex: Pending|Succeeded|Failed|Completed - action: drop - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: node - - # Example Scrape config for pods which should be scraped slower. 
An useful example - # would be stackriver-exporter which queries an API on every scrape of the pod - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. - - job_name: 'kubernetes-pods-slow' - honor_labels: true - - scrape_interval: 5m - scrape_timeout: 30s - - kubernetes_sd_configs: - - role: pod - - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] - action: replace - regex: (https?) - target_label: __scheme__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] - action: replace - regex: (\d+);(([A-Fa-f0-9]{1,4}::?){1,7}[A-Fa-f0-9]{1,4}) - replacement: '[$2]:$1' - target_label: __address__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_port, __meta_kubernetes_pod_ip] - action: replace - regex: (\d+);((([0-9]+?)(\.|$)){4}) - replacement: $2:$1 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_annotation_prometheus_io_param_(.+) - replacement: __param_$1 - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: pod - - source_labels: [__meta_kubernetes_pod_phase] - regex: Pending|Succeeded|Failed|Completed - action: drop - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: node - -# adds additional scrape configs to prometheus.yml -# must be a string so you have to add a | after extraScrapeConfigs: -# example adds prometheus-blackbox-exporter scrape config -extraScrapeConfigs: "" - # - job_name: 'prometheus-blackbox-exporter' - # metrics_path: /probe - # params: - # module: [http_2xx] - # static_configs: - # - targets: - # - https://example.com - # relabel_configs: - # - source_labels: [__address__] - # target_label: __param_target - # - source_labels: [__param_target] - # target_label: instance - # - target_label: __address__ - # replacement: prometheus-blackbox-exporter:9115 - -# Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager -# useful in H/A prometheus with different external labels but the same alerts -alertRelabelConfigs: {} - # alert_relabel_configs: - # - source_labels: [dc] - # regex: (.+)\d+ - # target_label: dc - -networkPolicy: - ## Enable creation of NetworkPolicy resources. 
- ## - enabled: false - -# Force namespace of namespaced resources -forceNamespace: "" - -# Extra manifests to deploy as an array -extraManifests: [] - # - | - # apiVersion: v1 - # kind: ConfigMap - # metadata: - # labels: - # name: prometheus-extra - # data: - # extra-data: "value" - -# Configuration of subcharts defined in Chart.yaml - -## alertmanager sub-chart configurable values -## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager -## -alertmanager: - ## If false, alertmanager will not be installed - ## - enabled: true - - persistence: - size: 2Gi - - podSecurityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - -## kube-state-metrics sub-chart configurable values -## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics -## -kube-state-metrics: - ## If false, kube-state-metrics sub-chart will not be installed - ## - enabled: true + ## pushgateway data Persistent Volume mount root path + ## + mountPath: /data -## prometheus-node-exporter sub-chart configurable values -## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter -## -prometheus-node-exporter: - ## If false, node-exporter will not be installed - ## - enabled: true + ## pushgateway data Persistent Volume size + ## + size: 2Gi + + ## pushgateway data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" - rbac: - pspEnabled: false + ## pushgateway data Persistent Volume Binding Mode + ## If defined, volumeBindingMode: + ## If undefined (the default) or set to null, no volumeBindingMode spec is + ## set, choosing the default mode. + ## + # volumeBindingMode: "" + + ## Subdirectory of pushgateway data Persistent Volume to mount + ## Useful if the volume's root directory is not empty + ## + subPath: "" - containerSecurityContext: - allowPrivilegeEscalation: false -## prometheus-pushgateway sub-chart configurable values -## Please see https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway -## -prometheus-pushgateway: - ## If false, pushgateway will not be installed + ## alertmanager ConfigMap entries + ## + alertmanagerFiles: + alertmanager.yml: + global: {} + # slack_api_url: '' + + receivers: + - name: default-receiver + # slack_configs: + # - channel: '@you' + # send_resolved: true + + route: + group_wait: 10s + group_interval: 5m + receiver: default-receiver + repeat_interval: 3h + + ## Prometheus server ConfigMap entries ## - enabled: true + serverFiles: + + ## Alerts configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ + alerting_rules.yml: {} + # groups: + # - name: Instances + # rules: + # - alert: InstanceDown + # expr: up == 0 + # for: 5m + # labels: + # severity: page + # annotations: + # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' 
+ # summary: 'Instance {{ $labels.instance }} down' + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml + alerts: {} + + ## Records configuration + ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ + recording_rules.yml: {} + ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml + rules: {} + + prometheus.yml: + rule_files: + - /etc/config/recording_rules.yml + - /etc/config/alerting_rules.yml + ## Below two files are DEPRECATED will be removed from this default values file + - /etc/config/rules + - /etc/config/alerts + + scrape_configs: + - job_name: prometheus + static_configs: + - targets: + - localhost:9090 + + # A scrape configuration for running Prometheus on a Kubernetes cluster. + # This uses separate scrape configs for cluster components (i.e. API server, node) + # and services to allow each to use different authentication configs. + # + # Kubernetes labels will be added as Prometheus labels on metrics via the + # `labelmap` relabeling action. + + # Scrape config for API servers. + # + # Kubernetes exposes API servers as endpoints to the default/kubernetes + # service so this uses `endpoints` role and uses relabelling to only keep + # the endpoints associated with the default/kubernetes service using the + # default named port `https`. This works for single API server deployments as + # well as HA API server deployments. + - job_name: 'kubernetes-apiservers' + + kubernetes_sd_configs: + - role: endpoints + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + # Keep only the default/kubernetes service endpoints for the https port. This + # will add targets for each API server which Kubernetes adds an endpoint to + # the default/kubernetes service. + relabel_configs: + - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] + action: keep + regex: default;kubernetes;https + + - job_name: 'kubernetes-nodes' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . 
+ tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics + + + - job_name: 'kubernetes-nodes-cadvisor' + + # Default to scraping over https. If required, just disable this or change to + # `http`. + scheme: https + + # This TLS & bearer token file config is used to connect to the actual scrape + # endpoints for cluster components. This is separate to discovery auth + # configuration because discovery & scraping are two separate concerns in + # Prometheus. The discovery auth config is automatic if Prometheus runs inside + # the cluster. Otherwise, more config options have to be provided within the + # . + tls_config: + ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt + # If your node certificates are self-signed or use a different CA to the + # master CA, then disable certificate verification below. Note that + # certificate verification is an integral part of a secure infrastructure + # so this should only be disabled in a controlled environment. You can + # disable certificate verification by uncommenting the line below. + # + insecure_skip_verify: true + bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token + + kubernetes_sd_configs: + - role: node + + # This configuration will work only on kubelet 1.7.3+ + # As the scrape endpoints for cAdvisor have changed + # if you are using older version you need to change the replacement to + # replacement: /api/v1/nodes/$1:4194/proxy/metrics + # more info here https://github.com/coreos/prometheus-operator/issues/633 + relabel_configs: + - action: labelmap + regex: __meta_kubernetes_node_label_(.+) + - target_label: __address__ + replacement: kubernetes.default.svc:443 + - source_labels: [__meta_kubernetes_node_name] + regex: (.+) + target_label: __metrics_path__ + replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor + + # Scrape config for service endpoints. + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. 
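+      # As an illustrative sketch only (the service name and port below are
+      # hypothetical, not part of this chart), a Service that exposes metrics on a
+      # non-default port would opt in to scraping with annotations such as:
+      #
+      #   apiVersion: v1
+      #   kind: Service
+      #   metadata:
+      #     name: my-app-metrics
+      #     annotations:
+      #       prometheus.io/scrape: "true"
+      #       prometheus.io/port: "8080"
+      #       prometheus.io/path: "/metrics"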
+ - job_name: 'kubernetes-service-endpoints' + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + # Scrape config for slow service endpoints; same as above, but with a larger + # timeout and a larger interval + # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: If the metrics are exposed on a different port to the + # service then set this appropriately. + - job_name: 'kubernetes-service-endpoints-slow' + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: endpoints + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] + action: replace + target_label: __scheme__ + regex: (https?) + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] + action: replace + target_label: __address__ + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + action: replace + target_label: kubernetes_name + - source_labels: [__meta_kubernetes_pod_node_name] + action: replace + target_label: kubernetes_node + + - job_name: 'prometheus-pushgateway' + honor_labels: true + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: pushgateway + + # Example scrape config for probing services via the Blackbox Exporter. 
+ # + # The relabeling allows the actual service scrape endpoint to be configured + # via the following annotations: + # + # * `prometheus.io/probe`: Only probe services that have a value of `true` + - job_name: 'kubernetes-services' + + metrics_path: /probe + params: + module: [http_2xx] + + kubernetes_sd_configs: + - role: service + + relabel_configs: + - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] + action: keep + regex: true + - source_labels: [__address__] + target_label: __param_target + - target_label: __address__ + replacement: blackbox + - source_labels: [__param_target] + target_label: instance + - action: labelmap + regex: __meta_kubernetes_service_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_service_name] + target_label: kubernetes_name + + # Example scrape config for pods + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. + - job_name: 'kubernetes-pods' + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed + action: drop + + # Example Scrape config for pods which should be scraped slower. An useful example + # would be stackriver-exporter which queries an API on every scrape of the pod + # + # The relabeling allows the actual pod scrape endpoint to be configured via the + # following annotations: + # + # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` + # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need + # to set this to `https` & most likely set the `tls_config` of the scrape config. + # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. + # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. 
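+      # Purely as an assumed example (the port value is a placeholder), a pod meant
+      # for this slow scrape cycle rather than the default one would be annotated
+      # along these lines:
+      #
+      #   metadata:
+      #     annotations:
+      #       prometheus.io/scrape-slow: "true"
+      #       prometheus.io/port: "9102"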
+ - job_name: 'kubernetes-pods-slow' + + scrape_interval: 5m + scrape_timeout: 30s + + kubernetes_sd_configs: + - role: pod + + relabel_configs: + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] + action: keep + regex: true + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] + action: replace + regex: (https?) + target_label: __scheme__ + - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] + action: replace + target_label: __metrics_path__ + regex: (.+) + - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] + action: replace + regex: ([^:]+)(?::\d+)?;(\d+) + replacement: $1:$2 + target_label: __address__ + - action: labelmap + regex: __meta_kubernetes_pod_label_(.+) + - source_labels: [__meta_kubernetes_namespace] + action: replace + target_label: kubernetes_namespace + - source_labels: [__meta_kubernetes_pod_name] + action: replace + target_label: kubernetes_pod_name + - source_labels: [__meta_kubernetes_pod_phase] + regex: Pending|Succeeded|Failed + action: drop + + # adds additional scrape configs to prometheus.yml + # must be a string so you have to add a | after extraScrapeConfigs: + # example adds prometheus-blackbox-exporter scrape config + extraScrapeConfigs: + # - job_name: 'prometheus-blackbox-exporter' + # metrics_path: /probe + # params: + # module: [http_2xx] + # static_configs: + # - targets: + # - https://example.com + # relabel_configs: + # - source_labels: [__address__] + # target_label: __param_target + # - source_labels: [__param_target] + # target_label: instance + # - target_label: __address__ + # replacement: prometheus-blackbox-exporter:9115 + + # Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager + # useful in H/A prometheus with different external labels but the same alerts + alertRelabelConfigs: + # alert_relabel_configs: + # - source_labels: [dc] + # regex: (.+)\d+ + # target_label: dc + + networkPolicy: + ## Enable creation of NetworkPolicy resources. 
+ ## + enabled: false - # Optional service annotations - serviceAnnotations: - prometheus.io/probe: pushgateway + # Force namespace of namespaced resources + forceNamespace: null \ No newline at end of file From 85741bef26b7e98400d50e4a95c41117e978506d Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Thu, 7 Dec 2023 22:09:32 +0530 Subject: [PATCH 19/99] Added grafana chart for test --- .../helm/{prometheus => grafana}/.helmignore | 2 +- operations/deployment/helm/grafana/Chart.yaml | 33 + operations/deployment/helm/grafana/README.md | 757 ++++++++ .../helm/grafana/ci/default-values.yaml | 1 + .../helm/grafana/ci/with-affinity-values.yaml | 16 + .../ci/with-dashboard-json-values.yaml | 53 + .../grafana/ci/with-dashboard-values.yaml | 19 + .../ci/with-extraconfigmapmounts-values.yaml | 7 + .../ci/with-image-renderer-values.yaml | 19 + .../helm/grafana/ci/with-persistence.yaml | 3 + .../grafana/dashboards/custom-dashboard.json | 1 + .../helm/grafana/templates/NOTES.txt | 55 + .../helm/grafana/templates/_helpers.tpl | 227 +++ .../helm/grafana/templates/_pod.tpl | 1276 +++++++++++++ .../helm/grafana/templates/clusterrole.yaml | 25 + .../grafana/templates/clusterrolebinding.yaml | 24 + .../helm/grafana/templates/configSecret.yaml | 43 + .../configmap-dashboard-provider.yaml | 29 + .../helm/grafana/templates/configmap.yaml | 144 ++ .../templates/dashboards-json-configmap.yaml | 38 + .../helm/grafana/templates/deployment.yaml | 51 + .../grafana/templates/extra-manifests.yaml | 4 + .../grafana/templates/headless-service.yaml | 22 + .../helm/grafana/templates/hpa.yaml | 52 + .../templates/image-renderer-deployment.yaml | 131 ++ .../grafana/templates/image-renderer-hpa.yaml | 47 + .../image-renderer-network-policy.yaml | 79 + .../templates/image-renderer-service.yaml | 31 + .../image-renderer-servicemonitor.yaml | 48 + .../helm/grafana/templates/ingress.yaml | 78 + .../helm/grafana/templates/networkpolicy.yaml | 61 + .../templates/poddisruptionbudget.yaml | 22 + .../grafana/templates/podsecuritypolicy.yaml | 49 + .../helm/grafana/templates/pvc.yaml | 36 + .../helm/grafana/templates/role.yaml | 32 + .../helm/grafana/templates/rolebinding.yaml | 25 + .../helm/grafana/templates/secret-env.yaml | 14 + .../helm/grafana/templates/secret.yaml | 26 + .../helm/grafana/templates/service.yaml | 58 + .../grafana/templates/serviceaccount.yaml | 17 + .../grafana/templates/servicemonitor.yaml | 52 + .../helm/grafana/templates/statefulset.yaml | 56 + .../templates/tests/test-configmap.yaml | 20 + .../tests/test-podsecuritypolicy.yaml | 32 + .../grafana/templates/tests/test-role.yaml | 17 + .../templates/tests/test-rolebinding.yaml | 20 + .../templates/tests/test-serviceaccount.yaml | 12 + .../helm/grafana/templates/tests/test.yaml | 49 + .../deployment/helm/grafana/values.yaml | 1282 +++++++++++++ .../deployment/helm/prometheus/Chart.lock | 15 - .../deployment/helm/prometheus/Chart.yaml | 9 - .../deployment/helm/prometheus/ENV_FILE | 2 - .../deployment/helm/prometheus/README.md | 382 ---- .../helm/prometheus/bitops.config.yaml | 14 - .../helm/prometheus/templates/NOTES.txt | 113 -- .../helm/prometheus/templates/_helpers.tpl | 234 --- .../prometheus/templates/clusterrole.yaml | 56 - .../templates/clusterrolebinding.yaml | 16 - .../helm/prometheus/templates/cm.yaml | 99 - .../helm/prometheus/templates/deploy.yaml | 363 ---- .../prometheus/templates/extra-manifests.yaml | 4 - .../prometheus/templates/headless-svc.yaml | 35 - .../helm/prometheus/templates/ingress.yaml | 57 - .../prometheus/templates/network-policy.yaml 
| 16 - .../helm/prometheus/templates/pdb.yaml | 15 - .../helm/prometheus/templates/psp.yaml | 53 - .../helm/prometheus/templates/pvc.yaml | 43 - .../prometheus/templates/rolebinding.yaml | 18 - .../helm/prometheus/templates/service.yaml | 63 - .../prometheus/templates/serviceaccount.yaml | 16 - .../helm/prometheus/templates/sts.yaml | 385 ---- .../helm/prometheus/templates/vpa.yaml | 26 - .../helm/prometheus/values.schema.json | 738 ------- .../deployment/helm/prometheus/values.yaml | 1691 ----------------- 74 files changed, 5194 insertions(+), 4464 deletions(-) rename operations/deployment/helm/{prometheus => grafana}/.helmignore (97%) create mode 100644 operations/deployment/helm/grafana/Chart.yaml create mode 100644 operations/deployment/helm/grafana/README.md create mode 100644 operations/deployment/helm/grafana/ci/default-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-affinity-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-persistence.yaml create mode 100644 operations/deployment/helm/grafana/dashboards/custom-dashboard.json create mode 100644 operations/deployment/helm/grafana/templates/NOTES.txt create mode 100644 operations/deployment/helm/grafana/templates/_helpers.tpl create mode 100644 operations/deployment/helm/grafana/templates/_pod.tpl create mode 100644 operations/deployment/helm/grafana/templates/clusterrole.yaml create mode 100644 operations/deployment/helm/grafana/templates/clusterrolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/configSecret.yaml create mode 100644 operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml create mode 100644 operations/deployment/helm/grafana/templates/configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/deployment.yaml create mode 100644 operations/deployment/helm/grafana/templates/extra-manifests.yaml create mode 100644 operations/deployment/helm/grafana/templates/headless-service.yaml create mode 100644 operations/deployment/helm/grafana/templates/hpa.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-service.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml create mode 100644 operations/deployment/helm/grafana/templates/ingress.yaml create mode 100644 operations/deployment/helm/grafana/templates/networkpolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml create mode 100644 operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/pvc.yaml create mode 100644 operations/deployment/helm/grafana/templates/role.yaml create mode 100644 
operations/deployment/helm/grafana/templates/rolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/secret-env.yaml create mode 100644 operations/deployment/helm/grafana/templates/secret.yaml create mode 100644 operations/deployment/helm/grafana/templates/service.yaml create mode 100644 operations/deployment/helm/grafana/templates/serviceaccount.yaml create mode 100644 operations/deployment/helm/grafana/templates/servicemonitor.yaml create mode 100644 operations/deployment/helm/grafana/templates/statefulset.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-role.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test.yaml create mode 100644 operations/deployment/helm/grafana/values.yaml delete mode 100644 operations/deployment/helm/prometheus/Chart.lock delete mode 100644 operations/deployment/helm/prometheus/Chart.yaml delete mode 100644 operations/deployment/helm/prometheus/ENV_FILE delete mode 100644 operations/deployment/helm/prometheus/README.md delete mode 100644 operations/deployment/helm/prometheus/bitops.config.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/NOTES.txt delete mode 100644 operations/deployment/helm/prometheus/templates/_helpers.tpl delete mode 100644 operations/deployment/helm/prometheus/templates/clusterrole.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/cm.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/deploy.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/extra-manifests.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/headless-svc.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/ingress.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/network-policy.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/pdb.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/psp.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/pvc.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/rolebinding.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/service.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/serviceaccount.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/sts.yaml delete mode 100644 operations/deployment/helm/prometheus/templates/vpa.yaml delete mode 100644 operations/deployment/helm/prometheus/values.schema.json delete mode 100644 operations/deployment/helm/prometheus/values.yaml diff --git a/operations/deployment/helm/prometheus/.helmignore b/operations/deployment/helm/grafana/.helmignore similarity index 97% rename from operations/deployment/helm/prometheus/.helmignore rename to operations/deployment/helm/grafana/.helmignore index 825c007..8cade13 100644 --- a/operations/deployment/helm/prometheus/.helmignore +++ b/operations/deployment/helm/grafana/.helmignore @@ -16,8 +16,8 @@ *.tmp *~ # Various IDEs +.vscode .project .idea/ 
*.tmproj - OWNERS diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml new file mode 100644 index 0000000..387f2ab --- /dev/null +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -0,0 +1,33 @@ +apiVersion: v2 +name: grafana +version: 7.0.13 +appVersion: 10.2.2 +kubeVersion: "^1.8.0-0" +description: The leading tool for querying and visualizing time series and metrics. +home: https://grafana.com +icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 +sources: + - https://github.com/grafana/grafana + - https://github.com/grafana/helm-charts +annotations: + "artifacthub.io/license": AGPL-3.0-only + "artifacthub.io/links": | + - name: Chart Source + url: https://github.com/grafana/helm-charts + - name: Upstream Project + url: https://github.com/grafana/grafana +maintainers: + - name: zanhsieh + email: zanhsieh@gmail.com + - name: rtluckie + email: rluckie@cisco.com + - name: maorfr + email: maor.friedman@redhat.com + - name: Xtigyro + email: miroslav.hadzhiev@gmail.com + - name: torstenwalter + email: mail@torstenwalter.de +type: application +keywords: + - monitoring + - metric diff --git a/operations/deployment/helm/grafana/README.md b/operations/deployment/helm/grafana/README.md new file mode 100644 index 0000000..5420545 --- /dev/null +++ b/operations/deployment/helm/grafana/README.md @@ -0,0 +1,757 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. + +### To 7.0.0 + +For consistency with other Helm charts, the `global.image.registry` parameter was renamed +to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action +is required on upgrade. If you were previously setting `global.image.registry`, you will +need to instead set `global.imageRegistry`. 
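+
+For example, a values override that previously used the old key might be migrated as
+follows (the registry hostname is only a placeholder):
+
+```yaml
+# was: global.image.registry: registry.example.com
+global:
+  imageRegistry: registry.example.com
+```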
+ +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.registry` | Image registry | `docker.io` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` | +| `image.sha` | Image sha (optional) | `` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` | +| `service.enabled` | Enable grafana service | `true` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.appProtocol` | Adds the appProtocol field to the service | `` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations (can be templated) | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `service.externalTrafficPolicy` | change the default externalTrafficPolicy | `nil` | +| `headlessService` | Create a headless service | `false` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `ingress.ingressClassName` | Ingress Class Name. 
MAY be required for Kubernetes versions >= 1.18 | `""` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` | +| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.registry` | init-chown-data container image registry | `docker.io` | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | +| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` | +| `enableServiceLinks` | Inject Kubernetes services as environment variables. 
| `true` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `createConfigmap` | Enable creating the grafana configmap | `true` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `alerting` | Configure grafana alerting (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `global.imageRegistry` | Global image pull registry for all images. | `null` | +| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | +| `sidecar.image.registry` | Sidecar image registry | `quay.io` | +| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.24.6` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.securityContext` | Sidecar securityContext | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | +| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` | +| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` | +| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` | +| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. 
+| `sidecar.alerts.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
+| `sidecar.alerts.reloadURL` | Full url of the alerts configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` |
+| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
+| `sidecar.alerts.initAlerts` | Set to true to deploy the alerts sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` |
+| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` |
+| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` |
+| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` |
+| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` |
+| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` |
+| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` |
+| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` |
+| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` |
+| `sidecar.dashboards.provider.type` | Provider type | `file` |
+| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` |
+| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
+| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` |
+| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` |
+| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` |
+| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` |
+| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` |
+| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` |
+| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. | `nil` |
+| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` |
+| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` |
+| `sidecar.dashboards.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
+| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` |
+| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana | `false` |
+| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` |
+| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` |
+| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
+| `sidecar.datasources.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
+| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` |
+| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
+| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` |
+| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` |
+| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` |
+| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` |
+| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` |
+| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` |
+| `sidecar.notifiers.resource` | Should the sidecar look into secrets, configmaps or both. | `both` |
+| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` |
+| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` |
+| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container.
This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` |
+| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` |
+| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` |
+| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` |
+| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` |
+| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` |
+| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` |
+| `serviceAccount.autoMount` | Automount the service account token in the pod | `true` |
+| `serviceAccount.annotations` | ServiceAccount annotations | |
+| `serviceAccount.create` | Create service account | `true` |
+| `serviceAccount.labels` | ServiceAccount labels | `{}` |
+| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` |
+| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` |
+| `rbac.create` | Create and use RBAC resources | `true` |
+| `rbac.namespaced` | Creates Role and RoleBinding instead of the default ClusterRole and ClusterRoleBinding for the grafana instance | `false` |
+| `rbac.useExistingRole` | Set to a rolename to use an existing role - skipping role creation - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` |
+| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` |
+| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` |
+| `rbac.extraRoleRules` | Additional rules to add to the Role | `[]` |
+| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | `[]` |
+| `command` | Define command to be executed by grafana container at startup | `nil` |
+| `args` | Define additional args if command is used | `nil` |
+| `testFramework.enabled` | Whether to create test-related resources | `true` |
+| `testFramework.image.registry` | `test-framework` image registry. | `docker.io` |
+| `testFramework.image.repository` | `test-framework` image repository. | `bats/bats` |
+| `testFramework.image.tag` | `test-framework` image tag. | `v1.4.1` |
+| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` |
+| `testFramework.securityContext` | `test-framework` securityContext | `{}` |
+| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` |
+| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment.
Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.registry` | Curl docker image registry | `docker.io` | +| `downloadDashboardsImage.repository` | Curl docker image repository | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.registry` | image-renderer Image registry | `docker.io` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. 
Can be templated | `{}` |
+| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` |
+| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` |
+| `imageRenderer.podAnnotations` | image-renderer pod annotations | `{}` |
+| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` |
+| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` |
+| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` |
+| `imageRenderer.service.portName` | image-renderer service port name | `http` |
+| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` |
+| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` |
+| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` |
+| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` |
+| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` |
+| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` |
+| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` |
+| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` |
+| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` |
+| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` |
+| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` |
+| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` |
+| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` |
+| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` |
+| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` |
+| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` |
+| `enableKubeBackwardCompatibility` | Enable backward compatibility with Kubernetes versions below 1.13, where the pod definition doesn't have the enableServiceLinks option | `false` |
+
+### Example ingress with path
+
+With grafana 6.3 and above:
+
+```yaml
+grafana.ini:
+  server:
+    domain: monitoring.example.com
+    root_url: "%(protocol)s://%(domain)s/grafana"
+    serve_from_sub_path: true
+ingress:
+  enabled: true
+  hosts:
+    - "monitoring.example.com"
+  path: "/grafana"
+```
+
+### Example of extraVolumeMounts
+
+A volume can be of type persistentVolumeClaim or hostPath, but not both at the same time.
+If neither an existingClaim nor a hostPath argument is given, the type defaults to emptyDir (a sketch of that case follows the example below).
+
+```yaml
+- extraVolumeMounts:
+  - name: plugins
+    mountPath: /var/lib/grafana/plugins
+    subPath: configs/grafana/plugins
+    existingClaim: existing-grafana-claim
+    readOnly: false
+  - name: dashboards
+    mountPath: /var/lib/grafana/dashboards
+    hostPath: /usr/shared/grafana/dashboards
+    readOnly: false
+```
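+
+For the emptyDir case mentioned above, a minimal sketch (the volume name and mount path are only illustrative):
+
+```yaml
+- extraVolumeMounts:
+  # neither existingClaim nor hostPath is set, so this mount falls back to an emptyDir volume
+  - name: scratch
+    mountPath: /var/lib/grafana/scratch
+    readOnly: false
+```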
+
+## Import dashboards
+
+There are a few methods to import dashboards to Grafana. Below are some examples and explanations as to how to use each method:
+
+```yaml
+dashboards:
+  default:
+    some-dashboard:
+      json: |
+        {
+          "annotations":
+
+          ...
+          # Complete json file here
+          ...
+
+          "title": "Some Dashboard",
+          "uid": "abcd1234",
+          "version": 1
+        }
+    custom-dashboard:
+      # This is a path to a file inside the dashboards directory inside the chart directory
+      file: dashboards/custom-dashboard.json
+    prometheus-stats:
+      # Ref: https://grafana.com/dashboards/2
+      gnetId: 2
+      revision: 2
+      datasource: Prometheus
+    loki-dashboard-quick-search:
+      gnetId: 12019
+      revision: 2
+      datasource:
+      - name: DS_PROMETHEUS
+        value: Prometheus
+      - name: DS_LOKI
+        value: Loki
+    local-dashboard:
+      url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json
+```
+
+## BASE64 dashboards
+
+Dashboards can be stored on a server that does not return JSON directly and instead returns a Base64 encoded file (e.g. Gerrit).
+A parameter has been added to the url use case: if you set `b64content` to `true` after the `url` entry, the file is Base64 decoded before it is saved to disk.
+If this entry is not set, or is set to `false`, no decoding is applied to the file before saving it to disk.
+
+### Gerrit use case
+
+The Gerrit API for downloading files has the following schema: where {project-name} and
+{file-id} usually contain '/' in their values, so they MUST be replaced by %2F. For example, if project-name is user/repo, branch-id is master and file-id is dir1/dir2/dashboard,
+the url value is
+
+## Sidecar for dashboards
+
+If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana
+pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with
+a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written
+to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported
+dashboards are deleted/updated.
+
+A recommendation is to use one configmap per dashboard, because removing dashboards from a configmap
+that contains several of them is currently not properly mirrored in grafana.
+
+Example dashboard config:
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: sample-grafana-dashboard
+  labels:
+    grafana_dashboard: "1"
+data:
+  k8s-dashboard.json: |-
+    [...]
+```
+
+## Sidecar for datasources
+
+If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana
+pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and
+filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in
+those secrets are written to a folder and accessed by grafana on startup. Using these yaml files,
+the data sources in grafana can be imported.
+
+Should you aim for reloading datasources in Grafana each time the config is changed, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://<svc-name>.<namespace>.svc.cluster.local/api/admin/provisioning/datasources/reload`.
+
+Secrets are recommended over configmaps for this use case because datasources usually contain private
+data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those.
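+
+As a minimal sketch of the sidecar settings referenced above (the `grafana` service name and `monitoring` namespace in the reload URL are placeholders for your own release, not chart defaults):
+
+```yaml
+sidecar:
+  datasources:
+    enabled: true
+    skipReload: false
+    # placeholder service name and namespace; adjust to match your release
+    reloadURL: "http://grafana.monitoring.svc.cluster.local/api/admin/provisioning/datasources/reload"
+```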
+ +Example values to add a postgres datasource as a kubernetes secret: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: grafana-datasources + labels: + grafana_datasource: 'true' # default value for: sidecar.datasources.label +stringData: + pg-db.yaml: |- + apiVersion: 1 + datasources: + - name: My pg db datasource + type: postgres + url: my-postgresql-db:5432 + user: db-readonly-user + secureJsonData: + password: 'SUperSEcretPa$$word' + jsonData: + database: my_datase + sslmode: 'disable' # disable/require/verify-ca/verify-full + maxOpenConns: 0 # Grafana v5.4+ + maxIdleConns: 2 # Grafana v5.4+ + connMaxLifetime: 14400 # Grafana v5.4+ + postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10 + timescaledb: false + # allow users to edit datasources from the UI. + editable: false +``` + +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. + is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. 
+ settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## Sidecar for alerting resources + +If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with +a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated, however, deletions are a little more complicated (see below). + +This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). + +To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)). +You can use either JSON or YAML format. + +Example config for an alert rule: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-alert + labels: + grafana_alert: "1" +data: + k8s-alert.yml: |- + apiVersion: 1 + groups: + - orgId: 1 + name: k8s-alert + [...] +``` + +To delete provisioned alert rules is a two step process, you need to delete the configmap which defined the alert rule +and then create a configuration which deletes the alert rule. + +Example deletion configuration: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: delete-sample-grafana-alert + namespace: monitoring + labels: + grafana_alert: "1" +data: + delete-k8s-alert.yml: |- + apiVersion: 1 + deleteRules: + - orgId: 1 + uid: 16624780-6564-45dc-825c-8bded4ad92d3 +``` + +## Statically provision alerting resources +If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly you could use the `alerting` config option instead of the sidecar option above. +This will grab the alerting config and apply it statically at build time for the helm file. + +There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +alerting: + team1-alert-rules.yaml: + file: alerting/team1/rules.yaml + team2-alert-rules.yaml: + file: alerting/team2/rules.yaml + team3-alert-rules.yaml: + file: alerting/team3/rules.yaml + notification-policies.yaml: + file: alerting/shared/notification-policies.yaml + notification-templates.yaml: + file: alerting/shared/notification-templates.yaml + contactpoints.yaml: + apiVersion: 1 + contactPoints: + - orgId: 1 + name: Slack channel + receivers: + - uid: default-receiver + type: slack + settings: + # Webhook URL to be filled in + url: "" + # We need to escape double curly braces for the tpl function. + text: '{{ `{{ template "default.message" . }}` }}' + title: '{{ `{{ template "default.title" . }}` }}' +``` + +The two possibilities for static alerting resource provisioning are: + +* Inlining the file contents as shown for contact points in the above example. 
+* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example. + +### Important notes on file provisioning + +* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning. +* The chart supports importing YAML and JSON files. +* The filename must be unique, otherwise one volume mount will overwrite the other. +* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. +* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. +* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. + +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. 
retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance + +### High Availability for unified alerting + +If you want to run Grafana in a high availability cluster you need to enable +the headless service by setting `headlessService: true` in your `values.yaml` +file. + +As next step you have to setup the `grafana.ini` in your `values.yaml` in a way +that it will make use of the headless service to obtain all the IPs of the +cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. + +```yaml +grafana.ini: + ... + unified_alerting: + enabled: true + ha_peers: {{ Name }}-headless:9094 + ha_listen_address: ${POD_IP}:9094 + ha_advertise_address: ${POD_IP}:9094 + + alerting: + enabled: false +``` diff --git a/operations/deployment/helm/grafana/ci/default-values.yaml b/operations/deployment/helm/grafana/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml new file mode 100644 index 0000000..f5b9b53 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml @@ -0,0 +1,16 @@ +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: kubernetes.io/hostname diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml new file mode 100644 index 0000000..e0c4e41 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml @@ -0,0 +1,53 @@ +dashboards: + my-provider: + my-awesome-dashboard: + # An empty but valid dashboard + json: | + { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.3.5" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [], + "schemaVersion": 19, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["5s"] + }, + "timezone": "", + "title": "Dummy Dashboard", + "uid": "IdcYQooWk", + "version": 1 + } + datasource: Prometheus diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml new file mode 100644 index 0000000..7b662c5 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml @@ -0,0 +1,19 @@ +dashboards: + my-provider: + my-awesome-dashboard: + gnetId: 10000 + revision: 1 + datasource: Prometheus +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'my-provider' + orgId: 1 + folder: '' + type: file + updateIntervalSeconds: 10 + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards/my-provider diff --git a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml new file mode 100644 index 0000000..5cc44a0 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml @@ -0,0 +1,7 @@ +extraConfigmapMounts: + - name: '{{ include "grafana.fullname" . }}' + configMap: '{{ include "grafana.fullname" . 
}}' + mountPath: /var/lib/grafana/dashboards/test-dashboard.json + # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap + subPath: grafana.ini + readOnly: true diff --git a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml new file mode 100644 index 0000000..32f3074 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml @@ -0,0 +1,19 @@ +podLabels: + customLableA: Aaaaa +imageRenderer: + enabled: true + env: + RENDERING_ARGS: --disable-gpu,--window-size=1280x758 + RENDERING_MODE: clustered + podLabels: + customLableB: Bbbbb + networkPolicy: + limitIngress: true + limitEgress: true + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 500m + memory: 50Mi diff --git a/operations/deployment/helm/grafana/ci/with-persistence.yaml b/operations/deployment/helm/grafana/ci/with-persistence.yaml new file mode 100644 index 0000000..b92ca02 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-persistence.yaml @@ -0,0 +1,3 @@ +persistence: + type: pvc + enabled: true diff --git a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/NOTES.txt b/operations/deployment/helm/grafana/templates/NOTES.txt new file mode 100644 index 0000000..d86419f --- /dev/null +++ b/operations/deployment/helm/grafana/templates/NOTES.txt @@ -0,0 +1,55 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo + + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + If you bind grafana to 80, please update values in values.yaml and reinstall: + ``` + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + + command: + - "setcap" + - "'cap_net_bind_service=+ep'" + - "/usr/sbin/grafana-server &&" + - "sh" + - "/run.sh" + ``` + Details refer to https://grafana.com/docs/installation/configuration/#http-port. + Or grafana would always crash. + + From outside the cluster, the server URL(s) are: + {{- range .Values.ingress.hosts }} + http://{{ . }} + {{- end }} +{{- else }} + Get the Grafana URL to visit by running these commands in the same shell: + {{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + {{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} + {{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000 + {{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/_helpers.tpl b/operations/deployment/helm/grafana/templates/_helpers.tpl new file mode 100644 index 0000000..ead2449 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/_helpers.tpl @@ -0,0 +1,227 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create }} +{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else }} +{{- default "default" .Values.serviceAccount.nameTest }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.extraLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} +{{- if $secret }} +{{- index $secret "data" "admin-password" }} +{{- else }} +{{- (randAlphaNum 40) | b64enc | quote }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "grafana.rbac.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" }} +{{- else }} +{{- print "rbac.authorization.k8s.io/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "grafana.ingress.apiVersion" -}} +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} +{{- print "networking.k8s.io/v1" }} +{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +{{- print "networking.k8s.io/v1beta1" }} +{{- else }} +{{- print "extensions/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "grafana.hpa.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2" }} +{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2beta2" }} +{{- else }} +{{- print "autoscaling/v2beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. +*/}} +{{- define "grafana.podDisruptionBudget.apiVersion" -}} +{{- if $.Values.podDisruptionBudget.apiVersion }} +{{- print $.Values.podDisruptionBudget.apiVersion }} +{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +{{- print "policy/v1" }} +{{- else }} +{{- print "policy/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return if ingress is stable. +*/}} +{{- define "grafana.ingress.isStable" -}} +{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "grafana.ingress.supportsIngressClassName" -}} +{{- or (eq (include "grafana.ingress.isStable" .) 
"true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "grafana.ingress.supportsPathType" -}} +{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "grafana.imagePullSecrets" -}} +{{- $root := .root }} +{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} +{{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml (dict "name" (tpl .name $root)) | trim }} +{{- else }} +- name: {{ tpl . $root }} +{{- end }} +{{- end }} +{{- end }} + + +{{/* + Checks whether or not the configSecret secret has to be created + */}} +{{- define "grafana.shouldCreateConfigSecret" -}} +{{- $secretFound := false -}} +{{- range $key, $value := .Values.datasources }} + {{- if hasKey $value "secret" }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.notifiers }} + {{- if hasKey $value "secret" }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.alerting }} + {{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- $secretFound}} +{{- end -}} diff --git a/operations/deployment/helm/grafana/templates/_pod.tpl b/operations/deployment/helm/grafana/templates/_pod.tpl new file mode 100644 index 0000000..f4b9fd3 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/_pod.tpl @@ -0,0 +1,1276 @@ +{{- define "grafana.pod" -}} +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- $root := . -}} +{{- with .Values.schedulerName }} +schedulerName: "{{ . }}" +{{- end }} +serviceAccountName: {{ include "grafana.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} +{{- with .Values.securityContext }} +securityContext: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.hostAliases }} +hostAliases: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.priorityClassName }} +priorityClassName: {{ . }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts) (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + {{- $registry := .Values.global.imageRegistry | default .Values.initChownData.image.registry -}} + {{- if .Values.initChownData.image.sha }} + image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + {{- with .Values.initChownData.securityContext }} + securityContext: + {{- toYaml . 
| nindent 6 }} + {{- end }} + command: + - chown + - -R + - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} + - /var/lib/grafana + {{- with .Values.initChownData.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- $registry := .Values.global.imageRegistry | default .Values.downloadDashboardsImage.registry -}} + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] + {{- with .Values.downloadDashboards.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + env: + {{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- with .Values.downloadDashboards.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl . $root }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts }} + - name: {{ include "grafana.name" . }}-init-sc-alerts + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . 
}} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- with .Values.sidecar.alerts.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} + - name: {{ include "grafana.name" . }}-init-sc-datasources + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . 
| nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end }} +{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} + - name: {{ include "grafana.name" . }}-init-sc-notifiers + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- with .Values.extraInitContainers }} + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} +{{- end }} +{{- if not .Values.enableKubeBackwardCompatibility }} +enableServiceLinks: {{ .Values.enableServiceLinks }} +{{- end }} +containers: +{{- if and .Values.sidecar.alerts.enabled (not .Values.sidecar.alerts.initAlerts) }} + - name: {{ include "grafana.name" . 
}}-sc-alerts + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.alerts.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.alerts.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.alerts.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.alerts.watchServerTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.alerts.watchClientTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- with .Values.sidecar.alerts.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ include "grafana.name" . }}-sc-dashboard + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.dashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- with .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: {{ quote .Values.sidecar.dashboards.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- if not .Values.sidecar.dashboards.skipReload }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + - name: REQ_URL + value: {{ .Values.sidecar.dashboards.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.dashboards.watchServerTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.dashboards.watchClientTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- with .Values.sidecar.dashboards.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if and .Values.sidecar.datasources.enabled (not .Values.sidecar.datasources.initDatasources) }} + - name: {{ include "grafana.name" . }}-sc-datasources + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.datasources.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. 
| join ",") $root }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.datasources.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.datasources.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.datasources.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.datasources.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.datasources.watchServerTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.datasources.watchClientTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ include "grafana.name" . 
}}-sc-notifiers + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.notifiers.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.notifiers.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.notifiers.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.notifiers.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.notifiers.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.notifiers.watchServerTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.notifiers.watchClientTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.sidecar.plugins.enabled }} + - name: {{ include "grafana.name" . }}-sc-plugins + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.plugins.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.plugins.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.plugins.label }}" + {{- if .Values.sidecar.plugins.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.plugins.labelValue }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/plugins" + - name: RESOURCE + value: {{ quote .Values.sidecar.plugins.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.plugins.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.plugins.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . 
}}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.plugins.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.plugins.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.plugins.watchServerTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.plugins.watchClientTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" +{{- end}} + - name: {{ .Chart.Name }} + {{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}} + {{- if .Values.image.sha }} + image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.args }} + args: + {{- range .Values.args }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + mountPath: {{ tpl .mountPath $root }} + subPath: {{ tpl (.subPath | default "") $root }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . 
$root }} + {{- end }} + {{- with .Values.dashboards }} + {{- range $provider, $dashboards := . }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.dashboardsConfigMaps }} + {{- range (keys . | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . }}" + {{- end }} + {{- end }} + {{- with .Values.datasources }} + {{- $datasources := . }} + {{- range (keys . | sortAlpha) }} + {{- if (or (hasKey (index $datasources .) "secret")) }} {{/*check if current datasource should be handeled as secret */}} + - name: config-secret + mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" + subPath: {{ . | quote }} + {{- else }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.notifiers }} + {{- $notifiers := . }} + {{- range (keys . | sortAlpha) }} + {{- if (or (hasKey (index $notifiers .) "secret")) }} {{/*check if current notifier should be handeled as secret */}} + - name: config-secret + mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" + subPath: {{ . | quote }} + {{- else }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.alerting }} + {{- $alertingmap := .}} + {{- range (keys . | sortAlpha) }} + {{- if (or (hasKey (index $.Values.alerting .) "secret") (hasKey (index $.Values.alerting .) "secretFile")) }} {{/*check if current alerting entry should be handeled as secret */}} + - name: config-secret + mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" + subPath: {{ . | quote }} + {{- else }} + - name: config + mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.dashboardProviders }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" + subPath: {{ . 
| quote }} + {{- end }} + {{- end }} + {{- with .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- end}} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml + {{- end}} + {{- end}} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" + {{- end}} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" + {{- end}} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" + {{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.podPortName }} + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.gossipPortName }}-tcp + containerPort: 9094 + protocol: TCP + - name: {{ .Values.gossipPortName }}-udp + containerPort: 9094 + protocol: UDP + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ include "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{- if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{- end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" + {{- end }} + {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + - secretRef: + name: {{ include "grafana.fullname" . }}-env + {{- end }} + {{- range .Values.envFromSecrets }} + - secretRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- range .Values.envFromConfigMaps }} + - configMapRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.lifecycleHooks }} + lifecycle: + {{- tpl (toYaml .) $root | nindent 6 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- with .Values.extraContainers }} + {{- tpl . $ | nindent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ include "grafana.fullname" . }} + {{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} + {{- if and .Values.createConfigmap $createConfigSecret }} + - name: config-secret + secret: + secretName: {{ include "grafana.fullname" . }}-config-secret + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + configMap: + name: {{ tpl .configMap $root }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ include "grafana.fullname" . 
}} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} + {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }} + {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} + {{/* nothing */}} + {{- else }} + - name: storage + {{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory + {{- with .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ . }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + emptyDir: + {{- with .Values.sidecar.alerts.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: + {{- with .Values.sidecar.dashboards.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ include "grafana.fullname" . }}-config-dashboards + {{- end }} + {{- end }} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: + {{- with .Values.sidecar.datasources.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + emptyDir: + {{- with .Values.sidecar.plugins.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: + {{- with .Values.sidecar.notifiers.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- range .Values.extraSecretMounts }} + {{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- else if .projected }} + - name: {{ .name }} + projected: + {{- toYaml .projected | nindent 6 }} + {{- else if .csi }} + - name: {{ .name }} + csi: + {{- toYaml .csi | nindent 6 }} + {{- end }} + {{- end }} + {{- range .Values.extraVolumes }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + {{ toYaml .hostPath | nindent 6 }} + {{- else if .csi }} + csi: + {{- toYaml .data | nindent 6 }} + {{- else if .configMap }} + configMap: + {{- toYaml .configMap | nindent 6 }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} + {{- end }} + {{- with .Values.extraContainerVolumes }} + {{- tpl (toYaml .) $root | nindent 2 }} + {{- end }} +{{- end }} + diff --git a/operations/deployment/helm/grafana/templates/clusterrole.yaml b/operations/deployment/helm/grafana/templates/clusterrole.yaml new file mode 100644 index 0000000..3af4b62 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} +rules: + {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end}} + {{- with .Values.rbac.extraClusterRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..bda9431 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +roleRef: + kind: ClusterRole + {{- if .Values.rbac.useExistingClusterRole }} + name: {{ .Values.rbac.useExistingClusterRole }} + {{- else }} + name: {{ include "grafana.fullname" . }}-clusterrole + {{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configSecret.yaml b/operations/deployment/helm/grafana/templates/configSecret.yaml new file mode 100644 index 0000000..f8937cc --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configSecret.yaml @@ -0,0 +1,43 @@ +{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} +{{- if and .Values.createConfigmap $createConfigSecret }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: Secret +metadata: + name: "{{ include "grafana.fullname" . }}-config-secret" + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: +{{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "secretFile") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}} + {{/* as of https://helm.sh/docs/chart_template_guide/accessing_files/ this will only work if you fork this chart and add files to it*/}} + {{- end }} +{{- end }} +stringData: +{{- range $key, $value := .Values.datasources }} +{{- if (hasKey $value "secret") }} +{{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} +{{- end }} +{{- end }} +{{- range $key, $value := .Values.notifiers }} +{{- if (hasKey $value "secret") }} +{{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} +{{- end }} +{{- end }} +{{- range $key, $value := .Values.alerting }} +{{ if (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value.secret | nindent 4) $root }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 0000000..1f706a8 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-config-dashboards + namespace: {{ include "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end }} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configmap.yaml b/operations/deployment/helm/grafana/templates/configmap.yaml new file mode 100644 index 0000000..7b837d9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configmap.yaml @@ -0,0 +1,144 @@ +{{- if .Values.createConfigmap }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + {{- with .Values.plugins }} + plugins: {{ join "," . 
}} + {{- end }} + grafana.ini: | + {{- range $elem, $elemVal := index .Values "grafana.ini" }} + {{- if not (kindIs "map" $elemVal) }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := index .Values "grafana.ini" }} + {{- if kindIs "map" $value }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.datasources }} + {{- if not (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.notifiers }} + {{- if not (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "file") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}} + {{/* will be stored inside secret generated by "configSecret.yaml"*/}} + {{- else }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.dashboardProviders }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + {{ $dashboardProviders := .Values.dashboardProviders }} + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + {{- if not $value.acceptHeader }} + -H "Accept: application/json" \ + {{- else }} + -H "Accept: {{ $value.acceptHeader }}" \ + {{- end }} + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + {{- if $value.bearerToken }} + -H "Authorization: Bearer {{ $value.bearerToken }}" \ + {{- end }} + {{- if $value.basic }} + -H "Authorization: Basic {{ $value.basic }}" \ + {{- end }} + {{- if $value.gitlabToken }} + -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{- end }} + {{- $dpPath := "" -}} + {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} + {{- if eq $kd.name $provider }} + {{- $dpPath = $kd.options.path }} + {{- end }} + {{- end }} + {{- if $value.url }} + "{{ $value.url }}" \ + {{- else }} + "https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ + {{- end }} + {{- if $value.datasource }} + {{- if kindIs "string" $value.datasource }} + | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ + {{- end }} + {{- if kindIs "slice" $value.datasource }} + {{- range $value.datasource }} + | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ + {{- end }} + {{- end }} + {{- end }} + {{- if $value.b64content }} + | base64 -d \ + {{- end }} + > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" + {{ end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 0000000..b96ce72 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,38 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ include "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} + {{- if $.Values.sidecar.dashboards.enabled }} + {{ $.Values.sidecar.dashboards.label }}: {{ $.Values.sidecar.dashboards.labelValue | quote }} + {{- end }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} + {{- print $key | nindent 2 }}.json: + {{- if hasKey $value "json" }} + |- + {{- $value.json | nindent 6 }} + {{- end }} + {{- if hasKey $value "file" }} + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/deployment.yaml b/operations/deployment/helm/grafana/templates/deployment.yaml new file mode 100644 index 0000000..bfa26bb --- /dev/null +++ b/operations/deployment/helm/grafana/templates/deployment.yaml @@ -0,0 +1,51 @@ +{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} + replicas: {{ .Values.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + {{- with .Values.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . 
| sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/extra-manifests.yaml b/operations/deployment/helm/grafana/templates/extra-manifests.yaml new file mode 100644 index 0000000..a9bb3b6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraObjects }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/operations/deployment/helm/grafana/templates/headless-service.yaml b/operations/deployment/helm/grafana/templates/headless-service.yaml new file mode 100644 index 0000000..3028589 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/headless-service.yaml @@ -0,0 +1,22 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-headless + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP + ports: + - name: {{ .Values.gossipPortName }}-tcp + port: 9094 +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/hpa.yaml b/operations/deployment/helm/grafana/templates/hpa.yaml new file mode 100644 index 0000000..46bbcb4 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/hpa.yaml @@ -0,0 +1,52 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if .Values.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }} + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + {{- if has .Values.persistence.type $sts }} + kind: StatefulSet + {{- else }} + kind: Deployment + {{- end }} + name: {{ include "grafana.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.behavior }} + behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml new file mode 100644 index 0000000..ea97969 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,131 @@ +{{ if .Values.imageRenderer.enabled }} +{{- $root := . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} + replicas: {{ .Values.imageRenderer.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + + {{- with .Values.imageRenderer.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.imageRenderer.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imageRenderer.schedulerName }} + schedulerName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range . }} + - name: {{ tpl . 
$root }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- $registry := .Values.global.imageRegistry | default .Values.imageRenderer.image.registry -}} + {{- if .Values.imageRenderer.image.sha }} + image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . }} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + livenessProbe: + httpGet: + path: / + port: {{ .Values.imageRenderer.service.portName }} + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.targetPort | quote }} + {{- if .Values.imageRenderer.serviceMonitor.enabled }} + - name: ENABLE_METRICS + value: "true" + {{- end }} + {{- range $key, $value := .Values.imageRenderer.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 16 }} + {{- end }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + {{- with .Values.imageRenderer.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.imageRenderer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml new file mode 100644 index 0000000..b0f0059 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "grafana.fullname" . }}-image-renderer + minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} + maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} + metrics: + {{- if .Values.imageRenderer.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.behavior }} + behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml new file mode 100644 index 0000000..d1a0eb3 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 14 }} + {{- end }} + {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} + {{ toYaml . | nindent 8 }} + {{- end }} +{{- end }} + +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-egress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.targetPort }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . 
| nindent 14 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml new file mode 100644 index 0000000..f8da127 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + {{- with .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + {{- with .Values.imageRenderer.appProtocol }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml new file mode 100644 index 0000000..5d9f09d --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if .Values.imageRenderer.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + {{- if .Values.imageRenderer.serviceMonitor.namespace }} + namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.imageRenderer.service.portName }} + {{- with .Values.imageRenderer.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.imageRenderer.serviceMonitor.path }} + scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} + {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}-image-renderer" + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/ingress.yaml b/operations/deployment/helm/grafana/templates/ingress.yaml new file mode 100644 index 0000000..063cdfa --- /dev/null +++ b/operations/deployment/helm/grafana/templates/ingress.yaml @@ -0,0 +1,78 @@ +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: {{ include "grafana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} + {{- with .Values.ingress.tls }} + tls: + {{- tpl (toYaml .) $ | nindent 4 }} + {{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . $ }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- with $ingressPath }} + path: {{ . }} + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/networkpolicy.yaml b/operations/deployment/helm/grafana/templates/networkpolicy.yaml new file mode 100644 index 0000000..4cd3ed6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/networkpolicy.yaml @@ -0,0 +1,61 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + policyTypes: + {{- if .Values.networkPolicy.ingress }} + - Ingress + {{- end }} + {{- if .Values.networkPolicy.egress.enabled }} + - Egress + {{- end }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 6 }} + + {{- if .Values.networkPolicy.egress.enabled }} + egress: + {{- if not .Values.networkPolicy.egress.blockDNSResolution }} + - ports: + - port: 53 + protocol: UDP + {{- end }} + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- with .Values.networkPolicy.egress.to }} + to: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.ingress }} + ingress: + - ports: + - port: {{ .Values.service.targetPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "grafana.fullname" . }}-client: "true" + {{- with .Values.networkPolicy.explicitNamespacesSelector }} + - namespaceSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "grafana.labels" . | nindent 14 }} + role: read + {{- end }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..0525121 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} + {{- with .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..eed7af9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/pvc.yaml b/operations/deployment/helm/grafana/templates/pvc.yaml new file mode 100644 index 0000000..eb8f87f --- /dev/null +++ b/operations/deployment/helm/grafana/templates/pvc.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.extraPvcLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- with .Values.persistence.storageClassName }} + storageClassName: {{ . }} + {{- end }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/role.yaml b/operations/deployment/helm/grafana/templates/role.yaml new file mode 100644 index 0000000..4b5edd9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/role.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} +rules: + {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}] + {{- end }} + {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end }} + {{- with .Values.rbac.extraRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/rolebinding.yaml b/operations/deployment/helm/grafana/templates/rolebinding.yaml new file mode 100644 index 0000000..58f77c6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + {{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} + {{- else }} + name: {{ include "grafana.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret-env.yaml b/operations/deployment/helm/grafana/templates/secret-env.yaml new file mode 100644 index 0000000..eb14aac --- /dev/null +++ b/operations/deployment/helm/grafana/templates/secret-env.yaml @@ -0,0 +1,14 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }}-env + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret.yaml b/operations/deployment/helm/grafana/templates/secret.yaml new file mode 100644 index 0000000..5cbd527 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/secret.yaml @@ -0,0 +1,26 @@ +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ include "grafana.password" . }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/service.yaml b/operations/deployment/helm/grafana/templates/service.yaml new file mode 100644 index 0000000..9102c1e --- /dev/null +++ b/operations/deployment/helm/grafana/templates/service.yaml @@ -0,0 +1,58 @@ +{{- if .Values.service.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- with .Values.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + {{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . 
}} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- with .Values.service.externalIPs }} + externalIPs: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ . }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} + {{- with .Values.service.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- with .Values.extraExposePorts }} + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/serviceaccount.yaml b/operations/deployment/helm/grafana/templates/serviceaccount.yaml new file mode 100644 index 0000000..784e71b --- /dev/null +++ b/operations/deployment/helm/grafana/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create }} +{{- $root := . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/servicemonitor.yaml b/operations/deployment/helm/grafana/templates/servicemonitor.yaml new file mode 100644 index 0000000..7239682 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ tpl .Values.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- with .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/statefulset.yaml b/operations/deployment/helm/grafana/templates/statefulset.yaml new file mode 100644 index 0000000..e6c944a --- /dev/null +++ b/operations/deployment/helm/grafana/templates/statefulset.yaml @@ -0,0 +1,56 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ include "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + {{- if .Values.persistence.enabled}} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml new file mode 100644 index 0000000..01c96c9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + run.sh: |- + @test "Test Health" { + url="http://{{ include "grafana.fullname" . 
}}/api/health" + + code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') + [ "$code" == "200" ] + } +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml new file mode 100644 index 0000000..1821772 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,32 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }}-test + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - csi + - secret +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-role.yaml b/operations/deployment/helm/grafana/templates/tests/test-role.yaml new file mode 100644 index 0000000..cb4c782 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-role.yaml @@ -0,0 +1,17 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}-test] +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml new file mode 100644 index 0000000..f40d791 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "grafana.fullname" . }}-test +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . 
}} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml new file mode 100644 index 0000000..38fba35 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test.yaml b/operations/deployment/helm/grafana/templates/tests/test.yaml new file mode 100644 index 0000000..15067ae --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test.yaml @@ -0,0 +1,49 @@ +{{- if .Values.testFramework.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "grafana.fullname" . }}-test + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + namespace: {{ include "grafana.namespace" . }} +spec: + serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }} + {{- with .Values.testFramework.securityContext }} + securityContext: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 4 }} + {{- end }} + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.global.imageRegistry | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}" + imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" + command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + volumes: + - name: tests + configMap: + name: {{ include "grafana.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml new file mode 100644 index 0000000..07502cc --- /dev/null +++ b/operations/deployment/helm/grafana/values.yaml @@ -0,0 +1,1282 @@ +global: + # -- Overrides the Docker registry globally for all images + imageRegistry: null + + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # Can be tempalted. 
+ # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-role + # useExistingClusterRole: name-of-some-clusterRole + pspEnabled: false + pspUseAppArmor: false + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: + ## ServiceAccount labels. + labels: {} +## Service account annotations. Can be templated. +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + autoMount: true + +replicas: 1 + +## Create a headless service for the deployment +headlessService: false + +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# apiVersion: "" +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/grafana + # Overrides the Grafana image tag whose default is the chart appVersion + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Can be templated. + ## + pullSecrets: [] + # - myRegistrKeySecretName + +testFramework: + enabled: true + image: + # -- The Docker registry + registry: docker.io + repository: bats/bats + tag: "v1.4.1" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + +# Enable creating the grafana configmap +createConfigmap: true + +# Extra configmaps to mount in grafana pods +# Values are templated. +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. 
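+# A minimal illustrative sketch (these label keys are hypothetical and not
+# referenced elsewhere in the chart):
+# extraLabels:
+#   team: observability
+#   environment: prod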
+extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + # -- The Docker registry + registry: docker.io + repository: curlimages/curl + tag: 7.85.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana +gossipPortName: gossip +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + enabled: true + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + ## Service annotations. Can be templated. + annotations: {} + labels: {} + portName: service + # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 30s + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + metricRelabelings: [] + targetLabels: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s >= 1.1= + pathType: Prefix + + hosts: + - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Topology Spread Constraints +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] + +## Additional init containers (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: "" +# extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + ## Sub-directory of the PV to mount. Can be templated. + # subPath: "" + ## Name of an existing PVC. Can be templated. + # existingClaim: + ## Extra labels to apply to a PVC. 
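+  ## Illustrative sketch only (the label keys below are hypothetical, e.g. for
+  ## a backup controller that selects PVCs by label):
+  # extraPvcLabels:
+  #   backup: "enabled"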
+ extraPvcLabels: {} + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the grafana-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + # -- The Docker registry + registry: docker.io + repository: library/busybox + tag: "1.31.1" + sha: "" + pullPolicy: IfNotPresent + + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + securityContext: + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + capabilities: + add: + - CHOWN + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + ## Name of the secret. Can be templated. + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Optionally define args if command is used +## Needed if using `hashicorp/envconsul` to manage secrets +## By default no arguments are set +# args: +# - "-secret" +# - "secret/grafana" +# - "./grafana" + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... 
+## - name: +## valueFrom: +## +envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc. +## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm +## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function +envRenderSecret: {} + +## The names of secrets in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. +## Name is templated. +envFromSecrets: [] +## - name: secret-name +## optional: true + +## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. +## Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core +envFromConfigMaps: [] +## - name: configmap-name +## optional: true + +# Inject Kubernetes services as environment variables. +# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables +enableServiceLinks: true + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ + # - name: grafana-secrets + # mountPath: /mnt/volume2 + # csi: true + # data: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "grafana-env-spc" + +## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request +lifecycleHooks: {} + # postStart: + # exec: + # command: [] + +## Pass the plugins you want installed as a list. 
+## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + ## You can also use other plugin download URL, as long as they are valid zip files, + ## and specify the name of the plugin after the semicolon. Like this: + # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 +# deleteDatasources: [] +# - name: Prometheus + +## Configure grafana alerting (can be templated) +## ref: http://docs.grafana.org/administration/provisioning/#alerting +## +alerting: {} + # rules.yaml: + # apiVersion: 1 + # groups: + # - orgId: 1 + # name: '{{ .Chart.Name }}_my_rule_group' + # folder: my_first_folder + # interval: 60s + # rules: + # - uid: my_id_1 + # title: my_first_rule + # condition: A + # data: + # - refId: A + # datasourceUid: '-100' + # model: + # conditions: + # - evaluator: + # params: + # - 3 + # type: gt + # operator: + # type: and + # query: + # params: + # - A + # reducer: + # type: last + # type: query + # datasource: + # type: __expr__ + # uid: '-100' + # expression: 1==0 + # intervalMs: 1000 + # maxDataPoints: 43200 + # refId: A + # type: math + # dashboardUid: my_dashboard + # panelId: 123 + # noDataState: Alerting + # for: 60s + # annotations: + # some_key: some_value + # labels: + # team: sre_team_1 + # contactpoints.yaml: + # secret: + # apiVersion: 1 + # contactPoints: + # - orgId: 1 + # name: cp_1 + # receivers: + # - uid: first_uid + # type: pagerduty + # settings: + # integrationKey: XXX + # severity: critical + # class: ping failure + # component: Grafana + # group: app-stack + # summary: | + # {{ `{{ include "default.message" . }}` }} + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + # local-dashboard-gitlab: + # url: https://example.com/repository/test-gitlab.json + # gitlabToken: '' + # local-dashboard-bitbucket: + # url: https://example.com/repository/test-bitbucket.json + # bearerToken: '' + # local-dashboard-azure: + # url: https://example.com/repository/test-azure.json + # basic: '' + # acceptHeader: '*/*' + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + server: + domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. 
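+  # For example (illustrative name; the secret must be created beforehand and
+  # expose the rendered ldap.toml under the key `ldap-toml`):
+  # existingSecret: grafana-ldap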
+ existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + # -- The Docker registry + registry: quay.io + repository: kiwigrid/k8s-sidecar + tag: 1.25.2 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + readinessProbe: {} + livenessProbe: {} + # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO + # logLevel: INFO + alerts: + enabled: false + # Additional environment variables for the alerts sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with alert are marked with + label: grafana_alert + # value of label that the configmaps with alert are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for alert config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" + # Absolute path to shell script to execute after a alert got reloaded + script: null + skipReload: false + # This is needed if skipReload is true, to load any alerts defined at startup time. + # Deploy the alert sidecar as an initContainer. 
+ initAlerts: false + # Additional alert sidecar volume mounts + extraMounts: [] + # Sets the size limit of the alert sidecar emptyDir volume + sizeLimit: {} + dashboards: + enabled: false + # Additional environment variables for the dashboards sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + SCProvider: true + # label that the configmaps with dashboards are marked with + label: grafana_dashboard + # value of label that the configmaps with dashboards are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) + folder: /tmp/dashboards + # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead + defaultFolderName: null + # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces. + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # If specified, the sidecar will look for annotation with this name to create folder and put graph here. + # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. + folderAnnotation: null + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" + # Absolute path to shell script to execute after a configmap got reloaded + script: null + skipReload: false + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # provider configuration that lets grafana manage the dashboards + provider: + # name of the provider, should be unique + name: sidecarProvider + # orgid as configured in grafana + orgid: 1 + # folder in which the dashboards should be imported in grafana + folder: '' + # type of the provider + type: file + # disableDelete to activate a import-only behaviour + disableDelete: false + # allow updating provisioned dashboards from the UI + allowUiUpdates: false + # allow Grafana to replicate dashboard structure from filesystem + foldersFromFilesStructure: false + # Additional dashboard sidecar volume mounts + extraMounts: [] + # Sets the size limit of the dashboard sidecar emptyDir volume + sizeLimit: {} + datasources: + enabled: false + # Additional environment variables for the datasourcessidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. 
+ # ignoreAlreadyProcessed: true + # label that the configmaps with datasources are marked with + label: grafana_datasource + # value of label that the configmaps with datasources are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for datasource config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload datasources + reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" + # Absolute path to shell script to execute after a datasource got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any datasources defined at startup time. + initDatasources: false + # Sets the size limit of the datasource sidecar emptyDir volume + sizeLimit: {} + plugins: + enabled: false + # Additional environment variables for the plugins sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with plugins are marked with + label: grafana_plugin + # value of label that the configmaps with plugins are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for plugin config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) 
+ # watchClientTimeout: 60 + # + # Endpoint to send request to reload plugins + reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" + # Absolute path to shell script to execute after a plugin got reloaded + script: null + skipReload: false + # Deploy the datasource sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any plugins defined at startup time. + initPlugins: false + # Sets the size limit of the plugin sidecar emptyDir volume + sizeLimit: {} + notifiers: + enabled: false + # Additional environment variables for the notifierssidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with notifiers are marked with + label: grafana_notifier + # value of label that the configmaps with notifiers are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for notifier config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload notifiers + reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" + # Absolute path to shell script to execute after a notifier got reloaded + script: null + skipReload: false + # Deploy the notifier sidecar as an initContainer in addition to a container. + # This is needed if skipReload is true, to load any notifiers defined at startup time. + initNotifiers: false + # Sets the size limit of the notifier sidecar emptyDir volume + sizeLimit: {} + +## Override the deployment namespace +## +namespaceOverride: "" + +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + deploymentStrategy: {} + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + image: + # -- The Docker registry + registry: docker.io + # image-renderer Image repository + repository: grafana/grafana-image-renderer + # image-renderer Image tag + tag: latest + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # IGNORE_HTTPS_ERRORS: true + + ## "valueFrom" environment variable references that will be added to deployment pods. 
Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment container securityContext + containerSecurityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: ['ALL'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ## image-renderer pod annotation + podAnnotations: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # Enable the image-renderer service + enabled: true + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels + targetLabels: [] + # - targetLabel1 + # - targetLabel2 + # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana + grafanaProtocol: http + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) + extraIngressSelectors: [] + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + ## Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + + ## Affinity for pod assignment (evaluated as template) + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: "default-scheduler" + +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. 
+ ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to grafana port defined. + ## When true, grafana will accept connections from any source + ## (with the correct destination port). + ## + ingress: true + ## @param networkPolicy.ingress When true enables the creation + ## an ingress network policy + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the grafana. + ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## + ## + ## + ## + ## + ## + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing grafana to connect to external data sources from kubernetes cluster. + enabled: false + ## + ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked + ## for all pods in the grafana namespace. + blockDNSResolution: false + ## + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ports: [] + ## Add ports to the egress by specifying - port: + ## E.X. + ## - port: 80 + ## - port: 443 + ## + ## @param networkPolicy.egress.to Allow egress traffic to specific destinations + to: [] + ## Add destinations to the egress by specifying - ipBlock: + ## E.X. 
+ ## to: + ## - namespaceSelector: + ## matchExpressions: + ## - {key: role, operator: In, values: [grafana]} + ## + ## + ## + ## + ## + +# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option +enableKubeBackwardCompatibility: false +useStatefulSet: false +# Create a dynamic manifests via values: +extraObjects: [] + # - apiVersion: "kubernetes-client.io/v1" + # kind: ExternalSecret + # metadata: + # name: grafana-secrets + # spec: + # backendType: gcpSecretsManager + # data: + # - key: grafana-admin-password + # name: adminPassword diff --git a/operations/deployment/helm/prometheus/Chart.lock b/operations/deployment/helm/prometheus/Chart.lock deleted file mode 100644 index 83e5660..0000000 --- a/operations/deployment/helm/prometheus/Chart.lock +++ /dev/null @@ -1,15 +0,0 @@ -dependencies: -- name: alertmanager - repository: https://prometheus-community.github.io/helm-charts - version: 1.7.0 -- name: kube-state-metrics - repository: https://prometheus-community.github.io/helm-charts - version: 5.15.2 -- name: prometheus-node-exporter - repository: https://prometheus-community.github.io/helm-charts - version: 4.24.0 -- name: prometheus-pushgateway - repository: https://prometheus-community.github.io/helm-charts - version: 2.4.2 -digest: sha256:83aa9d98623ddd76cceaf5a17b69372ae34ab715db5ccb84dbc472d5fb78a01e -generated: "2023-11-17T18:47:14.928659+02:00" diff --git a/operations/deployment/helm/prometheus/Chart.yaml b/operations/deployment/helm/prometheus/Chart.yaml deleted file mode 100644 index 4d0fbc2..0000000 --- a/operations/deployment/helm/prometheus/Chart.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v2 -name: prometheus chart wrapper -description: A Helm chart wrapper for prometheus -version: 0.1.0 - -dependencies: -- name: prometheus - version: 13.8.0 - repository: https://prometheus-community.github.io/helm-charts \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/ENV_FILE b/operations/deployment/helm/prometheus/ENV_FILE deleted file mode 100644 index 8ce79ba..0000000 --- a/operations/deployment/helm/prometheus/ENV_FILE +++ /dev/null @@ -1,2 +0,0 @@ -GF_SECURITY_ADMIN_USER=admin -GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/README.md b/operations/deployment/helm/prometheus/README.md deleted file mode 100644 index 2cb744c..0000000 --- a/operations/deployment/helm/prometheus/README.md +++ /dev/null @@ -1,382 +0,0 @@ -# Prometheus - -[Prometheus](https://prometheus.io/), a [Cloud Native Computing Foundation](https://cncf.io/) project, is a systems and service monitoring system. It collects metrics from configured targets at given intervals, evaluates rule expressions, displays the results, and can trigger alerts if some condition is observed to be true. - -This chart bootstraps a [Prometheus](https://prometheus.io/) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. - -## Prerequisites - -- Kubernetes 1.19+ -- Helm 3.7+ - -## Get Repository Info - -```console -helm repo add prometheus-community https://prometheus-community.github.io/helm-charts -helm repo update -``` - -_See [helm repository](https://helm.sh/docs/helm/helm_repo/) for command documentation._ - -## Install Chart - -Starting with version 16.0, the Prometheus chart requires Helm 3.7+ in order to install successfully. Please check your `helm` release before installation. 
- -```console -helm install [RELEASE_NAME] prometheus-community/prometheus -``` - -_See [configuration](#configuration) below._ - -_See [helm install](https://helm.sh/docs/helm/helm_install/) for command documentation._ - -## Dependencies - -By default this chart installs additional, dependent charts: - -- [alertmanager](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager) -- [kube-state-metrics](https://github.com/prometheus-community/helm-charts/tree/main/charts/kube-state-metrics) -- [prometheus-node-exporter](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter) -- [prometheus-pushgateway](https://github.com/walker-tom/helm-charts/tree/main/charts/prometheus-pushgateway) - -To disable the dependency during installation, set `alertmanager.enabled`, `kube-state-metrics.enabled`, `prometheus-node-exporter.enabled` and `prometheus-pushgateway.enabled` to `false`. - -_See [helm dependency](https://helm.sh/docs/helm/helm_dependency/) for command documentation._ - -## Uninstall Chart - -```console -helm uninstall [RELEASE_NAME] -``` - -This removes all the Kubernetes components associated with the chart and deletes the release. - -_See [helm uninstall](https://helm.sh/docs/helm/helm_uninstall/) for command documentation._ - -## Updating values.schema.json - -A [`values.schema.json`](https://helm.sh/docs/topics/charts/#schema-files) file has been added to validate chart values. When `values.yaml` file has a structure change (i.e. add a new field, change value type, etc.), modify `values.schema.json` file manually or run `helm schema-gen values.yaml > values.schema.json` to ensure the schema is aligned with the latest values. Refer to [helm plugin `helm-schema-gen`](https://github.com/karuppiah7890/helm-schema-gen) for plugin installation instructions. - -## Upgrading Chart - -```console -helm upgrade [RELEASE_NAME] prometheus-community/prometheus --install -``` - -_See [helm upgrade](https://helm.sh/docs/helm/helm_upgrade/) for command documentation._ - -### To 25.0 - -The `server.remoteRead[].url` and `server.remoteWrite[].url` fields now support templating. Allowing for `url` values such as `https://{{ .Release.Name }}.example.com`. - -Any entries in these which previously included `{{` or `}}` must be escaped with `{{ "{{" }}` and `{{ "}}" }}` respectively. Entries which did not previously include the template-like syntax will not be affected. - -### To 24.0 - -Require Kubernetes 1.19+ - -Release 1.0.0 of the _alertmanager_ replaced [configmap-reload](https://github.com/jimmidyson/configmap-reload) with [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader). -Extra command-line arguments specified via `configmapReload.prometheus.extraArgs` are not compatible and will break with the new prometheus-config-reloader. Please, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments. - -### To 23.0 - -Release 5.0.0 of the _kube-state-metrics_ chart introduced a separation of the `image.repository` value in two distinct values: - -```console - image: - registry: registry.k8s.io - repository: kube-state-metrics/kube-state-metrics -``` - -If a custom values file or CLI flags set `kube-state.metrics.image.repository`, please, set the new values accordingly. 
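As a rough sketch only (assuming the dependency is exposed under the `kube-state-metrics` key, as in this chart's dependencies list, and keeping the default registry and repository quoted above), the corresponding values override might look like:

```yaml
# Hypothetical custom-values.yaml fragment -- point registry/repository at your own mirror if needed.
kube-state-metrics:
  image:
    registry: registry.k8s.io
    repository: kube-state-metrics/kube-state-metrics
```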
- -If you are upgrading _prometheus-pushgateway_ with the chart and _prometheus-pushgateway_ has been deployed as a statefulset with a persistent volume, the statefulset must be deleted before upgrading the chart, e.g.: - -```bash -kubectl delete sts -l app.kubernetes.io/name=prometheus-pushgateway -n monitoring --cascade=orphan -``` - -Users are advised to review changes in the corresponding chart releases before upgrading. - -### To 22.0 - -The `app.kubernetes.io/version` label has been removed from the pod selector. - -Therefore, you must delete the previous StatefulSet or Deployment before upgrading. Performing this operation will cause **Prometheus to stop functioning** until the upgrade is complete. - -```console -kubectl delete deploy,sts -l app.kubernetes.io/name=prometheus -``` - -### To 21.0 - -The Kubernetes labels have been updated to follow [Helm 3 label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/). -Specifically, labels mapping is listed below: - -| OLD | NEW | -|--------------------|------------------------------| -|heritage | app.kubernetes.io/managed-by | -|chart | helm.sh/chart | -|[container version] | app.kubernetes.io/version | -|app | app.kubernetes.io/name | -|release | app.kubernetes.io/instance | - -Therefore, depending on the way you've configured the chart, the previous StatefulSet or Deployment need to be deleted before upgrade. - -If `runAsStatefulSet: false` (this is the default): - -```console -kubectl delete deploy -l app=prometheus -``` - -If `runAsStatefulSet: true`: - -```console -kubectl delete sts -l app=prometheus -``` - -After that do the actual upgrade: - -```console -helm upgrade -i prometheus prometheus-community/prometheus -``` - -### To 20.0 - -The [configmap-reload](https://github.com/jimmidyson/configmap-reload) container was replaced by the [prometheus-config-reloader](https://github.com/prometheus-operator/prometheus-operator/tree/main/cmd/prometheus-config-reloader). -Extra command-line arguments specified via configmapReload.prometheus.extraArgs are not compatible and will break with the new prometheus-config-reloader, refer to the [sources](https://github.com/prometheus-operator/prometheus-operator/blob/main/cmd/prometheus-config-reloader/main.go) in order to make the appropriate adjustment to the extra command-line arguments. - -### To 19.0 - -Prometheus has been updated to version v2.40.5. - -Prometheus-pushgateway was updated to version 2.0.0 which adapted [Helm label and annotation best practices](https://helm.sh/docs/chart_best_practices/labels/). -See the [upgrade docs of the prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway#to-200) to see whats to do, before you upgrade Prometheus! - -The condition in Chart.yaml to disable kube-state-metrics has been changed from `kubeStateMetrics.enabled` to `kube-state-metrics.enabled` - -The Docker image tag is used from appVersion field in Chart.yaml by default. - -Unused subchart configs has been removed and subchart config is now on the bottom of the config file. - -If Prometheus is used as deployment the updatestrategy has been changed to "Recreate" by default, so Helm updates work out of the box. - -`.Values.server.extraTemplates` & `.Values.server.extraObjects` has been removed in favour of `.Values.extraManifests`, which can do the same. - -`.Values.server.enabled` has been removed as it's useless now that all components are created by subcharts. 
- -All files in `templates/server` directory has been moved to `templates` directory. - -```bash -helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 19.0.0 -``` - -### To 18.0 - -Version 18.0.0 uses alertmanager service from the [alertmanager chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/alertmanager). If you've made some config changes, please check the old `alertmanager` and the new `alertmanager` configuration section in values.yaml for differences. - -Note that the `configmapReload` section for `alertmanager` was moved out of dedicated section (`configmapReload.alertmanager`) to alertmanager embedded (`alertmanager.configmapReload`). - -Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade: - -```bash -# In 17.x -kubectl scale deploy prometheus-server --replicas=0 -# Upgrade -helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 18.0.0 -``` - -### To 17.0 - -Version 17.0.0 uses pushgateway service from the [prometheus-pushgateway chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-pushgateway). If you've made some config changes, please check the old `pushgateway` and the new `prometheus-pushgateway` configuration section in values.yaml for differences. - -Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade: - -```bash -# In 16.x -kubectl scale deploy prometheus-server --replicas=0 -# Upgrade -helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 17.0.0 -``` - -### To 16.0 - -Starting from version 16.0 embedded services (like alertmanager, node-exporter etc.) are moved out of Prometheus chart and the respecting charts from this repository are used as dependencies. Version 16.0.0 moves node-exporter service to [prometheus-node-exporter chart](https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-node-exporter). If you've made some config changes, please check the old `nodeExporter` and the new `prometheus-node-exporter` configuration section in values.yaml for differences. - -Before you update, please scale down the `prometheus-server` deployment to `0` then perform upgrade: - -```bash -# In 15.x -kubectl scale deploy prometheus-server --replicas=0 -# Upgrade -helm upgrade [RELEASE_NAME] prometheus-community/prometheus --version 16.0.0 -``` - -### To 15.0 - -Version 15.0.0 changes the relabeling config, aligning it with the [Prometheus community conventions](https://github.com/prometheus/prometheus/pull/9832). If you've made manual changes to the relabeling config, you have to adapt your changes. - -Before you update please execute the following command, to be able to update kube-state-metrics: - -```bash -kubectl delete deployments.apps -l app.kubernetes.io/instance=prometheus,app.kubernetes.io/name=kube-state-metrics --cascade=orphan -``` - -### To 9.0 - -Version 9.0 adds a new option to enable or disable the Prometheus Server. This supports the use case of running a Prometheus server in one k8s cluster and scraping exporters in another cluster while using the same chart for each deployment. To install the server `server.enabled` must be set to `true`. - -### To 5.0 - -As of version 5.0, this chart uses Prometheus 2.x. This version of prometheus introduces a new data format and is not compatible with prometheus 1.x. It is recommended to install this as a new release, as updating existing releases will not work. 
See the [prometheus docs](https://prometheus.io/docs/prometheus/latest/migration/#storage) for instructions on retaining your old data. - -Prometheus version 2.x has made changes to alertmanager, storage and recording rules. Check out the migration guide [here](https://prometheus.io/docs/prometheus/2.0/migration/). - -Users of this chart will need to update their alerting rules to the new format before they can upgrade. - -### Example Migration - -Assuming you have an existing release of the prometheus chart, named `prometheus-old`. In order to update to prometheus 2.x while keeping your old data do the following: - -1. Update the `prometheus-old` release. Disable scraping on every component besides the prometheus server, similar to the configuration below: - - ```yaml - alertmanager: - enabled: false - alertmanagerFiles: - alertmanager.yml: "" - kubeStateMetrics: - enabled: false - nodeExporter: - enabled: false - pushgateway: - enabled: false - server: - extraArgs: - storage.local.retention: 720h - serverFiles: - alerts: "" - prometheus.yml: "" - rules: "" - ``` - -1. Deploy a new release of the chart with version 5.0+ using prometheus 2.x. In the values.yaml set the scrape config as usual, and also add the `prometheus-old` instance as a remote-read target. - - ```yaml - prometheus.yml: - ... - remote_read: - - url: http://prometheus-old/api/v1/read - ... - ``` - - Old data will be available when you query the new prometheus instance. - -## Configuration - -See [Customizing the Chart Before Installing](https://helm.sh/docs/intro/using_helm/#customizing-the-chart-before-installing). To see all configurable options with detailed comments, visit the chart's [values.yaml](./values.yaml), or run these configuration commands: - -```console -helm show values prometheus-community/prometheus -``` - -You may similarly use the above configuration commands on each chart [dependency](#dependencies) to see its configurations. - -### Scraping Pod Metrics via Annotations - -This chart uses a default configuration that causes prometheus to scrape a variety of kubernetes resource types, provided they have the correct annotations. In this section we describe how to configure pods to be scraped; for information on how other resource types can be scraped you can do a `helm template` to get the kubernetes resource definitions, and then reference the prometheus configuration in the ConfigMap against the prometheus documentation for [relabel_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config) and [kubernetes_sd_config](https://prometheus.io/docs/prometheus/latest/configuration/configuration/#kubernetes_sd_config). - -In order to get prometheus to scrape pods, you must add annotations to the pods as below: - -```yaml -metadata: - annotations: - prometheus.io/scrape: "true" - prometheus.io/path: /metrics - prometheus.io/port: "8080" -``` - -You should adjust `prometheus.io/path` based on the URL that your pod serves metrics from. `prometheus.io/port` should be set to the port that your pod serves metrics from. Note that the values for `prometheus.io/scrape` and `prometheus.io/port` must be enclosed in double quotes. - -### Sharing Alerts Between Services - -Note that when [installing](#install-chart) or [upgrading](#upgrading-chart) you may use multiple values override files. This is particularly useful when you have alerts belonging to multiple services in the cluster. For example, - -```yaml -# values.yaml -# ... 
- -# service1-alert.yaml -serverFiles: - alerts: - service1: - - alert: anAlert - # ... - -# service2-alert.yaml -serverFiles: - alerts: - service2: - - alert: anAlert - # ... -``` - -```console -helm install [RELEASE_NAME] prometheus-community/prometheus -f values.yaml -f service1-alert.yaml -f service2-alert.yaml -``` - -### RBAC Configuration - -Roles and RoleBindings resources will be created automatically for `server` service. - -To manually setup RBAC you need to set the parameter `rbac.create=false` and specify the service account to be used for each service by setting the parameters: `serviceAccounts.{{ component }}.create` to `false` and `serviceAccounts.{{ component }}.name` to the name of a pre-existing service account. - -> **Tip**: You can refer to the default `*-clusterrole.yaml` and `*-clusterrolebinding.yaml` files in [templates](templates/) to customize your own. - -### ConfigMap Files - -AlertManager is configured through [alertmanager.yml](https://prometheus.io/docs/alerting/configuration/). This file (and any others listed in `alertmanagerFiles`) will be mounted into the `alertmanager` pod. - -Prometheus is configured through [prometheus.yml](https://prometheus.io/docs/operating/configuration/). This file (and any others listed in `serverFiles`) will be mounted into the `server` pod. - -### Ingress TLS - -If your cluster allows automatic creation/retrieval of TLS certificates (e.g. [cert-manager](https://github.com/jetstack/cert-manager)), please refer to the documentation for that mechanism. - -To manually configure TLS, first create/retrieve a key & certificate pair for the address(es) you wish to protect. Then create a TLS secret in the namespace: - -```console -kubectl create secret tls prometheus-server-tls --cert=path/to/tls.cert --key=path/to/tls.key -``` - -Include the secret's name, along with the desired hostnames, in the alertmanager/server Ingress TLS section of your custom `values.yaml` file: - -```yaml -server: - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: true - - ## Prometheus server Ingress hostnames - ## Must be provided if Ingress is enabled - ## - hosts: - - prometheus.domain.com - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: - - secretName: prometheus-server-tls - hosts: - - prometheus.domain.com -``` - -### NetworkPolicy - -Enabling Network Policy for Prometheus will secure connections to Alert Manager and Kube State Metrics by only accepting connections from Prometheus Server. All inbound connections to Prometheus Server are still allowed. - -To enable network policy for Prometheus, install a networking plugin that implements the Kubernetes NetworkPolicy spec, and set `networkPolicy.enabled` to true. - -If NetworkPolicy is enabled for Prometheus' scrape targets, you may also need to manually create a networkpolicy which allows it. 
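A rough sketch of such a policy is shown below; the target label, port, and namespace scope are assumptions and must be adapted to the workload being scraped and to the labels your Prometheus pods actually carry (the chart labels them `app.kubernetes.io/name=prometheus`, as noted in the upgrade sections above):

```yaml
# Hypothetical NetworkPolicy allowing Prometheus to scrape a target workload.
# `app: my-scrape-target` and port 8080 are placeholders; this variant only
# admits traffic from pods in the same namespace that match the selector.
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-prometheus-scrape
spec:
  podSelector:
    matchLabels:
      app: my-scrape-target
  policyTypes:
    - Ingress
  ingress:
    - from:
        - podSelector:
            matchLabels:
              app.kubernetes.io/name: prometheus
      ports:
        - protocol: TCP
          port: 8080
```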
diff --git a/operations/deployment/helm/prometheus/bitops.config.yaml b/operations/deployment/helm/prometheus/bitops.config.yaml deleted file mode 100644 index 26d9981..0000000 --- a/operations/deployment/helm/prometheus/bitops.config.yaml +++ /dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: prometheus - timeout: 500s - debug: true - atomic: true - force: false - dry-run: false - options: - release-name: prometheus - skip-deploy: false - kubeconfig: - fetch: - enabled: true \ No newline at end of file diff --git a/operations/deployment/helm/prometheus/templates/NOTES.txt b/operations/deployment/helm/prometheus/templates/NOTES.txt deleted file mode 100644 index fc03c2a..0000000 --- a/operations/deployment/helm/prometheus/templates/NOTES.txt +++ /dev/null @@ -1,113 +0,0 @@ -The Prometheus server can be accessed via port {{ .Values.server.service.servicePort }} on the following DNS name from within your cluster: -{{ template "prometheus.server.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.server.ingress.enabled -}} -From outside the cluster, the server URL(s) are: -{{- range .Values.server.ingress.hosts }} -http://{{ . }} -{{- end }} -{{- else }} -Get the Prometheus server URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.server.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.server.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.server.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.server.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.server.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.server.service.servicePort }} -{{- else if contains "ClusterIP" .Values.server.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "prometheus.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9090 -{{- end }} - - -{{- if .Values.server.persistentVolume.enabled }} -{{- else }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the Server pod is terminated. ##### -################################################################################# -{{- end }} -{{- end }} - -{{ if .Values.alertmanager.enabled }} -The Prometheus alertmanager can be accessed via port {{ .Values.alertmanager.service.port }} on the following DNS name from within your cluster: -{{ template "prometheus.alertmanager.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if .Values.alertmanager.ingress.enabled -}} -From outside the cluster, the alertmanager URL(s) are: -{{- range .Values.alertmanager.ingress.hosts }} -http://{{ . 
}} -{{- end }} -{{- else }} -Get the Alertmanager URL by running these commands in the same shell: -{{- if contains "NodePort" .Values.alertmanager.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "prometheus.alertmanager.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" .Values.alertmanager.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "prometheus.alertmanager.fullname" . }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "prometheus.alertmanager.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ .Values.alertmanager.service.servicePort }} -{{- else if contains "ClusterIP" .Values.alertmanager.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alertmanager.name" .Subcharts.alertmanager }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9093 -{{- end }} -{{- end }} - -{{- if .Values.alertmanager.persistence.enabled }} -{{- else }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the AlertManager pod is terminated. ##### -################################################################################# -{{- end }} -{{- end }} - -{{- if (index .Values "prometheus-node-exporter" "enabled") }} -################################################################################# -###### WARNING: Pod Security Policy has been disabled by default since ##### -###### it deprecated after k8s 1.25+. use ##### -###### (index .Values "prometheus-node-exporter" "rbac" ##### -###### . "pspEnabled") with (index .Values ##### -###### "prometheus-node-exporter" "rbac" "pspAnnotations") ##### -###### in case you still need it. ##### -################################################################################# -{{- end }} - -{{ if (index .Values "prometheus-pushgateway" "enabled") }} -The Prometheus PushGateway can be accessed via port {{ index .Values "prometheus-pushgateway" "service" "port" }} on the following DNS name from within your cluster: -{{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}.{{ .Release.Namespace }}.svc.cluster.local - -{{ if (index .Values "prometheus-pushgateway" "ingress" "enabled") -}} -From outside the cluster, the pushgateway URL(s) are: -{{- range (index .Values "prometheus-pushgateway" "ingress" "hosts") }} -http://{{ . 
}} -{{- end }} -{{- else }} -Get the PushGateway URL by running these commands in the same shell: -{{- $pushgateway_svc_type := index .Values "prometheus-pushgateway" "service" "type" -}} -{{- if contains "NodePort" $pushgateway_svc_type }} - export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}) - export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT -{{- else if contains "LoadBalancer" $pushgateway_svc_type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }}' - - export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "prometheus-pushgateway.fullname" (index .Subcharts "prometheus-pushgateway") }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - echo http://$SERVICE_IP:{{ index .Values "prometheus-pushgateway" "service" "port" }} -{{- else if contains "ClusterIP" $pushgateway_svc_type }} - export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app={{ include "prometheus.name" (index .Subcharts "prometheus-pushgateway") }},component=pushgateway" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 9091 -{{- end }} -{{- end }} -{{- end }} - -For more information on running Prometheus, visit: -https://prometheus.io/ diff --git a/operations/deployment/helm/prometheus/templates/_helpers.tpl b/operations/deployment/helm/prometheus/templates/_helpers.tpl deleted file mode 100644 index 0810e3c..0000000 --- a/operations/deployment/helm/prometheus/templates/_helpers.tpl +++ /dev/null @@ -1,234 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "prometheus.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "prometheus.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} -{{- end -}} - -{{/* -Create labels for prometheus -*/}} -{{- define "prometheus.common.matchLabels" -}} -app.kubernetes.io/name: {{ include "prometheus.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end -}} - -{{/* -Create unified labels for prometheus components -*/}} -{{- define "prometheus.common.metaLabels" -}} -app.kubernetes.io/version: {{ .Chart.AppVersion }} -helm.sh/chart: {{ include "prometheus.chart" . }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -app.kubernetes.io/part-of: {{ include "prometheus.name" . }} -{{- with .Values.commonMetaLabels}} -{{ toYaml . }} -{{- end }} -{{- end -}} - -{{- define "prometheus.server.labels" -}} -{{ include "prometheus.server.matchLabels" . }} -{{ include "prometheus.common.metaLabels" . }} -{{- end -}} - -{{- define "prometheus.server.matchLabels" -}} -app.kubernetes.io/component: {{ .Values.server.name }} -{{ include "prometheus.common.matchLabels" . }} -{{- end -}} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
-*/}} -{{- define "prometheus.fullname" -}} -{{- if .Values.fullnameOverride -}} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- .Release.Name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified ClusterRole name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.clusterRoleName" -}} -{{- if .Values.server.clusterRoleNameOverride -}} -{{ .Values.server.clusterRoleNameOverride | trunc 63 | trimSuffix "-" }} -{{- else -}} -{{ include "prometheus.server.fullname" . }} -{{- end -}} -{{- end -}} - -{{/* -Create a fully qualified alertmanager name for communicating with the user via NOTES.txt -*/}} -{{- define "prometheus.alertmanager.fullname" -}} -{{- template "alertmanager.fullname" .Subcharts.alertmanager -}} -{{- end -}} - -{{/* -Create a fully qualified Prometheus server name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -*/}} -{{- define "prometheus.server.fullname" -}} -{{- if .Values.server.fullnameOverride -}} -{{- .Values.server.fullnameOverride | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- $name := default .Chart.Name .Values.nameOverride -}} -{{- if contains $name .Release.Name -}} -{{- printf "%s-%s" .Release.Name .Values.server.name | trunc 63 | trimSuffix "-" -}} -{{- else -}} -{{- printf "%s-%s-%s" .Release.Name $name .Values.server.name | trunc 63 | trimSuffix "-" -}} -{{- end -}} -{{- end -}} -{{- end -}} - -{{/* -Get KubeVersion removing pre-release information. -*/}} -{{- define "prometheus.kubeVersion" -}} - {{- default .Capabilities.KubeVersion.Version (regexFind "v[0-9]+\\.[0-9]+\\.[0-9]+" .Capabilities.KubeVersion.Version) -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for deployment. -*/}} -{{- define "prometheus.deployment.apiVersion" -}} -{{- print "apps/v1" -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for networkpolicy. -*/}} -{{- define "prometheus.networkPolicy.apiVersion" -}} -{{- print "networking.k8s.io/v1" -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for poddisruptionbudget. -*/}} -{{- define "prometheus.podDisruptionBudget.apiVersion" -}} -{{- if .Capabilities.APIVersions.Has "policy/v1" }} -{{- print "policy/v1" -}} -{{- else -}} -{{- print "policy/v1beta1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for rbac. -*/}} -{{- define "rbac.apiVersion" -}} -{{- if .Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} -{{- print "rbac.authorization.k8s.io/v1" -}} -{{- else -}} -{{- print "rbac.authorization.k8s.io/v1beta1" -}} -{{- end -}} -{{- end -}} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "ingress.apiVersion" -}} - {{- if and (.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19.x" (include "prometheus.kubeVersion" .)) -}} - {{- print "networking.k8s.io/v1" -}} - {{- else if .Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" -}} - {{- print "networking.k8s.io/v1beta1" -}} - {{- else -}} - {{- print "extensions/v1beta1" -}} - {{- end -}} -{{- end -}} - -{{/* -Return if ingress is stable. -*/}} -{{- define "ingress.isStable" -}} - {{- eq (include "ingress.apiVersion" .) 
"networking.k8s.io/v1" -}} -{{- end -}} - -{{/* -Return if ingress supports ingressClassName. -*/}} -{{- define "ingress.supportsIngressClassName" -}} - {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} -{{- end -}} - -{{/* -Return if ingress supports pathType. -*/}} -{{- define "ingress.supportsPathType" -}} - {{- or (eq (include "ingress.isStable" .) "true") (and (eq (include "ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18.x" (include "prometheus.kubeVersion" .))) -}} -{{- end -}} - -{{/* -Create the name of the service account to use for the server component -*/}} -{{- define "prometheus.serviceAccountName.server" -}} -{{- if .Values.serviceAccounts.server.create -}} - {{ default (include "prometheus.server.fullname" .) .Values.serviceAccounts.server.name }} -{{- else -}} - {{ default "default" .Values.serviceAccounts.server.name }} -{{- end -}} -{{- end -}} - -{{/* -Define the prometheus.namespace template if set with forceNamespace or .Release.Namespace is set -*/}} -{{- define "prometheus.namespace" -}} - {{- default .Release.Namespace .Values.forceNamespace -}} -{{- end }} - -{{/* -Define template prometheus.namespaces producing a list of namespaces to monitor -*/}} -{{- define "prometheus.namespaces" -}} -{{- $namespaces := list }} -{{- if and .Values.rbac.create .Values.server.useExistingClusterRoleName }} - {{- if .Values.server.namespaces -}} - {{- range $ns := join "," .Values.server.namespaces | split "," }} - {{- $namespaces = append $namespaces (tpl $ns $) }} - {{- end -}} - {{- end -}} - {{- if .Values.server.releaseNamespace -}} - {{- $namespaces = append $namespaces (include "prometheus.namespace" .) }} - {{- end -}} -{{- end -}} -{{ mustToJson $namespaces }} -{{- end -}} - -{{/* -Define prometheus.server.remoteWrite producing a list of remoteWrite configurations with URL templating -*/}} -{{- define "prometheus.server.remoteWrite" -}} -{{- $remoteWrites := list }} -{{- range $remoteWrite := .Values.server.remoteWrite }} - {{- $remoteWrites = tpl $remoteWrite.url $ | set $remoteWrite "url" | append $remoteWrites }} -{{- end -}} -{{ toYaml $remoteWrites }} -{{- end -}} - -{{/* -Define prometheus.server.remoteRead producing a list of remoteRead configurations with URL templating -*/}} -{{- define "prometheus.server.remoteRead" -}} -{{- $remoteReads := list }} -{{- range $remoteRead := .Values.server.remoteRead }} - {{- $remoteReads = tpl $remoteRead.url $ | set $remoteRead "url" | append $remoteReads }} -{{- end -}} -{{ toYaml $remoteReads }} -{{- end -}} - diff --git a/operations/deployment/helm/prometheus/templates/clusterrole.yaml b/operations/deployment/helm/prometheus/templates/clusterrole.yaml deleted file mode 100644 index 25e3cec..0000000 --- a/operations/deployment/helm/prometheus/templates/clusterrole.yaml +++ /dev/null @@ -1,56 +0,0 @@ -{{- if and .Values.rbac.create (empty .Values.server.useExistingClusterRoleName) -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRole -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ include "prometheus.clusterRoleName" . }} -rules: -{{- if and .Values.podSecurityPolicy.enabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} - - apiGroups: - - extensions - resources: - - podsecuritypolicies - verbs: - - use - resourceNames: - - {{ template "prometheus.server.fullname" . 
}} -{{- end }} - - apiGroups: - - "" - resources: - - nodes - - nodes/proxy - - nodes/metrics - - services - - endpoints - - pods - - ingresses - - configmaps - verbs: - - get - - list - - watch - - apiGroups: - - "extensions" - - "networking.k8s.io" - resources: - - ingresses/status - - ingresses - verbs: - - get - - list - - watch - - apiGroups: - - "discovery.k8s.io" - resources: - - endpointslices - verbs: - - get - - list - - watch - - nonResourceURLs: - - "/metrics" - verbs: - - get -{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml b/operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml deleted file mode 100644 index 28f4bda..0000000 --- a/operations/deployment/helm/prometheus/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if and .Values.rbac.create (empty .Values.server.namespaces) (empty .Values.server.useExistingClusterRoleName) -}} -apiVersion: {{ template "rbac.apiVersion" . }} -kind: ClusterRoleBinding -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ include "prometheus.clusterRoleName" . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.server" . }} - namespace: {{ include "prometheus.namespace" . }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ include "prometheus.clusterRoleName" . }} -{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/cm.yaml b/operations/deployment/helm/prometheus/templates/cm.yaml deleted file mode 100644 index c670666..0000000 --- a/operations/deployment/helm/prometheus/templates/cm.yaml +++ /dev/null @@ -1,99 +0,0 @@ -{{- if (empty .Values.server.configMapOverrideName) -}} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - {{- with .Values.server.extraConfigmapLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ template "prometheus.server.fullname" . }} - namespace: {{ include "prometheus.namespace" . }} -data: - allow-snippet-annotations: "false" -{{- $root := . 
-}} -{{- range $key, $value := .Values.ruleFiles }} - {{ $key }}: {{- toYaml $value | indent 2 }} -{{- end }} -{{- range $key, $value := .Values.serverFiles }} - {{ $key }}: | -{{- if eq $key "prometheus.yml" }} - global: -{{ $root.Values.server.global | toYaml | trimSuffix "\n" | indent 6 }} -{{- if $root.Values.server.remoteWrite }} - remote_write: -{{- include "prometheus.server.remoteWrite" $root | nindent 4 }} -{{- end }} -{{- if $root.Values.server.remoteRead }} - remote_read: -{{- include "prometheus.server.remoteRead" $root | nindent 4 }} -{{- end }} -{{- if or $root.Values.server.tsdb $root.Values.server.exemplars }} - storage: -{{- if $root.Values.server.tsdb }} - tsdb: -{{ $root.Values.server.tsdb | toYaml | indent 8 }} -{{- end }} -{{- if $root.Values.server.exemplars }} - exemplars: -{{ $root.Values.server.exemplars | toYaml | indent 8 }} -{{- end }} -{{- end }} -{{- if $root.Values.scrapeConfigFiles }} - scrape_config_files: -{{ toYaml $root.Values.scrapeConfigFiles | indent 4 }} -{{- end }} -{{- end }} -{{- if eq $key "alerts" }} -{{- if and (not (empty $value)) (empty $value.groups) }} - groups: -{{- range $ruleKey, $ruleValue := $value }} - - name: {{ $ruleKey -}}.rules - rules: -{{ $ruleValue | toYaml | trimSuffix "\n" | indent 6 }} -{{- end }} -{{- else }} -{{ toYaml $value | indent 4 }} -{{- end }} -{{- else }} -{{ toYaml $value | default "{}" | indent 4 }} -{{- end }} -{{- if eq $key "prometheus.yml" -}} -{{- if $root.Values.extraScrapeConfigs }} -{{ tpl $root.Values.extraScrapeConfigs $root | indent 4 }} -{{- end -}} -{{- if or ($root.Values.alertmanager.enabled) ($root.Values.server.alertmanagers) }} - alerting: -{{- if $root.Values.alertRelabelConfigs }} -{{ $root.Values.alertRelabelConfigs | toYaml | trimSuffix "\n" | indent 6 }} -{{- end }} - alertmanagers: -{{- if $root.Values.server.alertmanagers }} -{{ toYaml $root.Values.server.alertmanagers | indent 8 }} -{{- else }} - - kubernetes_sd_configs: - - role: pod - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - {{- if $root.Values.alertmanager.prefixURL }} - path_prefix: {{ $root.Values.alertmanager.prefixURL }} - {{- end }} - relabel_configs: - - source_labels: [__meta_kubernetes_namespace] - regex: {{ $root.Release.Namespace }} - action: keep - - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_instance] - regex: {{ $root.Release.Name }} - action: keep - - source_labels: [__meta_kubernetes_pod_label_app_kubernetes_io_name] - regex: {{ default "alertmanager" $root.Values.alertmanager.nameOverride | trunc 63 | trimSuffix "-" }} - action: keep - - source_labels: [__meta_kubernetes_pod_container_port_number] - regex: "9093" - action: keep -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/deploy.yaml b/operations/deployment/helm/prometheus/templates/deploy.yaml deleted file mode 100644 index 93f93c4..0000000 --- a/operations/deployment/helm/prometheus/templates/deploy.yaml +++ /dev/null @@ -1,363 +0,0 @@ -{{- if not .Values.server.statefulSet.enabled -}} -apiVersion: {{ template "prometheus.deployment.apiVersion" . }} -kind: Deployment -metadata: -{{- if .Values.server.deploymentAnnotations }} - annotations: - {{ toYaml .Values.server.deploymentAnnotations | nindent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.server.fullname" . 
}} - namespace: {{ include "prometheus.namespace" . }} -spec: - selector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 6 }} - replicas: {{ .Values.server.replicaCount }} - revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }} - {{- if .Values.server.strategy }} - strategy: -{{ toYaml .Values.server.strategy | trim | indent 4 }} - {{ if eq .Values.server.strategy.type "Recreate" }}rollingUpdate: null{{ end }} -{{- end }} - template: - metadata: - {{- if .Values.server.podAnnotations }} - annotations: - {{ toYaml .Values.server.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 8 }} - {{- if .Values.server.podLabels}} - {{ toYaml .Values.server.podLabels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.server.priorityClassName }} - priorityClassName: "{{ .Values.server.priorityClassName }}" -{{- end }} -{{- if .Values.server.schedulerName }} - schedulerName: "{{ .Values.server.schedulerName }}" -{{- end }} -{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} - {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} - enableServiceLinks: true - {{- else }} - enableServiceLinks: false - {{- end }} -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} -{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} - automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} -{{- end }} - {{- if .Values.server.extraInitContainers }} - initContainers: -{{ toYaml .Values.server.extraInitContainers | indent 8 }} - {{- end }} - containers: - {{- if .Values.configmapReload.prometheus.enabled }} - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} - {{- if .Values.configmapReload.prometheus.image.digest }} - image: "{{ .Values.configmapReload.prometheus.image.repository }}@{{ .Values.configmapReload.prometheus.image.digest }}" - {{- else }} - image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" - {{- end }} - imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" - {{- with .Values.configmapReload.prometheus.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - args: - - --watched-dir=/etc/config - {{- $default_url := "http://127.0.0.1:9090/-/reload" }} - {{- with .Values.server.prefixURL }} - {{- $default_url = printf "http://127.0.0.1:9090%s/-/reload" . }} - {{- end }} - - --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }} - {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} - - --watched-dir={{ . }} - {{- end }} - {{- with .Values.configmapReload.env }} - env: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.configmapReload.prometheus.containerPort }} - ports: - - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} - {{- end }} - {{- with .Values.configmapReload.prometheus.resources }} - resources: - {{- toYaml . 
| nindent 12 }} - {{- end }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- with .Values.configmapReload.prometheus.extraVolumeMounts }} - {{ toYaml . | nindent 12 }} - {{- end }} - {{- end }} - - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} - {{- if .Values.server.image.digest }} - image: "{{ .Values.server.image.repository }}@{{ .Values.server.image.digest }}" - {{- else }} - image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion}}" - {{- end }} - imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" - {{- with .Values.server.command }} - command: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.server.env }} - env: -{{ toYaml .Values.server.env | indent 12}} - {{- end }} - args: - {{- if .Values.server.defaultFlagsOverride }} - {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} - {{- else }} - {{- if .Values.server.retention }} - - --storage.tsdb.retention.time={{ .Values.server.retention }} - {{- end }} - {{- if .Values.server.retentionSize }} - - --storage.tsdb.retention.size={{ .Values.server.retentionSize }} - {{- end }} - - --config.file={{ .Values.server.configPath }} - {{- if .Values.server.storagePath }} - - --storage.tsdb.path={{ .Values.server.storagePath }} - {{- else }} - - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} - {{- end }} - - --web.console.libraries=/etc/prometheus/console_libraries - - --web.console.templates=/etc/prometheus/consoles - {{- range .Values.server.extraFlags }} - - --{{ . }} - {{- end }} - {{- range $key, $value := .Values.server.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.server.prefixURL }} - - --web.route-prefix={{ .Values.server.prefixURL }} - {{- end }} - {{- if .Values.server.baseURL }} - - --web.external-url={{ .Values.server.baseURL }} - {{- end }} - {{- end }} - ports: - - containerPort: 9090 - {{- if .Values.server.portName }} - name: {{ .Values.server.portName }} - {{- end }} - {{- if .Values.server.hostPort }} - hostPort: {{ .Values.server.hostPort }} - {{- end }} - readinessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/ready - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- with .Values.server.probeHeaders }} - httpHeaders: -{{- toYaml . | nindent 14 }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} - periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} - failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} - successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} - livenessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- with .Values.server.probeHeaders }} - httpHeaders: -{{- toYaml . 
| nindent 14 }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} - periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} - failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} - successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} - {{- if .Values.server.startupProbe.enabled }} - startupProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- if .Values.server.probeHeaders }} - httpHeaders: - {{- range .Values.server.probeHeaders}} - - name: {{ .name }} - value: {{ .value }} - {{- end }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} - periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} - timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} - {{- end }} - {{- with .Values.server.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: storage-volume - mountPath: {{ .Values.server.persistentVolume.mountPath }} - subPath: "{{ .Values.server.persistentVolume.subPath }}" - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.server.extraVolumeMounts }} - {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- with .Values.server.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.server.sidecarContainers }} - {{- range $name, $spec := .Values.server.sidecarContainers }} - - name: {{ $name }} - {{- if kindIs "string" $spec }} - {{- tpl $spec $ | nindent 10 }} - {{- else }} - {{- toYaml $spec | nindent 10 }} - {{- end }} - {{- end }} - {{- end }} - {{- if .Values.server.hostNetwork }} - hostNetwork: true - dnsPolicy: ClusterFirstWithHostNet - {{- else }} - dnsPolicy: {{ .Values.server.dnsPolicy }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.server.nodeSelector }} - nodeSelector: -{{ toYaml .Values.server.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.server.hostAliases }} - hostAliases: -{{ toYaml .Values.server.hostAliases | indent 8 }} - {{- end }} - {{- if .Values.server.dnsConfig }} - dnsConfig: -{{ toYaml .Values.server.dnsConfig | indent 8 }} - {{- end }} - {{- with .Values.server.securityContext }} - securityContext: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- if .Values.server.tolerations }} - tolerations: -{{ toYaml .Values.server.tolerations | indent 8 }} - {{- end }} - {{- if .Values.server.affinity }} - affinity: -{{ toYaml .Values.server.affinity | indent 8 }} - {{- end }} - {{- with .Values.server.topologySpreadConstraints }} - topologySpreadConstraints: - {{- toYaml . 
| nindent 8 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} - volumes: - - name: config-volume - {{- if empty .Values.server.configFromSecret }} - configMap: - name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} - {{- else }} - secret: - secretName: {{ .Values.server.configFromSecret }} - {{- end }} - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - hostPath: - path: {{ .hostPath }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} -{{- if .Values.server.extraVolumes }} -{{ toYaml .Values.server.extraVolumes | indent 8}} -{{- end }} - - name: storage-volume - {{- if .Values.server.persistentVolume.enabled }} - persistentVolumeClaim: - claimName: {{ if .Values.server.persistentVolume.existingClaim }}{{ .Values.server.persistentVolume.existingClaim }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} - {{- else }} - emptyDir: - {{- if .Values.server.emptyDir.sizeLimit }} - sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} - {{- else }} - {} - {{- end -}} - {{- end -}} -{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/extra-manifests.yaml b/operations/deployment/helm/prometheus/templates/extra-manifests.yaml deleted file mode 100644 index 2b21b71..0000000 --- a/operations/deployment/helm/prometheus/templates/extra-manifests.yaml +++ /dev/null @@ -1,4 +0,0 @@ -{{ range .Values.extraManifests }} ---- -{{ tpl . $ }} -{{ end }} diff --git a/operations/deployment/helm/prometheus/templates/headless-svc.yaml b/operations/deployment/helm/prometheus/templates/headless-svc.yaml deleted file mode 100644 index df9db99..0000000 --- a/operations/deployment/helm/prometheus/templates/headless-svc.yaml +++ /dev/null @@ -1,35 +0,0 @@ -{{- if .Values.server.statefulSet.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.server.statefulSet.headless.annotations }} - annotations: -{{ toYaml .Values.server.statefulSet.headless.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- if .Values.server.statefulSet.headless.labels }} -{{ toYaml .Values.server.statefulSet.headless.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.server.fullname" . }}-headless - namespace: {{ include "prometheus.namespace" . 
}} -spec: - clusterIP: None - ports: - - name: http - port: {{ .Values.server.statefulSet.headless.servicePort }} - protocol: TCP - targetPort: 9090 - {{- if .Values.server.statefulSet.headless.gRPC.enabled }} - - name: grpc - port: {{ .Values.server.statefulSet.headless.gRPC.servicePort }} - protocol: TCP - targetPort: 10901 - {{- if .Values.server.statefulSet.headless.gRPC.nodePort }} - nodePort: {{ .Values.server.statefulSet.headless.gRPC.nodePort }} - {{- end }} - {{- end }} - - selector: - {{- include "prometheus.server.matchLabels" . | nindent 4 }} -{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/ingress.yaml b/operations/deployment/helm/prometheus/templates/ingress.yaml deleted file mode 100644 index 84341a9..0000000 --- a/operations/deployment/helm/prometheus/templates/ingress.yaml +++ /dev/null @@ -1,57 +0,0 @@ -{{- if .Values.server.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "ingress.supportsIngressClassName" .) "true" -}} -{{- $ingressSupportsPathType := eq (include "ingress.supportsPathType" .) "true" -}} -{{- $releaseName := .Release.Name -}} -{{- $serviceName := include "prometheus.server.fullname" . }} -{{- $servicePort := .Values.server.ingress.servicePort | default .Values.server.service.servicePort -}} -{{- $ingressPath := .Values.server.ingress.path -}} -{{- $ingressPathType := .Values.server.ingress.pathType -}} -{{- $extraPaths := .Values.server.ingress.extraPaths -}} -apiVersion: {{ template "ingress.apiVersion" . }} -kind: Ingress -metadata: -{{- if .Values.server.ingress.annotations }} - annotations: -{{ toYaml .Values.server.ingress.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- range $key, $value := .Values.server.ingress.extraLabels }} - {{ $key }}: {{ $value }} -{{- end }} - name: {{ template "prometheus.server.fullname" . }} - namespace: {{ include "prometheus.namespace" . }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.server.ingress.ingressClassName }} - ingressClassName: {{ .Values.server.ingress.ingressClassName }} - {{- end }} - rules: - {{- range .Values.server.ingress.hosts }} - {{- $url := splitList "/" . }} - - host: {{ first $url }} - http: - paths: -{{ if $extraPaths }} -{{ toYaml $extraPaths | indent 10 }} -{{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $serviceName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $serviceName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end -}} -{{- if .Values.server.ingress.tls }} - tls: -{{ toYaml .Values.server.ingress.tls | indent 4 }} - {{- end -}} -{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/network-policy.yaml b/operations/deployment/helm/prometheus/templates/network-policy.yaml deleted file mode 100644 index 3254ffc..0000000 --- a/operations/deployment/helm/prometheus/templates/network-policy.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -apiVersion: {{ template "prometheus.networkPolicy.apiVersion" . }} -kind: NetworkPolicy -metadata: - name: {{ template "prometheus.server.fullname" . }} - namespace: {{ include "prometheus.namespace" . }} - labels: - {{- include "prometheus.server.labels" . 
| nindent 4 }} -spec: - podSelector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 6 }} - ingress: - - ports: - - port: 9090 -{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/pdb.yaml b/operations/deployment/helm/prometheus/templates/pdb.yaml deleted file mode 100644 index 7ffe673..0000000 --- a/operations/deployment/helm/prometheus/templates/pdb.yaml +++ /dev/null @@ -1,15 +0,0 @@ -{{- if .Values.server.podDisruptionBudget.enabled }} -{{- $pdbSpec := omit .Values.server.podDisruptionBudget "enabled" }} -apiVersion: {{ template "prometheus.podDisruptionBudget.apiVersion" . }} -kind: PodDisruptionBudget -metadata: - name: {{ template "prometheus.server.fullname" . }} - namespace: {{ include "prometheus.namespace" . }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -spec: - selector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 6 }} - {{- toYaml $pdbSpec | nindent 2 }} -{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/psp.yaml b/operations/deployment/helm/prometheus/templates/psp.yaml deleted file mode 100644 index 5776e25..0000000 --- a/operations/deployment/helm/prometheus/templates/psp.yaml +++ /dev/null @@ -1,53 +0,0 @@ -{{- if and .Values.rbac.create .Values.podSecurityPolicy.enabled }} -{{- if .Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy" }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ template "prometheus.server.fullname" . }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - {{- with .Values.server.podSecurityPolicy.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - allowedCapabilities: - - 'CHOWN' - volumes: - - 'configMap' - - 'persistentVolumeClaim' - - 'emptyDir' - - 'secret' - - 'hostPath' - allowedHostPaths: - - pathPrefix: /etc - readOnly: true - - pathPrefix: {{ .Values.server.persistentVolume.mountPath }} - {{- range .Values.server.extraHostPathMounts }} - - pathPrefix: {{ .hostPath }} - readOnly: {{ .readOnly }} - {{- end }} - hostNetwork: false - hostPID: false - hostIPC: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/pvc.yaml b/operations/deployment/helm/prometheus/templates/pvc.yaml deleted file mode 100644 index a9dc4fc..0000000 --- a/operations/deployment/helm/prometheus/templates/pvc.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{- if not .Values.server.statefulSet.enabled -}} -{{- if .Values.server.persistentVolume.enabled -}} -{{- if not .Values.server.persistentVolume.existingClaim -}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - {{- if .Values.server.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.server.persistentVolume.annotations | indent 4 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - {{- with .Values.server.persistentVolume.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ template "prometheus.server.fullname" . }} - namespace: {{ include "prometheus.namespace" . 
}} -spec: - accessModes: -{{ toYaml .Values.server.persistentVolume.accessModes | indent 4 }} -{{- if .Values.server.persistentVolume.storageClass }} -{{- if (eq "-" .Values.server.persistentVolume.storageClass) }} - storageClassName: "" -{{- else }} - storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" -{{- end }} -{{- end }} -{{- if .Values.server.persistentVolume.volumeBindingMode }} - volumeBindingMode: "{{ .Values.server.persistentVolume.volumeBindingMode }}" -{{- end }} - resources: - requests: - storage: "{{ .Values.server.persistentVolume.size }}" -{{- if .Values.server.persistentVolume.selector }} - selector: - {{- toYaml .Values.server.persistentVolume.selector | nindent 4 }} -{{- end -}} -{{- if .Values.server.persistentVolume.volumeName }} - volumeName: "{{ .Values.server.persistentVolume.volumeName }}" -{{- end -}} -{{- end -}} -{{- end -}} -{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/rolebinding.yaml b/operations/deployment/helm/prometheus/templates/rolebinding.yaml deleted file mode 100644 index 721b388..0000000 --- a/operations/deployment/helm/prometheus/templates/rolebinding.yaml +++ /dev/null @@ -1,18 +0,0 @@ -{{- range include "prometheus.namespaces" . | fromJsonArray }} ---- -apiVersion: {{ template "rbac.apiVersion" $ }} -kind: RoleBinding -metadata: - labels: - {{- include "prometheus.server.labels" $ | nindent 4 }} - name: {{ template "prometheus.server.fullname" $ }} - namespace: {{ . }} -subjects: - - kind: ServiceAccount - name: {{ template "prometheus.serviceAccountName.server" $ }} - namespace: {{ include "prometheus.namespace" $ }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: ClusterRole - name: {{ $.Values.server.useExistingClusterRoleName }} -{{ end -}} diff --git a/operations/deployment/helm/prometheus/templates/service.yaml b/operations/deployment/helm/prometheus/templates/service.yaml deleted file mode 100644 index 069f327..0000000 --- a/operations/deployment/helm/prometheus/templates/service.yaml +++ /dev/null @@ -1,63 +0,0 @@ -{{- if .Values.server.service.enabled -}} -apiVersion: v1 -kind: Service -metadata: -{{- if .Values.server.service.annotations }} - annotations: -{{ toYaml .Values.server.service.annotations | indent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -{{- if .Values.server.service.labels }} -{{ toYaml .Values.server.service.labels | indent 4 }} -{{- end }} - name: {{ template "prometheus.server.fullname" . }} - namespace: {{ include "prometheus.namespace" . 
}} -spec: -{{- if .Values.server.service.clusterIP }} - clusterIP: {{ .Values.server.service.clusterIP }} -{{- end }} -{{- if .Values.server.service.externalIPs }} - externalIPs: -{{ toYaml .Values.server.service.externalIPs | indent 4 }} -{{- end }} -{{- if .Values.server.service.loadBalancerIP }} - loadBalancerIP: {{ .Values.server.service.loadBalancerIP }} -{{- end }} -{{- if .Values.server.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- range $cidr := .Values.server.service.loadBalancerSourceRanges }} - - {{ $cidr }} - {{- end }} -{{- end }} - ports: - - name: http - port: {{ .Values.server.service.servicePort }} - protocol: TCP - targetPort: 9090 - {{- if .Values.server.service.nodePort }} - nodePort: {{ .Values.server.service.nodePort }} - {{- end }} - {{- if .Values.server.service.gRPC.enabled }} - - name: grpc - port: {{ .Values.server.service.gRPC.servicePort }} - protocol: TCP - targetPort: 10901 - {{- if .Values.server.service.gRPC.nodePort }} - nodePort: {{ .Values.server.service.gRPC.nodePort }} - {{- end }} - {{- end }} -{{- if .Values.server.service.additionalPorts }} -{{ toYaml .Values.server.service.additionalPorts | indent 4 }} -{{- end }} - selector: - {{- if and .Values.server.statefulSet.enabled .Values.server.service.statefulsetReplica.enabled }} - statefulset.kubernetes.io/pod-name: {{ template "prometheus.server.fullname" . }}-{{ .Values.server.service.statefulsetReplica.replica }} - {{- else -}} - {{- include "prometheus.server.matchLabels" . | nindent 4 }} -{{- if .Values.server.service.sessionAffinity }} - sessionAffinity: {{ .Values.server.service.sessionAffinity }} -{{- end }} - {{- end }} - type: "{{ .Values.server.service.type }}" -{{- end -}} diff --git a/operations/deployment/helm/prometheus/templates/serviceaccount.yaml b/operations/deployment/helm/prometheus/templates/serviceaccount.yaml deleted file mode 100644 index 6d5ab0c..0000000 --- a/operations/deployment/helm/prometheus/templates/serviceaccount.yaml +++ /dev/null @@ -1,16 +0,0 @@ -{{- if .Values.serviceAccounts.server.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - name: {{ template "prometheus.serviceAccountName.server" . }} - namespace: {{ include "prometheus.namespace" . }} - annotations: -{{ toYaml .Values.serviceAccounts.server.annotations | indent 4 }} -{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} -automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} -{{- else if kindIs "bool" .Values.serviceAccounts.server.automountServiceAccountToken }} -automountServiceAccountToken: {{ .Values.serviceAccounts.server.automountServiceAccountToken }} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/sts.yaml b/operations/deployment/helm/prometheus/templates/sts.yaml deleted file mode 100644 index 63851c4..0000000 --- a/operations/deployment/helm/prometheus/templates/sts.yaml +++ /dev/null @@ -1,385 +0,0 @@ -{{- if .Values.server.statefulSet.enabled -}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: -{{- if .Values.server.statefulSet.annotations }} - annotations: - {{ toYaml .Values.server.statefulSet.annotations | nindent 4 }} -{{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} - {{- if .Values.server.statefulSet.labels}} - {{ toYaml .Values.server.statefulSet.labels | nindent 4 }} - {{- end}} - name: {{ template "prometheus.server.fullname" . 
}} - namespace: {{ include "prometheus.namespace" . }} -spec: - {{- if semverCompare ">= 1.27.x" (include "prometheus.kubeVersion" .) }} - persistentVolumeClaimRetentionPolicy: - whenDeleted: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsDelete }} - whenScaled: {{ ternary "Delete" "Retain" .Values.server.statefulSet.pvcDeleteOnStsScale }} - {{- end }} - serviceName: {{ template "prometheus.server.fullname" . }}-headless - selector: - matchLabels: - {{- include "prometheus.server.matchLabels" . | nindent 6 }} - replicas: {{ .Values.server.replicaCount }} - revisionHistoryLimit: {{ .Values.server.revisionHistoryLimit }} - podManagementPolicy: {{ .Values.server.statefulSet.podManagementPolicy }} - template: - metadata: - {{- if .Values.server.podAnnotations }} - annotations: - {{ toYaml .Values.server.podAnnotations | nindent 8 }} - {{- end }} - labels: - {{- include "prometheus.server.labels" . | nindent 8 }} - {{- if .Values.server.podLabels}} - {{ toYaml .Values.server.podLabels | nindent 8 }} - {{- end}} - spec: -{{- if .Values.server.priorityClassName }} - priorityClassName: "{{ .Values.server.priorityClassName }}" -{{- end }} -{{- if .Values.server.schedulerName }} - schedulerName: "{{ .Values.server.schedulerName }}" -{{- end }} -{{- if semverCompare ">=1.13-0" .Capabilities.KubeVersion.GitVersion }} - {{- if or (.Values.server.enableServiceLinks) (eq (.Values.server.enableServiceLinks | toString) "") }} - enableServiceLinks: true - {{- else }} - enableServiceLinks: false - {{- end }} -{{- end }} - serviceAccountName: {{ template "prometheus.serviceAccountName.server" . }} -{{- if kindIs "bool" .Values.server.automountServiceAccountToken }} - automountServiceAccountToken: {{ .Values.server.automountServiceAccountToken }} -{{- end }} - {{- if .Values.server.extraInitContainers }} - initContainers: -{{ toYaml .Values.server.extraInitContainers | indent 8 }} - {{- end }} - containers: - {{- if .Values.configmapReload.prometheus.enabled }} - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }}-{{ .Values.configmapReload.prometheus.name }} - {{- if .Values.configmapReload.prometheus.image.digest }} - image: "{{ .Values.configmapReload.prometheus.image.repository }}@{{ .Values.configmapReload.prometheus.image.digest }}" - {{- else }} - image: "{{ .Values.configmapReload.prometheus.image.repository }}:{{ .Values.configmapReload.prometheus.image.tag }}" - {{- end }} - imagePullPolicy: "{{ .Values.configmapReload.prometheus.image.pullPolicy }}" - {{- with .Values.configmapReload.prometheus.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - args: - - --watched-dir=/etc/config - {{- $default_url := "http://127.0.0.1:9090/-/reload" }} - {{- with .Values.server.prefixURL }} - {{- $default_url = printf "http://127.0.0.1:9090%s/-/reload" . }} - {{- end }} - - --reload-url={{ default $default_url .Values.configmapReload.reloadUrl }} - {{- range $key, $value := .Values.configmapReload.prometheus.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraVolumeDirs }} - - --watched-dir={{ . }} - {{- end }} - {{- with .Values.configmapReload.env }} - env: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.configmapReload.prometheus.containerPort }} - ports: - - containerPort: {{ .Values.configmapReload.prometheus.containerPort }} - {{- end }} - {{- with .Values.configmapReload.prometheus.resources }} - resources: - {{- toYaml . 
| nindent 12 }} - {{- end }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - readOnly: true - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- end }} - - - name: {{ template "prometheus.name" . }}-{{ .Values.server.name }} - {{- if .Values.server.image.digest }} - image: "{{ .Values.server.image.repository }}@{{ .Values.server.image.digest }}" - {{- else }} - image: "{{ .Values.server.image.repository }}:{{ .Values.server.image.tag | default .Chart.AppVersion }}" - {{- end }} - imagePullPolicy: "{{ .Values.server.image.pullPolicy }}" - {{- with .Values.server.command }} - command: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.server.env }} - env: -{{ toYaml .Values.server.env | indent 12}} - {{- end }} - args: - {{- if .Values.server.defaultFlagsOverride }} - {{ toYaml .Values.server.defaultFlagsOverride | nindent 12}} - {{- else }} - {{- if .Values.server.prefixURL }} - - --web.route-prefix={{ .Values.server.prefixURL }} - {{- end }} - {{- if .Values.server.retention }} - - --storage.tsdb.retention.time={{ .Values.server.retention }} - {{- end }} - {{- if .Values.server.retentionSize }} - - --storage.tsdb.retention.size={{ .Values.server.retentionSize }} - {{- end }} - - --config.file={{ .Values.server.configPath }} - {{- if .Values.server.storagePath }} - - --storage.tsdb.path={{ .Values.server.storagePath }} - {{- else }} - - --storage.tsdb.path={{ .Values.server.persistentVolume.mountPath }} - {{- end }} - - --web.console.libraries=/etc/prometheus/console_libraries - - --web.console.templates=/etc/prometheus/consoles - {{- range .Values.server.extraFlags }} - - --{{ . }} - {{- end }} - {{- range $key, $value := .Values.server.extraArgs }} - - --{{ $key }}={{ $value }} - {{- end }} - {{- if .Values.server.baseURL }} - - --web.external-url={{ .Values.server.baseURL }} - {{- end }} - {{- end }} - ports: - - containerPort: 9090 - {{- if .Values.server.portName }} - name: {{ .Values.server.portName }} - {{- end }} - {{- if .Values.server.hostPort }} - hostPort: {{ .Values.server.hostPort }} - {{- end }} - readinessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/ready - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- with .Values.server.probeHeaders }} - httpHeaders: -{{- toYaml . | nindent 14 }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.readinessProbeInitialDelay }} - periodSeconds: {{ .Values.server.readinessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.readinessProbeTimeout }} - failureThreshold: {{ .Values.server.readinessProbeFailureThreshold }} - successThreshold: {{ .Values.server.readinessProbeSuccessThreshold }} - livenessProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- with .Values.server.probeHeaders }} - httpHeaders: -{{- toYaml . 
| nindent 14 }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - initialDelaySeconds: {{ .Values.server.livenessProbeInitialDelay }} - periodSeconds: {{ .Values.server.livenessProbePeriodSeconds }} - timeoutSeconds: {{ .Values.server.livenessProbeTimeout }} - failureThreshold: {{ .Values.server.livenessProbeFailureThreshold }} - successThreshold: {{ .Values.server.livenessProbeSuccessThreshold }} - {{- if .Values.server.startupProbe.enabled }} - startupProbe: - {{- if not .Values.server.tcpSocketProbeEnabled }} - httpGet: - path: {{ .Values.server.prefixURL }}/-/healthy - port: 9090 - scheme: {{ .Values.server.probeScheme }} - {{- if .Values.server.probeHeaders }} - httpHeaders: - {{- range .Values.server.probeHeaders}} - - name: {{ .name }} - value: {{ .value }} - {{- end }} - {{- end }} - {{- else }} - tcpSocket: - port: 9090 - {{- end }} - failureThreshold: {{ .Values.server.startupProbe.failureThreshold }} - periodSeconds: {{ .Values.server.startupProbe.periodSeconds }} - timeoutSeconds: {{ .Values.server.startupProbe.timeoutSeconds }} - {{- end }} - {{- with .Values.server.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - name: config-volume - mountPath: /etc/config - - name: {{ ternary .Values.server.persistentVolume.statefulSetNameOverride "storage-volume" (and .Values.server.persistentVolume.enabled (not (empty .Values.server.persistentVolume.statefulSetNameOverride))) }} - mountPath: {{ .Values.server.persistentVolume.mountPath }} - subPath: "{{ .Values.server.persistentVolume.subPath }}" - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath }} - readOnly: {{ .readOnly }} - {{- end }} - {{- if .Values.server.extraVolumeMounts }} - {{ toYaml .Values.server.extraVolumeMounts | nindent 12 }} - {{- end }} - {{- with .Values.server.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- if .Values.server.sidecarContainers }} - {{- range $name, $spec := .Values.server.sidecarContainers }} - - name: {{ $name }} - {{- if kindIs "string" $spec }} - {{- tpl $spec $ | nindent 10 }} - {{- else }} - {{- toYaml $spec | nindent 10 }} - {{- end }} - {{- end }} - {{- end }} - hostNetwork: {{ .Values.server.hostNetwork }} - {{- if .Values.server.dnsPolicy }} - dnsPolicy: {{ .Values.server.dnsPolicy }} - {{- end }} - {{- if .Values.imagePullSecrets }} - imagePullSecrets: -{{ toYaml .Values.imagePullSecrets | indent 8 }} - {{- end }} - {{- if .Values.server.nodeSelector }} - nodeSelector: -{{ toYaml .Values.server.nodeSelector | indent 8 }} - {{- end }} - {{- if .Values.server.hostAliases }} - hostAliases: -{{ toYaml .Values.server.hostAliases | indent 8 }} - {{- end }} - {{- if .Values.server.dnsConfig }} - dnsConfig: -{{ toYaml .Values.server.dnsConfig | indent 8 }} - {{- end }} - {{- with .Values.server.securityContext }} - securityContext: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- if .Values.server.tolerations }} - tolerations: -{{ toYaml .Values.server.tolerations | indent 8 }} - {{- end }} - {{- if .Values.server.affinity }} - affinity: -{{ toYaml .Values.server.affinity | indent 8 }} - {{- end }} - {{- with .Values.server.topologySpreadConstraints }} - topologySpreadConstraints: - {{- toYaml . | nindent 8 }} - {{- end }} - terminationGracePeriodSeconds: {{ .Values.server.terminationGracePeriodSeconds }} - volumes: - - name: config-volume - {{- if empty .Values.server.configFromSecret }} - configMap: - name: {{ if .Values.server.configMapOverrideName }}{{ .Release.Name }}-{{ .Values.server.configMapOverrideName }}{{- else }}{{ template "prometheus.server.fullname" . }}{{- end }} - {{- else }} - secret: - secretName: {{ .Values.server.configFromSecret }} - {{- end }} - {{- range .Values.server.extraHostPathMounts }} - - name: {{ .name }} - hostPath: - path: {{ .hostPath }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ $.Values.configmapReload.prometheus.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraConfigmapMounts }} - - name: {{ $.Values.server.name }}-{{ .name }} - configMap: - name: {{ .configMap }} - {{- end }} - {{- range .Values.server.extraSecretMounts }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} - {{- range .Values.configmapReload.prometheus.extraConfigmapMounts }} - - name: {{ .name }} - configMap: - name: {{ .configMap }} - {{- with .optional }} - optional: {{ . }} - {{- end }} - {{- end }} -{{- if .Values.server.extraVolumes }} -{{ toYaml .Values.server.extraVolumes | indent 8}} -{{- end }} -{{- if .Values.server.persistentVolume.enabled }} - volumeClaimTemplates: - - metadata: - name: {{ .Values.server.persistentVolume.statefulSetNameOverride | default "storage-volume" }} - {{- if .Values.server.persistentVolume.annotations }} - annotations: -{{ toYaml .Values.server.persistentVolume.annotations | indent 10 }} - {{- end }} - {{- if .Values.server.persistentVolume.labels }} - labels: -{{ toYaml .Values.server.persistentVolume.labels | indent 10 }} - {{- end }} - spec: - accessModes: -{{ toYaml .Values.server.persistentVolume.accessModes | indent 10 }} - resources: - requests: - storage: "{{ .Values.server.persistentVolume.size }}" - {{- if .Values.server.persistentVolume.storageClass }} - {{- if (eq "-" .Values.server.persistentVolume.storageClass) }} - storageClassName: "" - {{- else }} - storageClassName: "{{ .Values.server.persistentVolume.storageClass }}" - {{- end }} - {{- end }} -{{- else }} - - name: storage-volume - emptyDir: - {{- if .Values.server.emptyDir.sizeLimit }} - sizeLimit: {{ .Values.server.emptyDir.sizeLimit }} - {{- else }} - {} - {{- end -}} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/prometheus/templates/vpa.yaml b/operations/deployment/helm/prometheus/templates/vpa.yaml deleted file mode 100644 index cd07ad8..0000000 --- a/operations/deployment/helm/prometheus/templates/vpa.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if .Values.server.verticalAutoscaler.enabled -}} -{{- if .Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler" }} -apiVersion: autoscaling.k8s.io/v1 -{{- else }} -apiVersion: autoscaling.k8s.io/v1beta2 -{{- end }} -kind: VerticalPodAutoscaler -metadata: - name: {{ template "prometheus.server.fullname" . 
}}-vpa - namespace: {{ include "prometheus.namespace" . }} - labels: - {{- include "prometheus.server.labels" . | nindent 4 }} -spec: - targetRef: - apiVersion: "apps/v1" -{{- if .Values.server.statefulSet.enabled }} - kind: StatefulSet -{{- else }} - kind: Deployment -{{- end }} - name: {{ template "prometheus.server.fullname" . }} - updatePolicy: - updateMode: {{ .Values.server.verticalAutoscaler.updateMode | default "Off" | quote }} - resourcePolicy: - containerPolicies: {{ .Values.server.verticalAutoscaler.containerPolicies | default list | toYaml | trim | nindent 4 }} -{{- end -}} diff --git a/operations/deployment/helm/prometheus/values.schema.json b/operations/deployment/helm/prometheus/values.schema.json deleted file mode 100644 index 1828064..0000000 --- a/operations/deployment/helm/prometheus/values.schema.json +++ /dev/null @@ -1,738 +0,0 @@ -{ - "$schema": "http://json-schema.org/schema#", - "type": "object", - "properties": { - "alertRelabelConfigs": { - "type": "object" - }, - "alertmanager": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "persistence": { - "type": "object", - "properties": { - "size": { - "type": "string" - } - } - }, - "podSecurityContext": { - "type": "object", - "properties": { - "fsGroup": { - "type": "integer" - }, - "runAsGroup": { - "type": "integer" - }, - "runAsNonRoot": { - "type": "boolean" - }, - "runAsUser": { - "type": "integer" - } - } - } - } - }, - "configmapReload": { - "type": "object", - "properties": { - "env": { - "type": "array" - }, - "prometheus": { - "type": "object", - "properties": { - "containerSecurityContext": { - "type": "object" - }, - "enabled": { - "type": "boolean" - }, - "extraArgs": { - "type": "object" - }, - "extraConfigmapMounts": { - "type": "array" - }, - "extraVolumeDirs": { - "type": "array" - }, - "extraVolumeMounts": { - "type": "array" - }, - "image": { - "type": "object", - "properties": { - "digest": { - "type": "string" - }, - "pullPolicy": { - "type": "string" - }, - "repository": { - "type": "string" - }, - "tag": { - "type": "string" - } - } - }, - "name": { - "type": "string" - }, - "resources": { - "type": "object" - } - } - }, - "reloadUrl": { - "type": "string" - } - } - }, - "extraManifests": { - "type": "array" - }, - "extraScrapeConfigs": { - "type": "string" - }, - "forceNamespace": { - "type": "string" - }, - "imagePullSecrets": { - "type": "array" - }, - "kube-state-metrics": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - } - } - }, - "networkPolicy": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - } - } - }, - "podSecurityPolicy": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - } - } - }, - "prometheus-node-exporter": { - "type": "object", - "properties": { - "containerSecurityContext": { - "type": "object", - "properties": { - "allowPrivilegeEscalation": { - "type": "boolean" - } - } - }, - "enabled": { - "type": "boolean" - }, - "rbac": { - "type": "object", - "properties": { - "pspEnabled": { - "type": "boolean" - } - } - } - } - }, - "prometheus-pushgateway": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "serviceAnnotations": { - "type": "object", - "properties": { - "prometheus.io/probe": { - "type": "string" - } - } - } - } - }, - "rbac": { - "type": "object", - "properties": { - "create": { - "type": "boolean" - } - } - }, - "ruleFiles": { - "type": "object" - }, - "server": { - "type": "object", - "properties": { - "affinity": { - 
"type": "object" - }, - "alertmanagers": { - "type": "array" - }, - "baseURL": { - "type": "string" - }, - "clusterRoleNameOverride": { - "type": "string" - }, - "command": { - "type": "array" - }, - "configMapOverrideName": { - "type": "string" - }, - "configPath": { - "type": "string" - }, - "containerSecurityContext": { - "type": "object" - }, - "defaultFlagsOverride": { - "type": "array" - }, - "deploymentAnnotations": { - "type": "object" - }, - "dnsConfig": { - "type": "object" - }, - "dnsPolicy": { - "type": "string" - }, - "emptyDir": { - "type": "object", - "properties": { - "sizeLimit": { - "type": "string" - } - } - }, - "enableServiceLinks": { - "type": "boolean" - }, - "env": { - "type": "array" - }, - "exemplars": { - "type": "object" - }, - "extraArgs": { - "type": "object" - }, - "extraConfigmapLabels": { - "type": "object" - }, - "extraConfigmapMounts": { - "type": "array" - }, - "extraFlags": { - "type": "array", - "items": { - "type": "string" - } - }, - "extraHostPathMounts": { - "type": "array" - }, - "extraInitContainers": { - "type": "array" - }, - "extraSecretMounts": { - "type": "array" - }, - "extraVolumeMounts": { - "type": "array" - }, - "extraVolumes": { - "type": "array" - }, - "global": { - "type": "object", - "properties": { - "evaluation_interval": { - "type": "string" - }, - "scrape_interval": { - "type": "string" - }, - "scrape_timeout": { - "type": "string" - } - } - }, - "hostAliases": { - "type": "array" - }, - "hostNetwork": { - "type": "boolean" - }, - "image": { - "type": "object", - "properties": { - "digest": { - "type": "string" - }, - "pullPolicy": { - "type": "string" - }, - "repository": { - "type": "string" - }, - "tag": { - "type": "string" - } - } - }, - "ingress": { - "type": "object", - "properties": { - "annotations": { - "type": "object" - }, - "enabled": { - "type": "boolean" - }, - "extraLabels": { - "type": "object" - }, - "extraPaths": { - "type": "array" - }, - "hosts": { - "type": "array" - }, - "path": { - "type": "string" - }, - "pathType": { - "type": "string" - }, - "tls": { - "type": "array" - } - } - }, - "livenessProbeFailureThreshold": { - "type": "integer" - }, - "livenessProbeInitialDelay": { - "type": "integer" - }, - "livenessProbePeriodSeconds": { - "type": "integer" - }, - "livenessProbeSuccessThreshold": { - "type": "integer" - }, - "livenessProbeTimeout": { - "type": "integer" - }, - "name": { - "type": "string" - }, - "nodeSelector": { - "type": "object" - }, - "persistentVolume": { - "type": "object", - "properties": { - "accessModes": { - "type": "array", - "items": { - "type": "string" - } - }, - "annotations": { - "type": "object" - }, - "enabled": { - "type": "boolean" - }, - "existingClaim": { - "type": "string" - }, - "labels": { - "type": "object" - }, - "mountPath": { - "type": "string" - }, - "size": { - "type": "string" - }, - "statefulSetNameOverride": { - "type": "string" - }, - "subPath": { - "type": "string" - } - } - }, - "podAnnotations": { - "type": "object" - }, - "podDisruptionBudget": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "maxUnavailable": { - "type": [ - "string", - "integer" - ] - } - } - }, - "podLabels": { - "type": "object" - }, - "podSecurityPolicy": { - "type": "object", - "properties": { - "annotations": { - "type": "object" - } - } - }, - "portName": { - "type": "string" - }, - "prefixURL": { - "type": "string" - }, - "priorityClassName": { - "type": "string" - }, - "probeHeaders": { - "type": "array" - }, - "probeScheme": { - "type": 
"string" - }, - "readinessProbeFailureThreshold": { - "type": "integer" - }, - "readinessProbeInitialDelay": { - "type": "integer" - }, - "readinessProbePeriodSeconds": { - "type": "integer" - }, - "readinessProbeSuccessThreshold": { - "type": "integer" - }, - "readinessProbeTimeout": { - "type": "integer" - }, - "releaseNamespace": { - "type": "boolean" - }, - "remoteRead": { - "type": "array" - }, - "remoteWrite": { - "type": "array" - }, - "replicaCount": { - "type": "integer" - }, - "resources": { - "type": "object" - }, - "retention": { - "type": "string" - }, - "retentionSize": { - "type": "string" - }, - "revisionHistoryLimit": { - "type": "integer" - }, - "securityContext": { - "type": "object", - "properties": { - "fsGroup": { - "type": "integer" - }, - "runAsGroup": { - "type": "integer" - }, - "runAsNonRoot": { - "type": "boolean" - }, - "runAsUser": { - "type": "integer" - } - } - }, - "service": { - "type": "object", - "properties": { - "additionalPorts": { - "type": "array" - }, - "annotations": { - "type": "object" - }, - "clusterIP": { - "type": "string" - }, - "enabled": { - "type": "boolean" - }, - "externalIPs": { - "type": "array" - }, - "gRPC": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "servicePort": { - "type": "integer" - } - } - }, - "labels": { - "type": "object" - }, - "loadBalancerIP": { - "type": "string" - }, - "loadBalancerSourceRanges": { - "type": "array" - }, - "servicePort": { - "type": "integer" - }, - "sessionAffinity": { - "type": "string" - }, - "statefulsetReplica": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "replica": { - "type": "integer" - } - } - }, - "type": { - "type": "string" - } - } - }, - "sidecarContainers": { - "type": "object" - }, - "sidecarTemplateValues": { - "type": "object" - }, - "startupProbe": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "failureThreshold": { - "type": "integer" - }, - "periodSeconds": { - "type": "integer" - }, - "timeoutSeconds": { - "type": "integer" - } - } - }, - "statefulSet": { - "type": "object", - "properties": { - "annotations": { - "type": "object" - }, - "enabled": { - "type": "boolean" - }, - "headless": { - "type": "object", - "properties": { - "annotations": { - "type": "object" - }, - "gRPC": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - }, - "servicePort": { - "type": "integer" - } - } - }, - "labels": { - "type": "object" - }, - "servicePort": { - "type": "integer" - } - } - }, - "labels": { - "type": "object" - }, - "podManagementPolicy": { - "type": "string" - }, - "pvcDeleteOnStsDelete": { - "type": "boolean" - }, - "pvcDeleteOnStsScale": { - "type": "boolean" - } - } - }, - "storagePath": { - "type": "string" - }, - "strategy": { - "type": "object", - "properties": { - "type": { - "type": "string" - } - } - }, - "tcpSocketProbeEnabled": { - "type": "boolean" - }, - "terminationGracePeriodSeconds": { - "type": "integer" - }, - "tolerations": { - "type": "array" - }, - "topologySpreadConstraints": { - "type": "array" - }, - "tsdb": { - "type": "object" - }, - "verticalAutoscaler": { - "type": "object", - "properties": { - "enabled": { - "type": "boolean" - } - } - } - } - }, - "scrapeConfigFiles": { - "type": "array" - }, - "serverFiles": { - "type": "object", - "properties": { - "alerting_rules.yml": { - "type": "object" - }, - "alerts": { - "type": "object" - }, - "prometheus.yml": { - "type": "object", - "properties": { - "rule_files": { - "type": 
"array", - "items": { - "type": "string" - } - }, - "scrape_configs": { - "type": "array", - "items": { - "type": "object", - "properties": { - "job_name": { - "type": "string" - }, - "static_configs": { - "type": "array", - "items": { - "type": "object", - "properties": { - "targets": { - "type": "array", - "items": { - "type": "string" - } - } - } - } - } - } - } - } - } - }, - "recording_rules.yml": { - "type": "object" - }, - "rules": { - "type": "object" - } - } - }, - "serviceAccounts": { - "type": "object", - "properties": { - "server": { - "type": "object", - "properties": { - "annotations": { - "type": "object" - }, - "create": { - "type": "boolean" - }, - "name": { - "type": "string" - }, - "automountServiceAccountToken": { - "type": "boolean" - } - } - } - } - } - } -} diff --git a/operations/deployment/helm/prometheus/values.yaml b/operations/deployment/helm/prometheus/values.yaml deleted file mode 100644 index 528b068..0000000 --- a/operations/deployment/helm/prometheus/values.yaml +++ /dev/null @@ -1,1691 +0,0 @@ -prometheus: - rbac: - create: true - - podSecurityPolicy: - enabled: false - - imagePullSecrets: - # - name: "image-pull-secret" - - ## Define serviceAccount names for components. Defaults to component's fully qualified name. - ## - serviceAccounts: - alertmanager: - create: true - name: - annotations: {} - nodeExporter: - create: true - name: - annotations: {} - pushgateway: - create: true - name: - annotations: {} - server: - create: true - name: - annotations: {} - - alertmanager: - ## If false, alertmanager will not be installed - ## - enabled: true - - ## Use a ClusterRole (and ClusterRoleBinding) - ## - If set to false - we define a Role and RoleBinding in the defined namespaces ONLY - ## This makes alertmanager work - for users who do not have ClusterAdmin privs, but wants alertmanager to operate on their own namespaces, instead of clusterwide. - useClusterRole: true - - ## Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. - useExistingRole: false - - ## alertmanager container name - ## - name: alertmanager - - ## alertmanager container image - ## - image: - repository: quay.io/prometheus/alertmanager - tag: v0.21.0 - pullPolicy: IfNotPresent - - ## alertmanager priorityClassName - ## - priorityClassName: "" - - ## Additional alertmanager container arguments - ## - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access alertmanager - baseURL: "http://localhost:9093" - - ## Additional alertmanager container environment variable - ## For instance to add a http_proxy - ## - extraEnv: {} - - ## Additional alertmanager Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. 
- extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: alertmanager-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.alertmanager.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ## The name of a secret in the same kubernetes namespace which contains the Alertmanager config - ## Defining configFromSecret will cause templates/alertmanager-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configFromSecret: "" - - ## The configuration file name to be loaded to alertmanager - ## Must match the key within configuration loaded from ConfigMap/Secret - ## - configFileName: alertmanager.yml - - ingress: - ## If true, alertmanager Ingress will be created - ## - enabled: false - - ## alertmanager Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## alertmanager Ingress additional labels - ## - extraLabels: {} - - ## alertmanager Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - alertmanager.domain.com - # - domain.com/alertmanager - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## alertmanager Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - alertmanager.domain.com - - ## Alertmanager Deployment Strategy type - # strategy: - # type: Recreate - - ## Node tolerations for alertmanager scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for alertmanager pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". 
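    ## A commented sketch (the secret name below is illustrative) of the configFromSecret
    ## option described above: point alertmanager at an existing Secret instead of the
    ## chart-generated ConfigMap, keeping configFileName in sync with the key inside it.
    ##
    # configFromSecret: "alertmanager-config"
    # configFileName: alertmanager.yml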
- ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, alertmanager will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## alertmanager data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## alertmanager data Persistent Volume Claim annotations - ## - annotations: {} - - ## alertmanager data Persistent Volume existing claim name - ## Requires alertmanager.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## alertmanager data Persistent Volume mount root path - ## - mountPath: /data - - ## alertmanager data Persistent Volume size - ## - size: 2Gi - - ## alertmanager data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## alertmanager data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. - ## - # volumeBindingMode: "" - - ## Subdirectory of alertmanager data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - emptyDir: - ## alertmanager emptyDir volume size limit - ## - sizeLimit: "" - - ## Annotations to be added to alertmanager pods - ## - podAnnotations: {} - ## Tell prometheus to use a specific set of alertmanager pods - ## instead of all alertmanager pods found in the same namespace - ## Useful if you deploy multiple releases within the same namespace - ## - ## prometheus.io/probe: alertmanager-teamA - - ## Labels to be added to Prometheus AlertManager pods - ## - podLabels: {} - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - ## Annotations to be added to deployment - ## - deploymentAnnotations: {} - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. 
- ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - annotations: {} - labels: {} - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - enableMeshPeer: false - - servicePort: 80 - - ## alertmanager resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 32Mi - # requests: - # cpu: 10m - # memory: 32Mi - - # Custom DNS configuration to be added to alertmanager pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - - ## Security context to be added to alertmanager pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## Enabling peer mesh service end points for enabling the HA alert manager - ## Ref: https://github.com/prometheus/alertmanager/blob/master/README.md - # enableMeshPeer : true - - ## List of IP addresses at which the alertmanager service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - # nodePort: 30000 - sessionAffinity: None - type: ClusterIP - - ## Monitors ConfigMap changes and POSTs to a URL - ## Ref: https://github.com/jimmidyson/configmap-reload - ## - configmapReload: - prometheus: - ## If false, the configmap-reload container will not be deployed - ## - enabled: true - - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: jimmidyson/configmap-reload - tag: v0.5.0 - pullPolicy: IfNotPresent - - ## Additional configmap-reload container arguments - ## - extraArgs: {} - ## Additional configmap-reload volume directories - ## - extraVolumeDirs: [] - - - ## Additional configmap-reload mounts - ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d - # subPath: "" - # configMap: prometheus-alerts - # readOnly: true - - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - alertmanager: - ## If false, the configmap-reload container will not be deployed - ## - enabled: true - - ## configmap-reload container name - ## - name: configmap-reload - - ## configmap-reload container image - ## - image: - repository: jimmidyson/configmap-reload - tag: v0.5.0 - pullPolicy: IfNotPresent - - ## Additional configmap-reload container arguments - ## - extraArgs: {} - ## Additional configmap-reload volume directories - ## - extraVolumeDirs: [] - - - ## Additional configmap-reload mounts - ## - extraConfigmapMounts: [] - # - name: prometheus-alerts - # mountPath: /etc/alerts.d - # subPath: "" - # configMap: prometheus-alerts - # readOnly: true - - - ## configmap-reload resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - - kubeStateMetrics: - ## If false, kube-state-metrics sub-chart will not be installed - ## - enabled: true - - ## kube-state-metrics sub-chart configurable values - ## 
Please see https://github.com/kubernetes/kube-state-metrics/tree/master/charts/kube-state-metrics - ## - # kube-state-metrics: - - nodeExporter: - ## If false, node-exporter will not be installed - ## - enabled: true - - ## If true, node-exporter pods share the host network namespace - ## - hostNetwork: true - - ## If true, node-exporter pods share the host PID namespace - ## - hostPID: true - - ## If true, node-exporter pods mounts host / at /host/root - ## - hostRootfs: true - - ## node-exporter container name - ## - name: node-exporter - - ## node-exporter container image - ## - image: - repository: quay.io/prometheus/node-exporter - tag: v1.1.2 - pullPolicy: IfNotPresent - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## node-exporter priorityClassName - ## - priorityClassName: "" - - ## Custom Update Strategy - ## - updateStrategy: - type: RollingUpdate - - ## Additional node-exporter container arguments - ## - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ## Additional node-exporter hostPath mounts - ## - extraHostPathMounts: [] - # - name: textfile-dir - # mountPath: /srv/txt_collector - # hostPath: /var/lib/node-exporter - # readOnly: true - # mountPropagation: HostToContainer - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # configMap: certs-configmap - # readOnly: true - - ## Node tolerations for node-exporter scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for node-exporter pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to node-exporter pods - ## - podAnnotations: {} - - ## Labels to be added to node-exporter pods - ## - pod: - labels: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## node-exporter resource limits & requests - ## Ref: https://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 200m - # memory: 50Mi - # requests: - # cpu: 100m - # memory: 30Mi - - # Custom DNS configuration to be added to node-exporter pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - - ## Security context to be added to node-exporter pods - ## - securityContext: - fsGroup: 65534 - runAsGroup: 65534 - runAsNonRoot: true - runAsUser: 65534 - - service: - annotations: - prometheus.io/scrape: "true" - labels: {} - - # Exposed as a headless 
service: - # https://kubernetes.io/docs/concepts/services-networking/service/#headless-services - clusterIP: None - - ## List of IP addresses at which the node-exporter service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - hostPort: 9100 - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9100 - type: ClusterIP - - server: - ## Prometheus server container name - ## - enabled: true - - ## Use a ClusterRole (and ClusterRoleBinding) - ## - If set to false - we define a RoleBinding in the defined namespaces ONLY - ## - ## NB: because we need a Role with nonResourceURL's ("/metrics") - you must get someone with Cluster-admin privileges to define this role for you, before running with this setting enabled. - ## This makes prometheus work - for users who do not have ClusterAdmin privs, but wants prometheus to operate on their own namespaces, instead of clusterwide. - ## - ## You MUST also set namespaces to the ones you have access to and want monitored by Prometheus. - ## - # useExistingClusterRoleName: nameofclusterrole - - ## namespaces to monitor (instead of monitoring all - clusterwide). Needed if you want to run without Cluster-admin privileges. - # namespaces: - # - yournamespace - - name: server - - # sidecarContainers - add more containers to prometheus server - # Key/Value where Key is the sidecar `- name: ` - # Example: - # sidecarContainers: - # webserver: - # image: nginx - sidecarContainers: {} - - ## Prometheus server container image - ## - image: - repository: quay.io/prometheus/prometheus - tag: v2.26.0 - pullPolicy: IfNotPresent - - ## prometheus server priorityClassName - ## - priorityClassName: "" - - ## EnableServiceLinks indicates whether information about services should be injected - ## into pod's environment variables, matching the syntax of Docker links. - ## WARNING: the field is unsupported and will be skipped in K8s prior to v1.13.0. - ## - enableServiceLinks: true - - ## The URL prefix at which the container can be accessed. Useful in the case the '-web.external-url' includes a slug - ## so that the various internal URLs are still able to access as they are in the default case. - ## (Optional) - prefixURL: "" - - ## External URL which can access prometheus - ## Maybe same with Ingress host name - baseURL: "" - - ## Additional server container environment variables - ## - ## You specify this manually like you would a raw deployment manifest. - ## This means you can bind in environment variables from secrets. - ## - ## e.g. static environment variable: - ## - name: DEMO_GREETING - ## value: "Hello from the environment" - ## - ## e.g. secret environment variable: - ## - name: USERNAME - ## valueFrom: - ## secretKeyRef: - ## name: mysecret - ## key: username - env: [] - - extraFlags: - - web.enable-lifecycle - ## web.enable-admin-api flag controls access to the administrative HTTP API which includes functionality such as - ## deleting time series. This is disabled by default. 
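  ## As an illustration only (not part of the upstream defaults), a custom values
  ## override that enables the admin API alongside the lifecycle endpoint could
  ## look roughly like this:
  ##   server:
  ##     extraFlags:
  ##       - web.enable-lifecycle
  ##       - web.enable-admin-api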
- # - web.enable-admin-api - ## - ## storage.tsdb.no-lockfile flag controls BD locking - # - storage.tsdb.no-lockfile - ## - ## storage.tsdb.wal-compression flag enables compression of the write-ahead log (WAL) - # - storage.tsdb.wal-compression - - ## Path to a configuration file on prometheus server container FS - configPath: /etc/config/prometheus.yml - - global: - ## How frequently to scrape targets by default - ## - scrape_interval: 1m - ## How long until a scrape request times out - ## - scrape_timeout: 10s - ## How frequently to evaluate rules - ## - evaluation_interval: 1m - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_write - ## - remoteWrite: [] - ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#remote_read - ## - remoteRead: [] - - ## Additional Prometheus server container arguments - ## - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ## Additional Prometheus server Volume mounts - ## - extraVolumeMounts: [] - - ## Additional Prometheus server Volumes - ## - extraVolumes: [] - - ## Additional Prometheus server hostPath mounts - ## - extraHostPathMounts: [] - # - name: certs-dir - # mountPath: /etc/kubernetes/certs - # subPath: "" - # hostPath: /etc/kubernetes/certs - # readOnly: true - - extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /prometheus - # subPath: "" - # configMap: certs-configmap - # readOnly: true - - ## Additional Prometheus server Secret mounts - # Defines additional mounts with secrets. Secrets must be manually created in the namespace. - extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # subPath: "" - # secretName: prom-secret-files - # readOnly: true - - ## ConfigMap override where fullname is {{.Release.Name}}-{{.Values.server.configMapOverrideName}} - ## Defining configMapOverrideName will cause templates/server-configmap.yaml - ## to NOT generate a ConfigMap resource - ## - configMapOverrideName: "" - - ingress: - ## If true, Prometheus server Ingress will be created - ## - enabled: false - - ## Prometheus server Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## Prometheus server Ingress additional labels - ## - extraLabels: {} - - ## Prometheus server Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - prometheus.domain.com - # - domain.com/prometheus - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## Prometheus server Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-server-tls - # hosts: - # - prometheus.domain.com - - ## Server Deployment Strategy type - # strategy: - # type: Recreate - - ## hostAliases allows adding entries to /etc/hosts inside the containers - hostAliases: [] - # - ip: "127.0.0.1" - # hostnames: - # - "example.com" - - ## Node tolerations for server scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for Prometheus server pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Pod affinity - ## - affinity: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - persistentVolume: - ## If true, Prometheus server will create/use a Persistent Volume Claim - ## If false, use emptyDir - ## - enabled: true - - ## Prometheus server data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## Prometheus server data Persistent Volume annotations - ## - annotations: {} - - ## Prometheus server data Persistent Volume existing claim name - ## Requires server.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## Prometheus server data Persistent Volume mount root path - ## - mountPath: /data - - ## Prometheus server data Persistent Volume size - ## - size: 8Gi - - ## Prometheus server data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## Prometheus server data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. 
- ## - # volumeBindingMode: "" - - ## Subdirectory of Prometheus server data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - emptyDir: - ## Prometheus server emptyDir volume size limit - ## - sizeLimit: "" - - ## Annotations to be added to Prometheus server pods - ## - podAnnotations: {} - # iam.amazonaws.com/role: prometheus - - ## Labels to be added to Prometheus server pods - ## - podLabels: {} - - ## Prometheus AlertManager configuration - ## - alertmanagers: [] - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - ## Use a StatefulSet if replicaCount needs to be greater than 1 (see below) - ## - replicaCount: 1 - - ## Annotations to be added to deployment - ## - deploymentAnnotations: {} - - statefulSet: - ## If true, use a statefulset instead of a deployment for pod management. - ## This allows to scale replicas to more than 1 pod - ## - enabled: false - - annotations: {} - labels: {} - podManagementPolicy: OrderedReady - - ## Alertmanager headless service to use for the statefulset - ## - headless: - annotations: {} - labels: {} - servicePort: 80 - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: - enabled: false - servicePort: 10901 - # nodePort: 10901 - - ## Prometheus server readiness and liveness probe initial delay and timeout - ## Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/ - ## - readinessProbeInitialDelay: 30 - readinessProbePeriodSeconds: 5 - readinessProbeTimeout: 4 - readinessProbeFailureThreshold: 3 - readinessProbeSuccessThreshold: 1 - livenessProbeInitialDelay: 30 - livenessProbePeriodSeconds: 15 - livenessProbeTimeout: 10 - livenessProbeFailureThreshold: 3 - livenessProbeSuccessThreshold: 1 - - ## Prometheus server resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 500m - # memory: 512Mi - # requests: - # cpu: 500m - # memory: 512Mi - - # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico), - # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working - ## - hostNetwork: false - - # When hostNetwork is enabled, you probably want to set this to ClusterFirstWithHostNet - dnsPolicy: ClusterFirst - - ## Vertical Pod Autoscaler config - ## Ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler - verticalAutoscaler: - ## If true a VPA object will be created for the controller (either StatefulSet or Deployemnt, based on above configs) - enabled: false - # updateMode: "Auto" - # containerPolicies: - # - containerName: 'prometheus-server' - - # Custom DNS configuration to be added to prometheus server pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - 
ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - ## Security context to be added to server pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - runAsGroup: 65534 - fsGroup: 65534 - - service: - annotations: {} - labels: {} - clusterIP: "" - - ## List of IP addresses at which the Prometheus server service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 80 - sessionAffinity: None - type: ClusterIP - - ## Enable gRPC port on service to allow auto discovery with thanos-querier - gRPC: - enabled: false - servicePort: 10901 - # nodePort: 10901 - - ## If using a statefulSet (statefulSet.enabled=true), configure the - ## service to connect to a specific replica to have a consistent view - ## of the data. - statefulsetReplica: - enabled: false - replica: 0 - - ## Prometheus server pod termination grace period - ## - terminationGracePeriodSeconds: 300 - - ## Prometheus data retention period (default if not specified is 15 days) - ## - retention: "15d" - - pushgateway: - ## If false, pushgateway will not be installed - ## - enabled: true - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: - - ## pushgateway container name - ## - name: pushgateway - - ## pushgateway container image - ## - image: - repository: prom/pushgateway - tag: v1.3.1 - pullPolicy: IfNotPresent - - ## pushgateway priorityClassName - ## - priorityClassName: "" - - ## Additional pushgateway container arguments - ## - ## for example: persistence.file: /data/pushgateway.data - extraArgs: {} - - ## Additional InitContainers to initialize the pod - ## - extraInitContainers: [] - - ingress: - ## If true, pushgateway Ingress will be created - ## - enabled: false - - ## pushgateway Ingress annotations - ## - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: 'true' - - ## pushgateway Ingress hostnames with optional path - ## Must be provided if Ingress is enabled - ## - hosts: [] - # - pushgateway.domain.com - # - domain.com/pushgateway - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - - ## pushgateway Ingress TLS configuration - ## Secrets must be manually created in the namespace - ## - tls: [] - # - secretName: prometheus-alerts-tls - # hosts: - # - pushgateway.domain.com - - ## Node tolerations for pushgateway scheduling to nodes with taints - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/ - ## - tolerations: [] - # - key: "key" - # operator: "Equal|Exists" - # value: "value" - # effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)" - - ## Node labels for pushgateway pod assignment - ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ - ## - nodeSelector: {} - - ## Annotations to be added to pushgateway pods - ## - podAnnotations: {} - - ## Labels to be added to pushgateway pods - ## - podLabels: {} - - ## Specify if a Pod Security Policy for node-exporter must be created - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ - ## - podSecurityPolicy: - annotations: {} - ## Specify pod annotations - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#apparmor - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#seccomp - ## Ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/#sysctl - ## - # seccomp.security.alpha.kubernetes.io/allowedProfileNames: '*' - # seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - # apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - - replicaCount: 1 - - ## Annotations to be added to deployment - ## - deploymentAnnotations: {} - - ## PodDisruptionBudget settings - ## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ - ## - podDisruptionBudget: - enabled: false - maxUnavailable: 1 - - ## pushgateway resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 10m - # memory: 32Mi - # requests: - # cpu: 10m - # memory: 32Mi - - # Custom DNS configuration to be added to push-gateway pods - dnsConfig: {} - # nameservers: - # - 1.2.3.4 - # searches: - # - ns1.svc.cluster-domain.example - # - my.dns.search.suffix - # options: - # - name: ndots - # value: "2" - # - name: edns0 - - ## Security context to be added to push-gateway pods - ## - securityContext: - runAsUser: 65534 - runAsNonRoot: true - - service: - annotations: - prometheus.io/probe: pushgateway - labels: {} - clusterIP: "" - - ## List of IP addresses at which the pushgateway service is available - ## Ref: https://kubernetes.io/docs/user-guide/services/#external-ips - ## - externalIPs: [] - - loadBalancerIP: "" - loadBalancerSourceRanges: [] - servicePort: 9091 - type: ClusterIP - - ## pushgateway Deployment Strategy type - # strategy: - # type: Recreate - - persistentVolume: - ## If true, pushgateway will create/use a Persistent Volume Claim - ## - enabled: false - - ## pushgateway data Persistent Volume access modes - ## Must match those of existing PV or dynamic provisioner - ## Ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ - ## - accessModes: - - ReadWriteOnce - - ## pushgateway data Persistent Volume Claim annotations - ## - annotations: {} - - ## pushgateway data Persistent Volume existing claim name - ## Requires pushgateway.persistentVolume.enabled: true - ## If defined, PVC must be created manually before volume will be bound - existingClaim: "" - - ## pushgateway data Persistent Volume mount 
root path - ## - mountPath: /data - - ## pushgateway data Persistent Volume size - ## - size: 2Gi - - ## pushgateway data Persistent Volume Storage Class - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. (gp2 on AWS, standard on - ## GKE, AWS & OpenStack) - ## - # storageClass: "-" - - ## pushgateway data Persistent Volume Binding Mode - ## If defined, volumeBindingMode: - ## If undefined (the default) or set to null, no volumeBindingMode spec is - ## set, choosing the default mode. - ## - # volumeBindingMode: "" - - ## Subdirectory of pushgateway data Persistent Volume to mount - ## Useful if the volume's root directory is not empty - ## - subPath: "" - - - ## alertmanager ConfigMap entries - ## - alertmanagerFiles: - alertmanager.yml: - global: {} - # slack_api_url: '' - - receivers: - - name: default-receiver - # slack_configs: - # - channel: '@you' - # send_resolved: true - - route: - group_wait: 10s - group_interval: 5m - receiver: default-receiver - repeat_interval: 3h - - ## Prometheus server ConfigMap entries - ## - serverFiles: - - ## Alerts configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/ - alerting_rules.yml: {} - # groups: - # - name: Instances - # rules: - # - alert: InstanceDown - # expr: up == 0 - # for: 5m - # labels: - # severity: page - # annotations: - # description: '{{ $labels.instance }} of job {{ $labels.job }} has been down for more than 5 minutes.' - # summary: 'Instance {{ $labels.instance }} down' - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use alerting_rules.yml - alerts: {} - - ## Records configuration - ## Ref: https://prometheus.io/docs/prometheus/latest/configuration/recording_rules/ - recording_rules.yml: {} - ## DEPRECATED DEFAULT VALUE, unless explicitly naming your files, please use recording_rules.yml - rules: {} - - prometheus.yml: - rule_files: - - /etc/config/recording_rules.yml - - /etc/config/alerting_rules.yml - ## Below two files are DEPRECATED will be removed from this default values file - - /etc/config/rules - - /etc/config/alerts - - scrape_configs: - - job_name: prometheus - static_configs: - - targets: - - localhost:9090 - - # A scrape configuration for running Prometheus on a Kubernetes cluster. - # This uses separate scrape configs for cluster components (i.e. API server, node) - # and services to allow each to use different authentication configs. - # - # Kubernetes labels will be added as Prometheus labels on metrics via the - # `labelmap` relabeling action. - - # Scrape config for API servers. - # - # Kubernetes exposes API servers as endpoints to the default/kubernetes - # service so this uses `endpoints` role and uses relabelling to only keep - # the endpoints associated with the default/kubernetes service using the - # default named port `https`. This works for single API server deployments as - # well as HA API server deployments. - - job_name: 'kubernetes-apiservers' - - kubernetes_sd_configs: - - role: endpoints - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. 
The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - # Keep only the default/kubernetes service endpoints for the https port. This - # will add targets for each API server which Kubernetes adds an endpoint to - # the default/kubernetes service. - relabel_configs: - - source_labels: [__meta_kubernetes_namespace, __meta_kubernetes_service_name, __meta_kubernetes_endpoint_port_name] - action: keep - regex: default;kubernetes;https - - - job_name: 'kubernetes-nodes' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. - # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics - - - - job_name: 'kubernetes-nodes-cadvisor' - - # Default to scraping over https. If required, just disable this or change to - # `http`. - scheme: https - - # This TLS & bearer token file config is used to connect to the actual scrape - # endpoints for cluster components. This is separate to discovery auth - # configuration because discovery & scraping are two separate concerns in - # Prometheus. The discovery auth config is automatic if Prometheus runs inside - # the cluster. Otherwise, more config options have to be provided within the - # . - tls_config: - ca_file: /var/run/secrets/kubernetes.io/serviceaccount/ca.crt - # If your node certificates are self-signed or use a different CA to the - # master CA, then disable certificate verification below. Note that - # certificate verification is an integral part of a secure infrastructure - # so this should only be disabled in a controlled environment. You can - # disable certificate verification by uncommenting the line below. 
- # - insecure_skip_verify: true - bearer_token_file: /var/run/secrets/kubernetes.io/serviceaccount/token - - kubernetes_sd_configs: - - role: node - - # This configuration will work only on kubelet 1.7.3+ - # As the scrape endpoints for cAdvisor have changed - # if you are using older version you need to change the replacement to - # replacement: /api/v1/nodes/$1:4194/proxy/metrics - # more info here https://github.com/coreos/prometheus-operator/issues/633 - relabel_configs: - - action: labelmap - regex: __meta_kubernetes_node_label_(.+) - - target_label: __address__ - replacement: kubernetes.default.svc:443 - - source_labels: [__meta_kubernetes_node_name] - regex: (.+) - target_label: __metrics_path__ - replacement: /api/v1/nodes/$1/proxy/metrics/cadvisor - - # Scrape config for service endpoints. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. - - job_name: 'kubernetes-service-endpoints' - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: kubernetes_node - - # Scrape config for slow service endpoints; same as above, but with a larger - # timeout and a larger interval - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/scrape-slow`: Only scrape services that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: If the metrics are exposed on a different port to the - # service then set this appropriately. 
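      # For illustration only (annotation names are taken from the list above; the
      # port value is a placeholder), a Service opting in to this slow scrape job
      # might be annotated like:
      #   metadata:
      #     annotations:
      #       prometheus.io/scrape-slow: "true"
      #       prometheus.io/port: "9187"
      #       prometheus.io/path: /metrics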
- - job_name: 'kubernetes-service-endpoints-slow' - - scrape_interval: 5m - scrape_timeout: 30s - - kubernetes_sd_configs: - - role: endpoints - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scrape_slow] - action: keep - regex: true - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_scheme] - action: replace - target_label: __scheme__ - regex: (https?) - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_service_annotation_prometheus_io_port] - action: replace - target_label: __address__ - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - action: replace - target_label: kubernetes_name - - source_labels: [__meta_kubernetes_pod_node_name] - action: replace - target_label: kubernetes_node - - - job_name: 'prometheus-pushgateway' - honor_labels: true - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: pushgateway - - # Example scrape config for probing services via the Blackbox Exporter. - # - # The relabeling allows the actual service scrape endpoint to be configured - # via the following annotations: - # - # * `prometheus.io/probe`: Only probe services that have a value of `true` - - job_name: 'kubernetes-services' - - metrics_path: /probe - params: - module: [http_2xx] - - kubernetes_sd_configs: - - role: service - - relabel_configs: - - source_labels: [__meta_kubernetes_service_annotation_prometheus_io_probe] - action: keep - regex: true - - source_labels: [__address__] - target_label: __param_target - - target_label: __address__ - replacement: blackbox - - source_labels: [__param_target] - target_label: instance - - action: labelmap - regex: __meta_kubernetes_service_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_service_name] - target_label: kubernetes_name - - # Example scrape config for pods - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape`: Only scrape pods that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. - - job_name: 'kubernetes-pods' - - kubernetes_sd_configs: - - role: pod - - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] - action: replace - regex: (https?) 
- target_label: __scheme__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name - - source_labels: [__meta_kubernetes_pod_phase] - regex: Pending|Succeeded|Failed - action: drop - - # Example Scrape config for pods which should be scraped slower. An useful example - # would be stackriver-exporter which queries an API on every scrape of the pod - # - # The relabeling allows the actual pod scrape endpoint to be configured via the - # following annotations: - # - # * `prometheus.io/scrape-slow`: Only scrape pods that have a value of `true` - # * `prometheus.io/scheme`: If the metrics endpoint is secured then you will need - # to set this to `https` & most likely set the `tls_config` of the scrape config. - # * `prometheus.io/path`: If the metrics path is not `/metrics` override this. - # * `prometheus.io/port`: Scrape the pod on the indicated port instead of the default of `9102`. - - job_name: 'kubernetes-pods-slow' - - scrape_interval: 5m - scrape_timeout: 30s - - kubernetes_sd_configs: - - role: pod - - relabel_configs: - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scrape_slow] - action: keep - regex: true - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_scheme] - action: replace - regex: (https?) - target_label: __scheme__ - - source_labels: [__meta_kubernetes_pod_annotation_prometheus_io_path] - action: replace - target_label: __metrics_path__ - regex: (.+) - - source_labels: [__address__, __meta_kubernetes_pod_annotation_prometheus_io_port] - action: replace - regex: ([^:]+)(?::\d+)?;(\d+) - replacement: $1:$2 - target_label: __address__ - - action: labelmap - regex: __meta_kubernetes_pod_label_(.+) - - source_labels: [__meta_kubernetes_namespace] - action: replace - target_label: kubernetes_namespace - - source_labels: [__meta_kubernetes_pod_name] - action: replace - target_label: kubernetes_pod_name - - source_labels: [__meta_kubernetes_pod_phase] - regex: Pending|Succeeded|Failed - action: drop - - # adds additional scrape configs to prometheus.yml - # must be a string so you have to add a | after extraScrapeConfigs: - # example adds prometheus-blackbox-exporter scrape config - extraScrapeConfigs: - # - job_name: 'prometheus-blackbox-exporter' - # metrics_path: /probe - # params: - # module: [http_2xx] - # static_configs: - # - targets: - # - https://example.com - # relabel_configs: - # - source_labels: [__address__] - # target_label: __param_target - # - source_labels: [__param_target] - # target_label: instance - # - target_label: __address__ - # replacement: prometheus-blackbox-exporter:9115 - - # Adds option to add alert_relabel_configs to avoid duplicate alerts in alertmanager - # useful in H/A prometheus with different external labels but the same alerts - alertRelabelConfigs: - # alert_relabel_configs: - # - source_labels: [dc] - # regex: (.+)\d+ - # target_label: dc - - networkPolicy: - ## Enable creation of NetworkPolicy resources. 
- ## - enabled: false - - # Force namespace of namespaced resources - forceNamespace: null \ No newline at end of file From bbe8a526f248d7dbcf5806cc92bcc4e803c4f973 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Thu, 7 Dec 2023 22:27:38 +0530 Subject: [PATCH 20/99] Added env file --- operations/deployment/helm/ENV_FILE | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 operations/deployment/helm/ENV_FILE diff --git a/operations/deployment/helm/ENV_FILE b/operations/deployment/helm/ENV_FILE new file mode 100644 index 0000000..8ce79ba --- /dev/null +++ b/operations/deployment/helm/ENV_FILE @@ -0,0 +1,2 @@ +GF_SECURITY_ADMIN_USER=admin +GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file From e266ded8bbc3d134787dd04bb73f6f69e57e8998 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Fri, 8 Dec 2023 16:40:18 +0530 Subject: [PATCH 21/99] Added ENvironment in correct folder --- operations/deployment/helm/{ => grafana}/ENV_FILE | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename operations/deployment/helm/{ => grafana}/ENV_FILE (100%) diff --git a/operations/deployment/helm/ENV_FILE b/operations/deployment/helm/grafana/ENV_FILE similarity index 100% rename from operations/deployment/helm/ENV_FILE rename to operations/deployment/helm/grafana/ENV_FILE From e486b6afced10ef71a0866d86a35c9ba15736283 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Fri, 8 Dec 2023 17:49:43 +0530 Subject: [PATCH 22/99] Added operations recruting chart --- .../deployment/helm/grafana/.helmignore | 23 - operations/deployment/helm/grafana/Chart.yaml | 40 +- operations/deployment/helm/grafana/ENV_FILE | 2 - operations/deployment/helm/grafana/README.md | 757 --------- .../helm/grafana/bitops.config.yaml | 16 + .../helm/grafana/ci/default-values.yaml | 1 - .../helm/grafana/ci/with-affinity-values.yaml | 16 - .../ci/with-dashboard-json-values.yaml | 53 - .../grafana/ci/with-dashboard-values.yaml | 19 - .../ci/with-extraconfigmapmounts-values.yaml | 7 - .../ci/with-image-renderer-values.yaml | 19 - .../helm/grafana/ci/with-persistence.yaml | 3 - .../grafana/dashboards/custom-dashboard.json | 1 - .../helm/grafana/templates/NOTES.txt | 55 - .../helm/grafana/templates/_helpers.tpl | 227 --- .../helm/grafana/templates/_pod.tpl | 1276 -------------- .../helm/grafana/templates/clusterrole.yaml | 25 - .../grafana/templates/clusterrolebinding.yaml | 24 - .../helm/grafana/templates/configSecret.yaml | 43 - .../configmap-dashboard-provider.yaml | 29 - .../helm/grafana/templates/configmap.yaml | 144 -- .../templates/dashboards-json-configmap.yaml | 38 - .../helm/grafana/templates/deployment.yaml | 51 - .../grafana/templates/extra-manifests.yaml | 4 - .../grafana/templates/headless-service.yaml | 22 - .../helm/grafana/templates/hpa.yaml | 52 - .../templates/image-renderer-deployment.yaml | 131 -- .../grafana/templates/image-renderer-hpa.yaml | 47 - .../image-renderer-network-policy.yaml | 79 - .../templates/image-renderer-service.yaml | 31 - .../image-renderer-servicemonitor.yaml | 48 - .../helm/grafana/templates/ingress.yaml | 78 - .../helm/grafana/templates/networkpolicy.yaml | 61 - .../templates/poddisruptionbudget.yaml | 22 - .../grafana/templates/podsecuritypolicy.yaml | 49 - .../helm/grafana/templates/pvc.yaml | 36 - .../helm/grafana/templates/role.yaml | 32 - .../helm/grafana/templates/rolebinding.yaml | 25 - .../helm/grafana/templates/secret-env.yaml | 14 - .../helm/grafana/templates/secret.yaml | 26 - .../helm/grafana/templates/service.yaml | 58 - .../grafana/templates/serviceaccount.yaml | 17 - 
.../grafana/templates/servicemonitor.yaml | 52 - .../helm/grafana/templates/statefulset.yaml | 56 - .../templates/tests/test-configmap.yaml | 20 - .../tests/test-podsecuritypolicy.yaml | 32 - .../grafana/templates/tests/test-role.yaml | 17 - .../templates/tests/test-rolebinding.yaml | 20 - .../templates/tests/test-serviceaccount.yaml | 12 - .../helm/grafana/templates/tests/test.yaml | 49 - .../deployment/helm/grafana/values.yaml | 1482 +++-------------- 51 files changed, 233 insertions(+), 5208 deletions(-) delete mode 100644 operations/deployment/helm/grafana/.helmignore delete mode 100644 operations/deployment/helm/grafana/ENV_FILE delete mode 100644 operations/deployment/helm/grafana/README.md create mode 100644 operations/deployment/helm/grafana/bitops.config.yaml delete mode 100644 operations/deployment/helm/grafana/ci/default-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-affinity-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-persistence.yaml delete mode 100644 operations/deployment/helm/grafana/dashboards/custom-dashboard.json delete mode 100644 operations/deployment/helm/grafana/templates/NOTES.txt delete mode 100644 operations/deployment/helm/grafana/templates/_helpers.tpl delete mode 100644 operations/deployment/helm/grafana/templates/_pod.tpl delete mode 100644 operations/deployment/helm/grafana/templates/clusterrole.yaml delete mode 100644 operations/deployment/helm/grafana/templates/clusterrolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configSecret.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/deployment.yaml delete mode 100644 operations/deployment/helm/grafana/templates/extra-manifests.yaml delete mode 100644 operations/deployment/helm/grafana/templates/headless-service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/hpa.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml delete mode 100644 operations/deployment/helm/grafana/templates/ingress.yaml delete mode 100644 operations/deployment/helm/grafana/templates/networkpolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml delete mode 100644 operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/pvc.yaml delete mode 100644 operations/deployment/helm/grafana/templates/role.yaml delete mode 100644 
operations/deployment/helm/grafana/templates/rolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/secret-env.yaml delete mode 100644 operations/deployment/helm/grafana/templates/secret.yaml delete mode 100644 operations/deployment/helm/grafana/templates/service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/serviceaccount.yaml delete mode 100644 operations/deployment/helm/grafana/templates/servicemonitor.yaml delete mode 100644 operations/deployment/helm/grafana/templates/statefulset.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-role.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test.yaml diff --git a/operations/deployment/helm/grafana/.helmignore b/operations/deployment/helm/grafana/.helmignore deleted file mode 100644 index 8cade13..0000000 --- a/operations/deployment/helm/grafana/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.vscode -.project -.idea/ -*.tmproj -OWNERS diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml index 387f2ab..12e9ab9 100644 --- a/operations/deployment/helm/grafana/Chart.yaml +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -1,33 +1,9 @@ apiVersion: v2 -name: grafana -version: 7.0.13 -appVersion: 10.2.2 -kubeVersion: "^1.8.0-0" -description: The leading tool for querying and visualizing time series and metrics. 
-home: https://grafana.com -icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 -sources: - - https://github.com/grafana/grafana - - https://github.com/grafana/helm-charts -annotations: - "artifacthub.io/license": AGPL-3.0-only - "artifacthub.io/links": | - - name: Chart Source - url: https://github.com/grafana/helm-charts - - name: Upstream Project - url: https://github.com/grafana/grafana -maintainers: - - name: zanhsieh - email: zanhsieh@gmail.com - - name: rtluckie - email: rluckie@cisco.com - - name: maorfr - email: maor.friedman@redhat.com - - name: Xtigyro - email: miroslav.hadzhiev@gmail.com - - name: torstenwalter - email: mail@torstenwalter.de -type: application -keywords: - - monitoring - - metric +name: grafana chart wrapper +description: A Helm chart wrapper for grafana +version: 0.1.0 + +dependencies: +- name: grafana + version: 6.31.1 + repository: https://grafana.github.io/helm-charts/ \ No newline at end of file diff --git a/operations/deployment/helm/grafana/ENV_FILE b/operations/deployment/helm/grafana/ENV_FILE deleted file mode 100644 index 8ce79ba..0000000 --- a/operations/deployment/helm/grafana/ENV_FILE +++ /dev/null @@ -1,2 +0,0 @@ -GF_SECURITY_ADMIN_USER=admin -GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file diff --git a/operations/deployment/helm/grafana/README.md b/operations/deployment/helm/grafana/README.md deleted file mode 100644 index 5420545..0000000 --- a/operations/deployment/helm/grafana/README.md +++ /dev/null @@ -1,757 +0,0 @@ -# Grafana Helm Chart - -* Installs the web dashboarding system [Grafana](http://grafana.org/) - -## Get Repo Info - -```console -helm repo add grafana https://grafana.github.io/helm-charts -helm repo update -``` - -_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ - -## Installing the Chart - -To install the chart with the release name `my-release`: - -```console -helm install my-release grafana/grafana -``` - -## Uninstalling the Chart - -To uninstall/delete the my-release deployment: - -```console -helm delete my-release -``` - -The command removes all the Kubernetes components associated with the chart and deletes the release. - -## Upgrading an existing Release to a new major version - -A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an -incompatible breaking change needing manual actions. - -### To 4.0.0 (And 3.12.1) - -This version requires Helm >= 2.12.0. - -### To 5.0.0 - -You have to add --force to your helm upgrade command as the labels of the chart have changed. - -### To 6.0.0 - -This version requires Helm >= 3.1.0. - -### To 7.0.0 - -For consistency with other Helm charts, the `global.image.registry` parameter was renamed -to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action -is required on upgrade. If you were previously setting `global.image.registry`, you will -need to instead set `global.imageRegistry`. 
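
For illustration only (the registry hostname below is a placeholder), a values override moving from the old key to the new one might look roughly like this:

```yaml
# Chart versions before 7.0.0
global:
  image:
    registry: registry.example.com

# Chart versions 7.0.0 and later
global:
  imageRegistry: registry.example.com
```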
- -## Configuration - -| Parameter | Description | Default | -|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| -| `replicas` | Number of nodes | `1` | -| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | -| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | -| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` | -| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | -| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | -| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| -| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | -| `priorityClassName` | Name of Priority Class to assign pods | `nil` | -| `image.registry` | Image registry | `docker.io` | -| `image.repository` | Image repository | `grafana/grafana` | -| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` | -| `image.sha` | Image sha (optional) | `` | -| `image.pullPolicy` | Image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` | -| `service.enabled` | Enable grafana service | `true` | -| `service.type` | Kubernetes service type | `ClusterIP` | -| `service.port` | Kubernetes port where service is exposed | `80` | -| `service.portName` | Name of the port on the service | `service` | -| `service.appProtocol` | Adds the appProtocol field to the service | `` | -| `service.targetPort` | Internal service is port | `3000` | -| `service.nodePort` | Kubernetes service nodePort | `nil` | -| `service.annotations` | Service annotations (can be templated) | `{}` | -| `service.labels` | Custom labels | `{}` | -| `service.clusterIP` | internal cluster service IP | `nil` | -| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | -| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | -| `service.externalIPs` | service external IP addresses | `[]` | -| `service.externalTrafficPolicy` | change the default externalTrafficPolicy | `nil` | -| `headlessService` | Create a headless service | `false` | -| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | -| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | -| `ingress.enabled` | Enables Ingress | `false` | -| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | -| `ingress.labels` | Custom labels | `{}` | -| `ingress.path` | Ingress accepted path | `/` | -| `ingress.pathType` | Ingress type of path | `Prefix` | -| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | -| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | -| `ingress.tls` | Ingress TLS configuration | `[]` | -| `ingress.ingressClassName` | Ingress Class Name. 
MAY be required for Kubernetes versions >= 1.18 | `""` | -| `resources` | CPU/Memory resource requests/limits | `{}` | -| `nodeSelector` | Node labels for pod assignment | `{}` | -| `tolerations` | Toleration labels for pod assignment | `[]` | -| `affinity` | Affinity settings for pod assignment | `{}` | -| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | -| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | -| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | -| `extraLabels` | Custom labels for all manifests | `{}` | -| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | -| `persistence.enabled` | Use persistent volume to store data | `false` | -| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | -| `persistence.size` | Size of persistent volume claim | `10Gi` | -| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` | -| `persistence.storageClassName` | Type of persistent volume claim | `nil` | -| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | -| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | -| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | -| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` | -| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` | -| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | -| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | -| `initChownData.enabled` | If false, don't reset data ownership at startup | true | -| `initChownData.image.registry` | init-chown-data container image registry | `docker.io` | -| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | -| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | -| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | -| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | -| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | -| `schedulerName` | Alternate scheduler name | `nil` | -| `env` | Extra environment variables passed to pods | `{}` | -| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` | -| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | -| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | -| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | -| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` | -| `enableServiceLinks` | Inject Kubernetes services as environment variables. 
| `true` | -| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | -| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | -| `createConfigmap` | Enable creating the grafana configmap | `true` | -| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` | -| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | -| `plugins` | Plugins to be loaded along with Grafana | `[]` | -| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | -| `alerting` | Configure grafana alerting (passed through tpl) | `{}` | -| `notifiers` | Configure grafana notifiers | `{}` | -| `dashboardProviders` | Configure grafana dashboard providers | `{}` | -| `dashboards` | Dashboards to import | `{}` | -| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | -| `grafana.ini` | Grafana's primary configuration | `{}` | -| `global.imageRegistry` | Global image pull registry for all images. | `null` | -| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` | -| `ldap.enabled` | Enable LDAP authentication | `false` | -| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | -| `ldap.config` | Grafana's LDAP configuration | `""` | -| `annotations` | Deployment annotations | `{}` | -| `labels` | Deployment labels | `{}` | -| `podAnnotations` | Pod annotations | `{}` | -| `podLabels` | Pod labels | `{}` | -| `podPortName` | Name of the grafana port on the pod | `grafana` | -| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | -| `sidecar.image.registry` | Sidecar image registry | `quay.io` | -| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` | -| `sidecar.image.tag` | Sidecar image tag | `1.24.6` | -| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | -| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | -| `sidecar.resources` | Sidecar resources | `{}` | -| `sidecar.securityContext` | Sidecar securityContext | `{}` | -| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | -| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` | -| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` | -| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` | -| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. 
| `WATCH` | -| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` | -| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` | -| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` | -| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | -| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | -| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | -| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | -| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | -| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | -| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | -| `sidecar.dashboards.provider.type` | Provider type | `file` | -| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | -| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | -| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | -| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | -| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` | -| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | -| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | -| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | -| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. 
| `nil` | -| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` | -| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` | -| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | -| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | -| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` | -| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | -| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | -| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` | -| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | -| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | -| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` | -| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | -| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | -| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | -| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` | -| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | -| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. 
This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` | -| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | -| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | -| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | -| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` | -| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | -| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | -| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | -| `serviceAccount.annotations` | ServiceAccount annotations | | -| `serviceAccount.create` | Create service account | `true` | -| `serviceAccount.labels` | ServiceAccount labels | `{}` | -| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | -| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | -| `rbac.create` | Create and use RBAC resources | `true` | -| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | -| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | -| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` | -| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` | -| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | -| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | -| `command` | Define command to be executed by grafana container at startup | `nil` | -| `args` | Define additional args if command is used | `nil` | -| `testFramework.enabled` | Whether to create test-related resources | `true` | -| `testFramework.image.registry` | `test-framework` image registry. | `docker.io` | -| `testFramework.image.repository` | `test-framework` image repository. | `bats/bats` | -| `testFramework.image.tag` | `test-framework` image tag. | `v1.4.1` | -| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | -| `testFramework.securityContext` | `test-framework` securityContext | `{}` | -| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | -| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. 
Can be templated | `""` | -| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | -| `downloadDashboardsImage.registry` | Curl docker image registry | `docker.io` | -| `downloadDashboardsImage.repository` | Curl docker image repository | `curlimages/curl` | -| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | -| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | -| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | -| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | -| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | -| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | -| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | -| `serviceMonitor.path` | Path to scrape | `/metrics` | -| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | -| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | -| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | -| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | -| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | -| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` | -| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | -| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | -| `imageRenderer.image.registry` | image-renderer Image registry | `docker.io` | -| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | -| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | -| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | -| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | -| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | -| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. 
Can be templated | `{}` | -| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | -| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | -| `imageRenderer.podAnnotations ` | image-renderer image-renderer pod annotation | `{}` | -| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | -| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | -| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | -| `imageRenderer.service.portName` | image-renderer service port name | `http` | -| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` | -| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` | -| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` | -| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | -| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | -| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | -| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | -| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | -| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` | -| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` | -| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` | -| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` | -| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` | -| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | -| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | -| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` | -| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` | -| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` | -| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's defintion version below 1.13 doesn't have the enableServiceLinks option | `false` | - -### Example ingress with path - -With grafana 6.3 and above - -```yaml -grafana.ini: - server: - domain: monitoring.example.com - root_url: "%(protocol)s://%(domain)s/grafana" - serve_from_sub_path: true -ingress: - enabled: true - hosts: - - "monitoring.example.com" - path: "/grafana" -``` - -### Example of extraVolumeMounts - -Volume can be type persistentVolumeClaim or hostPath but not both at same time. -If neither existingClaim or hostPath argument is given then type is emptyDir. - -```yaml -- extraVolumeMounts: - - name: plugins - mountPath: /var/lib/grafana/plugins - subPath: configs/grafana/plugins - existingClaim: existing-grafana-claim - readOnly: false - - name: dashboards - mountPath: /var/lib/grafana/dashboards - hostPath: /usr/shared/grafana/dashboards - readOnly: false -``` - -## Import dashboards - -There are a few methods to import dashboards to Grafana. 
Below are some examples and explanations as to how to use each method: - -```yaml -dashboards: - default: - some-dashboard: - json: | - { - "annotations": - - ... - # Complete json file here - ... - - "title": "Some Dashboard", - "uid": "abcd1234", - "version": 1 - } - custom-dashboard: - # This is a path to a file inside the dashboards directory inside the chart directory - file: dashboards/custom-dashboard.json - prometheus-stats: - # Ref: https://grafana.com/dashboards/2 - gnetId: 2 - revision: 2 - datasource: Prometheus - loki-dashboard-quick-search: - gnetId: 12019 - revision: 2 - datasource: - - name: DS_PROMETHEUS - value: Prometheus - - name: DS_LOKI - value: Loki - local-dashboard: - url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json -``` - -## BASE64 dashboards - -Dashboards could be stored on a server that does not return JSON directly and instead of it returns a Base64 encoded file (e.g. Gerrit) -A new parameter has been added to the url use case so if you specify a b64content value equals to true after the url entry a Base64 decoding is applied before save the file to disk. -If this entry is not set or is equals to false not decoding is applied to the file before saving it to disk. - -### Gerrit use case - -Gerrit API for download files has the following schema: where {project-name} and -{file-id} usually has '/' in their values and so they MUST be replaced by %2F so if project-name is user/repo, branch-id is master and file-id is equals to dir1/dir2/dashboard -the url value is - -## Sidecar for dashboards - -If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana -pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with -a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written -to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported -dashboards are deleted/updated. - -A recommendation is to use one configmap per dashboard, as a reduction of multiple dashboards inside -one configmap is currently not properly mirrored in grafana. - -Example dashboard config: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: sample-grafana-dashboard - labels: - grafana_dashboard: "1" -data: - k8s-dashboard.json: |- - [...] -``` - -## Sidecar for datasources - -If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana -pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and -filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in -those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, -the data sources in grafana can be imported. - -Should you aim for reloading datasources in Grafana each time the config is changed, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://..svc.cluster.local/api/admin/provisioning/datasources/reload`. - -Secrets are recommended over configmaps for this usecase because datasources usually contain private -data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. 
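For the reload behaviour described above, a minimal values sketch could look like the following. The keys (`sidecar.datasources.enabled`, `skipReload`, `reloadURL`) are the ones documented in the parameters table; the service name `grafana` and namespace `monitoring` inside the URL are hypothetical placeholders for your own release:

```yaml
sidecar:
  datasources:
    enabled: true
    # Reload datasources in Grafana whenever a matching ConfigMap/Secret changes
    skipReload: false
    # Hypothetical service name (grafana) and namespace (monitoring); replace with your own
    reloadURL: "http://grafana.monitoring.svc.cluster.local/api/admin/provisioning/datasources/reload"
```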
- -Example values to add a postgres datasource as a kubernetes secret: -```yaml -apiVersion: v1 -kind: Secret -metadata: - name: grafana-datasources - labels: - grafana_datasource: 'true' # default value for: sidecar.datasources.label -stringData: - pg-db.yaml: |- - apiVersion: 1 - datasources: - - name: My pg db datasource - type: postgres - url: my-postgresql-db:5432 - user: db-readonly-user - secureJsonData: - password: 'SUperSEcretPa$$word' - jsonData: - database: my_datase - sslmode: 'disable' # disable/require/verify-ca/verify-full - maxOpenConns: 0 # Grafana v5.4+ - maxIdleConns: 2 # Grafana v5.4+ - connMaxLifetime: 14400 # Grafana v5.4+ - postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10 - timescaledb: false - # allow users to edit datasources from the UI. - editable: false -``` - -Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): - -```yaml -datasources: - datasources.yaml: - apiVersion: 1 - datasources: - # name of the datasource. Required - - name: Graphite - # datasource type. Required - type: graphite - # access mode. proxy or direct (Server or Browser in the UI). Required - access: proxy - # org id. will default to orgId 1 if not specified - orgId: 1 - # url - url: http://localhost:8080 - # database password, if used - password: - # database user, if used - user: - # database name, if used - database: - # enable/disable basic auth - basicAuth: - # basic auth username - basicAuthUser: - # basic auth password - basicAuthPassword: - # enable/disable with credentials headers - withCredentials: - # mark as default datasource. Max one per org - isDefault: - # fields that will be converted to json and stored in json_data - jsonData: - graphiteVersion: "1.1" - tlsAuth: true - tlsAuthWithCACert: true - # json object of data that will be encrypted. - secureJsonData: - tlsCACert: "..." - tlsClientCert: "..." - tlsClientKey: "..." - version: 1 - # allow users to edit datasources from the UI. - editable: false -``` - -## Sidecar for notifiers - -If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana -pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and -filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in -those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, -the notification channels in grafana can be imported. The secrets must be created before -`helm install` so that the notifiers init container can list the secrets. - -Secrets are recommended over configmaps for this usecase because alert notification channels usually contain -private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. - -Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): - -```yaml -notifiers: - - name: notification-channel-1 - type: slack - uid: notifier1 - # either - org_id: 2 - # or - org_name: Main Org. - is_default: true - send_reminder: true - frequency: 1h - disable_resolve_message: false - # See `Supported Settings` section for settings supporter for each - # alert notification type. 
- settings: - recipient: 'XXX' - token: 'xoxb' - uploadImage: true - url: https://slack.com - -delete_notifiers: - - name: notification-channel-1 - uid: notifier1 - org_id: 2 - - name: notification-channel-2 - # default org_id: 1 -``` - -## Sidecar for alerting resources - -If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana -pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with -a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written -to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated, however, deletions are a little more complicated (see below). - -This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). - -To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)). -You can use either JSON or YAML format. - -Example config for an alert rule: - -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: sample-grafana-alert - labels: - grafana_alert: "1" -data: - k8s-alert.yml: |- - apiVersion: 1 - groups: - - orgId: 1 - name: k8s-alert - [...] -``` - -To delete provisioned alert rules is a two step process, you need to delete the configmap which defined the alert rule -and then create a configuration which deletes the alert rule. - -Example deletion configuration: -```yaml -apiVersion: v1 -kind: ConfigMap -metadata: - name: delete-sample-grafana-alert - namespace: monitoring - labels: - grafana_alert: "1" -data: - delete-k8s-alert.yml: |- - apiVersion: 1 - deleteRules: - - orgId: 1 - uid: 16624780-6564-45dc-825c-8bded4ad92d3 -``` - -## Statically provision alerting resources -If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly you could use the `alerting` config option instead of the sidecar option above. -This will grab the alerting config and apply it statically at build time for the helm file. - -There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: - -```yaml -alerting: - team1-alert-rules.yaml: - file: alerting/team1/rules.yaml - team2-alert-rules.yaml: - file: alerting/team2/rules.yaml - team3-alert-rules.yaml: - file: alerting/team3/rules.yaml - notification-policies.yaml: - file: alerting/shared/notification-policies.yaml - notification-templates.yaml: - file: alerting/shared/notification-templates.yaml - contactpoints.yaml: - apiVersion: 1 - contactPoints: - - orgId: 1 - name: Slack channel - receivers: - - uid: default-receiver - type: slack - settings: - # Webhook URL to be filled in - url: "" - # We need to escape double curly braces for the tpl function. - text: '{{ `{{ template "default.message" . }}` }}' - title: '{{ `{{ template "default.title" . }}` }}' -``` - -The two possibilities for static alerting resource provisioning are: - -* Inlining the file contents as shown for contact points in the above example. 
-* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example. - -### Important notes on file provisioning - -* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning. -* The chart supports importing YAML and JSON files. -* The filename must be unique, otherwise one volume mount will overwrite the other. -* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. -* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. -* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. - -## How to serve Grafana with a path prefix (/grafana) - -In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. - -```yaml -ingress: - enabled: true - annotations: - kubernetes.io/ingress.class: "nginx" - nginx.ingress.kubernetes.io/rewrite-target: /$1 - nginx.ingress.kubernetes.io/use-regex: "true" - - path: /grafana/?(.*) - hosts: - - k8s.example.dev - -grafana.ini: - server: - root_url: http://localhost:3000/grafana # this host can be localhost -``` - -## How to securely reference secrets in grafana.ini - -This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. - -In grafana.ini: - -```yaml -grafana.ini: - [auth.generic_oauth] - enabled = true - client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} - client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} -``` - -Existing secret, or created along with helm: - -```yaml ---- -apiVersion: v1 -kind: Secret -metadata: - name: auth-generic-oauth-secret -type: Opaque -stringData: - client_id: - client_secret: -``` - -Include in the `extraSecretMounts` configuration flag: - -```yaml -- extraSecretMounts: - - name: auth-generic-oauth-secret-mount - secretName: auth-generic-oauth-secret - defaultMode: 0440 - mountPath: /etc/secrets/auth_generic_oauth - readOnly: true -``` - -### extraSecretMounts using a Container Storage Interface (CSI) provider - -This example uses a CSI driver e.g. 
retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) - -```yaml -- extraSecretMounts: - - name: secrets-store-inline - mountPath: /run/secrets - readOnly: true - csi: - driver: secrets-store.csi.k8s.io - readOnly: true - volumeAttributes: - secretProviderClass: "my-provider" - nodePublishSecretRef: - name: akv-creds -``` - -## Image Renderer Plug-In - -This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker) - -```yaml -imageRenderer: - enabled: true -``` - -### Image Renderer NetworkPolicy - -By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance. - -### High Availability for unified alerting - -If you want to run Grafana in a high availability cluster you need to enable -the headless service by setting `headlessService: true` in your `values.yaml` -file. - -As a next step, you have to set up the `grafana.ini` in your `values.yaml` in a way -that it will make use of the headless service to obtain all the IPs of the -cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. - -```yaml -grafana.ini: - ... - unified_alerting: - enabled: true - ha_peers: {{ Name }}-headless:9094 - ha_listen_address: ${POD_IP}:9094 - ha_advertise_address: ${POD_IP}:9094 - - alerting: - enabled: false -``` diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml new file mode 100644 index 0000000..4dd878a --- /dev/null +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -0,0 +1,16 @@ +helm: + cli: + namespace: grafana + timeout: 120s + debug: true + atomic: true + force: false + dry-run: false + options: + release-name: grafana + skip-deploy: false + kubeconfig: + fetch: + enabled: true + cluster-name: recruiting-recruiting-recruiting-ekscluster + diff --git a/operations/deployment/helm/grafana/ci/default-values.yaml b/operations/deployment/helm/grafana/ci/default-values.yaml deleted file mode 100644 index fc2ba60..0000000 --- a/operations/deployment/helm/grafana/ci/default-values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml.
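Referring back to the "Image Renderer NetworkPolicy" note above: that default behaviour can be adjusted through the `imageRenderer.networkPolicy.*` values documented in the parameters table. A small, illustrative override (a sketch, not a chart default) might look like:

```yaml
imageRenderer:
  enabled: true
  networkPolicy:
    # Allow ingress only from the Grafana pods created by this release (chart default: true)
    limitIngress: true
    # Also restrict egress to the Grafana pods (chart default: false)
    limitEgress: true
```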
diff --git a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml deleted file mode 100644 index f5b9b53..0000000 --- a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/instance: grafana-test - app.kubernetes.io/name: grafana - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/instance: grafana-test - app.kubernetes.io/name: grafana - topologyKey: kubernetes.io/hostname diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml deleted file mode 100644 index e0c4e41..0000000 --- a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml +++ /dev/null @@ -1,53 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - # An empty but valid dashboard - json: | - { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "6.3.5" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [], - "schemaVersion": 19, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": ["5s"] - }, - "timezone": "", - "title": "Dummy Dashboard", - "uid": "IdcYQooWk", - "version": 1 - } - datasource: Prometheus diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml deleted file mode 100644 index 7b662c5..0000000 --- a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - gnetId: 10000 - revision: 1 - datasource: Prometheus -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'my-provider' - orgId: 1 - folder: '' - type: file - updateIntervalSeconds: 10 - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards/my-provider diff --git a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml deleted file mode 100644 index 5cc44a0..0000000 --- a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -extraConfigmapMounts: - - name: '{{ include "grafana.fullname" . }}' - configMap: '{{ include "grafana.fullname" . 
}}' - mountPath: /var/lib/grafana/dashboards/test-dashboard.json - # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap - subPath: grafana.ini - readOnly: true diff --git a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml deleted file mode 100644 index 32f3074..0000000 --- a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -podLabels: - customLableA: Aaaaa -imageRenderer: - enabled: true - env: - RENDERING_ARGS: --disable-gpu,--window-size=1280x758 - RENDERING_MODE: clustered - podLabels: - customLableB: Bbbbb - networkPolicy: - limitIngress: true - limitEgress: true - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 500m - memory: 50Mi diff --git a/operations/deployment/helm/grafana/ci/with-persistence.yaml b/operations/deployment/helm/grafana/ci/with-persistence.yaml deleted file mode 100644 index b92ca02..0000000 --- a/operations/deployment/helm/grafana/ci/with-persistence.yaml +++ /dev/null @@ -1,3 +0,0 @@ -persistence: - type: pvc - enabled: true diff --git a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json deleted file mode 100644 index 9e26dfe..0000000 --- a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/NOTES.txt b/operations/deployment/helm/grafana/templates/NOTES.txt deleted file mode 100644 index d86419f..0000000 --- a/operations/deployment/helm/grafana/templates/NOTES.txt +++ /dev/null @@ -1,55 +0,0 @@ -1. Get your '{{ .Values.adminUser }}' user password by running: - - kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo - - -2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: - - {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local -{{ if .Values.ingress.enabled }} - If you bind grafana to 80, please update values in values.yaml and reinstall: - ``` - securityContext: - runAsUser: 0 - runAsGroup: 0 - fsGroup: 0 - - command: - - "setcap" - - "'cap_net_bind_service=+ep'" - - "/usr/sbin/grafana-server &&" - - "sh" - - "/run.sh" - ``` - Details refer to https://grafana.com/docs/installation/configuration/#http-port. - Or grafana would always crash. - - From outside the cluster, the server URL(s) are: - {{- range .Values.ingress.hosts }} - http://{{ . }} - {{- end }} -{{- else }} - Get the Grafana URL to visit by running these commands in the same shell: - {{- if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT - {{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
- You can watch the status of by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - http://$SERVICE_IP:{{ .Values.service.port -}} - {{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000 - {{- end }} -{{- end }} - -3. Login with the password from step 1 and the username: {{ .Values.adminUser }} - -{{- if not .Values.persistence.enabled }} -################################################################################# -###### WARNING: Persistence is disabled!!! You will lose your data when ##### -###### the Grafana pod is terminated. ##### -################################################################################# -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/_helpers.tpl b/operations/deployment/helm/grafana/templates/_helpers.tpl deleted file mode 100644 index ead2449..0000000 --- a/operations/deployment/helm/grafana/templates/_helpers.tpl +++ /dev/null @@ -1,227 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "grafana.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "grafana.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "grafana.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create the name of the service account -*/}} -{{- define "grafana.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} - -{{- define "grafana.serviceAccountNameTest" -}} -{{- if .Values.serviceAccount.create }} -{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} -{{- else }} -{{- default "default" .Values.serviceAccount.nameTest }} -{{- end }} -{{- end }} - -{{/* -Allow the release namespace to be overridden for multi-namespace deployments in combined charts -*/}} -{{- define "grafana.namespace" -}} -{{- if .Values.namespaceOverride }} -{{- .Values.namespaceOverride }} -{{- else }} -{{- .Release.Namespace }} -{{- end }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "grafana.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . 
}} -{{ include "grafana.selectorLabels" . }} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- with .Values.extraLabels }} -{{ toYaml . }} -{{- end }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "grafana.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "grafana.imageRenderer.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . }} -{{ include "grafana.imageRenderer.selectorLabels" . }} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels ImageRenderer -*/}} -{{- define "grafana.imageRenderer.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Looks if there's an existing secret and reuse its password. If not it generates -new password and use it. -*/}} -{{- define "grafana.password" -}} -{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} -{{- if $secret }} -{{- index $secret "data" "admin-password" }} -{{- else }} -{{- (randAlphaNum 40) | b64enc | quote }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for rbac. -*/}} -{{- define "grafana.rbac.apiVersion" -}} -{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} -{{- print "rbac.authorization.k8s.io/v1" }} -{{- else }} -{{- print "rbac.authorization.k8s.io/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "grafana.ingress.apiVersion" -}} -{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} -{{- print "networking.k8s.io/v1" }} -{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} -{{- print "networking.k8s.io/v1beta1" }} -{{- else }} -{{- print "extensions/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for Horizontal Pod Autoscaler. -*/}} -{{- define "grafana.hpa.apiVersion" -}} -{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} -{{- print "autoscaling/v2" }} -{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} -{{- print "autoscaling/v2beta2" }} -{{- else }} -{{- print "autoscaling/v2beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for podDisruptionBudget. -*/}} -{{- define "grafana.podDisruptionBudget.apiVersion" -}} -{{- if $.Values.podDisruptionBudget.apiVersion }} -{{- print $.Values.podDisruptionBudget.apiVersion }} -{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} -{{- print "policy/v1" }} -{{- else }} -{{- print "policy/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return if ingress is stable. -*/}} -{{- define "grafana.ingress.isStable" -}} -{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} -{{- end }} - -{{/* -Return if ingress supports ingressClassName. 
-*/}} -{{- define "grafana.ingress.supportsIngressClassName" -}} -{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} -{{- end }} - -{{/* -Return if ingress supports pathType. -*/}} -{{- define "grafana.ingress.supportsPathType" -}} -{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} -{{- end }} - -{{/* -Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets}) -*/}} -{{- define "grafana.imagePullSecrets" -}} -{{- $root := .root }} -{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} -{{- if eq (typeOf .) "map[string]interface {}" }} -- {{ toYaml (dict "name" (tpl .name $root)) | trim }} -{{- else }} -- name: {{ tpl . $root }} -{{- end }} -{{- end }} -{{- end }} - - -{{/* - Checks whether or not the configSecret secret has to be created - */}} -{{- define "grafana.shouldCreateConfigSecret" -}} -{{- $secretFound := false -}} -{{- range $key, $value := .Values.datasources }} - {{- if hasKey $value "secret" }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- range $key, $value := .Values.notifiers }} - {{- if hasKey $value "secret" }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- range $key, $value := .Values.alerting }} - {{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- $secretFound}} -{{- end -}} diff --git a/operations/deployment/helm/grafana/templates/_pod.tpl b/operations/deployment/helm/grafana/templates/_pod.tpl deleted file mode 100644 index f4b9fd3..0000000 --- a/operations/deployment/helm/grafana/templates/_pod.tpl +++ /dev/null @@ -1,1276 +0,0 @@ -{{- define "grafana.pod" -}} -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- $root := . -}} -{{- with .Values.schedulerName }} -schedulerName: "{{ . }}" -{{- end }} -serviceAccountName: {{ include "grafana.serviceAccountName" . }} -automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} -{{- with .Values.securityContext }} -securityContext: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.hostAliases }} -hostAliases: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.priorityClassName }} -priorityClassName: {{ . 
}} -{{- end }} -{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts) (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} -initContainers: -{{- end }} -{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} - - name: init-chown-data - {{- $registry := .Values.global.imageRegistry | default .Values.initChownData.image.registry -}} - {{- if .Values.initChownData.image.sha }} - image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} - {{- with .Values.initChownData.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - command: - - chown - - -R - - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} - - /var/lib/grafana - {{- with .Values.initChownData.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . $root }} - {{- end }} -{{- end }} -{{- if .Values.dashboards }} - - name: download-dashboards - {{- $registry := .Values.global.imageRegistry | default .Values.downloadDashboardsImage.registry -}} - {{- if .Values.downloadDashboardsImage.sha }} - image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} - command: ["/bin/sh"] - args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] - {{- with .Values.downloadDashboards.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - env: - {{- range $key, $value := .Values.downloadDashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 10 }} - {{- end }} - {{- with .Values.downloadDashboards.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.downloadDashboards.envFromSecret }} - envFrom: - - secretRef: - name: {{ tpl . $root }} - {{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/download_dashboards.sh" - subPath: download_dashboards.sh - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . $root }} - {{- end }} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} -{{- end }} -{{- if and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts }} - - name: {{ include "grafana.name" . 
}}-init-sc-alerts - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.alerts.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: "LIST" - - name: LABEL - value: "{{ .Values.sidecar.alerts.label }}" - {{- with .Values.sidecar.alerts.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/alerting" - - name: RESOURCE - value: {{ quote .Values.sidecar.alerts.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.alerts.searchNamespace }} - - name: NAMESPACE - value: {{ . | join "," | quote }} - {{- end }} - {{- with .Values.sidecar.alerts.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.alerts.script }} - - name: SCRIPT - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- with .Values.sidecar.alerts.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end }} -{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} - - name: {{ include "grafana.name" . }}-init-sc-datasources - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.datasources.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: "LIST" - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - {{- with .Values.sidecar.datasources.labelValue }} - - name: LABEL_VALUE - value: {{ quote . 
}} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: {{ quote .Values.sidecar.datasources.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- if .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end }} -{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} - - name: {{ include "grafana.name" . }}-init-sc-notifiers - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.notifiers.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: LIST - - name: LABEL - value: "{{ .Values.sidecar.notifiers.label }}" - {{- with .Values.sidecar.notifiers.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/notifiers" - - name: RESOURCE - value: {{ quote .Values.sidecar.notifiers.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.notifiers.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} -{{- with .Values.extraInitContainers }} - {{- tpl (toYaml .) 
$root | nindent 2 }} -{{- end }} -{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} -imagePullSecrets: - {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} -{{- end }} -{{- if not .Values.enableKubeBackwardCompatibility }} -enableServiceLinks: {{ .Values.enableServiceLinks }} -{{- end }} -containers: -{{- if and .Values.sidecar.alerts.enabled (not .Values.sidecar.alerts.initAlerts) }} - - name: {{ include "grafana.name" . }}-sc-alerts - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.alerts.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.alerts.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.alerts.label }}" - {{- with .Values.sidecar.alerts.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/alerting" - - name: RESOURCE - value: {{ quote .Values.sidecar.alerts.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.alerts.searchNamespace }} - - name: NAMESPACE - value: {{ . | join "," | quote }} - {{- end }} - {{- with .Values.sidecar.alerts.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.alerts.script }} - - name: SCRIPT - value: {{ quote . }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.alerts.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.alerts.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.alerts.watchServerTimeout }} - {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.alerts.watchClientTimeout }} - {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- with .Values.sidecar.alerts.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end}} -{{- if .Values.sidecar.dashboards.enabled }} - - name: {{ include "grafana.name" . }}-sc-dashboard - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.dashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.dashboards.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.dashboards.label }}" - {{- with .Values.sidecar.dashboards.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} - {{- end }} - - name: FOLDER - value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" - - name: RESOURCE - value: {{ quote .Values.sidecar.dashboards.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.dashboards.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.dashboards.folderAnnotation }} - - name: FOLDER_ANNOTATION - value: "{{ . 
}}" - {{- end }} - {{- with .Values.sidecar.dashboards.script }} - - name: SCRIPT - value: "{{ . }}" - {{- end }} - {{- if not .Values.sidecar.dashboards.skipReload }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - - name: REQ_URL - value: {{ .Values.sidecar.dashboards.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.dashboards.watchServerTimeout }} - {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.dashboards.watchClientTimeout }} - {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - {{- with .Values.sidecar.dashboards.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end}} -{{- if and .Values.sidecar.datasources.enabled (not .Values.sidecar.datasources.initDatasources) }} - - name: {{ include "grafana.name" . 
}}-sc-datasources - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.datasources.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.datasources.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - {{- with .Values.sidecar.datasources.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: {{ quote .Values.sidecar.datasources.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - {{- if .Values.sidecar.datasources.script }} - - name: SCRIPT - value: "{{ .Values.sidecar.datasources.script }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.datasources.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.datasources.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.datasources.watchServerTimeout }} - {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.datasources.watchClientTimeout }} - {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} -{{- if .Values.sidecar.notifiers.enabled }} - - name: {{ include "grafana.name" . }}-sc-notifiers - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.notifiers.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.notifiers.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.notifiers.label }}" - {{- with .Values.sidecar.notifiers.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/notifiers" - - name: RESOURCE - value: {{ quote .Values.sidecar.notifiers.resource }} - {{- if .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ .Values.sidecar.enableUniqueFilenames }}" - {{- end }} - {{- with .Values.sidecar.notifiers.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . 
}}" - {{- end }} - {{- if .Values.sidecar.notifiers.script }} - - name: SCRIPT - value: "{{ .Values.sidecar.notifiers.script }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.notifiers.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.notifiers.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.notifiers.watchServerTimeout }} - {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.notifiers.watchClientTimeout }} - {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} -{{- if .Values.sidecar.plugins.enabled }} - - name: {{ include "grafana.name" . 
}}-sc-plugins - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.plugins.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.plugins.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.plugins.label }}" - {{- if .Values.sidecar.plugins.labelValue }} - - name: LABEL_VALUE - value: {{ quote .Values.sidecar.plugins.labelValue }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/plugins" - - name: RESOURCE - value: {{ quote .Values.sidecar.plugins.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.plugins.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.plugins.script }} - - name: SCRIPT - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.plugins.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.plugins.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.plugins.watchServerTimeout }} - {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.plugins.watchClientTimeout }} - {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . 
| nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-plugins-volume - mountPath: "/etc/grafana/provisioning/plugins" -{{- end}} - - name: {{ .Chart.Name }} - {{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}} - {{- if .Values.image.sha }} - image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - {{- end }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.command }} - command: - {{- range .Values.command }} - - {{ . | quote }} - {{- end }} - {{- end }} - {{- if .Values.args }} - args: - {{- range .Values.args }} - - {{ . | quote }} - {{- end }} - {{- end }} - {{- with .Values.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/grafana.ini" - subPath: grafana.ini - {{- if .Values.ldap.enabled }} - - name: ldap - mountPath: "/etc/grafana/ldap.toml" - subPath: ldap.toml - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ tpl .name $root }} - mountPath: {{ tpl .mountPath $root }} - subPath: {{ tpl (.subPath | default "") $root }} - readOnly: {{ .readOnly }} - {{- end }} - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . $root }} - {{- end }} - {{- with .Values.dashboards }} - {{- range $provider, $dashboards := . }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "json") (hasKey $value "file")) }} - - name: dashboards-{{ $provider }} - mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - subPath: "{{ $key }}.json" - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.dashboardsConfigMaps }} - {{- range (keys . | sortAlpha) }} - - name: dashboards-{{ . }} - mountPath: "/var/lib/grafana/dashboards/{{ . }}" - {{- end }} - {{- end }} - {{- with .Values.datasources }} - {{- $datasources := . }} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $datasources .) "secret")) }} {{/*check if current datasource should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.notifiers }} - {{- $notifiers := . }} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $notifiers .) "secret")) }} {{/*check if current notifier should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.alerting }} - {{- $alertingmap := .}} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $.Values.alerting .) "secret") (hasKey (index $.Values.alerting .) 
"secretFile")) }} {{/*check if current alerting entry should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.dashboardProviders }} - {{- range (keys . | sortAlpha) }} - - name: config - mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- with .Values.sidecar.alerts.enabled }} - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- end}} - {{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - {{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" - subPath: provider.yaml - {{- end}} - {{- end}} - {{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" - {{- end}} - {{- if .Values.sidecar.plugins.enabled }} - - name: sc-plugins-volume - mountPath: "/etc/grafana/provisioning/plugins" - {{- end}} - {{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" - {{- end}} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - subPath: {{ .subPath | default "" }} - {{- end }} - {{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - {{- end }} - ports: - - name: {{ .Values.podPortName }} - containerPort: {{ .Values.service.targetPort }} - protocol: TCP - - name: {{ .Values.gossipPortName }}-tcp - containerPort: 9094 - protocol: TCP - - name: {{ .Values.gossipPortName }}-udp - containerPort: 9094 - protocol: UDP - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if .Values.plugins }} - - name: GF_INSTALL_PLUGINS - valueFrom: - configMapKeyRef: - name: {{ include "grafana.fullname" . 
}} - key: plugins - {{- end }} - {{- if .Values.smtp.existingSecret }} - - name: GF_SMTP_USER - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.userKey | default "user" }} - - name: GF_SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.passwordKey | default "password" }} - {{- end }} - {{- if .Values.imageRenderer.enabled }} - - name: GF_RENDERING_SERVER_URL - value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render - - name: GF_RENDERING_CALLBACK_URL - value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} - {{- end }} - - name: GF_PATHS_DATA - value: {{ (get .Values "grafana.ini").paths.data }} - - name: GF_PATHS_LOGS - value: {{ (get .Values "grafana.ini").paths.logs }} - - name: GF_PATHS_PLUGINS - value: {{ (get .Values "grafana.ini").paths.plugins }} - - name: GF_PATHS_PROVISIONING - value: {{ (get .Values "grafana.ini").paths.provisioning }} - {{- range $key, $value := .Values.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 10 }} - {{- end }} - {{- range $key, $value := .Values.env }} - - name: "{{ tpl $key $ }}" - value: "{{ tpl (print $value) $ }}" - {{- end }} - {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} - envFrom: - {{- if .Values.envFromSecret }} - - secretRef: - name: {{ tpl .Values.envFromSecret . }} - {{- end }} - {{- if .Values.envRenderSecret }} - - secretRef: - name: {{ include "grafana.fullname" . }}-env - {{- end }} - {{- range .Values.envFromSecrets }} - - secretRef: - name: {{ tpl .name $ }} - optional: {{ .optional | default false }} - {{- end }} - {{- range .Values.envFromConfigMaps }} - - configMapRef: - name: {{ tpl .name $ }} - optional: {{ .optional | default false }} - {{- end }} - {{- end }} - {{- with .Values.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.lifecycleHooks }} - lifecycle: - {{- tpl (toYaml .) $root | nindent 6 }} - {{- end }} - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} -{{- with .Values.extraContainers }} - {{- tpl . $ | nindent 2 }} -{{- end }} -{{- with .Values.nodeSelector }} -nodeSelector: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.affinity }} -affinity: - {{- tpl (toYaml .) $root | nindent 2 }} -{{- end }} -{{- with .Values.topologySpreadConstraints }} -topologySpreadConstraints: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.tolerations }} -tolerations: - {{- toYaml . | nindent 2 }} -{{- end }} -volumes: - - name: config - configMap: - name: {{ include "grafana.fullname" . }} - {{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} - {{- if and .Values.createConfigmap $createConfigSecret }} - - name: config-secret - secret: - secretName: {{ include "grafana.fullname" . }}-config-secret - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ tpl .name $root }} - configMap: - name: {{ tpl .configMap $root }} - {{- with .items }} - items: - {{- toYaml . 
| nindent 8 }} - {{- end }} - {{- end }} - {{- if .Values.dashboards }} - {{- range (keys .Values.dashboards | sortAlpha) }} - - name: dashboards-{{ . }} - configMap: - name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} - {{- end }} - {{- end }} - {{- if .Values.dashboardsConfigMaps }} - {{- range $provider, $name := .Values.dashboardsConfigMaps }} - - name: dashboards-{{ $provider }} - configMap: - name: {{ tpl $name $root }} - {{- end }} - {{- end }} - {{- if .Values.ldap.enabled }} - - name: ldap - secret: - {{- if .Values.ldap.existingSecret }} - secretName: {{ .Values.ldap.existingSecret }} - {{- else }} - secretName: {{ include "grafana.fullname" . }} - {{- end }} - items: - - key: ldap-toml - path: ldap.toml - {{- end }} - {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} - - name: storage - persistentVolumeClaim: - claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }} - {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} - {{/* nothing */}} - {{- else }} - - name: storage - {{- if .Values.persistence.inMemory.enabled }} - emptyDir: - medium: Memory - {{- with .Values.persistence.inMemory.sizeLimit }} - sizeLimit: {{ . }} - {{- end }} - {{- else }} - emptyDir: {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.alerts.enabled }} - - name: sc-alerts-volume - emptyDir: - {{- with .Values.sidecar.alerts.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - emptyDir: - {{- with .Values.sidecar.dashboards.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - configMap: - name: {{ include "grafana.fullname" . }}-config-dashboards - {{- end }} - {{- end }} - {{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - emptyDir: - {{- with .Values.sidecar.datasources.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.plugins.enabled }} - - name: sc-plugins-volume - emptyDir: - {{- with .Values.sidecar.plugins.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - emptyDir: - {{- with .Values.sidecar.notifiers.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- range .Values.extraSecretMounts }} - {{- if .secretName }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - defaultMode: {{ .defaultMode }} - {{- with .items }} - items: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- else if .projected }} - - name: {{ .name }} - projected: - {{- toYaml .projected | nindent 6 }} - {{- else if .csi }} - - name: {{ .name }} - csi: - {{- toYaml .csi | nindent 6 }} - {{- end }} - {{- end }} - {{- range .Values.extraVolumes }} - - name: {{ .name }} - {{- if .existingClaim }} - persistentVolumeClaim: - claimName: {{ .existingClaim }} - {{- else if .hostPath }} - hostPath: - {{ toYaml .hostPath | nindent 6 }} - {{- else if .csi }} - csi: - {{- toYaml .data | nindent 6 }} - {{- else if .configMap }} - configMap: - {{- toYaml .configMap | nindent 6 }} - {{- else }} - emptyDir: {} - {{- end }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - emptyDir: {} - {{- end }} - {{- with .Values.extraContainerVolumes }} - {{- tpl (toYaml .) 
$root | nindent 2 }} - {{- end }} -{{- end }} - diff --git a/operations/deployment/helm/grafana/templates/clusterrole.yaml b/operations/deployment/helm/grafana/templates/clusterrole.yaml deleted file mode 100644 index 3af4b62..0000000 --- a/operations/deployment/helm/grafana/templates/clusterrole.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ include "grafana.fullname" . }}-clusterrole -{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} -rules: - {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} - - apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] - {{- end}} - {{- with .Values.rbac.extraClusterRoleRules }} - {{- toYaml . | nindent 2 }} - {{- end}} -{{- else }} -rules: [] -{{- end}} -{{- end}} diff --git a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml deleted file mode 100644 index bda9431..0000000 --- a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "grafana.fullname" . }}-clusterrolebinding - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -subjects: - - kind: ServiceAccount - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -roleRef: - kind: ClusterRole - {{- if .Values.rbac.useExistingClusterRole }} - name: {{ .Values.rbac.useExistingClusterRole }} - {{- else }} - name: {{ include "grafana.fullname" . }}-clusterrole - {{- end }} - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configSecret.yaml b/operations/deployment/helm/grafana/templates/configSecret.yaml deleted file mode 100644 index f8937cc..0000000 --- a/operations/deployment/helm/grafana/templates/configSecret.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} -{{- if and .Values.createConfigmap $createConfigSecret }} -{{- $files := .Files }} -{{- $root := . -}} -apiVersion: v1 -kind: Secret -metadata: - name: "{{ include "grafana.fullname" . }}-config-secret" - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -data: -{{- range $key, $value := .Values.alerting }} - {{- if (hasKey $value "secretFile") }} - {{- $key | nindent 2 }}: - {{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}} - {{/* as of https://helm.sh/docs/chart_template_guide/accessing_files/ this will only work if you fork this chart and add files to it*/}} - {{- end }} -{{- end }} -stringData: -{{- range $key, $value := .Values.datasources }} -{{- if (hasKey $value "secret") }} -{{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} -{{- end }} -{{- end }} -{{- range $key, $value := .Values.notifiers }} -{{- if (hasKey $value "secret") }} -{{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} -{{- end }} -{{- end }} -{{- range $key, $value := .Values.alerting }} -{{ if (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value.secret | nindent 4) $root }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml deleted file mode 100644 index 1f706a8..0000000 --- a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ include "grafana.fullname" . }}-config-dashboards - namespace: {{ include "grafana.namespace" . }} -data: - provider.yaml: |- - apiVersion: 1 - providers: - - name: '{{ .Values.sidecar.dashboards.provider.name }}' - orgId: {{ .Values.sidecar.dashboards.provider.orgid }} - {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - folder: '{{ .Values.sidecar.dashboards.provider.folder }}' - {{- end }} - type: {{ .Values.sidecar.dashboards.provider.type }} - disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} - allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} - updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} - options: - foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configmap.yaml b/operations/deployment/helm/grafana/templates/configmap.yaml deleted file mode 100644 index 7b837d9..0000000 --- a/operations/deployment/helm/grafana/templates/configmap.yaml +++ /dev/null @@ -1,144 +0,0 @@ -{{- if .Values.createConfigmap }} -{{- $files := .Files }} -{{- $root := . -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -data: - {{- with .Values.plugins }} - plugins: {{ join "," . 
}} - {{- end }} - grafana.ini: | - {{- range $elem, $elemVal := index .Values "grafana.ini" }} - {{- if not (kindIs "map" $elemVal) }} - {{- if kindIs "invalid" $elemVal }} - {{ $elem }} = - {{- else if kindIs "string" $elemVal }} - {{ $elem }} = {{ tpl $elemVal $ }} - {{- else }} - {{ $elem }} = {{ $elemVal }} - {{- end }} - {{- end }} - {{- end }} - {{- range $key, $value := index .Values "grafana.ini" }} - {{- if kindIs "map" $value }} - [{{ $key }}] - {{- range $elem, $elemVal := $value }} - {{- if kindIs "invalid" $elemVal }} - {{ $elem }} = - {{- else if kindIs "string" $elemVal }} - {{ $elem }} = {{ tpl $elemVal $ }} - {{- else }} - {{ $elem }} = {{ $elemVal }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.datasources }} - {{- if not (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.notifiers }} - {{- if not (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- toYaml $value | nindent 4 }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.alerting }} - {{- if (hasKey $value "file") }} - {{- $key | nindent 2 }}: - {{- toYaml ( $files.Get $value.file ) | nindent 4}} - {{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}} - {{/* will be stored inside secret generated by "configSecret.yaml"*/}} - {{- else }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.dashboardProviders }} - {{- $key | nindent 2 }}: | - {{- toYaml $value | nindent 4 }} - {{- end }} - -{{- if .Values.dashboards }} - download_dashboards.sh: | - #!/usr/bin/env sh - set -euf - {{- if .Values.dashboardProviders }} - {{- range $key, $value := .Values.dashboardProviders }} - {{- range $value.providers }} - mkdir -p {{ .options.path }} - {{- end }} - {{- end }} - {{- end }} - {{ $dashboardProviders := .Values.dashboardProviders }} - {{- range $provider, $dashboards := .Values.dashboards }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} - curl -skf \ - --connect-timeout 60 \ - --max-time 60 \ - {{- if not $value.b64content }} - {{- if not $value.acceptHeader }} - -H "Accept: application/json" \ - {{- else }} - -H "Accept: {{ $value.acceptHeader }}" \ - {{- end }} - {{- if $value.token }} - -H "Authorization: token {{ $value.token }}" \ - {{- end }} - {{- if $value.bearerToken }} - -H "Authorization: Bearer {{ $value.bearerToken }}" \ - {{- end }} - {{- if $value.basic }} - -H "Authorization: Basic {{ $value.basic }}" \ - {{- end }} - {{- if $value.gitlabToken }} - -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ - {{- end }} - -H "Content-Type: application/json;charset=UTF-8" \ - {{- end }} - {{- $dpPath := "" -}} - {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} - {{- if eq $kd.name $provider }} - {{- $dpPath = $kd.options.path }} - {{- end }} - {{- end }} - {{- if $value.url }} - "{{ $value.url }}" \ - {{- else }} - "https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ - {{- end }} - {{- if $value.datasource }} - {{- if kindIs "string" $value.datasource }} - | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ - {{- end }} - {{- if kindIs "slice" $value.datasource }} - {{- range $value.datasource }} - | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ - {{- end }} - {{- end }} - {{- end }} - {{- if $value.b64content }} - | base64 -d \ - {{- end }} - > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" - {{ end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml deleted file mode 100644 index b96ce72..0000000 --- a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if .Values.dashboards }} -{{ $files := .Files }} -{{- range $provider, $dashboards := .Values.dashboards }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} - namespace: {{ include "grafana.namespace" $ }} - labels: - {{- include "grafana.labels" $ | nindent 4 }} - dashboard-provider: {{ $provider }} - {{- if $.Values.sidecar.dashboards.enabled }} - {{ $.Values.sidecar.dashboards.label }}: {{ $.Values.sidecar.dashboards.labelValue | quote }} - {{- end }} -{{- if $dashboards }} -data: -{{- $dashboardFound := false }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} -{{- $dashboardFound = true }} - {{- print $key | nindent 2 }}.json: - {{- if hasKey $value "json" }} - |- - {{- $value.json | nindent 6 }} - {{- end }} - {{- if hasKey $value "file" }} - {{- toYaml ( $files.Get $value.file ) | nindent 4}} - {{- end }} -{{- end }} -{{- end }} -{{- if not $dashboardFound }} - {} -{{- end }} -{{- end }} ---- -{{- end }} - -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/deployment.yaml b/operations/deployment/helm/grafana/templates/deployment.yaml deleted file mode 100644 index bfa26bb..0000000 --- a/operations/deployment/helm/grafana/templates/deployment.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} - replicas: {{ .Values.replicas }} - {{- end }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - {{- with .Values.deploymentStrategy }} - strategy: - {{- toYaml . | trim | nindent 4 }} - {{- end }} - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . 
| sha256sum }} - {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- end }} - {{- if .Values.envRenderSecret }} - checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} - {{- end }} - kubectl.kubernetes.io/default-container: {{ .Chart.Name }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- include "grafana.pod" . | nindent 6 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/extra-manifests.yaml b/operations/deployment/helm/grafana/templates/extra-manifests.yaml deleted file mode 100644 index a9bb3b6..0000000 --- a/operations/deployment/helm/grafana/templates/extra-manifests.yaml +++ /dev/null @@ -1,4 +0,0 @@ -{{ range .Values.extraObjects }} ---- -{{ tpl (toYaml .) $ }} -{{ end }} diff --git a/operations/deployment/helm/grafana/templates/headless-service.yaml b/operations/deployment/helm/grafana/templates/headless-service.yaml deleted file mode 100644 index 3028589..0000000 --- a/operations/deployment/helm/grafana/templates/headless-service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }}-headless - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - clusterIP: None - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} - type: ClusterIP - ports: - - name: {{ .Values.gossipPortName }}-tcp - port: 9094 -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/hpa.yaml b/operations/deployment/helm/grafana/templates/hpa.yaml deleted file mode 100644 index 46bbcb4..0000000 --- a/operations/deployment/helm/grafana/templates/hpa.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if .Values.autoscaling.enabled }} -apiVersion: {{ include "grafana.hpa.apiVersion" . }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - app.kubernetes.io/name: {{ include "grafana.name" . }} - helm.sh/chart: {{ include "grafana.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - {{- if has .Values.persistence.type $sts }} - kind: StatefulSet - {{- else }} - kind: Deployment - {{- end }} - name: {{ include "grafana.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetMemory }} - - type: Resource - resource: - name: memory - {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemory }} - {{- end }} - {{- end }} - {{- if .Values.autoscaling.targetCPU }} - - type: Resource - resource: - name: cpu - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPU }} - {{- end }} - {{- end }} - {{- if .Values.autoscaling.behavior }} - behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml deleted file mode 100644 index ea97969..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml +++ /dev/null @@ -1,131 +0,0 @@ -{{ if .Values.imageRenderer.enabled }} -{{- $root := . -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.imageRenderer.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} - replicas: {{ .Values.imageRenderer.replicas }} - {{- end }} - revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - - {{- with .Values.imageRenderer.deploymentStrategy }} - strategy: - {{- toYaml . | trim | nindent 4 }} - {{- end }} - template: - metadata: - labels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - {{- with .Values.imageRenderer.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.imageRenderer.schedulerName }} - schedulerName: "{{ . }}" - {{- end }} - {{- with .Values.imageRenderer.serviceAccountName }} - serviceAccountName: "{{ . }}" - {{- end }} - {{- with .Values.imageRenderer.securityContext }} - securityContext: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.hostAliases }} - hostAliases: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.priorityClassName }} - priorityClassName: {{ . }} - {{- end }} - {{- with .Values.imageRenderer.image.pullSecrets }} - imagePullSecrets: - {{- range . }} - - name: {{ tpl . 
$root }} - {{- end}} - {{- end }} - containers: - - name: {{ .Chart.Name }}-image-renderer - {{- $registry := .Values.global.imageRegistry | default .Values.imageRenderer.image.registry -}} - {{- if .Values.imageRenderer.image.sha }} - image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} - {{- if .Values.imageRenderer.command }} - command: - {{- range .Values.imageRenderer.command }} - - {{ . }} - {{- end }} - {{- end}} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - containerPort: {{ .Values.imageRenderer.service.targetPort }} - protocol: TCP - livenessProbe: - httpGet: - path: / - port: {{ .Values.imageRenderer.service.portName }} - env: - - name: HTTP_PORT - value: {{ .Values.imageRenderer.service.targetPort | quote }} - {{- if .Values.imageRenderer.serviceMonitor.enabled }} - - name: ENABLE_METRICS - value: "true" - {{- end }} - {{- range $key, $value := .Values.imageRenderer.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 16 }} - {{- end }} - {{- range $key, $value := .Values.imageRenderer.env }} - - name: {{ $key | quote }} - value: {{ $value | quote }} - {{- end }} - {{- with .Values.imageRenderer.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - mountPath: /tmp - name: image-renderer-tmpfs - {{- with .Values.imageRenderer.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.imageRenderer.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.affinity }} - affinity: - {{- tpl (toYaml .) $root | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.tolerations }} - tolerations: - {{- toYaml . | nindent 8 }} - {{- end }} - volumes: - - name: image-renderer-tmpfs - emptyDir: {} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml deleted file mode 100644 index b0f0059..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} -apiVersion: {{ include "grafana.hpa.apiVersion" . }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer - helm.sh/chart: {{ include "grafana.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "grafana.fullname" . }}-image-renderer - minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} - maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} - metrics: - {{- if .Values.imageRenderer.autoscaling.targetMemory }} - - type: Resource - resource: - name: memory - {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} - {{- end }} - {{- end }} - {{- if .Values.imageRenderer.autoscaling.targetCPU }} - - type: Resource - resource: - name: cpu - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} - {{- end }} - {{- end }} - {{- if .Values.imageRenderer.autoscaling.behavior }} - behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml deleted file mode 100644 index d1a0eb3..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml +++ /dev/null @@ -1,79 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer-ingress - namespace: {{ include "grafana.namespace" . }} - annotations: - comment: Limit image-renderer ingress traffic from grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 6 }} - {{- end }} - - policyTypes: - - Ingress - ingress: - - ports: - - port: {{ .Values.imageRenderer.service.targetPort }} - protocol: TCP - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 14 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 14 }} - {{- end }} - {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} - {{ toYaml . | nindent 8 }} - {{- end }} -{{- end }} - -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer-egress - namespace: {{ include "grafana.namespace" . }} - annotations: - comment: Limit image-renderer egress traffic to grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 6 }} - {{- end }} - - policyTypes: - - Egress - egress: - # allow dns resolution - - ports: - - port: 53 - protocol: UDP - - port: 53 - protocol: TCP - # talk only to grafana - - ports: - - port: {{ .Values.service.targetPort }} - protocol: TCP - to: - - namespaceSelector: - matchLabels: - name: {{ include "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 14 }} - {{- with .Values.podLabels }} - {{- toYaml . 
| nindent 14 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml deleted file mode 100644 index f8da127..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.service.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.imageRenderer.service.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - type: ClusterIP - {{- with .Values.imageRenderer.service.clusterIP }} - clusterIP: {{ . }} - {{- end }} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - port: {{ .Values.imageRenderer.service.port }} - protocol: TCP - targetPort: {{ .Values.imageRenderer.service.targetPort }} - {{- with .Values.imageRenderer.appProtocol }} - appProtocol: {{ . }} - {{- end }} - selector: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml deleted file mode 100644 index 5d9f09d..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.imageRenderer.serviceMonitor.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - {{- if .Values.imageRenderer.serviceMonitor.namespace }} - namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} - {{- else }} - namespace: {{ include "grafana.namespace" . }} - {{- end }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.serviceMonitor.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - endpoints: - - port: {{ .Values.imageRenderer.service.portName }} - {{- with .Values.imageRenderer.serviceMonitor.interval }} - interval: {{ . }} - {{- end }} - {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ . }} - {{- end }} - honorLabels: true - path: {{ .Values.imageRenderer.serviceMonitor.path }} - scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} - {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} - tlsConfig: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.imageRenderer.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - jobLabel: "{{ .Release.Name }}-image-renderer" - selector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - namespaceSelector: - matchNames: - - {{ include "grafana.namespace" . }} - {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} - targetLabels: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/ingress.yaml b/operations/deployment/helm/grafana/templates/ingress.yaml deleted file mode 100644 index 063cdfa..0000000 --- a/operations/deployment/helm/grafana/templates/ingress.yaml +++ /dev/null @@ -1,78 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} -{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} -{{- $fullName := include "grafana.fullname" . -}} -{{- $servicePort := .Values.service.port -}} -{{- $ingressPath := .Values.ingress.path -}} -{{- $ingressPathType := .Values.ingress.pathType -}} -{{- $extraPaths := .Values.ingress.extraPaths -}} -apiVersion: {{ include "grafana.ingress.apiVersion" . }} -kind: Ingress -metadata: - name: {{ $fullName }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.ingress.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.ingress.annotations }} - annotations: - {{- range $key, $value := . }} - {{ $key }}: {{ tpl $value $ | quote }} - {{- end }} - {{- end }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} - ingressClassName: {{ .Values.ingress.ingressClassName }} - {{- end -}} - {{- with .Values.ingress.tls }} - tls: - {{- tpl (toYaml .) $ | nindent 4 }} - {{- end }} - rules: - {{- if .Values.ingress.hosts }} - {{- range .Values.ingress.hosts }} - - host: {{ tpl . $ }} - http: - paths: - {{- with $extraPaths }} - {{- toYaml . | nindent 10 }} - {{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end }} - {{- else }} - - http: - paths: - - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- with $ingressPath }} - path: {{ . }} - {{- end }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - {{- end -}} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/networkpolicy.yaml b/operations/deployment/helm/grafana/templates/networkpolicy.yaml deleted file mode 100644 index 4cd3ed6..0000000 --- a/operations/deployment/helm/grafana/templates/networkpolicy.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - policyTypes: - {{- if .Values.networkPolicy.ingress }} - - Ingress - {{- end }} - {{- if .Values.networkPolicy.egress.enabled }} - - Egress - {{- end }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . 
| nindent 6 }} - - {{- if .Values.networkPolicy.egress.enabled }} - egress: - {{- if not .Values.networkPolicy.egress.blockDNSResolution }} - - ports: - - port: 53 - protocol: UDP - {{- end }} - - ports: - {{ .Values.networkPolicy.egress.ports | toJson }} - {{- with .Values.networkPolicy.egress.to }} - to: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.networkPolicy.ingress }} - ingress: - - ports: - - port: {{ .Values.service.targetPort }} - {{- if not .Values.networkPolicy.allowExternal }} - from: - - podSelector: - matchLabels: - {{ include "grafana.fullname" . }}-client: "true" - {{- with .Values.networkPolicy.explicitNamespacesSelector }} - - namespaceSelector: - {{- toYaml . | nindent 12 }} - {{- end }} - - podSelector: - matchLabels: - {{- include "grafana.labels" . | nindent 14 }} - role: read - {{- end }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml deleted file mode 100644 index 0525121..0000000 --- a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.podDisruptionBudget }} -apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} -kind: PodDisruptionBudget -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- with .Values.podDisruptionBudget.minAvailable }} - minAvailable: {{ . }} - {{- end }} - {{- with .Values.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ . }} - {{- end }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml deleted file mode 100644 index eed7af9..0000000 --- a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "grafana.fullname" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - {{- if .Values.rbac.pspUseAppArmor }} - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - {{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - # Default set from Docker, with DAC_OVERRIDE and CHOWN - - ALL - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'csi' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. 
- - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/pvc.yaml b/operations/deployment/helm/grafana/templates/pvc.yaml deleted file mode 100644 index eb8f87f..0000000 --- a/operations/deployment/helm/grafana/templates/pvc.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.persistence.extraPvcLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.persistence.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.persistence.finalizers }} - finalizers: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - accessModes: - {{- range .Values.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.persistence.size | quote }} - {{- with .Values.persistence.storageClassName }} - storageClassName: {{ . }} - {{- end }} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: - {{- toYaml . | nindent 6 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/role.yaml b/operations/deployment/helm/grafana/templates/role.yaml deleted file mode 100644 index 4b5edd9..0000000 --- a/operations/deployment/helm/grafana/templates/role.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} -rules: - {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} - - apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "grafana.fullname" . }}] - {{- end }} - {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} - - apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] - {{- end }} - {{- with .Values.rbac.extraRoleRules }} - {{- toYaml . | nindent 2 }} - {{- end}} -{{- else }} -rules: [] -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/rolebinding.yaml b/operations/deployment/helm/grafana/templates/rolebinding.yaml deleted file mode 100644 index 58f77c6..0000000 --- a/operations/deployment/helm/grafana/templates/rolebinding.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - {{- if .Values.rbac.useExistingRole }} - name: {{ .Values.rbac.useExistingRole }} - {{- else }} - name: {{ include "grafana.fullname" . }} - {{- end }} -subjects: -- kind: ServiceAccount - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret-env.yaml b/operations/deployment/helm/grafana/templates/secret-env.yaml deleted file mode 100644 index eb14aac..0000000 --- a/operations/deployment/helm/grafana/templates/secret-env.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.envRenderSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "grafana.fullname" . }}-env - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -type: Opaque -data: -{{- range $key, $val := .Values.envRenderSecret }} - {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret.yaml b/operations/deployment/helm/grafana/templates/secret.yaml deleted file mode 100644 index 5cbd527..0000000 --- a/operations/deployment/helm/grafana/templates/secret.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -type: Opaque -data: - {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} - admin-user: {{ .Values.adminUser | b64enc | quote }} - {{- if .Values.adminPassword }} - admin-password: {{ .Values.adminPassword | b64enc | quote }} - {{- else }} - admin-password: {{ include "grafana.password" . }} - {{- end }} - {{- end }} - {{- if not .Values.ldap.existingSecret }} - ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/service.yaml b/operations/deployment/helm/grafana/templates/service.yaml deleted file mode 100644 index 9102c1e..0000000 --- a/operations/deployment/helm/grafana/templates/service.yaml +++ /dev/null @@ -1,58 +0,0 @@ -{{- if .Values.service.enabled }} -{{- $root := . }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.service.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.service.annotations }} - annotations: - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} -spec: - {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} - type: ClusterIP - {{- with .Values.service.clusterIP }} - clusterIP: {{ . }} - {{- end }} - {{- else if eq .Values.service.type "LoadBalancer" }} - type: {{ .Values.service.type }} - {{- with .Values.service.loadBalancerIP }} - loadBalancerIP: {{ . 
}} - {{- end }} - {{- with .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- else }} - type: {{ .Values.service.type }} - {{- end }} - {{- with .Values.service.externalIPs }} - externalIPs: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ . }} - {{- end }} - ports: - - name: {{ .Values.service.portName }} - port: {{ .Values.service.port }} - protocol: TCP - targetPort: {{ .Values.service.targetPort }} - {{- with .Values.service.appProtocol }} - appProtocol: {{ . }} - {{- end }} - {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} - {{- with .Values.extraExposePorts }} - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/serviceaccount.yaml b/operations/deployment/helm/grafana/templates/serviceaccount.yaml deleted file mode 100644 index 784e71b..0000000 --- a/operations/deployment/helm/grafana/templates/serviceaccount.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.serviceAccount.create }} -{{- $root := . -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/servicemonitor.yaml b/operations/deployment/helm/grafana/templates/servicemonitor.yaml deleted file mode 100644 index 7239682..0000000 --- a/operations/deployment/helm/grafana/templates/servicemonitor.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- if .Values.serviceMonitor.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "grafana.fullname" . }} - {{- if .Values.serviceMonitor.namespace }} - namespace: {{ tpl .Values.serviceMonitor.namespace . }} - {{- else }} - namespace: {{ include "grafana.namespace" . }} - {{- end }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.serviceMonitor.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - endpoints: - - port: {{ .Values.service.portName }} - {{- with .Values.serviceMonitor.interval }} - interval: {{ . }} - {{- end }} - {{- with .Values.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ . }} - {{- end }} - honorLabels: true - path: {{ .Values.serviceMonitor.path }} - scheme: {{ .Values.serviceMonitor.scheme }} - {{- with .Values.serviceMonitor.tlsConfig }} - tlsConfig: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.serviceMonitor.metricRelabelings }} - metricRelabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - jobLabel: "{{ .Release.Name }}" - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - namespaceSelector: - matchNames: - - {{ include "grafana.namespace" . }} - {{- with .Values.serviceMonitor.targetLabels }} - targetLabels: - {{- toYaml . 
| nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/statefulset.yaml b/operations/deployment/helm/grafana/templates/statefulset.yaml deleted file mode 100644 index e6c944a..0000000 --- a/operations/deployment/helm/grafana/templates/statefulset.yaml +++ /dev/null @@ -1,56 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - serviceName: {{ include "grafana.fullname" . }}-headless - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} - {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- end }} - kubectl.kubernetes.io/default-container: {{ .Chart.Name }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- include "grafana.pod" . | nindent 6 }} - {{- if .Values.persistence.enabled}} - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: {{ .Values.persistence.accessModes }} - storageClassName: {{ .Values.persistence.storageClassName }} - resources: - requests: - storage: {{ .Values.persistence.size }} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: - {{- toYaml . | nindent 10 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml deleted file mode 100644 index 01c96c9..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.testFramework.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -data: - run.sh: |- - @test "Test Health" { - url="http://{{ include "grafana.fullname" . 
}}/api/health" - - code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') - [ "$code" == "200" ] - } -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml deleted file mode 100644 index 1821772..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "grafana.fullname" . }}-test - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -spec: - allowPrivilegeEscalation: true - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - fsGroup: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - runAsUser: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - projected - - csi - - secret -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-role.yaml b/operations/deployment/helm/grafana/templates/tests/test-role.yaml deleted file mode 100644 index cb4c782..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -rules: - - apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "grafana.fullname" . }}-test] -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml deleted file mode 100644 index f40d791..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ include "grafana.fullname" . }}-test -subjects: - - kind: ServiceAccount - name: {{ include "grafana.serviceAccountNameTest" . }} - namespace: {{ include "grafana.namespace" . 
}} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml deleted file mode 100644 index 38fba35..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - name: {{ include "grafana.serviceAccountNameTest" . }} - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test.yaml b/operations/deployment/helm/grafana/templates/tests/test.yaml deleted file mode 100644 index 15067ae..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if .Values.testFramework.enabled }} -{{- $root := . }} -apiVersion: v1 -kind: Pod -metadata: - name: {{ include "grafana.fullname" . }}-test - labels: - {{- include "grafana.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - namespace: {{ include "grafana.namespace" . }} -spec: - serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }} - {{- with .Values.testFramework.securityContext }} - securityContext: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- tpl (toYaml .) $root | nindent 4 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . | nindent 4 }} - {{- end }} - containers: - - name: {{ .Release.Name }}-test - image: "{{ .Values.global.imageRegistry | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}" - imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" - command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] - volumeMounts: - - mountPath: /tests - name: tests - readOnly: true - volumes: - - name: tests - configMap: - name: {{ include "grafana.fullname" . }}-test - restartPolicy: Never -{{- end }} diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml index 07502cc..3b339df 100644 --- a/operations/deployment/helm/grafana/values.yaml +++ b/operations/deployment/helm/grafana/values.yaml @@ -1,1282 +1,218 @@ -global: - # -- Overrides the Docker registry globally for all images - imageRegistry: null - - # To help compatibility with other charts which use global.imagePullSecrets. - # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). - # Can be tempalted. 
- # global: - # imagePullSecrets: - # - name: pullSecret1 - # - name: pullSecret2 - # or - # global: - # imagePullSecrets: - # - pullSecret1 - # - pullSecret2 - imagePullSecrets: [] - -rbac: - create: true - ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) - # useExistingRole: name-of-some-role - # useExistingClusterRole: name-of-some-clusterRole - pspEnabled: false - pspUseAppArmor: false - namespaced: false - extraRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] - extraClusterRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] -serviceAccount: - create: true - name: - nameTest: - ## ServiceAccount labels. - labels: {} -## Service account annotations. Can be templated. -# annotations: -# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here - autoMount: true - -replicas: 1 - -## Create a headless service for the deployment -headlessService: false - -## Create HorizontalPodAutoscaler object for deployment type -# -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - -## See `kubectl explain poddisruptionbudget.spec` for more -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} -# apiVersion: "" -# minAvailable: 1 -# maxUnavailable: 1 - -## See `kubectl explain deployment.spec.strategy` for more -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -deploymentStrategy: - type: RollingUpdate - -readinessProbe: - httpGet: - path: /api/health - port: 3000 - -livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 60 - timeoutSeconds: 30 - failureThreshold: 10 - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: "default-scheduler" - -image: - # -- The Docker registry - registry: docker.io - # -- Docker image repository - repository: grafana/grafana - # Overrides the Grafana image tag whose default is the chart appVersion - tag: "" - sha: "" - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Can be templated. - ## - pullSecrets: [] - # - myRegistrKeySecretName - -testFramework: - enabled: true - image: - # -- The Docker registry - registry: docker.io - repository: bats/bats - tag: "v1.4.1" - imagePullPolicy: IfNotPresent - securityContext: {} - -securityContext: - runAsNonRoot: true - runAsUser: 472 - runAsGroup: 472 - fsGroup: 472 - -containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - -# Enable creating the grafana configmap -createConfigmap: true - -# Extra configmaps to mount in grafana pods -# Values are templated. -extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # subPath: certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - -extraEmptyDirMounts: [] - # - name: provisioning-notifiers - # mountPath: /etc/grafana/provisioning/notifiers - - -# Apply extra labels to common labels. 
-extraLabels: {} - -## Assign a PriorityClassName to pods if set -# priorityClassName: - -downloadDashboardsImage: - # -- The Docker registry - registry: docker.io - repository: curlimages/curl - tag: 7.85.0 - sha: "" - pullPolicy: IfNotPresent - -downloadDashboards: - env: {} - envFromSecret: "" - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## Pod Annotations -# podAnnotations: {} - -## Pod Labels -# podLabels: {} - -podPortName: grafana -gossipPortName: gossip -## Deployment annotations -# annotations: {} - -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - enabled: true - type: ClusterIP - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - ## Service annotations. Can be templated. - annotations: {} - labels: {} - portName: service - # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" - appProtocol: "" - -serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - labels: {} - interval: 30s - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - metricRelabelings: [] - targetLabels: [] - -extraExposePorts: [] - # - name: keycloak - # port: 8080 - # targetPort: 8080 - -# overrides pod.spec.hostAliases in the grafana deployment's pods -hostAliases: [] - # - ip: "1.2.3.4" - # hostnames: - # - "my.host.com" - -ingress: - enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - # Values can be templated - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - - # pathType is only for k8s >= 1.1= - pathType: Prefix - - hosts: - - chart-example.local - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## Or for k8s > 1.19 - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: use-annotation - - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Topology Spread Constraints -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -## -topologySpreadConstraints: [] - -## Additional init containers (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ -## -extraInitContainers: [] - -## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod -extraContainers: "" -# extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - -provider=github -# - -client-id= -# - -client-secret= -# - -github-org= -# - -email-domain=* -# - -cookie-secret= -# - -http-address=http://0.0.0.0:4181 -# - -upstream-url=http://127.0.0.1:3000 -# ports: -# - name: proxy-web -# containerPort: 4181 - -## Volumes that can be used in init containers that will not be mounted to deployment pods -extraContainerVolumes: [] -# - name: volume-from-secret -# secret: -# secretName: secret-to-mount -# - name: empty-dir-volume -# emptyDir: {} - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - type: pvc - enabled: false - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - # annotations: {} - finalizers: - - kubernetes.io/pvc-protection - # selectorLabels: {} - ## Sub-directory of the PV to mount. Can be templated. - # subPath: "" - ## Name of an existing PVC. Can be templated. - # existingClaim: - ## Extra labels to apply to a PVC. - extraPvcLabels: {} - - ## If persistence is not enabled, this allows to mount the - ## local storage in-memory to improve performance - ## - inMemory: - enabled: false - ## The maximum usage on memory medium EmptyDir would be - ## the minimum value between the SizeLimit specified - ## here and the sum of memory limits of all containers in a pod - ## - # sizeLimit: 300Mi - -initChownData: - ## If false, data ownership will not be reset at startup - ## This allows the grafana-server to be run with an arbitrary user - ## - enabled: true - - ## initChownData container image - ## +grafana: + replicas: 1 + image: - # -- The Docker registry - registry: docker.io - repository: library/busybox - tag: "1.31.1" + repository: grafana/grafana + tag: 9.0.1 sha: "" pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + + serviceAccount: + autoMount: true - ## initChownData resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - securityContext: - runAsNonRoot: false - runAsUser: 0 - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - CHOWN - -# Administrator credentials when not using an existing secret (see below) -adminUser: admin -# adminPassword: strongpassword - -# Use an existing secret for the admin user. -admin: - ## Name of the secret. Can be templated. - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - -## Define command to be executed at startup by grafana container -## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) -## Default is "run.sh" as defined in grafana's Dockerfile -# command: -# - "sh" -# - "/run.sh" - -## Optionally define args if command is used -## Needed if using `hashicorp/envconsul` to manage secrets -## By default no arguments are set -# args: -# - "-secret" -# - "secret/grafana" -# - "./grafana" - -## Extra environment variables that will be pass onto deployment pods -## -## to provide grafana with access to CloudWatch on AWS EKS: -## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) -## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the -## same oidc eks provider as noted before (same as the existing line) -## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name -## -## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", -## -## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess -## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) -## -## env: -## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here -## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token -## AWS_REGION: us-east-1 -## -## 5. uncomment the EKS section in extraSecretMounts: below -## 6. uncomment the annotation section in the serviceAccount: above -## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn - -env: {} - -## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core -## Renders in container spec as: -## env: -## ... -## - name: -## valueFrom: -## -envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc. Value is templated. -envFromSecret: "" - -## Sensible environment variables that will be rendered as new secret object -## This can be useful for auth tokens, etc. 
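# Illustrative sketch (not part of this patch; the file name and values below are
# hypothetical): the rewritten values.yaml nests the chart settings shown here under
# a top-level `grafana:` key, so an override file would carry that extra level of
# indentation, e.g. in a my-values.yaml:
#
# grafana:
#   replicas: 2
#   image:
#     tag: 9.0.1
#     pullSecrets:
#       - my-registry-secret
#   serviceAccount:
#     autoMount: true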
-## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm -## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function -envRenderSecret: {} - -## The names of secrets in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. -## Name is templated. -envFromSecrets: [] -## - name: secret-name -## optional: true - -## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. -## Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core -envFromConfigMaps: [] -## - name: configmap-name -## optional: true - -# Inject Kubernetes services as environment variables. -# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables -enableServiceLinks: true - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - # subPath: "" - # - # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) - # - name: aws-iam-token - # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount - # readOnly: true - # projected: - # defaultMode: 420 - # sources: - # - serviceAccountToken: - # audience: sts.amazonaws.com - # expirationSeconds: 86400 - # path: token - # - # for CSI e.g. Azure Key Vault use the following - # - name: secrets-store-inline - # mountPath: /run/secrets - # readOnly: true - # csi: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "akv-grafana-spc" - # nodePublishSecretRef: # Only required when using service principal mode - # name: grafana-akv-creds # Only required when using service principal mode - -## Additional grafana server volume mounts -# Defines additional volume mounts. -extraVolumeMounts: [] - # - name: extra-volume-0 - # mountPath: /mnt/volume0 - # readOnly: true - # existingClaim: volume-claim - # - name: extra-volume-1 - # mountPath: /mnt/volume1 - # readOnly: true - # hostPath: /usr/shared/ - # - name: grafana-secrets - # mountPath: /mnt/volume2 - # csi: true - # data: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "grafana-env-spc" - -## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request -lifecycleHooks: {} - # postStart: - # exec: - # command: [] - -## Pass the plugins you want installed as a list. -## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - ## You can also use other plugin download URL, as long as they are valid zip files, - ## and specify the name of the plugin after the semicolon. 
Like this: - # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource - -## Configure grafana datasources -## ref: http://docs.grafana.org/administration/provisioning/#datasources -## -datasources: {} -# datasources.yaml: -# apiVersion: 1 -# datasources: -# - name: Prometheus -# type: prometheus -# url: http://prometheus-prometheus-server -# access: proxy -# isDefault: true -# - name: CloudWatch -# type: cloudwatch -# access: proxy -# uid: cloudwatch -# editable: false -# jsonData: -# authType: default -# defaultRegion: us-east-1 -# deleteDatasources: [] -# - name: Prometheus - -## Configure grafana alerting (can be templated) -## ref: http://docs.grafana.org/administration/provisioning/#alerting -## -alerting: {} - # rules.yaml: - # apiVersion: 1 - # groups: - # - orgId: 1 - # name: '{{ .Chart.Name }}_my_rule_group' - # folder: my_first_folder - # interval: 60s - # rules: - # - uid: my_id_1 - # title: my_first_rule - # condition: A - # data: - # - refId: A - # datasourceUid: '-100' - # model: - # conditions: - # - evaluator: - # params: - # - 3 - # type: gt - # operator: - # type: and - # query: - # params: - # - A - # reducer: - # type: last - # type: query - # datasource: - # type: __expr__ - # uid: '-100' - # expression: 1==0 - # intervalMs: 1000 - # maxDataPoints: 43200 - # refId: A - # type: math - # dashboardUid: my_dashboard - # panelId: 123 - # noDataState: Alerting - # for: 60s - # annotations: - # some_key: some_value - # labels: - # team: sre_team_1 - # contactpoints.yaml: - # secret: - # apiVersion: 1 - # contactPoints: - # - orgId: 1 - # name: cp_1 - # receivers: - # - uid: first_uid - # type: pagerduty - # settings: - # integrationKey: XXX - # severity: critical - # class: ping failure - # component: Grafana - # group: app-stack - # summary: | - # {{ `{{ include "default.message" . }}` }} - -## Configure notifiers -## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels -## -notifiers: {} -# notifiers.yaml: -# notifiers: -# - name: email-notifier -# type: email -# uid: email1 -# # either: -# org_id: 1 -# # or -# org_name: Main Org. -# is_default: true -# settings: -# addresses: an_email_address@example.com -# delete_notifiers: - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: {} -# dashboardproviders.yaml: -# apiVersion: 1 -# providers: -# - name: 'default' -# orgId: 1 -# folder: '' -# type: file -# disableDeletion: false -# editable: true -# options: -# path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. 
-## -dashboards: {} - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # token: '' - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # token: '' - # b64content: true - # local-dashboard-gitlab: - # url: https://example.com/repository/test-gitlab.json - # gitlabToken: '' - # local-dashboard-bitbucket: - # url: https://example.com/repository/test-bitbucket.json - # bearerToken: '' - # local-dashboard-azure: - # url: https://example.com/repository/test-azure.json - # basic: '' - # acceptHeader: '*/*' - -## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. -## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. -## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -## -dashboardsConfigMaps: {} -# default: "" - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/ - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net - server: - domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" -## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://api.github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. 
- existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. - existingSecret: "" - userKey: "user" - passwordKey: "password" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: - # -- The Docker registry - registry: quay.io - repository: kiwigrid/k8s-sidecar - tag: 1.25.2 - sha: "" - imagePullPolicy: IfNotPresent - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - # skipTlsVerify Set to true to skip tls verification for kube api calls - # skipTlsVerify: true - enableUniqueFilenames: false - readinessProbe: {} - livenessProbe: {} - # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO - # logLevel: INFO - alerts: - enabled: false - # Additional environment variables for the alerts sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with alert are marked with - label: grafana_alert - # value of label that the configmaps with alert are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for alert config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" - # Absolute path to shell script to execute after a alert got reloaded - script: null - skipReload: false - # This is needed if skipReload is true, to load any alerts defined at startup time. - # Deploy the alert sidecar as an initContainer. 
- initAlerts: false - # Additional alert sidecar volume mounts - extraMounts: [] - # Sets the size limit of the alert sidecar emptyDir volume - sizeLimit: {} - dashboards: + ingress: enabled: false - # Additional environment variables for the dashboards sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - SCProvider: true - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # value of label that the configmaps with dashboards are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) - folder: /tmp/dashboards - # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead - defaultFolderName: null - # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces. - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # If specified, the sidecar will look for annotation with this name to create folder and put graph here. - # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. - folderAnnotation: null - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" - # Absolute path to shell script to execute after a configmap got reloaded - script: null - skipReload: false - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # provider configuration that lets grafana manage the dashboards - provider: - # name of the provider, should be unique - name: sidecarProvider - # orgid as configured in grafana - orgid: 1 - # folder in which the dashboards should be imported in grafana - folder: '' - # type of the provider - type: file - # disableDelete to activate a import-only behaviour - disableDelete: false - # allow updating provisioned dashboards from the UI - allowUiUpdates: false - # allow Grafana to replicate dashboard structure from filesystem - foldersFromFilesStructure: false - # Additional dashboard sidecar volume mounts - extraMounts: [] - # Sets the size limit of the dashboard sidecar emptyDir volume - sizeLimit: {} - datasources: - enabled: false - # Additional environment variables for the datasourcessidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. 
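# Illustrative sketch (not part of this chart or patch; names and dashboard JSON are
# hypothetical): the dashboards sidecar described above collects ConfigMaps carrying
# the configured label (grafana_dashboard by default, any label value accepted), e.g.:
#
# apiVersion: v1
# kind: ConfigMap
# metadata:
#   name: my-dashboard
#   labels:
#     grafana_dashboard: "1"
# data:
#   my-dashboard.json: |
#     { "title": "My dashboard", "uid": "my-dashboard", "panels": [] }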
- # ignoreAlreadyProcessed: true - # label that the configmaps with datasources are marked with - label: grafana_datasource - # value of label that the configmaps with datasources are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for datasource config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload datasources - reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" - # Absolute path to shell script to execute after a datasource got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any datasources defined at startup time. - initDatasources: false - # Sets the size limit of the datasource sidecar emptyDir volume - sizeLimit: {} - plugins: - enabled: false - # Additional environment variables for the plugins sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with plugins are marked with - label: grafana_plugin - # value of label that the configmaps with plugins are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for plugin config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) 
- # watchClientTimeout: 60 - # - # Endpoint to send request to reload plugins - reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" - # Absolute path to shell script to execute after a plugin got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any plugins defined at startup time. - initPlugins: false - # Sets the size limit of the plugin sidecar emptyDir volume - sizeLimit: {} - notifiers: - enabled: false - # Additional environment variables for the notifierssidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with notifiers are marked with - label: grafana_notifier - # value of label that the configmaps with notifiers are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for notifier config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload notifiers - reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" - # Absolute path to shell script to execute after a notifier got reloaded - script: null - skipReload: false - # Deploy the notifier sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any notifiers defined at startup time. 
- initNotifiers: false - # Sets the size limit of the notifier sidecar emptyDir volume - sizeLimit: {} - -## Override the deployment namespace -## -namespaceOverride: "" + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: letsencrypt-prod + # cert-manager.io/cluster-issuer: letsencrypt-staging + + # https://cert-manager.io/docs/faq/acme/#:~:text=If%20you%20receive%20a%20tls,the%20actual%20certificate%20is%20issued + cert-manager.io/issue-temporary-certificate: "true" + acme.cert-manager.io/http01-edit-in-place: "true" -## Number of old ReplicaSets to retain -## -revisionHistoryLimit: 10 - -## Add a seperate remote image renderer deployment/service -imageRenderer: - deploymentStrategy: {} - # Enable the image-renderer deployment & service - enabled: false - replicas: 1 - autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - image: - # -- The Docker registry - registry: docker.io - # image-renderer Image repository - repository: grafana/grafana-image-renderer - # image-renderer Image tag - tag: latest - # image-renderer Image sha (optional) - sha: "" - # image-renderer ImagePullPolicy - pullPolicy: Always - # extra environment variables - env: - HTTP_HOST: "0.0.0.0" - # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 - # RENDERING_MODE: clustered - # IGNORE_HTTPS_ERRORS: true - - ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. - ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core - ## Renders in container spec as: - ## env: - ## ... - ## - name: - ## valueFrom: - ## - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - - # image-renderer deployment serviceAccount - serviceAccountName: "" - # image-renderer deployment securityContext - securityContext: {} - # image-renderer deployment container securityContext - containerSecurityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - drop: ['ALL'] - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - ## image-renderer pod annotation - podAnnotations: {} - # image-renderer deployment Host Aliases - hostAliases: [] - # image-renderer deployment priority class - priorityClassName: '' - service: - # Enable the image-renderer service - enabled: true - # image-renderer service port name - portName: 'http' - # image-renderer service port used by both service and deployment - port: 8081 - targetPort: 8081 - # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. 
Ex: "http" or "tcp" - appProtocol: "" - serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) labels: {} - interval: 1m - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels - targetLabels: [] - # - targetLabel1 - # - targetLabel2 - # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana - grafanaProtocol: http - # In case a sub_path is used this needs to be added to the image renderer callback - grafanaSubPath: "" - # name of the image-renderer port on the pod - podPortName: http - # number of image-renderer replica sets to keep - revisionHistoryLimit: 10 - networkPolicy: - # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods - limitIngress: true - # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods - limitEgress: false - # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) - extraIngressSelectors: [] - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - ## Node labels for pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - # - nodeSelector: {} - - ## Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## Affinity for pod assignment (evaluated as template) - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: "default-scheduler" - -networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. - ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to grafana port defined. - ## When true, grafana will accept connections from any source - ## (with the correct destination port). - ## - ingress: true - ## @param networkPolicy.ingress When true enables the creation - ## an ingress network policy - ## - allowExternal: true - ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed - ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the grafana. - ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. 
- ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} + path: / + + # pathType is only for k8s > 1.19 + pathType: Prefix + + hosts: + - grafana.dev.bitovi-staffing.com + + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: service + + + tls: + - secretName: grafana-tls + hosts: + - grafana.dev.bitovi-staffing.com + + # Administrator credentials when not using an existing secret (see below) + adminUser: admin + # adminPassword: set via values-secrets.yaml + + # Use an existing secret for the admin user. + admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + + ## Configure grafana dashboard providers + ## ref: http://docs.grafana.org/administration/provisioning/#dashboards + ## + ## `path` must be /var/lib/grafana/dashboards/ + ## + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'kubernetes' + orgId: 1 + folder: 'kubernetes' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/kubernetes + ## Configure grafana dashboard to import + ## NOTE: To use dashboards you must also enable/configure dashboardProviders + ## ref: https://grafana.com/dashboards + ## + ## dashboards per provider, use provider name as key. ## - explicitNamespacesSelector: {} - ## - ## - ## - ## - ## - ## - egress: - ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be - ## created allowing grafana to connect to external data sources from kubernetes cluster. 
+ dashboards: + # https://github.com/dotdc/grafana-dashboards-kubernetes + kubernetes: + k8s-addons-starboard-operator: + # k8s-addons-starboard-operator.json + gnetId: 16337 + datasource: Prometheus + k8s-system-api-server: + # k8s-system-api-server.json + gnetId: 15761 + datasource: Prometheus + k8s-system-coredns: + # k8s-system-coredns.json + gnetId: 15762 + datasource: Prometheus + k8s-views-global: + # k8s-views-global.json + gnetId: 15757 + datasource: Prometheus + k8s-views-namespaces: + # k8s-views-namespaces.json + gnetId: 15758 + datasource: Prometheus + k8s-views-nodes: + # k8s-views-nodes.json + gnetId: 15759 + datasource: Prometheus + k8s-views-pods: + # k8s-views-pods.json + gnetId: 15760 + datasource: Prometheus + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + + ## Grafana's primary configuration + ## NOTE: values in map will be converted to ini format + ## ref: http://docs.grafana.org/installation/configuration/ + ## + grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + ## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: + ## LDAP Authentication can be enabled with the following values on grafana.ini + ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + + ## Grafana's LDAP configuration + ## Templated by the template in _helpers.tpl + ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled + ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap + ## ref: http://docs.grafana.org/installation/ldap/#configuration + ldap: enabled: false - ## - ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked - ## for all pods in the grafana namespace. - blockDNSResolution: false - ## - ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress - ports: [] - ## Add ports to the egress by specifying - port: - ## E.X. - ## - port: 80 - ## - port: 443 - ## - ## @param networkPolicy.egress.to Allow egress traffic to specific destinations - to: [] - ## Add destinations to the egress by specifying - ipBlock: - ## E.X. 
- ## to: - ## - namespaceSelector: - ## matchExpressions: - ## - {key: role, operator: In, values: [grafana]} - ## - ## - ## - ## - ## - -# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option -enableKubeBackwardCompatibility: false -useStatefulSet: false -# Create a dynamic manifests via values: -extraObjects: [] - # - apiVersion: "kubernetes-client.io/v1" - # kind: ExternalSecret - # metadata: - # name: grafana-secrets - # spec: - # backendType: gcpSecretsManager - # data: - # - key: grafana-admin-password - # name: adminPassword + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + + ## Grafana's SMTP configuration + ## NOTE: To enable, grafana.ini must be configured with smtp.enabled + ## ref: http://docs.grafana.org/installation/configuration/#smtp + smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" From 6d4e95392c85256f1062093418a6cdc8f9f59d31 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Fri, 8 Dec 2023 18:03:35 +0530 Subject: [PATCH 23/99] try with aws auth --- .../deployment/helm/aws-auth/Chart.yaml | 5 +++++ .../helm/aws-auth/bitops.config.yaml | 14 ++++++++++++ .../helm/aws-auth/example.bitops.config.yaml | 15 +++++++++++++ .../helm/aws-auth/templates/aws-auth.yaml | 22 +++++++++++++++++++ .../deployment/helm/aws-auth/values.yaml | 21 ++++++++++++++++++ 5 files changed, 77 insertions(+) create mode 100644 operations/deployment/helm/aws-auth/Chart.yaml create mode 100644 operations/deployment/helm/aws-auth/bitops.config.yaml create mode 100644 operations/deployment/helm/aws-auth/example.bitops.config.yaml create mode 100644 operations/deployment/helm/aws-auth/templates/aws-auth.yaml create mode 100644 operations/deployment/helm/aws-auth/values.yaml diff --git a/operations/deployment/helm/aws-auth/Chart.yaml b/operations/deployment/helm/aws-auth/Chart.yaml new file mode 100644 index 0000000..db7b2d3 --- /dev/null +++ b/operations/deployment/helm/aws-auth/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: "1.0" +description: aws-auth configmap for EKS +name: aws-auth +version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/bitops.config.yaml b/operations/deployment/helm/aws-auth/bitops.config.yaml new file mode 100644 index 0000000..60d7548 --- /dev/null +++ b/operations/deployment/helm/aws-auth/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/example.bitops.config.yaml b/operations/deployment/helm/aws-auth/example.bitops.config.yaml new file mode 100644 index 0000000..eddb4b4 --- /dev/null +++ b/operations/deployment/helm/aws-auth/example.bitops.config.yaml @@ -0,0 +1,15 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + 
dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true + cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml new file mode 100644 index 0000000..e41dea6 --- /dev/null +++ b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml @@ -0,0 +1,22 @@ +{{- if .Values.configmap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.configmap.name }} +data: +{{- if .Values.data.mapAccounts }} + mapAccounts: | + {{ .Values.data.mapAccounts| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapRoles }} + mapRoles: | + {{ .Values.data.mapRoles| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapUsers }} + mapUsers: | + {{ .Values.data.mapUsers| nindent 4 | trim }} +{{- end }} + +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/values.yaml b/operations/deployment/helm/aws-auth/values.yaml new file mode 100644 index 0000000..90f836a --- /dev/null +++ b/operations/deployment/helm/aws-auth/values.yaml @@ -0,0 +1,21 @@ +configmap: + enabled: true + name: aws-auth +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file From 739f7223e50ce71f6785311e2eba937fa0c79f74 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 19:53:08 +0530 Subject: [PATCH 24/99] Test without charts --- .../deployment/helm/aws-auth/Chart.yaml | 5 - .../helm/aws-auth/bitops.config.yaml | 14 -- .../helm/aws-auth/example.bitops.config.yaml | 15 -- .../helm/aws-auth/templates/aws-auth.yaml | 22 -- .../deployment/helm/aws-auth/values.yaml | 21 -- operations/deployment/helm/grafana/Chart.yaml | 9 - .../helm/grafana/bitops.config.yaml | 16 -- .../deployment/helm/grafana/values.yaml | 218 ------------------ 8 files changed, 320 deletions(-) delete mode 100644 operations/deployment/helm/aws-auth/Chart.yaml delete mode 100644 operations/deployment/helm/aws-auth/bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth/templates/aws-auth.yaml delete mode 100644 operations/deployment/helm/aws-auth/values.yaml delete mode 100644 operations/deployment/helm/grafana/Chart.yaml delete mode 100644 operations/deployment/helm/grafana/bitops.config.yaml delete mode 100644 operations/deployment/helm/grafana/values.yaml diff --git a/operations/deployment/helm/aws-auth/Chart.yaml b/operations/deployment/helm/aws-auth/Chart.yaml deleted file mode 100644 index db7b2d3..0000000 --- a/operations/deployment/helm/aws-auth/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth -version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/bitops.config.yaml b/operations/deployment/helm/aws-auth/bitops.config.yaml deleted file mode 100644 index 60d7548..0000000 --- a/operations/deployment/helm/aws-auth/bitops.config.yaml +++ 
/dev/null @@ -1,14 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth - skip-deploy: false - k8s: - fetch: - kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/example.bitops.config.yaml b/operations/deployment/helm/aws-auth/example.bitops.config.yaml deleted file mode 100644 index eddb4b4..0000000 --- a/operations/deployment/helm/aws-auth/example.bitops.config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml deleted file mode 100644 index e41dea6..0000000 --- a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.configmap.name }} -data: -{{- if .Values.data.mapAccounts }} - mapAccounts: | - {{ .Values.data.mapAccounts| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapRoles }} - mapRoles: | - {{ .Values.data.mapRoles| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapUsers }} - mapUsers: | - {{ .Values.data.mapUsers| nindent 4 | trim }} -{{- end }} - -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/values.yaml b/operations/deployment/helm/aws-auth/values.yaml deleted file mode 100644 index 90f836a..0000000 --- a/operations/deployment/helm/aws-auth/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -configmap: - enabled: true - name: aws-auth -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml deleted file mode 100644 index 12e9ab9..0000000 --- a/operations/deployment/helm/grafana/Chart.yaml +++ /dev/null @@ -1,9 +0,0 @@ -apiVersion: v2 -name: grafana chart wrapper -description: A Helm chart wrapper for grafana -version: 0.1.0 - -dependencies: -- name: grafana - version: 6.31.1 - repository: https://grafana.github.io/helm-charts/ \ No newline at end of file diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml deleted file mode 100644 index 4dd878a..0000000 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ /dev/null @@ -1,16 +0,0 @@ -helm: - cli: - namespace: grafana - timeout: 120s - debug: true - atomic: true - force: false - dry-run: false - options: - release-name: grafana - skip-deploy: false - kubeconfig: - fetch: - enabled: true - cluster-name: recruiting-recruiting-recruiting-ekscluster - diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml deleted file mode 
100644 index 3b339df..0000000 --- a/operations/deployment/helm/grafana/values.yaml +++ /dev/null @@ -1,218 +0,0 @@ -grafana: - replicas: 1 - - image: - repository: grafana/grafana - tag: 9.0.1 - sha: "" - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistrKeySecretName - - serviceAccount: - autoMount: true - - ingress: - enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - # Values can be templated - annotations: - kubernetes.io/ingress.class: nginx - cert-manager.io/cluster-issuer: letsencrypt-prod - # cert-manager.io/cluster-issuer: letsencrypt-staging - - # https://cert-manager.io/docs/faq/acme/#:~:text=If%20you%20receive%20a%20tls,the%20actual%20certificate%20is%20issued - cert-manager.io/issue-temporary-certificate: "true" - acme.cert-manager.io/http01-edit-in-place: "true" - - labels: {} - path: / - - # pathType is only for k8s > 1.19 - pathType: Prefix - - hosts: - - grafana.dev.bitovi-staffing.com - - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## Or for k8s > 1.19 - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: service - - - tls: - - secretName: grafana-tls - hosts: - - grafana.dev.bitovi-staffing.com - - # Administrator credentials when not using an existing secret (see below) - adminUser: admin - # adminPassword: set via values-secrets.yaml - - # Use an existing secret for the admin user. - admin: - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - - ## Configure grafana dashboard providers - ## ref: http://docs.grafana.org/administration/provisioning/#dashboards - ## - ## `path` must be /var/lib/grafana/dashboards/ - ## - dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'kubernetes' - orgId: 1 - folder: 'kubernetes' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/kubernetes - ## Configure grafana dashboard to import - ## NOTE: To use dashboards you must also enable/configure dashboardProviders - ## ref: https://grafana.com/dashboards - ## - ## dashboards per provider, use provider name as key. 
- ## - dashboards: - # https://github.com/dotdc/grafana-dashboards-kubernetes - kubernetes: - k8s-addons-starboard-operator: - # k8s-addons-starboard-operator.json - gnetId: 16337 - datasource: Prometheus - k8s-system-api-server: - # k8s-system-api-server.json - gnetId: 15761 - datasource: Prometheus - k8s-system-coredns: - # k8s-system-coredns.json - gnetId: 15762 - datasource: Prometheus - k8s-views-global: - # k8s-views-global.json - gnetId: 15757 - datasource: Prometheus - k8s-views-namespaces: - # k8s-views-namespaces.json - gnetId: 15758 - datasource: Prometheus - k8s-views-nodes: - # k8s-views-nodes.json - gnetId: 15759 - datasource: Prometheus - k8s-views-pods: - # k8s-views-pods.json - gnetId: 15760 - datasource: Prometheus - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # token: '' - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # token: '' - # b64content: true - - ## Grafana's primary configuration - ## NOTE: values in map will be converted to ini format - ## ref: http://docs.grafana.org/installation/configuration/ - ## - grafana.ini: - paths: - data: /var/lib/grafana/data - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net - ## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://api.github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: - ## LDAP Authentication can be enabled with the following values on grafana.ini - ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - - ## Grafana's LDAP configuration - ## Templated by the template in _helpers.tpl - ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled - ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap - ## ref: http://docs.grafana.org/installation/ldap/#configuration - ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. - existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - - ## Grafana's SMTP configuration - ## NOTE: To enable, grafana.ini must be configured with smtp.enabled - ## ref: http://docs.grafana.org/installation/configuration/#smtp - smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. 
- existingSecret: "" - userKey: "user" - passwordKey: "password" From 4245635dd2eb029efa76cc744f85a5c353250a82 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 20:00:20 +0530 Subject: [PATCH 25/99] Test after destroying --- operations/deployment/helm/grafana/Chart.yaml | 9 + .../helm/grafana/bitops.config.yaml | 16 ++ .../deployment/helm/grafana/values.yaml | 218 ++++++++++++++++++ 3 files changed, 243 insertions(+) create mode 100644 operations/deployment/helm/grafana/Chart.yaml create mode 100644 operations/deployment/helm/grafana/bitops.config.yaml create mode 100644 operations/deployment/helm/grafana/values.yaml diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml new file mode 100644 index 0000000..12e9ab9 --- /dev/null +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -0,0 +1,9 @@ +apiVersion: v2 +name: grafana chart wrapper +description: A Helm chart wrapper for grafana +version: 0.1.0 + +dependencies: +- name: grafana + version: 6.31.1 + repository: https://grafana.github.io/helm-charts/ \ No newline at end of file diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml new file mode 100644 index 0000000..4dd878a --- /dev/null +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -0,0 +1,16 @@ +helm: + cli: + namespace: grafana + timeout: 120s + debug: true + atomic: true + force: false + dry-run: false + options: + release-name: grafana + skip-deploy: false + kubeconfig: + fetch: + enabled: true + cluster-name: recruiting-recruiting-recruiting-ekscluster + diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml new file mode 100644 index 0000000..3b339df --- /dev/null +++ b/operations/deployment/helm/grafana/values.yaml @@ -0,0 +1,218 @@ +grafana: + replicas: 1 + + image: + repository: grafana/grafana + tag: 9.0.1 + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + + serviceAccount: + autoMount: true + + ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: letsencrypt-prod + # cert-manager.io/cluster-issuer: letsencrypt-staging + + # https://cert-manager.io/docs/faq/acme/#:~:text=If%20you%20receive%20a%20tls,the%20actual%20certificate%20is%20issued + cert-manager.io/issue-temporary-certificate: "true" + acme.cert-manager.io/http01-edit-in-place: "true" + + labels: {} + path: / + + # pathType is only for k8s > 1.19 + pathType: Prefix + + hosts: + - grafana.dev.bitovi-staffing.com + + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: service + + + tls: + - secretName: grafana-tls + hosts: + - grafana.dev.bitovi-staffing.com + + # Administrator credentials when not using an existing secret (see below) + adminUser: admin + # adminPassword: set via values-secrets.yaml + + # Use an existing secret for the admin user. + admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + + ## Configure grafana dashboard providers + ## ref: http://docs.grafana.org/administration/provisioning/#dashboards + ## + ## `path` must be /var/lib/grafana/dashboards/ + ## + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'kubernetes' + orgId: 1 + folder: 'kubernetes' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/kubernetes + ## Configure grafana dashboard to import + ## NOTE: To use dashboards you must also enable/configure dashboardProviders + ## ref: https://grafana.com/dashboards + ## + ## dashboards per provider, use provider name as key. + ## + dashboards: + # https://github.com/dotdc/grafana-dashboards-kubernetes + kubernetes: + k8s-addons-starboard-operator: + # k8s-addons-starboard-operator.json + gnetId: 16337 + datasource: Prometheus + k8s-system-api-server: + # k8s-system-api-server.json + gnetId: 15761 + datasource: Prometheus + k8s-system-coredns: + # k8s-system-coredns.json + gnetId: 15762 + datasource: Prometheus + k8s-views-global: + # k8s-views-global.json + gnetId: 15757 + datasource: Prometheus + k8s-views-namespaces: + # k8s-views-namespaces.json + gnetId: 15758 + datasource: Prometheus + k8s-views-nodes: + # k8s-views-nodes.json + gnetId: 15759 + datasource: Prometheus + k8s-views-pods: + # k8s-views-pods.json + gnetId: 15760 + datasource: Prometheus + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + + ## Grafana's primary configuration + ## NOTE: values in map will be converted to ini format + ## ref: http://docs.grafana.org/installation/configuration/ + ## + grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + ## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: + ## LDAP Authentication can be enabled with the following values on grafana.ini + ## NOTE: Grafana will fail to start if the value 
for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + + ## Grafana's LDAP configuration + ## Templated by the template in _helpers.tpl + ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled + ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap + ## ref: http://docs.grafana.org/installation/ldap/#configuration + ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + + ## Grafana's SMTP configuration + ## NOTE: To enable, grafana.ini must be configured with smtp.enabled + ## ref: http://docs.grafana.org/installation/configuration/#smtp + smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" From 10f87ffdee8cb17ca9f5b95a9e1ef7da6bb6bba4 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 21:16:52 +0530 Subject: [PATCH 26/99] Test after destroying --- .../deployment/helm/grafana/.helmignore | 23 + operations/deployment/helm/grafana/Chart.yaml | 40 +- operations/deployment/helm/grafana/README.md | 757 +++++++++ .../helm/grafana/bitops.config.yaml | 16 - .../helm/grafana/ci/default-values.yaml | 1 + .../helm/grafana/ci/with-affinity-values.yaml | 16 + .../ci/with-dashboard-json-values.yaml | 53 + .../grafana/ci/with-dashboard-values.yaml | 19 + .../ci/with-extraconfigmapmounts-values.yaml | 7 + .../ci/with-image-renderer-values.yaml | 19 + .../helm/grafana/ci/with-persistence.yaml | 3 + .../grafana/dashboards/custom-dashboard.json | 1 + .../helm/grafana/templates/NOTES.txt | 55 + .../helm/grafana/templates/_helpers.tpl | 227 +++ .../helm/grafana/templates/_pod.tpl | 1276 ++++++++++++++ .../helm/grafana/templates/clusterrole.yaml | 25 + .../grafana/templates/clusterrolebinding.yaml | 24 + .../helm/grafana/templates/configSecret.yaml | 43 + .../configmap-dashboard-provider.yaml | 29 + .../helm/grafana/templates/configmap.yaml | 144 ++ .../templates/dashboards-json-configmap.yaml | 38 + .../helm/grafana/templates/deployment.yaml | 51 + .../grafana/templates/extra-manifests.yaml | 4 + .../grafana/templates/headless-service.yaml | 22 + .../helm/grafana/templates/hpa.yaml | 52 + .../templates/image-renderer-deployment.yaml | 131 ++ .../grafana/templates/image-renderer-hpa.yaml | 47 + .../image-renderer-network-policy.yaml | 79 + .../templates/image-renderer-service.yaml | 31 + .../image-renderer-servicemonitor.yaml | 48 + .../helm/grafana/templates/ingress.yaml | 78 + .../helm/grafana/templates/networkpolicy.yaml | 61 + .../templates/poddisruptionbudget.yaml | 22 + .../grafana/templates/podsecuritypolicy.yaml | 49 + .../helm/grafana/templates/pvc.yaml | 36 + .../helm/grafana/templates/role.yaml | 32 + .../helm/grafana/templates/rolebinding.yaml | 25 + .../helm/grafana/templates/secret-env.yaml | 14 + .../helm/grafana/templates/secret.yaml | 26 + .../helm/grafana/templates/service.yaml | 58 + .../grafana/templates/serviceaccount.yaml | 17 + .../grafana/templates/servicemonitor.yaml | 52 + 
.../helm/grafana/templates/statefulset.yaml | 56 + .../templates/tests/test-configmap.yaml | 20 + .../tests/test-podsecuritypolicy.yaml | 32 + .../grafana/templates/tests/test-role.yaml | 17 + .../templates/tests/test-rolebinding.yaml | 20 + .../templates/tests/test-serviceaccount.yaml | 12 + .../helm/grafana/templates/tests/test.yaml | 49 + .../deployment/helm/grafana/values.yaml | 1482 ++++++++++++++--- 50 files changed, 5206 insertions(+), 233 deletions(-) create mode 100644 operations/deployment/helm/grafana/.helmignore create mode 100644 operations/deployment/helm/grafana/README.md delete mode 100644 operations/deployment/helm/grafana/bitops.config.yaml create mode 100644 operations/deployment/helm/grafana/ci/default-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-affinity-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml create mode 100644 operations/deployment/helm/grafana/ci/with-persistence.yaml create mode 100644 operations/deployment/helm/grafana/dashboards/custom-dashboard.json create mode 100644 operations/deployment/helm/grafana/templates/NOTES.txt create mode 100644 operations/deployment/helm/grafana/templates/_helpers.tpl create mode 100644 operations/deployment/helm/grafana/templates/_pod.tpl create mode 100644 operations/deployment/helm/grafana/templates/clusterrole.yaml create mode 100644 operations/deployment/helm/grafana/templates/clusterrolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/configSecret.yaml create mode 100644 operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml create mode 100644 operations/deployment/helm/grafana/templates/configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/deployment.yaml create mode 100644 operations/deployment/helm/grafana/templates/extra-manifests.yaml create mode 100644 operations/deployment/helm/grafana/templates/headless-service.yaml create mode 100644 operations/deployment/helm/grafana/templates/hpa.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-service.yaml create mode 100644 operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml create mode 100644 operations/deployment/helm/grafana/templates/ingress.yaml create mode 100644 operations/deployment/helm/grafana/templates/networkpolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml create mode 100644 operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/pvc.yaml create mode 100644 operations/deployment/helm/grafana/templates/role.yaml create mode 100644 operations/deployment/helm/grafana/templates/rolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/secret-env.yaml create mode 
100644 operations/deployment/helm/grafana/templates/secret.yaml create mode 100644 operations/deployment/helm/grafana/templates/service.yaml create mode 100644 operations/deployment/helm/grafana/templates/serviceaccount.yaml create mode 100644 operations/deployment/helm/grafana/templates/servicemonitor.yaml create mode 100644 operations/deployment/helm/grafana/templates/statefulset.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-configmap.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-role.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml create mode 100644 operations/deployment/helm/grafana/templates/tests/test.yaml diff --git a/operations/deployment/helm/grafana/.helmignore b/operations/deployment/helm/grafana/.helmignore new file mode 100644 index 0000000..8cade13 --- /dev/null +++ b/operations/deployment/helm/grafana/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.vscode +.project +.idea/ +*.tmproj +OWNERS diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml index 12e9ab9..adf084f 100644 --- a/operations/deployment/helm/grafana/Chart.yaml +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -1,9 +1,33 @@ apiVersion: v2 -name: grafana chart wrapper -description: A Helm chart wrapper for grafana -version: 0.1.0 - -dependencies: -- name: grafana - version: 6.31.1 - repository: https://grafana.github.io/helm-charts/ \ No newline at end of file +name: grafana +version: 7.0.14 +appVersion: 10.2.2 +kubeVersion: "^1.8.0-0" +description: The leading tool for querying and visualizing time series and metrics. 
+home: https://grafana.com +icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 +sources: + - https://github.com/grafana/grafana + - https://github.com/grafana/helm-charts +annotations: + "artifacthub.io/license": AGPL-3.0-only + "artifacthub.io/links": | + - name: Chart Source + url: https://github.com/grafana/helm-charts + - name: Upstream Project + url: https://github.com/grafana/grafana +maintainers: + - name: zanhsieh + email: zanhsieh@gmail.com + - name: rtluckie + email: rluckie@cisco.com + - name: maorfr + email: maor.friedman@redhat.com + - name: Xtigyro + email: miroslav.hadzhiev@gmail.com + - name: torstenwalter + email: mail@torstenwalter.de +type: application +keywords: + - monitoring + - metric diff --git a/operations/deployment/helm/grafana/README.md b/operations/deployment/helm/grafana/README.md new file mode 100644 index 0000000..5420545 --- /dev/null +++ b/operations/deployment/helm/grafana/README.md @@ -0,0 +1,757 @@ +# Grafana Helm Chart + +* Installs the web dashboarding system [Grafana](http://grafana.org/) + +## Get Repo Info + +```console +helm repo add grafana https://grafana.github.io/helm-charts +helm repo update +``` + +_See [helm repo](https://helm.sh/docs/helm/helm_repo/) for command documentation._ + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release grafana/grafana +``` + +## Uninstalling the Chart + +To uninstall/delete the my-release deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 4.0.0 (And 3.12.1) + +This version requires Helm >= 2.12.0. + +### To 5.0.0 + +You have to add --force to your helm upgrade command as the labels of the chart have changed. + +### To 6.0.0 + +This version requires Helm >= 3.1.0. + +### To 7.0.0 + +For consistency with other Helm charts, the `global.image.registry` parameter was renamed +to `global.imageRegistry`. If you were not previously setting `global.image.registry`, no action +is required on upgrade. If you were previously setting `global.image.registry`, you will +need to instead set `global.imageRegistry`. 
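+
+As a minimal before/after sketch of this rename in a values file (the registry host below is only a placeholder):
+
+```yaml
+# Chart < 7.0.0: registry override lived under global.image.registry
+global:
+  image:
+    registry: my-registry.example.com   # placeholder registry host
+
+# Chart >= 7.0.0: the same override moves to global.imageRegistry
+global:
+  imageRegistry: my-registry.example.com
+```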
+ +## Configuration + +| Parameter | Description | Default | +|-------------------------------------------|-----------------------------------------------|---------------------------------------------------------| +| `replicas` | Number of nodes | `1` | +| `podDisruptionBudget.minAvailable` | Pod disruption minimum available | `nil` | +| `podDisruptionBudget.maxUnavailable` | Pod disruption maximum unavailable | `nil` | +| `podDisruptionBudget.apiVersion` | Pod disruption apiVersion | `nil` | +| `deploymentStrategy` | Deployment strategy | `{ "type": "RollingUpdate" }` | +| `livenessProbe` | Liveness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } "initialDelaySeconds": 60, "timeoutSeconds": 30, "failureThreshold": 10 }` | +| `readinessProbe` | Readiness Probe settings | `{ "httpGet": { "path": "/api/health", "port": 3000 } }`| +| `securityContext` | Deployment securityContext | `{"runAsUser": 472, "runAsGroup": 472, "fsGroup": 472}` | +| `priorityClassName` | Name of Priority Class to assign pods | `nil` | +| `image.registry` | Image registry | `docker.io` | +| `image.repository` | Image repository | `grafana/grafana` | +| `image.tag` | Overrides the Grafana image tag whose default is the chart appVersion (`Must be >= 5.0.0`) | `` | +| `image.sha` | Image sha (optional) | `` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Image pull secrets (can be templated) | `[]` | +| `service.enabled` | Enable grafana service | `true` | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.port` | Kubernetes port where service is exposed | `80` | +| `service.portName` | Name of the port on the service | `service` | +| `service.appProtocol` | Adds the appProtocol field to the service | `` | +| `service.targetPort` | Internal service is port | `3000` | +| `service.nodePort` | Kubernetes service nodePort | `nil` | +| `service.annotations` | Service annotations (can be templated) | `{}` | +| `service.labels` | Custom labels | `{}` | +| `service.clusterIP` | internal cluster service IP | `nil` | +| `service.loadBalancerIP` | IP address to assign to load balancer (if supported) | `nil` | +| `service.loadBalancerSourceRanges` | list of IP CIDRs allowed access to lb (if supported) | `[]` | +| `service.externalIPs` | service external IP addresses | `[]` | +| `service.externalTrafficPolicy` | change the default externalTrafficPolicy | `nil` | +| `headlessService` | Create a headless service | `false` | +| `extraExposePorts` | Additional service ports for sidecar containers| `[]` | +| `hostAliases` | adds rules to the pod's /etc/hosts | `[]` | +| `ingress.enabled` | Enables Ingress | `false` | +| `ingress.annotations` | Ingress annotations (values are templated) | `{}` | +| `ingress.labels` | Custom labels | `{}` | +| `ingress.path` | Ingress accepted path | `/` | +| `ingress.pathType` | Ingress type of path | `Prefix` | +| `ingress.hosts` | Ingress accepted hostnames | `["chart-example.local"]` | +| `ingress.extraPaths` | Ingress extra paths to prepend to every host configuration. Useful when configuring [custom actions with AWS ALB Ingress Controller](https://kubernetes-sigs.github.io/aws-load-balancer-controller/v2.6/guide/ingress/annotations/#actions). Requires `ingress.hosts` to have one or more host entries. | `[]` | +| `ingress.tls` | Ingress TLS configuration | `[]` | +| `ingress.ingressClassName` | Ingress Class Name. 
MAY be required for Kubernetes versions >= 1.18 | `""` | +| `resources` | CPU/Memory resource requests/limits | `{}` | +| `nodeSelector` | Node labels for pod assignment | `{}` | +| `tolerations` | Toleration labels for pod assignment | `[]` | +| `affinity` | Affinity settings for pod assignment | `{}` | +| `extraInitContainers` | Init containers to add to the grafana pod | `{}` | +| `extraContainers` | Sidecar containers to add to the grafana pod | `""` | +| `extraContainerVolumes` | Volumes that can be mounted in sidecar containers | `[]` | +| `extraLabels` | Custom labels for all manifests | `{}` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `persistence.enabled` | Use persistent volume to store data | `false` | +| `persistence.type` | Type of persistence (`pvc` or `statefulset`) | `pvc` | +| `persistence.size` | Size of persistent volume claim | `10Gi` | +| `persistence.existingClaim` | Use an existing PVC to persist data (can be templated) | `nil` | +| `persistence.storageClassName` | Type of persistent volume claim | `nil` | +| `persistence.accessModes` | Persistence access modes | `[ReadWriteOnce]` | +| `persistence.annotations` | PersistentVolumeClaim annotations | `{}` | +| `persistence.finalizers` | PersistentVolumeClaim finalizers | `[ "kubernetes.io/pvc-protection" ]` | +| `persistence.extraPvcLabels` | Extra labels to apply to a PVC. | `{}` | +| `persistence.subPath` | Mount a sub dir of the persistent volume (can be templated) | `nil` | +| `persistence.inMemory.enabled` | If persistence is not enabled, whether to mount the local storage in-memory to improve performance | `false` | +| `persistence.inMemory.sizeLimit` | SizeLimit for the in-memory local storage | `nil` | +| `initChownData.enabled` | If false, don't reset data ownership at startup | true | +| `initChownData.image.registry` | init-chown-data container image registry | `docker.io` | +| `initChownData.image.repository` | init-chown-data container image repository | `busybox` | +| `initChownData.image.tag` | init-chown-data container image tag | `1.31.1` | +| `initChownData.image.sha` | init-chown-data container image sha (optional)| `""` | +| `initChownData.image.pullPolicy` | init-chown-data container image pull policy | `IfNotPresent` | +| `initChownData.resources` | init-chown-data pod resource requests & limits | `{}` | +| `schedulerName` | Alternate scheduler name | `nil` | +| `env` | Extra environment variables passed to pods | `{}` | +| `envValueFrom` | Environment variables from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. Can be templated | `{}` | +| `envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `""` | +| `envFromSecrets` | List of Kubernetes secrets (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envFromConfigMaps` | List of Kubernetes ConfigMaps (must be manually created in the same namespace) containing values to be added to the environment. Can be templated | `[]` | +| `envRenderSecret` | Sensible environment variables passed to pods and stored as secret. (passed through [tpl](https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function)) | `{}` | +| `enableServiceLinks` | Inject Kubernetes services as environment variables. 
| `true` | +| `extraSecretMounts` | Additional grafana server secret mounts | `[]` | +| `extraVolumeMounts` | Additional grafana server volume mounts | `[]` | +| `createConfigmap` | Enable creating the grafana configmap | `true` | +| `extraConfigmapMounts` | Additional grafana server configMap volume mounts (values are templated) | `[]` | +| `extraEmptyDirMounts` | Additional grafana server emptyDir volume mounts | `[]` | +| `plugins` | Plugins to be loaded along with Grafana | `[]` | +| `datasources` | Configure grafana datasources (passed through tpl) | `{}` | +| `alerting` | Configure grafana alerting (passed through tpl) | `{}` | +| `notifiers` | Configure grafana notifiers | `{}` | +| `dashboardProviders` | Configure grafana dashboard providers | `{}` | +| `dashboards` | Dashboards to import | `{}` | +| `dashboardsConfigMaps` | ConfigMaps reference that contains dashboards | `{}` | +| `grafana.ini` | Grafana's primary configuration | `{}` | +| `global.imageRegistry` | Global image pull registry for all images. | `null` | +| `global.imagePullSecrets` | Global image pull secrets (can be templated). Allows either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). | `[]` | +| `ldap.enabled` | Enable LDAP authentication | `false` | +| `ldap.existingSecret` | The name of an existing secret containing the `ldap.toml` file, this must have the key `ldap-toml`. | `""` | +| `ldap.config` | Grafana's LDAP configuration | `""` | +| `annotations` | Deployment annotations | `{}` | +| `labels` | Deployment labels | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `podLabels` | Pod labels | `{}` | +| `podPortName` | Name of the grafana port on the pod | `grafana` | +| `lifecycleHooks` | Lifecycle hooks for podStart and preStop [Example](https://kubernetes.io/docs/tasks/configure-pod-container/attach-handler-lifecycle-event/#define-poststart-and-prestop-handlers) | `{}` | +| `sidecar.image.registry` | Sidecar image registry | `quay.io` | +| `sidecar.image.repository` | Sidecar image repository | `kiwigrid/k8s-sidecar` | +| `sidecar.image.tag` | Sidecar image tag | `1.24.6` | +| `sidecar.image.sha` | Sidecar image sha (optional) | `""` | +| `sidecar.imagePullPolicy` | Sidecar image pull policy | `IfNotPresent` | +| `sidecar.resources` | Sidecar resources | `{}` | +| `sidecar.securityContext` | Sidecar securityContext | `{}` | +| `sidecar.enableUniqueFilenames` | Sets the kiwigrid/k8s-sidecar UNIQUE_FILENAMES environment variable. If set to `true` the sidecar will create unique filenames where duplicate data keys exist between ConfigMaps and/or Secrets within the same or multiple Namespaces. | `false` | +| `sidecar.alerts.enabled` | Enables the cluster wide search for alerts and adds/updates/deletes them in grafana |`false` | +| `sidecar.alerts.label` | Label that config maps with alerts should have to be added | `grafana_alert` | +| `sidecar.alerts.labelValue` | Label value that config maps with alerts should have to be added | `""` | +| `sidecar.alerts.searchNamespace` | Namespaces list. If specified, the sidecar will search for alerts config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.alerts.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. 
| `WATCH` | +| `sidecar.alerts.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.alerts.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/alerting/reload"` | +| `sidecar.alerts.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.alerts.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer. This is needed if skipReload is true, to load any alerts defined at startup time. | `false` | +| `sidecar.alerts.extraMounts` | Additional alerts sidecar volume mounts. | `[]` | +| `sidecar.dashboards.enabled` | Enables the cluster wide search for dashboards and adds/updates/deletes them in grafana | `false` | +| `sidecar.dashboards.SCProvider` | Enables creation of sidecar provider | `true` | +| `sidecar.dashboards.provider.name` | Unique name of the grafana provider | `sidecarProvider` | +| `sidecar.dashboards.provider.orgid` | Id of the organisation, to which the dashboards should be added | `1` | +| `sidecar.dashboards.provider.folder` | Logical folder in which grafana groups dashboards | `""` | +| `sidecar.dashboards.provider.disableDelete` | Activate to avoid the deletion of imported dashboards | `false` | +| `sidecar.dashboards.provider.allowUiUpdates` | Allow updating provisioned dashboards from the UI | `false` | +| `sidecar.dashboards.provider.type` | Provider type | `file` | +| `sidecar.dashboards.provider.foldersFromFilesStructure` | Allow Grafana to replicate dashboard structure from filesystem. | `false` | +| `sidecar.dashboards.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.skipTlsVerify` | Set to true to skip tls verification for kube api calls | `nil` | +| `sidecar.dashboards.label` | Label that config maps with dashboards should have to be added | `grafana_dashboard` | +| `sidecar.dashboards.labelValue` | Label value that config maps with dashboards should have to be added | `""` | +| `sidecar.dashboards.folder` | Folder in the pod that should hold the collected dashboards (unless `sidecar.dashboards.defaultFolderName` is set). This path will be mounted. | `/tmp/dashboards` | +| `sidecar.dashboards.folderAnnotation` | The annotation the sidecar will look for in configmaps to override the destination folder for files | `nil` | +| `sidecar.dashboards.defaultFolderName` | The default folder name, it will create a subfolder under the `sidecar.dashboards.folder` and put dashboards in there instead | `nil` | +| `sidecar.dashboards.searchNamespace` | Namespaces list. If specified, the sidecar will search for dashboards config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.dashboards.script` | Absolute path to shell script to execute after a configmap got reloaded. 
| `nil` | +| `sidecar.dashboards.reloadURL` | Full url of dashboards configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/dashboards/reload"` | +| `sidecar.dashboards.skipReload` | Enabling this omits defining the REQ_USERNAME, REQ_PASSWORD, REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.dashboards.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.dashboards.extraMounts` | Additional dashboard sidecar volume mounts. | `[]` | +| `sidecar.datasources.enabled` | Enables the cluster wide search for datasources and adds/updates/deletes them in grafana |`false` | +| `sidecar.datasources.label` | Label that config maps with datasources should have to be added | `grafana_datasource` | +| `sidecar.datasources.labelValue` | Label value that config maps with datasources should have to be added | `""` | +| `sidecar.datasources.searchNamespace` | Namespaces list. If specified, the sidecar will search for datasources config-maps inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.datasources.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.datasources.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.datasources.reloadURL` | Full url of datasource configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/datasources/reload"` | +| `sidecar.datasources.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.datasources.initDatasources` | Set to true to deploy the datasource sidecar as an initContainer in addition to a container. This is needed if skipReload is true, to load any datasources defined at startup time. | `false` | +| `sidecar.notifiers.enabled` | Enables the cluster wide search for notifiers and adds/updates/deletes them in grafana | `false` | +| `sidecar.notifiers.label` | Label that config maps with notifiers should have to be added | `grafana_notifier` | +| `sidecar.notifiers.labelValue` | Label value that config maps with notifiers should have to be added | `""` | +| `sidecar.notifiers.searchNamespace` | Namespaces list. If specified, the sidecar will search for notifiers config-maps (or secrets) inside these namespaces. Otherwise the namespace in which the sidecar is running will be used. It's also possible to specify ALL to search in all namespaces. | `nil` | +| `sidecar.notifiers.watchMethod` | Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. | `WATCH` | +| `sidecar.notifiers.resource` | Should the sidecar looks into secrets, configmaps or both. | `both` | +| `sidecar.notifiers.reloadURL` | Full url of notifier configuration reload API endpoint, to invoke after a config-map change | `"http://localhost:3000/api/admin/provisioning/notifications/reload"` | +| `sidecar.notifiers.skipReload` | Enabling this omits defining the REQ_URL and REQ_METHOD environment variables | `false` | +| `sidecar.notifiers.initNotifiers` | Set to true to deploy the notifier sidecar as an initContainer in addition to a container. 
This is needed if skipReload is true, to load any notifiers defined at startup time. | `false` | +| `smtp.existingSecret` | The name of an existing secret containing the SMTP credentials. | `""` | +| `smtp.userKey` | The key in the existing SMTP secret containing the username. | `"user"` | +| `smtp.passwordKey` | The key in the existing SMTP secret containing the password. | `"password"` | +| `admin.existingSecret` | The name of an existing secret containing the admin credentials (can be templated). | `""` | +| `admin.userKey` | The key in the existing admin secret containing the username. | `"admin-user"` | +| `admin.passwordKey` | The key in the existing admin secret containing the password. | `"admin-password"` | +| `serviceAccount.autoMount` | Automount the service account token in the pod| `true` | +| `serviceAccount.annotations` | ServiceAccount annotations | | +| `serviceAccount.create` | Create service account | `true` | +| `serviceAccount.labels` | ServiceAccount labels | `{}` | +| `serviceAccount.name` | Service account name to use, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `` | +| `serviceAccount.nameTest` | Service account name to use for test, when empty will be set to created account if `serviceAccount.create` is set else to `default` | `nil` | +| `rbac.create` | Create and use RBAC resources | `true` | +| `rbac.namespaced` | Creates Role and Rolebinding instead of the default ClusterRole and ClusteRoleBindings for the grafana instance | `false` | +| `rbac.useExistingRole` | Set to a rolename to use existing role - skipping role creating - but still doing serviceaccount and rolebinding to the rolename set here. | `nil` | +| `rbac.pspEnabled` | Create PodSecurityPolicy (with `rbac.create`, grant roles permissions as well) | `false` | +| `rbac.pspUseAppArmor` | Enforce AppArmor in created PodSecurityPolicy (requires `rbac.pspEnabled`) | `false` | +| `rbac.extraRoleRules` | Additional rules to add to the Role | [] | +| `rbac.extraClusterRoleRules` | Additional rules to add to the ClusterRole | [] | +| `command` | Define command to be executed by grafana container at startup | `nil` | +| `args` | Define additional args if command is used | `nil` | +| `testFramework.enabled` | Whether to create test-related resources | `true` | +| `testFramework.image.registry` | `test-framework` image registry. | `docker.io` | +| `testFramework.image.repository` | `test-framework` image repository. | `bats/bats` | +| `testFramework.image.tag` | `test-framework` image tag. | `v1.4.1` | +| `testFramework.imagePullPolicy` | `test-framework` image pull policy. | `IfNotPresent` | +| `testFramework.securityContext` | `test-framework` securityContext | `{}` | +| `downloadDashboards.env` | Environment variables to be passed to the `download-dashboards` container | `{}` | +| `downloadDashboards.envFromSecret` | Name of a Kubernetes secret (must be manually created in the same namespace) containing values to be added to the environment. 
Can be templated | `""` | +| `downloadDashboards.resources` | Resources of `download-dashboards` container | `{}` | +| `downloadDashboardsImage.registry` | Curl docker image registry | `docker.io` | +| `downloadDashboardsImage.repository` | Curl docker image repository | `curlimages/curl` | +| `downloadDashboardsImage.tag` | Curl docker image tag | `7.73.0` | +| `downloadDashboardsImage.sha` | Curl docker image sha (optional) | `""` | +| `downloadDashboardsImage.pullPolicy` | Curl docker image pull policy | `IfNotPresent` | +| `namespaceOverride` | Override the deployment namespace | `""` (`Release.Namespace`) | +| `serviceMonitor.enabled` | Use servicemonitor from prometheus operator | `false` | +| `serviceMonitor.namespace` | Namespace this servicemonitor is installed in | | +| `serviceMonitor.interval` | How frequently Prometheus should scrape | `1m` | +| `serviceMonitor.path` | Path to scrape | `/metrics` | +| `serviceMonitor.scheme` | Scheme to use for metrics scraping | `http` | +| `serviceMonitor.tlsConfig` | TLS configuration block for the endpoint | `{}` | +| `serviceMonitor.labels` | Labels for the servicemonitor passed to Prometheus Operator | `{}` | +| `serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `30s` | +| `serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping. | `[]` | +| `serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion. | `[]` | +| `revisionHistoryLimit` | Number of old ReplicaSets to retain | `10` | +| `imageRenderer.enabled` | Enable the image-renderer deployment & service | `false` | +| `imageRenderer.image.registry` | image-renderer Image registry | `docker.io` | +| `imageRenderer.image.repository` | image-renderer Image repository | `grafana/grafana-image-renderer` | +| `imageRenderer.image.tag` | image-renderer Image tag | `latest` | +| `imageRenderer.image.sha` | image-renderer Image sha (optional) | `""` | +| `imageRenderer.image.pullPolicy` | image-renderer ImagePullPolicy | `Always` | +| `imageRenderer.env` | extra env-vars for image-renderer | `{}` | +| `imageRenderer.envValueFrom` | Environment variables for image-renderer from alternate sources. See the API docs on [EnvVarSource](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.17/#envvarsource-v1-core) for format details. 
Can be templated | `{}` | +| `imageRenderer.serviceAccountName` | image-renderer deployment serviceAccountName | `""` | +| `imageRenderer.securityContext` | image-renderer deployment securityContext | `{}` | +| `imageRenderer.podAnnotations ` | image-renderer image-renderer pod annotation | `{}` | +| `imageRenderer.hostAliases` | image-renderer deployment Host Aliases | `[]` | +| `imageRenderer.priorityClassName` | image-renderer deployment priority class | `''` | +| `imageRenderer.service.enabled` | Enable the image-renderer service | `true` | +| `imageRenderer.service.portName` | image-renderer service port name | `http` | +| `imageRenderer.service.port` | image-renderer port used by deployment | `8081` | +| `imageRenderer.service.targetPort` | image-renderer service port used by service | `8081` | +| `imageRenderer.appProtocol` | Adds the appProtocol field to the service | `` | +| `imageRenderer.grafanaSubPath` | Grafana sub path to use for image renderer callback url | `''` | +| `imageRenderer.podPortName` | name of the image-renderer port on the pod | `http` | +| `imageRenderer.revisionHistoryLimit` | number of image-renderer replica sets to keep | `10` | +| `imageRenderer.networkPolicy.limitIngress` | Enable a NetworkPolicy to limit inbound traffic from only the created grafana pods | `true` | +| `imageRenderer.networkPolicy.limitEgress` | Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods | `false` | +| `imageRenderer.resources` | Set resource limits for image-renderer pods | `{}` | +| `imageRenderer.nodeSelector` | Node labels for pod assignment | `{}` | +| `imageRenderer.tolerations` | Toleration labels for pod assignment | `[]` | +| `imageRenderer.affinity` | Affinity settings for pod assignment | `{}` | +| `networkPolicy.enabled` | Enable creation of NetworkPolicy resources. | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed | `{}` | +| `networkPolicy.ingress` | Enable the creation of an ingress network policy | `true` | +| `networkPolicy.egress.enabled` | Enable the creation of an egress network policy | `false` | +| `networkPolicy.egress.ports` | An array of ports to allow for the egress | `[]` | +| `enableKubeBackwardCompatibility` | Enable backward compatibility of kubernetes where pod's defintion version below 1.13 doesn't have the enableServiceLinks option | `false` | + +### Example ingress with path + +With grafana 6.3 and above + +```yaml +grafana.ini: + server: + domain: monitoring.example.com + root_url: "%(protocol)s://%(domain)s/grafana" + serve_from_sub_path: true +ingress: + enabled: true + hosts: + - "monitoring.example.com" + path: "/grafana" +``` + +### Example of extraVolumeMounts + +Volume can be type persistentVolumeClaim or hostPath but not both at same time. +If neither existingClaim or hostPath argument is given then type is emptyDir. + +```yaml +- extraVolumeMounts: + - name: plugins + mountPath: /var/lib/grafana/plugins + subPath: configs/grafana/plugins + existingClaim: existing-grafana-claim + readOnly: false + - name: dashboards + mountPath: /var/lib/grafana/dashboards + hostPath: /usr/shared/grafana/dashboards + readOnly: false +``` + +## Import dashboards + +There are a few methods to import dashboards to Grafana. 
Below are some examples and explanations as to how to use each method: + +```yaml +dashboards: + default: + some-dashboard: + json: | + { + "annotations": + + ... + # Complete json file here + ... + + "title": "Some Dashboard", + "uid": "abcd1234", + "version": 1 + } + custom-dashboard: + # This is a path to a file inside the dashboards directory inside the chart directory + file: dashboards/custom-dashboard.json + prometheus-stats: + # Ref: https://grafana.com/dashboards/2 + gnetId: 2 + revision: 2 + datasource: Prometheus + loki-dashboard-quick-search: + gnetId: 12019 + revision: 2 + datasource: + - name: DS_PROMETHEUS + value: Prometheus + - name: DS_LOKI + value: Loki + local-dashboard: + url: https://raw.githubusercontent.com/user/repository/master/dashboards/dashboard.json +``` + +## BASE64 dashboards + +Dashboards may be stored on a server that does not return JSON directly but instead returns a Base64-encoded file (e.g. Gerrit). +For the url use case, a b64content parameter has been added: if you set b64content to true alongside the url entry, Base64 decoding is applied before the file is saved to disk. +If this entry is not set, or is set to false, no decoding is applied to the file before saving it to disk. + +### Gerrit use case + +The Gerrit API for downloading files has the following schema: where {project-name} and +{file-id} usually contain '/' in their values, so they MUST be replaced by %2F; if project-name is user/repo, branch-id is master and file-id is dir1/dir2/dashboard, +the url value is + +## Sidecar for dashboards + +If the parameter `sidecar.dashboards.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster and filters out the ones with +a label as defined in `sidecar.dashboards.label`. The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported +dashboards are deleted/updated. + +It is recommended to use one configmap per dashboard, because removing dashboards from a configmap that bundles several of them is currently not properly mirrored in grafana. + +Example dashboard config: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-dashboard + labels: + grafana_dashboard: "1" +data: + k8s-dashboard.json: |- + [...] +``` + +## Sidecar for datasources + +If the parameter `sidecar.datasources.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.datasources.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the data sources in grafana can be imported. + +Should you aim for reloading datasources in Grafana each time the config is changed, set `sidecar.datasources.skipReload: false` and adjust `sidecar.datasources.reloadURL` to `http://<svc-name>.<namespace>.svc.cluster.local/api/admin/provisioning/datasources/reload`. + +Secrets are recommended over configmaps for this use case because datasources usually contain private +data like usernames and passwords. Secrets are the more appropriate cluster resource to manage those. 
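+
+For reference, a minimal `values.yaml` sketch for enabling the datasource sidecar with live reload might look like the following; the service name `grafana` and namespace `monitoring` in the reload URL are placeholders for your own release, and only the keys documented in the table above are used:
+
+```yaml
+sidecar:
+  datasources:
+    enabled: true
+    # default label the sidecar looks for on secrets/configmaps
+    label: grafana_datasource
+    # trigger a provisioning reload on changes instead of only loading at startup
+    skipReload: false
+    # placeholder URL: <release-svc>.<namespace>.svc.cluster.local
+    reloadURL: "http://grafana.monitoring.svc.cluster.local/api/admin/provisioning/datasources/reload"
+```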
+ +Example values to add a postgres datasource as a kubernetes secret: +```yaml +apiVersion: v1 +kind: Secret +metadata: + name: grafana-datasources + labels: + grafana_datasource: 'true' # default value for: sidecar.datasources.label +stringData: + pg-db.yaml: |- + apiVersion: 1 + datasources: + - name: My pg db datasource + type: postgres + url: my-postgresql-db:5432 + user: db-readonly-user + secureJsonData: + password: 'SUperSEcretPa$$word' + jsonData: + database: my_datase + sslmode: 'disable' # disable/require/verify-ca/verify-full + maxOpenConns: 0 # Grafana v5.4+ + maxIdleConns: 2 # Grafana v5.4+ + connMaxLifetime: 14400 # Grafana v5.4+ + postgresVersion: 1000 # 903=9.3, 904=9.4, 905=9.5, 906=9.6, 1000=10 + timescaledb: false + # allow users to edit datasources from the UI. + editable: false +``` + +Example values to add a datasource adapted from [Grafana](http://docs.grafana.org/administration/provisioning/#example-datasource-config-file): + +```yaml +datasources: + datasources.yaml: + apiVersion: 1 + datasources: + # name of the datasource. Required + - name: Graphite + # datasource type. Required + type: graphite + # access mode. proxy or direct (Server or Browser in the UI). Required + access: proxy + # org id. will default to orgId 1 if not specified + orgId: 1 + # url + url: http://localhost:8080 + # database password, if used + password: + # database user, if used + user: + # database name, if used + database: + # enable/disable basic auth + basicAuth: + # basic auth username + basicAuthUser: + # basic auth password + basicAuthPassword: + # enable/disable with credentials headers + withCredentials: + # mark as default datasource. Max one per org + isDefault: + # fields that will be converted to json and stored in json_data + jsonData: + graphiteVersion: "1.1" + tlsAuth: true + tlsAuthWithCACert: true + # json object of data that will be encrypted. + secureJsonData: + tlsCACert: "..." + tlsClientCert: "..." + tlsClientKey: "..." + version: 1 + # allow users to edit datasources from the UI. + editable: false +``` + +## Sidecar for notifiers + +If the parameter `sidecar.notifiers.enabled` is set, an init container is deployed in the grafana +pod. This container lists all secrets (or configmaps, though not recommended) in the cluster and +filters out the ones with a label as defined in `sidecar.notifiers.label`. The files defined in +those secrets are written to a folder and accessed by grafana on startup. Using these yaml files, +the notification channels in grafana can be imported. The secrets must be created before +`helm install` so that the notifiers init container can list the secrets. + +Secrets are recommended over configmaps for this usecase because alert notification channels usually contain +private data like SMTP usernames and passwords. Secrets are the more appropriate cluster resource to manage those. + +Example datasource config adapted from [Grafana](https://grafana.com/docs/grafana/latest/administration/provisioning/#alert-notification-channels): + +```yaml +notifiers: + - name: notification-channel-1 + type: slack + uid: notifier1 + # either + org_id: 2 + # or + org_name: Main Org. + is_default: true + send_reminder: true + frequency: 1h + disable_resolve_message: false + # See `Supported Settings` section for settings supporter for each + # alert notification type. 
+ settings: + recipient: 'XXX' + token: 'xoxb' + uploadImage: true + url: https://slack.com + +delete_notifiers: + - name: notification-channel-1 + uid: notifier1 + org_id: 2 + - name: notification-channel-2 + # default org_id: 1 +``` + +## Sidecar for alerting resources + +If the parameter `sidecar.alerts.enabled` is set, a sidecar container is deployed in the grafana +pod. This container watches all configmaps (or secrets) in the cluster (namespace defined by `sidecar.alerts.searchNamespace`) and filters out the ones with +a label as defined in `sidecar.alerts.label` (default is `grafana_alert`). The files defined in those configmaps are written +to a folder and accessed by grafana. Changes to the configmaps are monitored and the imported alerting resources are updated, however, deletions are a little more complicated (see below). + +This sidecar can be used to provision alert rules, contact points, notification policies, notification templates and mute timings as shown in [Grafana Documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/). + +To fetch the alert config which will be provisioned, use the alert provisioning API ([Grafana Documentation](https://grafana.com/docs/grafana/next/developers/http_api/alerting_provisioning/)). +You can use either JSON or YAML format. + +Example config for an alert rule: + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: sample-grafana-alert + labels: + grafana_alert: "1" +data: + k8s-alert.yml: |- + apiVersion: 1 + groups: + - orgId: 1 + name: k8s-alert + [...] +``` + +To delete provisioned alert rules is a two step process, you need to delete the configmap which defined the alert rule +and then create a configuration which deletes the alert rule. + +Example deletion configuration: +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: delete-sample-grafana-alert + namespace: monitoring + labels: + grafana_alert: "1" +data: + delete-k8s-alert.yml: |- + apiVersion: 1 + deleteRules: + - orgId: 1 + uid: 16624780-6564-45dc-825c-8bded4ad92d3 +``` + +## Statically provision alerting resources +If you don't need to change alerting resources (alert rules, contact points, notification policies and notification templates) regularly you could use the `alerting` config option instead of the sidecar option above. +This will grab the alerting config and apply it statically at build time for the helm file. + +There are two methods to statically provision alerting configuration in Grafana. Below are some examples and explanations as to how to use each method: + +```yaml +alerting: + team1-alert-rules.yaml: + file: alerting/team1/rules.yaml + team2-alert-rules.yaml: + file: alerting/team2/rules.yaml + team3-alert-rules.yaml: + file: alerting/team3/rules.yaml + notification-policies.yaml: + file: alerting/shared/notification-policies.yaml + notification-templates.yaml: + file: alerting/shared/notification-templates.yaml + contactpoints.yaml: + apiVersion: 1 + contactPoints: + - orgId: 1 + name: Slack channel + receivers: + - uid: default-receiver + type: slack + settings: + # Webhook URL to be filled in + url: "" + # We need to escape double curly braces for the tpl function. + text: '{{ `{{ template "default.message" . }}` }}' + title: '{{ `{{ template "default.title" . }}` }}' +``` + +The two possibilities for static alerting resource provisioning are: + +* Inlining the file contents as shown for contact points in the above example. 
+* Importing a file using a relative path starting from the chart root directory as shown for the alert rules in the above example. + +### Important notes on file provisioning + +* The format of the files is defined in the [Grafana documentation](https://grafana.com/docs/grafana/next/alerting/set-up/provision-alerting-resources/file-provisioning/) on file provisioning. +* The chart supports importing YAML and JSON files. +* The filename must be unique, otherwise one volume mount will overwrite the other. +* In case of inlining, double curly braces that arise from the Grafana configuration format and are not intended as templates for the chart must be escaped. +* The number of total files under `alerting:` is not limited. Each file will end up as a volume mount in the corresponding provisioning folder of the deployed Grafana instance. +* The file size for each import is limited by what the function `.Files.Get` can handle, which suffices for most cases. + +## How to serve Grafana with a path prefix (/grafana) + +In order to serve Grafana with a prefix (e.g., ), add the following to your values.yaml. + +```yaml +ingress: + enabled: true + annotations: + kubernetes.io/ingress.class: "nginx" + nginx.ingress.kubernetes.io/rewrite-target: /$1 + nginx.ingress.kubernetes.io/use-regex: "true" + + path: /grafana/?(.*) + hosts: + - k8s.example.dev + +grafana.ini: + server: + root_url: http://localhost:3000/grafana # this host can be localhost +``` + +## How to securely reference secrets in grafana.ini + +This example uses Grafana [file providers](https://grafana.com/docs/grafana/latest/administration/configuration/#file-provider) for secret values and the `extraSecretMounts` configuration flag (Additional grafana server secret mounts) to mount the secrets. + +In grafana.ini: + +```yaml +grafana.ini: + [auth.generic_oauth] + enabled = true + client_id = $__file{/etc/secrets/auth_generic_oauth/client_id} + client_secret = $__file{/etc/secrets/auth_generic_oauth/client_secret} +``` + +Existing secret, or created along with helm: + +```yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: auth-generic-oauth-secret +type: Opaque +stringData: + client_id: + client_secret: +``` + +Include in the `extraSecretMounts` configuration flag: + +```yaml +- extraSecretMounts: + - name: auth-generic-oauth-secret-mount + secretName: auth-generic-oauth-secret + defaultMode: 0440 + mountPath: /etc/secrets/auth_generic_oauth + readOnly: true +``` + +### extraSecretMounts using a Container Storage Interface (CSI) provider + +This example uses a CSI driver e.g. 
retrieving secrets using [Azure Key Vault Provider](https://github.com/Azure/secrets-store-csi-driver-provider-azure) + +```yaml +- extraSecretMounts: + - name: secrets-store-inline + mountPath: /run/secrets + readOnly: true + csi: + driver: secrets-store.csi.k8s.io + readOnly: true + volumeAttributes: + secretProviderClass: "my-provider" + nodePublishSecretRef: + name: akv-creds +``` + +## Image Renderer Plug-In + +This chart supports enabling [remote image rendering](https://github.com/grafana/grafana-image-renderer/blob/master/README.md#run-in-docker) + +```yaml +imageRenderer: + enabled: true +``` + +### Image Renderer NetworkPolicy + +By default the image-renderer pods will have a network policy which only allows ingress traffic from the created grafana instance + +### High Availability for unified alerting + +If you want to run Grafana in a high availability cluster you need to enable +the headless service by setting `headlessService: true` in your `values.yaml` +file. + +As next step you have to setup the `grafana.ini` in your `values.yaml` in a way +that it will make use of the headless service to obtain all the IPs of the +cluster. You should replace ``{{ Name }}`` with the name of your helm deployment. + +```yaml +grafana.ini: + ... + unified_alerting: + enabled: true + ha_peers: {{ Name }}-headless:9094 + ha_listen_address: ${POD_IP}:9094 + ha_advertise_address: ${POD_IP}:9094 + + alerting: + enabled: false +``` diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml deleted file mode 100644 index 4dd878a..0000000 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ /dev/null @@ -1,16 +0,0 @@ -helm: - cli: - namespace: grafana - timeout: 120s - debug: true - atomic: true - force: false - dry-run: false - options: - release-name: grafana - skip-deploy: false - kubeconfig: - fetch: - enabled: true - cluster-name: recruiting-recruiting-recruiting-ekscluster - diff --git a/operations/deployment/helm/grafana/ci/default-values.yaml b/operations/deployment/helm/grafana/ci/default-values.yaml new file mode 100644 index 0000000..fc2ba60 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml new file mode 100644 index 0000000..f5b9b53 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml @@ -0,0 +1,16 @@ +affinity: + podAntiAffinity: + preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: failure-domain.beta.kubernetes.io/zone + weight: 100 + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: + app.kubernetes.io/instance: grafana-test + app.kubernetes.io/name: grafana + topologyKey: kubernetes.io/hostname diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml new file mode 100644 index 0000000..e0c4e41 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml @@ -0,0 +1,53 @@ +dashboards: + my-provider: + my-awesome-dashboard: + # An empty but valid dashboard + json: | + { + "__inputs": [], + "__requires": [ + { + "type": "grafana", + "id": "grafana", + "name": "Grafana", + "version": "6.3.5" + } + ], + "annotations": { + "list": [ + { + "builtIn": 1, + "datasource": "-- Grafana --", + "enable": true, + "hide": true, + "iconColor": "rgba(0, 211, 255, 1)", + "name": "Annotations & Alerts", + "type": "dashboard" + } + ] + }, + "editable": true, + "gnetId": null, + "graphTooltip": 0, + "id": null, + "links": [], + "panels": [], + "schemaVersion": 19, + "style": "dark", + "tags": [], + "templating": { + "list": [] + }, + "time": { + "from": "now-6h", + "to": "now" + }, + "timepicker": { + "refresh_intervals": ["5s"] + }, + "timezone": "", + "title": "Dummy Dashboard", + "uid": "IdcYQooWk", + "version": 1 + } + datasource: Prometheus diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml new file mode 100644 index 0000000..7b662c5 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml @@ -0,0 +1,19 @@ +dashboards: + my-provider: + my-awesome-dashboard: + gnetId: 10000 + revision: 1 + datasource: Prometheus +dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'my-provider' + orgId: 1 + folder: '' + type: file + updateIntervalSeconds: 10 + disableDeletion: true + editable: true + options: + path: /var/lib/grafana/dashboards/my-provider diff --git a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml new file mode 100644 index 0000000..5cc44a0 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml @@ -0,0 +1,7 @@ +extraConfigmapMounts: + - name: '{{ include "grafana.fullname" . }}' + configMap: '{{ include "grafana.fullname" . 
}}' + mountPath: /var/lib/grafana/dashboards/test-dashboard.json + # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap + subPath: grafana.ini + readOnly: true diff --git a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml new file mode 100644 index 0000000..32f3074 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml @@ -0,0 +1,19 @@ +podLabels: + customLableA: Aaaaa +imageRenderer: + enabled: true + env: + RENDERING_ARGS: --disable-gpu,--window-size=1280x758 + RENDERING_MODE: clustered + podLabels: + customLableB: Bbbbb + networkPolicy: + limitIngress: true + limitEgress: true + resources: + limits: + cpu: 1000m + memory: 1000Mi + requests: + cpu: 500m + memory: 50Mi diff --git a/operations/deployment/helm/grafana/ci/with-persistence.yaml b/operations/deployment/helm/grafana/ci/with-persistence.yaml new file mode 100644 index 0000000..b92ca02 --- /dev/null +++ b/operations/deployment/helm/grafana/ci/with-persistence.yaml @@ -0,0 +1,3 @@ +persistence: + type: pvc + enabled: true diff --git a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json new file mode 100644 index 0000000..9e26dfe --- /dev/null +++ b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/NOTES.txt b/operations/deployment/helm/grafana/templates/NOTES.txt new file mode 100644 index 0000000..d86419f --- /dev/null +++ b/operations/deployment/helm/grafana/templates/NOTES.txt @@ -0,0 +1,55 @@ +1. Get your '{{ .Values.adminUser }}' user password by running: + + kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo + + +2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local +{{ if .Values.ingress.enabled }} + If you bind grafana to 80, please update values in values.yaml and reinstall: + ``` + securityContext: + runAsUser: 0 + runAsGroup: 0 + fsGroup: 0 + + command: + - "setcap" + - "'cap_net_bind_service=+ep'" + - "/usr/sbin/grafana-server &&" + - "sh" + - "/run.sh" + ``` + Details refer to https://grafana.com/docs/installation/configuration/#http-port. + Or grafana would always crash. + + From outside the cluster, the server URL(s) are: + {{- range .Values.ingress.hosts }} + http://{{ . }} + {{- end }} +{{- else }} + Get the Grafana URL to visit by running these commands in the same shell: + {{- if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT + {{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') + http://$SERVICE_IP:{{ .Values.service.port -}} + {{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000 + {{- end }} +{{- end }} + +3. Login with the password from step 1 and the username: {{ .Values.adminUser }} + +{{- if not .Values.persistence.enabled }} +################################################################################# +###### WARNING: Persistence is disabled!!! You will lose your data when ##### +###### the Grafana pod is terminated. ##### +################################################################################# +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/_helpers.tpl b/operations/deployment/helm/grafana/templates/_helpers.tpl new file mode 100644 index 0000000..ead2449 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/_helpers.tpl @@ -0,0 +1,227 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "grafana.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "grafana.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "grafana.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create the name of the service account +*/}} +{{- define "grafana.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} + +{{- define "grafana.serviceAccountNameTest" -}} +{{- if .Values.serviceAccount.create }} +{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} +{{- else }} +{{- default "default" .Values.serviceAccount.nameTest }} +{{- end }} +{{- end }} + +{{/* +Allow the release namespace to be overridden for multi-namespace deployments in combined charts +*/}} +{{- define "grafana.namespace" -}} +{{- if .Values.namespaceOverride }} +{{- .Values.namespaceOverride }} +{{- else }} +{{- .Release.Namespace }} +{{- end }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.selectorLabels" . 
}} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- with .Values.extraLabels }} +{{ toYaml . }} +{{- end }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "grafana.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "grafana.imageRenderer.labels" -}} +helm.sh/chart: {{ include "grafana.chart" . }} +{{ include "grafana.imageRenderer.selectorLabels" . }} +{{- if or .Chart.AppVersion .Values.image.tag }} +app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels ImageRenderer +*/}} +{{- define "grafana.imageRenderer.selectorLabels" -}} +app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Looks if there's an existing secret and reuse its password. If not it generates +new password and use it. +*/}} +{{- define "grafana.password" -}} +{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} +{{- if $secret }} +{{- index $secret "data" "admin-password" }} +{{- else }} +{{- (randAlphaNum 40) | b64enc | quote }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for rbac. +*/}} +{{- define "grafana.rbac.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} +{{- print "rbac.authorization.k8s.io/v1" }} +{{- else }} +{{- print "rbac.authorization.k8s.io/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "grafana.ingress.apiVersion" -}} +{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} +{{- print "networking.k8s.io/v1" }} +{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} +{{- print "networking.k8s.io/v1beta1" }} +{{- else }} +{{- print "extensions/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for Horizontal Pod Autoscaler. +*/}} +{{- define "grafana.hpa.apiVersion" -}} +{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2" }} +{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} +{{- print "autoscaling/v2beta2" }} +{{- else }} +{{- print "autoscaling/v2beta1" }} +{{- end }} +{{- end }} + +{{/* +Return the appropriate apiVersion for podDisruptionBudget. +*/}} +{{- define "grafana.podDisruptionBudget.apiVersion" -}} +{{- if $.Values.podDisruptionBudget.apiVersion }} +{{- print $.Values.podDisruptionBudget.apiVersion }} +{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} +{{- print "policy/v1" }} +{{- else }} +{{- print "policy/v1beta1" }} +{{- end }} +{{- end }} + +{{/* +Return if ingress is stable. +*/}} +{{- define "grafana.ingress.isStable" -}} +{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} +{{- end }} + +{{/* +Return if ingress supports ingressClassName. +*/}} +{{- define "grafana.ingress.supportsIngressClassName" -}} +{{- or (eq (include "grafana.ingress.isStable" .) 
"true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Return if ingress supports pathType. +*/}} +{{- define "grafana.ingress.supportsPathType" -}} +{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} +{{- end }} + +{{/* +Formats imagePullSecrets. Input is (dict "root" . "imagePullSecrets" .{specific imagePullSecrets}) +*/}} +{{- define "grafana.imagePullSecrets" -}} +{{- $root := .root }} +{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} +{{- if eq (typeOf .) "map[string]interface {}" }} +- {{ toYaml (dict "name" (tpl .name $root)) | trim }} +{{- else }} +- name: {{ tpl . $root }} +{{- end }} +{{- end }} +{{- end }} + + +{{/* + Checks whether or not the configSecret secret has to be created + */}} +{{- define "grafana.shouldCreateConfigSecret" -}} +{{- $secretFound := false -}} +{{- range $key, $value := .Values.datasources }} + {{- if hasKey $value "secret" }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.notifiers }} + {{- if hasKey $value "secret" }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- range $key, $value := .Values.alerting }} + {{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }} + {{- $secretFound = true}} + {{- end }} +{{- end }} +{{- $secretFound}} +{{- end -}} diff --git a/operations/deployment/helm/grafana/templates/_pod.tpl b/operations/deployment/helm/grafana/templates/_pod.tpl new file mode 100644 index 0000000..f4b9fd3 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/_pod.tpl @@ -0,0 +1,1276 @@ +{{- define "grafana.pod" -}} +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- $root := . -}} +{{- with .Values.schedulerName }} +schedulerName: "{{ . }}" +{{- end }} +serviceAccountName: {{ include "grafana.serviceAccountName" . }} +automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} +{{- with .Values.securityContext }} +securityContext: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.hostAliases }} +hostAliases: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.priorityClassName }} +priorityClassName: {{ . }} +{{- end }} +{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts) (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} +initContainers: +{{- end }} +{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} + - name: init-chown-data + {{- $registry := .Values.global.imageRegistry | default .Values.initChownData.image.registry -}} + {{- if .Values.initChownData.image.sha }} + image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} + {{- with .Values.initChownData.securityContext }} + securityContext: + {{- toYaml . 
| nindent 6 }} + {{- end }} + command: + - chown + - -R + - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} + - /var/lib/grafana + {{- with .Values.initChownData.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} +{{- end }} +{{- if .Values.dashboards }} + - name: download-dashboards + {{- $registry := .Values.global.imageRegistry | default .Values.downloadDashboardsImage.registry -}} + {{- if .Values.downloadDashboardsImage.sha }} + image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} + command: ["/bin/sh"] + args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] + {{- with .Values.downloadDashboards.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + env: + {{- range $key, $value := .Values.downloadDashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- with .Values.downloadDashboards.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.downloadDashboards.envFromSecret }} + envFrom: + - secretRef: + name: {{ tpl . $root }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/download_dashboards.sh" + subPath: download_dashboards.sh + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . $root }} + {{- end }} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts }} + - name: {{ include "grafana.name" . }}-init-sc-alerts + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . 
}} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- with .Values.sidecar.alerts.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end }} +{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} + - name: {{ include "grafana.name" . }}-init-sc-datasources + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: "LIST" + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . 
| nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end }} +{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} + - name: {{ include "grafana.name" . }}-init-sc-notifiers + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: LIST + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- with .Values.extraInitContainers }} + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} +imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} +{{- end }} +{{- if not .Values.enableKubeBackwardCompatibility }} +enableServiceLinks: {{ .Values.enableServiceLinks }} +{{- end }} +containers: +{{- if and .Values.sidecar.alerts.enabled (not .Values.sidecar.alerts.initAlerts) }} + - name: {{ include "grafana.name" . 
}}-sc-alerts + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.alerts.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.alerts.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.alerts.label }}" + {{- with .Values.sidecar.alerts.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/alerting" + - name: RESOURCE + value: {{ quote .Values.sidecar.alerts.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.alerts.searchNamespace }} + - name: NAMESPACE + value: {{ . | join "," | quote }} + {{- end }} + {{- with .Values.sidecar.alerts.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: {{ quote . }} + {{- end }} + {{- with .Values.sidecar.alerts.script }} + - name: SCRIPT + value: {{ quote . }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.alerts.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.alerts.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.alerts.watchServerTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.alerts.watchClientTimeout }} + {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . 
| nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- with .Values.sidecar.alerts.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if .Values.sidecar.dashboards.enabled }} + - name: {{ include "grafana.name" . }}-sc-dashboard + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.dashboards.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.dashboards.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.dashboards.label }}" + {{- with .Values.sidecar.dashboards.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} + {{- end }} + - name: FOLDER + value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" + - name: RESOURCE + value: {{ quote .Values.sidecar.dashboards.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.folderAnnotation }} + - name: FOLDER_ANNOTATION + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.dashboards.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- if not .Values.sidecar.dashboards.skipReload }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + - name: REQ_URL + value: {{ .Values.sidecar.dashboards.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.dashboards.watchServerTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.dashboards.watchClientTimeout }} + {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- with .Values.sidecar.dashboards.extraMounts }} + {{- toYaml . | trim | nindent 6 }} + {{- end }} +{{- end}} +{{- if and .Values.sidecar.datasources.enabled (not .Values.sidecar.datasources.initDatasources) }} + - name: {{ include "grafana.name" . }}-sc-datasources + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.datasources.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.datasources.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.datasources.label }}" + {{- with .Values.sidecar.datasources.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/datasources" + - name: RESOURCE + value: {{ quote .Values.sidecar.datasources.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.datasources.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. 
| join ",") $root }}" + {{- end }} + {{- if .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ .Values.sidecar.skipTlsVerify }}" + {{- end }} + {{- if .Values.sidecar.datasources.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.datasources.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.datasources.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.datasources.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.datasources.watchServerTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.datasources.watchClientTimeout }} + {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" +{{- end}} +{{- if .Values.sidecar.notifiers.enabled }} + - name: {{ include "grafana.name" . 
}}-sc-notifiers + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.notifiers.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.notifiers.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.notifiers.label }}" + {{- with .Values.sidecar.notifiers.labelValue }} + - name: LABEL_VALUE + value: {{ quote . }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/notifiers" + - name: RESOURCE + value: {{ quote .Values.sidecar.notifiers.resource }} + {{- if .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ .Values.sidecar.enableUniqueFilenames }}" + {{- end }} + {{- with .Values.sidecar.notifiers.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . }}" + {{- end }} + {{- if .Values.sidecar.notifiers.script }} + - name: SCRIPT + value: "{{ .Values.sidecar.notifiers.script }}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.notifiers.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.notifiers.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.notifiers.watchServerTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.notifiers.watchClientTimeout }} + {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" +{{- end}} +{{- if .Values.sidecar.plugins.enabled }} + - name: {{ include "grafana.name" . }}-sc-plugins + {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} + {{- if .Values.sidecar.image.sha }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} + env: + {{- range $key, $value := .Values.sidecar.plugins.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} + - name: IGNORE_ALREADY_PROCESSED + value: "true" + {{- end }} + - name: METHOD + value: {{ .Values.sidecar.plugins.watchMethod }} + - name: LABEL + value: "{{ .Values.sidecar.plugins.label }}" + {{- if .Values.sidecar.plugins.labelValue }} + - name: LABEL_VALUE + value: {{ quote .Values.sidecar.plugins.labelValue }} + {{- end }} + {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + - name: LOG_LEVEL + value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} + {{- end }} + - name: FOLDER + value: "/etc/grafana/provisioning/plugins" + - name: RESOURCE + value: {{ quote .Values.sidecar.plugins.resource }} + {{- with .Values.sidecar.enableUniqueFilenames }} + - name: UNIQUE_FILENAMES + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.plugins.searchNamespace }} + - name: NAMESPACE + value: "{{ tpl (. | join ",") $root }}" + {{- end }} + {{- with .Values.sidecar.plugins.script }} + - name: SCRIPT + value: "{{ . }}" + {{- end }} + {{- with .Values.sidecar.skipTlsVerify }} + - name: SKIP_TLS_VERIFY + value: "{{ . 
}}" + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_USERNAME + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: REQ_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if not .Values.sidecar.plugins.skipReload }} + - name: REQ_URL + value: {{ .Values.sidecar.plugins.reloadURL }} + - name: REQ_METHOD + value: POST + {{- end }} + {{- if .Values.sidecar.plugins.watchServerTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_SERVER_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" + {{- end }} + {{- if .Values.sidecar.plugins.watchClientTimeout }} + {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} + {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} + {{- end }} + - name: WATCH_CLIENT_TIMEOUT + value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" + {{- end }} + {{- with .Values.sidecar.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.sidecar.securityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" +{{- end}} + - name: {{ .Chart.Name }} + {{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}} + {{- if .Values.image.sha }} + image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.command }} + command: + {{- range .Values.command }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- if .Values.args }} + args: + {{- range .Values.args }} + - {{ . | quote }} + {{- end }} + {{- end }} + {{- with .Values.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 6 }} + {{- end }} + volumeMounts: + - name: config + mountPath: "/etc/grafana/grafana.ini" + subPath: grafana.ini + {{- if .Values.ldap.enabled }} + - name: ldap + mountPath: "/etc/grafana/ldap.toml" + subPath: ldap.toml + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + mountPath: {{ tpl .mountPath $root }} + subPath: {{ tpl (.subPath | default "") $root }} + readOnly: {{ .readOnly }} + {{- end }} + - name: storage + mountPath: "/var/lib/grafana" + {{- with .Values.persistence.subPath }} + subPath: {{ tpl . 
$root }} + {{- end }} + {{- with .Values.dashboards }} + {{- range $provider, $dashboards := . }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "json") (hasKey $value "file")) }} + - name: dashboards-{{ $provider }} + mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" + subPath: "{{ $key }}.json" + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.dashboardsConfigMaps }} + {{- range (keys . | sortAlpha) }} + - name: dashboards-{{ . }} + mountPath: "/var/lib/grafana/dashboards/{{ . }}" + {{- end }} + {{- end }} + {{- with .Values.datasources }} + {{- $datasources := . }} + {{- range (keys . | sortAlpha) }} + {{- if (or (hasKey (index $datasources .) "secret")) }} {{/*check if current datasource should be handeled as secret */}} + - name: config-secret + mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" + subPath: {{ . | quote }} + {{- else }} + - name: config + mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.notifiers }} + {{- $notifiers := . }} + {{- range (keys . | sortAlpha) }} + {{- if (or (hasKey (index $notifiers .) "secret")) }} {{/*check if current notifier should be handeled as secret */}} + - name: config-secret + mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" + subPath: {{ . | quote }} + {{- else }} + - name: config + mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.alerting }} + {{- $alertingmap := .}} + {{- range (keys . | sortAlpha) }} + {{- if (or (hasKey (index $.Values.alerting .) "secret") (hasKey (index $.Values.alerting .) "secretFile")) }} {{/*check if current alerting entry should be handeled as secret */}} + - name: config-secret + mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" + subPath: {{ . | quote }} + {{- else }} + - name: config + mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" + subPath: {{ . | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- with .Values.dashboardProviders }} + {{- range (keys . | sortAlpha) }} + - name: config + mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" + subPath: {{ . 
| quote }} + {{- end }} + {{- end }} + {{- with .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + mountPath: "/etc/grafana/provisioning/alerting" + {{- end}} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + mountPath: {{ .Values.sidecar.dashboards.folder | quote }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" + subPath: provider.yaml + {{- end}} + {{- end}} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + mountPath: "/etc/grafana/provisioning/datasources" + {{- end}} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + mountPath: "/etc/grafana/provisioning/plugins" + {{- end}} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + mountPath: "/etc/grafana/provisioning/notifiers" + {{- end}} + {{- range .Values.extraSecretMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + readOnly: {{ .readOnly }} + subPath: {{ .subPath | default "" }} + {{- end }} + {{- range .Values.extraVolumeMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + subPath: {{ .subPath | default "" }} + readOnly: {{ .readOnly }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + mountPath: {{ .mountPath }} + {{- end }} + ports: + - name: {{ .Values.podPortName }} + containerPort: {{ .Values.service.targetPort }} + protocol: TCP + - name: {{ .Values.gossipPortName }}-tcp + containerPort: 9094 + protocol: TCP + - name: {{ .Values.gossipPortName }}-udp + containerPort: 9094 + protocol: UDP + env: + - name: POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_USER + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.userKey | default "admin-user" }} + {{- end }} + {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + - name: GF_SECURITY_ADMIN_PASSWORD + valueFrom: + secretKeyRef: + name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} + key: {{ .Values.admin.passwordKey | default "admin-password" }} + {{- end }} + {{- if .Values.plugins }} + - name: GF_INSTALL_PLUGINS + valueFrom: + configMapKeyRef: + name: {{ include "grafana.fullname" . }} + key: plugins + {{- end }} + {{- if .Values.smtp.existingSecret }} + - name: GF_SMTP_USER + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.userKey | default "user" }} + - name: GF_SMTP_PASSWORD + valueFrom: + secretKeyRef: + name: {{ .Values.smtp.existingSecret }} + key: {{ .Values.smtp.passwordKey | default "password" }} + {{- end }} + {{- if .Values.imageRenderer.enabled }} + - name: GF_RENDERING_SERVER_URL + value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render + - name: GF_RENDERING_CALLBACK_URL + value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} + {{- end }} + - name: GF_PATHS_DATA + value: {{ (get .Values "grafana.ini").paths.data }} + - name: GF_PATHS_LOGS + value: {{ (get .Values "grafana.ini").paths.logs }} + - name: GF_PATHS_PLUGINS + value: {{ (get .Values "grafana.ini").paths.plugins }} + - name: GF_PATHS_PROVISIONING + value: {{ (get .Values "grafana.ini").paths.provisioning }} + {{- range $key, $value := .Values.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 10 }} + {{- end }} + {{- range $key, $value := .Values.env }} + - name: "{{ tpl $key $ }}" + value: "{{ tpl (print $value) $ }}" + {{- end }} + {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} + envFrom: + {{- if .Values.envFromSecret }} + - secretRef: + name: {{ tpl .Values.envFromSecret . }} + {{- end }} + {{- if .Values.envRenderSecret }} + - secretRef: + name: {{ include "grafana.fullname" . }}-env + {{- end }} + {{- range .Values.envFromSecrets }} + - secretRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- range .Values.envFromConfigMaps }} + - configMapRef: + name: {{ tpl .name $ }} + optional: {{ .optional | default false }} + {{- end }} + {{- end }} + {{- with .Values.livenessProbe }} + livenessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.readinessProbe }} + readinessProbe: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.lifecycleHooks }} + lifecycle: + {{- tpl (toYaml .) $root | nindent 6 }} + {{- end }} + {{- with .Values.resources }} + resources: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- with .Values.extraContainers }} + {{- tpl . $ | nindent 2 }} +{{- end }} +{{- with .Values.nodeSelector }} +nodeSelector: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.affinity }} +affinity: + {{- tpl (toYaml .) $root | nindent 2 }} +{{- end }} +{{- with .Values.topologySpreadConstraints }} +topologySpreadConstraints: + {{- toYaml . | nindent 2 }} +{{- end }} +{{- with .Values.tolerations }} +tolerations: + {{- toYaml . | nindent 2 }} +{{- end }} +volumes: + - name: config + configMap: + name: {{ include "grafana.fullname" . }} + {{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} + {{- if and .Values.createConfigmap $createConfigSecret }} + - name: config-secret + secret: + secretName: {{ include "grafana.fullname" . }}-config-secret + {{- end }} + {{- range .Values.extraConfigmapMounts }} + - name: {{ tpl .name $root }} + configMap: + name: {{ tpl .configMap $root }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.dashboards }} + {{- range (keys .Values.dashboards | sortAlpha) }} + - name: dashboards-{{ . }} + configMap: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} + {{- end }} + {{- end }} + {{- if .Values.dashboardsConfigMaps }} + {{- range $provider, $name := .Values.dashboardsConfigMaps }} + - name: dashboards-{{ $provider }} + configMap: + name: {{ tpl $name $root }} + {{- end }} + {{- end }} + {{- if .Values.ldap.enabled }} + - name: ldap + secret: + {{- if .Values.ldap.existingSecret }} + secretName: {{ .Values.ldap.existingSecret }} + {{- else }} + secretName: {{ include "grafana.fullname" . 
}} + {{- end }} + items: + - key: ldap-toml + path: ldap.toml + {{- end }} + {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} + - name: storage + persistentVolumeClaim: + claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }} + {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} + {{/* nothing */}} + {{- else }} + - name: storage + {{- if .Values.persistence.inMemory.enabled }} + emptyDir: + medium: Memory + {{- with .Values.persistence.inMemory.sizeLimit }} + sizeLimit: {{ . }} + {{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.alerts.enabled }} + - name: sc-alerts-volume + emptyDir: + {{- with .Values.sidecar.alerts.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.dashboards.enabled }} + - name: sc-dashboard-volume + emptyDir: + {{- with .Values.sidecar.dashboards.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- if .Values.sidecar.dashboards.SCProvider }} + - name: sc-dashboard-provider + configMap: + name: {{ include "grafana.fullname" . }}-config-dashboards + {{- end }} + {{- end }} + {{- if .Values.sidecar.datasources.enabled }} + - name: sc-datasources-volume + emptyDir: + {{- with .Values.sidecar.datasources.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.plugins.enabled }} + - name: sc-plugins-volume + emptyDir: + {{- with .Values.sidecar.plugins.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- if .Values.sidecar.notifiers.enabled }} + - name: sc-notifiers-volume + emptyDir: + {{- with .Values.sidecar.notifiers.sizeLimit }} + sizeLimit: {{ . }} + {{- else }} + {} + {{- end }} + {{- end }} + {{- range .Values.extraSecretMounts }} + {{- if .secretName }} + - name: {{ .name }} + secret: + secretName: {{ .secretName }} + defaultMode: {{ .defaultMode }} + {{- with .items }} + items: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- else if .projected }} + - name: {{ .name }} + projected: + {{- toYaml .projected | nindent 6 }} + {{- else if .csi }} + - name: {{ .name }} + csi: + {{- toYaml .csi | nindent 6 }} + {{- end }} + {{- end }} + {{- range .Values.extraVolumes }} + - name: {{ .name }} + {{- if .existingClaim }} + persistentVolumeClaim: + claimName: {{ .existingClaim }} + {{- else if .hostPath }} + hostPath: + {{ toYaml .hostPath | nindent 6 }} + {{- else if .csi }} + csi: + {{- toYaml .data | nindent 6 }} + {{- else if .configMap }} + configMap: + {{- toYaml .configMap | nindent 6 }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} + {{- range .Values.extraEmptyDirMounts }} + - name: {{ .name }} + emptyDir: {} + {{- end }} + {{- with .Values.extraContainerVolumes }} + {{- tpl (toYaml .) $root | nindent 2 }} + {{- end }} +{{- end }} + diff --git a/operations/deployment/helm/grafana/templates/clusterrole.yaml b/operations/deployment/helm/grafana/templates/clusterrole.yaml new file mode 100644 index 0000000..3af4b62 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/clusterrole.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }} +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-clusterrole +{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} +rules: + {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end}} + {{- with .Values.rbac.extraClusterRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end}} +{{- end}} diff --git a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml new file mode 100644 index 0000000..bda9431 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml @@ -0,0 +1,24 @@ +{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ include "grafana.fullname" . }}-clusterrolebinding + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +roleRef: + kind: ClusterRole + {{- if .Values.rbac.useExistingClusterRole }} + name: {{ .Values.rbac.useExistingClusterRole }} + {{- else }} + name: {{ include "grafana.fullname" . }}-clusterrole + {{- end }} + apiGroup: rbac.authorization.k8s.io +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configSecret.yaml b/operations/deployment/helm/grafana/templates/configSecret.yaml new file mode 100644 index 0000000..f8937cc --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configSecret.yaml @@ -0,0 +1,43 @@ +{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} +{{- if and .Values.createConfigmap $createConfigSecret }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: Secret +metadata: + name: "{{ include "grafana.fullname" . }}-config-secret" + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +data: +{{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "secretFile") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}} + {{/* as of https://helm.sh/docs/chart_template_guide/accessing_files/ this will only work if you fork this chart and add files to it*/}} + {{- end }} +{{- end }} +stringData: +{{- range $key, $value := .Values.datasources }} +{{- if (hasKey $value "secret") }} +{{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} +{{- end }} +{{- end }} +{{- range $key, $value := .Values.notifiers }} +{{- if (hasKey $value "secret") }} +{{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} +{{- end }} +{{- end }} +{{- range $key, $value := .Values.alerting }} +{{ if (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value.secret | nindent 4) $root }} + {{- end }} +{{- end }} +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml new file mode 100644 index 0000000..1f706a8 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml @@ -0,0 +1,29 @@ +{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} +apiVersion: v1 +kind: ConfigMap +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + name: {{ include "grafana.fullname" . }}-config-dashboards + namespace: {{ include "grafana.namespace" . }} +data: + provider.yaml: |- + apiVersion: 1 + providers: + - name: '{{ .Values.sidecar.dashboards.provider.name }}' + orgId: {{ .Values.sidecar.dashboards.provider.orgid }} + {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + folder: '{{ .Values.sidecar.dashboards.provider.folder }}' + {{- end }} + type: {{ .Values.sidecar.dashboards.provider.type }} + disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} + allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} + updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} + options: + foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} + path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configmap.yaml b/operations/deployment/helm/grafana/templates/configmap.yaml new file mode 100644 index 0000000..7b837d9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/configmap.yaml @@ -0,0 +1,144 @@ +{{- if .Values.createConfigmap }} +{{- $files := .Files }} +{{- $root := . -}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +data: + {{- with .Values.plugins }} + plugins: {{ join "," . 
}} + {{- end }} + grafana.ini: | + {{- range $elem, $elemVal := index .Values "grafana.ini" }} + {{- if not (kindIs "map" $elemVal) }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- range $key, $value := index .Values "grafana.ini" }} + {{- if kindIs "map" $value }} + [{{ $key }}] + {{- range $elem, $elemVal := $value }} + {{- if kindIs "invalid" $elemVal }} + {{ $elem }} = + {{- else if kindIs "string" $elemVal }} + {{ $elem }} = {{ tpl $elemVal $ }} + {{- else }} + {{ $elem }} = {{ $elemVal }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.datasources }} + {{- if not (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.notifiers }} + {{- if not (hasKey $value "secret") }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.alerting }} + {{- if (hasKey $value "file") }} + {{- $key | nindent 2 }}: + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}} + {{/* will be stored inside secret generated by "configSecret.yaml"*/}} + {{- else }} + {{- $key | nindent 2 }}: | + {{- tpl (toYaml $value | nindent 4) $root }} + {{- end }} + {{- end }} + + {{- range $key, $value := .Values.dashboardProviders }} + {{- $key | nindent 2 }}: | + {{- toYaml $value | nindent 4 }} + {{- end }} + +{{- if .Values.dashboards }} + download_dashboards.sh: | + #!/usr/bin/env sh + set -euf + {{- if .Values.dashboardProviders }} + {{- range $key, $value := .Values.dashboardProviders }} + {{- range $value.providers }} + mkdir -p {{ .options.path }} + {{- end }} + {{- end }} + {{- end }} + {{ $dashboardProviders := .Values.dashboardProviders }} + {{- range $provider, $dashboards := .Values.dashboards }} + {{- range $key, $value := $dashboards }} + {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} + curl -skf \ + --connect-timeout 60 \ + --max-time 60 \ + {{- if not $value.b64content }} + {{- if not $value.acceptHeader }} + -H "Accept: application/json" \ + {{- else }} + -H "Accept: {{ $value.acceptHeader }}" \ + {{- end }} + {{- if $value.token }} + -H "Authorization: token {{ $value.token }}" \ + {{- end }} + {{- if $value.bearerToken }} + -H "Authorization: Bearer {{ $value.bearerToken }}" \ + {{- end }} + {{- if $value.basic }} + -H "Authorization: Basic {{ $value.basic }}" \ + {{- end }} + {{- if $value.gitlabToken }} + -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ + {{- end }} + -H "Content-Type: application/json;charset=UTF-8" \ + {{- end }} + {{- $dpPath := "" -}} + {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} + {{- if eq $kd.name $provider }} + {{- $dpPath = $kd.options.path }} + {{- end }} + {{- end }} + {{- if $value.url }} + "{{ $value.url }}" \ + {{- else }} + "https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ + {{- end }} + {{- if $value.datasource }} + {{- if kindIs "string" $value.datasource }} + | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ + {{- end }} + {{- if kindIs "slice" $value.datasource }} + {{- range $value.datasource }} + | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ + {{- end }} + {{- end }} + {{- end }} + {{- if $value.b64content }} + | base64 -d \ + {{- end }} + > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" + {{ end }} + {{- end }} + {{- end }} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml new file mode 100644 index 0000000..b96ce72 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml @@ -0,0 +1,38 @@ +{{- if .Values.dashboards }} +{{ $files := .Files }} +{{- range $provider, $dashboards := .Values.dashboards }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} + namespace: {{ include "grafana.namespace" $ }} + labels: + {{- include "grafana.labels" $ | nindent 4 }} + dashboard-provider: {{ $provider }} + {{- if $.Values.sidecar.dashboards.enabled }} + {{ $.Values.sidecar.dashboards.label }}: {{ $.Values.sidecar.dashboards.labelValue | quote }} + {{- end }} +{{- if $dashboards }} +data: +{{- $dashboardFound := false }} +{{- range $key, $value := $dashboards }} +{{- if (or (hasKey $value "json") (hasKey $value "file")) }} +{{- $dashboardFound = true }} + {{- print $key | nindent 2 }}.json: + {{- if hasKey $value "json" }} + |- + {{- $value.json | nindent 6 }} + {{- end }} + {{- if hasKey $value "file" }} + {{- toYaml ( $files.Get $value.file ) | nindent 4}} + {{- end }} +{{- end }} +{{- end }} +{{- if not $dashboardFound }} + {} +{{- end }} +{{- end }} +--- +{{- end }} + +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/deployment.yaml b/operations/deployment/helm/grafana/templates/deployment.yaml new file mode 100644 index 0000000..bfa26bb --- /dev/null +++ b/operations/deployment/helm/grafana/templates/deployment.yaml @@ -0,0 +1,51 @@ +{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} + replicas: {{ .Values.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + {{- with .Values.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . 
| sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.envRenderSecret }} + checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/extra-manifests.yaml b/operations/deployment/helm/grafana/templates/extra-manifests.yaml new file mode 100644 index 0000000..a9bb3b6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/extra-manifests.yaml @@ -0,0 +1,4 @@ +{{ range .Values.extraObjects }} +--- +{{ tpl (toYaml .) $ }} +{{ end }} diff --git a/operations/deployment/helm/grafana/templates/headless-service.yaml b/operations/deployment/helm/grafana/templates/headless-service.yaml new file mode 100644 index 0000000..3028589 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/headless-service.yaml @@ -0,0 +1,22 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-headless + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + clusterIP: None + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} + type: ClusterIP + ports: + - name: {{ .Values.gossipPortName }}-tcp + port: 9094 +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/hpa.yaml b/operations/deployment/helm/grafana/templates/hpa.yaml new file mode 100644 index 0000000..46bbcb4 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/hpa.yaml @@ -0,0 +1,52 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if .Values.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }} + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + {{- if has .Values.persistence.type $sts }} + kind: StatefulSet + {{- else }} + kind: Deployment + {{- end }} + name: {{ include "grafana.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.autoscaling.behavior }} + behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml new file mode 100644 index 0000000..ea97969 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml @@ -0,0 +1,131 @@ +{{ if .Values.imageRenderer.enabled }} +{{- $root := . -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} + replicas: {{ .Values.imageRenderer.replicas }} + {{- end }} + revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + + {{- with .Values.imageRenderer.deploymentStrategy }} + strategy: + {{- toYaml . | trim | nindent 4 }} + {{- end }} + template: + metadata: + labels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- with .Values.imageRenderer.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- with .Values.imageRenderer.schedulerName }} + schedulerName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.serviceAccountName }} + serviceAccountName: "{{ . }}" + {{- end }} + {{- with .Values.imageRenderer.securityContext }} + securityContext: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.hostAliases }} + hostAliases: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.priorityClassName }} + priorityClassName: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.image.pullSecrets }} + imagePullSecrets: + {{- range . }} + - name: {{ tpl . 
$root }} + {{- end}} + {{- end }} + containers: + - name: {{ .Chart.Name }}-image-renderer + {{- $registry := .Values.global.imageRegistry | default .Values.imageRenderer.image.registry -}} + {{- if .Values.imageRenderer.image.sha }} + image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" + {{- else }} + image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" + {{- end }} + imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} + {{- if .Values.imageRenderer.command }} + command: + {{- range .Values.imageRenderer.command }} + - {{ . }} + {{- end }} + {{- end}} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + containerPort: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + livenessProbe: + httpGet: + path: / + port: {{ .Values.imageRenderer.service.portName }} + env: + - name: HTTP_PORT + value: {{ .Values.imageRenderer.service.targetPort | quote }} + {{- if .Values.imageRenderer.serviceMonitor.enabled }} + - name: ENABLE_METRICS + value: "true" + {{- end }} + {{- range $key, $value := .Values.imageRenderer.envValueFrom }} + - name: {{ $key | quote }} + valueFrom: + {{- tpl (toYaml $value) $ | nindent 16 }} + {{- end }} + {{- range $key, $value := .Values.imageRenderer.env }} + - name: {{ $key | quote }} + value: {{ $value | quote }} + {{- end }} + {{- with .Values.imageRenderer.containerSecurityContext }} + securityContext: + {{- toYaml . | nindent 12 }} + {{- end }} + volumeMounts: + - mountPath: /tmp + name: image-renderer-tmpfs + {{- with .Values.imageRenderer.resources }} + resources: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- with .Values.imageRenderer.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 8 }} + {{- end }} + {{- with .Values.imageRenderer.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumes: + - name: image-renderer-tmpfs + emptyDir: {} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml new file mode 100644 index 0000000..b0f0059 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml @@ -0,0 +1,47 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} +apiVersion: {{ include "grafana.hpa.apiVersion" . }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer + helm.sh/chart: {{ include "grafana.chart" . }} + app.kubernetes.io/managed-by: {{ .Release.Service }} + app.kubernetes.io/instance: {{ .Release.Name }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "grafana.fullname" . }}-image-renderer + minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} + maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} + metrics: + {{- if .Values.imageRenderer.autoscaling.targetMemory }} + - type: Resource + resource: + name: memory + {{- if eq (include "grafana.hpa.apiVersion" .) 
"autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} + targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.imageRenderer.autoscaling.behavior }} + behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml new file mode 100644 index 0000000..d1a0eb3 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml @@ -0,0 +1,79 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-ingress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer ingress traffic from grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Ingress + ingress: + - ports: + - port: {{ .Values.imageRenderer.service.targetPort }} + protocol: TCP + from: + - namespaceSelector: + matchLabels: + kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 14 }} + {{- end }} + {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} + {{ toYaml . | nindent 8 }} + {{- end }} +{{- end }} + +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} +--- +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer-egress + namespace: {{ include "grafana.namespace" . }} + annotations: + comment: Limit image-renderer egress traffic to grafana +spec: + podSelector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + {{- with .Values.imageRenderer.podLabels }} + {{- toYaml . | nindent 6 }} + {{- end }} + + policyTypes: + - Egress + egress: + # allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # talk only to grafana + - ports: + - port: {{ .Values.service.targetPort }} + protocol: TCP + to: + - namespaceSelector: + matchLabels: + name: {{ include "grafana.namespace" . }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 14 }} + {{- with .Values.podLabels }} + {{- toYaml . 
| nindent 14 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml new file mode 100644 index 0000000..f8da127 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml @@ -0,0 +1,31 @@ +{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.imageRenderer.service.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + {{- with .Values.imageRenderer.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + ports: + - name: {{ .Values.imageRenderer.service.portName }} + port: {{ .Values.imageRenderer.service.port }} + protocol: TCP + targetPort: {{ .Values.imageRenderer.service.targetPort }} + {{- with .Values.imageRenderer.appProtocol }} + appProtocol: {{ . }} + {{- end }} + selector: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml new file mode 100644 index 0000000..5d9f09d --- /dev/null +++ b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml @@ -0,0 +1,48 @@ +{{- if .Values.imageRenderer.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }}-image-renderer + {{- if .Values.imageRenderer.serviceMonitor.namespace }} + namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.imageRenderer.labels" . | nindent 4 }} + {{- with .Values.imageRenderer.serviceMonitor.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.imageRenderer.service.portName }} + {{- with .Values.imageRenderer.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.imageRenderer.serviceMonitor.path }} + scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} + {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.imageRenderer.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}-image-renderer" + selector: + matchLabels: + {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/ingress.yaml b/operations/deployment/helm/grafana/templates/ingress.yaml new file mode 100644 index 0000000..063cdfa --- /dev/null +++ b/operations/deployment/helm/grafana/templates/ingress.yaml @@ -0,0 +1,78 @@ +{{- if .Values.ingress.enabled -}} +{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} +{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} +{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} +{{- $fullName := include "grafana.fullname" . -}} +{{- $servicePort := .Values.service.port -}} +{{- $ingressPath := .Values.ingress.path -}} +{{- $ingressPathType := .Values.ingress.pathType -}} +{{- $extraPaths := .Values.ingress.extraPaths -}} +apiVersion: {{ include "grafana.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ $fullName }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.ingress.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.ingress.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ tpl $value $ | quote }} + {{- end }} + {{- end }} +spec: + {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} + ingressClassName: {{ .Values.ingress.ingressClassName }} + {{- end -}} + {{- with .Values.ingress.tls }} + tls: + {{- tpl (toYaml .) $ | nindent 4 }} + {{- end }} + rules: + {{- if .Values.ingress.hosts }} + {{- range .Values.ingress.hosts }} + - host: {{ tpl . $ }} + http: + paths: + {{- with $extraPaths }} + {{- toYaml . | nindent 10 }} + {{- end }} + - path: {{ $ingressPath }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- end }} + {{- else }} + - http: + paths: + - backend: + {{- if $ingressApiIsStable }} + service: + name: {{ $fullName }} + port: + number: {{ $servicePort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $servicePort }} + {{- end }} + {{- with $ingressPath }} + path: {{ . }} + {{- end }} + {{- if $ingressSupportsPathType }} + pathType: {{ $ingressPathType }} + {{- end }} + {{- end -}} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/networkpolicy.yaml b/operations/deployment/helm/grafana/templates/networkpolicy.yaml new file mode 100644 index 0000000..4cd3ed6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/networkpolicy.yaml @@ -0,0 +1,61 @@ +{{- if .Values.networkPolicy.enabled }} +apiVersion: networking.k8s.io/v1 +kind: NetworkPolicy +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + policyTypes: + {{- if .Values.networkPolicy.ingress }} + - Ingress + {{- end }} + {{- if .Values.networkPolicy.egress.enabled }} + - Egress + {{- end }} + podSelector: + matchLabels: + {{- include "grafana.selectorLabels" . 
| nindent 6 }} + + {{- if .Values.networkPolicy.egress.enabled }} + egress: + {{- if not .Values.networkPolicy.egress.blockDNSResolution }} + - ports: + - port: 53 + protocol: UDP + {{- end }} + - ports: + {{ .Values.networkPolicy.egress.ports | toJson }} + {{- with .Values.networkPolicy.egress.to }} + to: + {{- toYaml . | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.networkPolicy.ingress }} + ingress: + - ports: + - port: {{ .Values.service.targetPort }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "grafana.fullname" . }}-client: "true" + {{- with .Values.networkPolicy.explicitNamespacesSelector }} + - namespaceSelector: + {{- toYaml . | nindent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "grafana.labels" . | nindent 14 }} + role: read + {{- end }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml new file mode 100644 index 0000000..0525121 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget }} +apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- with .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ . }} + {{- end }} + {{- with .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ . }} + {{- end }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml new file mode 100644 index 0000000..eed7af9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml @@ -0,0 +1,49 @@ +{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' + seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' + {{- if .Values.rbac.pspUseAppArmor }} + apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' + apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' + {{- end }} +spec: + privileged: false + allowPrivilegeEscalation: false + requiredDropCapabilities: + # Default set from Docker, with DAC_OVERRIDE and CHOWN + - ALL + volumes: + - 'configMap' + - 'emptyDir' + - 'projected' + - 'csi' + - 'secret' + - 'downwardAPI' + - 'persistentVolumeClaim' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'RunAsAny' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + # Forbid adding the root group. 
+ - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/pvc.yaml b/operations/deployment/helm/grafana/templates/pvc.yaml new file mode 100644 index 0000000..eb8f87f --- /dev/null +++ b/operations/deployment/helm/grafana/templates/pvc.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.persistence.extraPvcLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.persistence.finalizers }} + finalizers: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- with .Values.persistence.storageClassName }} + storageClassName: {{ . }} + {{- end }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 6 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/role.yaml b/operations/deployment/helm/grafana/templates/role.yaml new file mode 100644 index 0000000..4b5edd9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/role.yaml @@ -0,0 +1,32 @@ +{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} +rules: + {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} + - apiGroups: ['extensions'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}] + {{- end }} + {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} + - apiGroups: [""] # "" indicates the core API group + resources: ["configmaps", "secrets"] + verbs: ["get", "watch", "list"] + {{- end }} + {{- with .Values.rbac.extraRoleRules }} + {{- toYaml . | nindent 2 }} + {{- end}} +{{- else }} +rules: [] +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/rolebinding.yaml b/operations/deployment/helm/grafana/templates/rolebinding.yaml new file mode 100644 index 0000000..58f77c6 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/rolebinding.yaml @@ -0,0 +1,25 @@ +{{- if .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + {{- if .Values.rbac.useExistingRole }} + name: {{ .Values.rbac.useExistingRole }} + {{- else }} + name: {{ include "grafana.fullname" . }} + {{- end }} +subjects: +- kind: ServiceAccount + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret-env.yaml b/operations/deployment/helm/grafana/templates/secret-env.yaml new file mode 100644 index 0000000..eb14aac --- /dev/null +++ b/operations/deployment/helm/grafana/templates/secret-env.yaml @@ -0,0 +1,14 @@ +{{- if .Values.envRenderSecret }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }}-env + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} +type: Opaque +data: +{{- range $key, $val := .Values.envRenderSecret }} + {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} +{{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret.yaml b/operations/deployment/helm/grafana/templates/secret.yaml new file mode 100644 index 0000000..5cbd527 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/secret.yaml @@ -0,0 +1,26 @@ +{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} + admin-user: {{ .Values.adminUser | b64enc | quote }} + {{- if .Values.adminPassword }} + admin-password: {{ .Values.adminPassword | b64enc | quote }} + {{- else }} + admin-password: {{ include "grafana.password" . }} + {{- end }} + {{- end }} + {{- if not .Values.ldap.existingSecret }} + ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/service.yaml b/operations/deployment/helm/grafana/templates/service.yaml new file mode 100644 index 0000000..9102c1e --- /dev/null +++ b/operations/deployment/helm/grafana/templates/service.yaml @@ -0,0 +1,58 @@ +{{- if .Values.service.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.service.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} +spec: + {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} + type: ClusterIP + {{- with .Values.service.clusterIP }} + clusterIP: {{ . }} + {{- end }} + {{- else if eq .Values.service.type "LoadBalancer" }} + type: {{ .Values.service.type }} + {{- with .Values.service.loadBalancerIP }} + loadBalancerIP: {{ . 
}} + {{- end }} + {{- with .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- else }} + type: {{ .Values.service.type }} + {{- end }} + {{- with .Values.service.externalIPs }} + externalIPs: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.service.externalTrafficPolicy }} + externalTrafficPolicy: {{ . }} + {{- end }} + ports: + - name: {{ .Values.service.portName }} + port: {{ .Values.service.port }} + protocol: TCP + targetPort: {{ .Values.service.targetPort }} + {{- with .Values.service.appProtocol }} + appProtocol: {{ . }} + {{- end }} + {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} + nodePort: {{ .Values.service.nodePort }} + {{- end }} + {{- with .Values.extraExposePorts }} + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + selector: + {{- include "grafana.selectorLabels" . | nindent 4 }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/serviceaccount.yaml b/operations/deployment/helm/grafana/templates/serviceaccount.yaml new file mode 100644 index 0000000..784e71b --- /dev/null +++ b/operations/deployment/helm/grafana/templates/serviceaccount.yaml @@ -0,0 +1,17 @@ +{{- if .Values.serviceAccount.create }} +{{- $root := . -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- tpl (toYaml . | nindent 4) $root }} + {{- end }} + name: {{ include "grafana.serviceAccountName" . }} + namespace: {{ include "grafana.namespace" . }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/servicemonitor.yaml b/operations/deployment/helm/grafana/templates/servicemonitor.yaml new file mode 100644 index 0000000..0359013 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/servicemonitor.yaml @@ -0,0 +1,52 @@ +{{- if .Values.serviceMonitor.enabled }} +--- +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "grafana.fullname" . }} + {{- if .Values.serviceMonitor.namespace }} + namespace: {{ tpl .Values.serviceMonitor.namespace . }} + {{- else }} + namespace: {{ include "grafana.namespace" . }} + {{- end }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.serviceMonitor.labels }} + {{- tpl (toYaml . | nindent 4) $ }} + {{- end }} +spec: + endpoints: + - port: {{ .Values.service.portName }} + {{- with .Values.serviceMonitor.interval }} + interval: {{ . }} + {{- end }} + {{- with .Values.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ . }} + {{- end }} + honorLabels: true + path: {{ .Values.serviceMonitor.path }} + scheme: {{ .Values.serviceMonitor.scheme }} + {{- with .Values.serviceMonitor.tlsConfig }} + tlsConfig: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.relabelings }} + relabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + {{- with .Values.serviceMonitor.metricRelabelings }} + metricRelabelings: + {{- toYaml . | nindent 6 }} + {{- end }} + jobLabel: "{{ .Release.Name }}" + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + namespaceSelector: + matchNames: + - {{ include "grafana.namespace" . }} + {{- with .Values.serviceMonitor.targetLabels }} + targetLabels: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/statefulset.yaml b/operations/deployment/helm/grafana/templates/statefulset.yaml new file mode 100644 index 0000000..e6c944a --- /dev/null +++ b/operations/deployment/helm/grafana/templates/statefulset.yaml @@ -0,0 +1,56 @@ +{{- $sts := list "sts" "StatefulSet" "statefulset" -}} +{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "grafana.fullname" . }} + namespace: {{ include "grafana.namespace" . }} + labels: + {{- include "grafana.labels" . | nindent 4 }} + {{- with .Values.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + replicas: {{ .Values.replicas }} + selector: + matchLabels: + {{- include "grafana.selectorLabels" . | nindent 6 }} + serviceName: {{ include "grafana.fullname" . }}-headless + template: + metadata: + labels: + {{- include "grafana.selectorLabels" . | nindent 8 }} + {{- with .Values.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + annotations: + checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} + checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} + {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- end }} + kubectl.kubernetes.io/default-container: {{ .Chart.Name }} + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- include "grafana.pod" . | nindent 6 }} + {{- if .Values.persistence.enabled}} + volumeClaimTemplates: + - metadata: + name: storage + spec: + accessModes: {{ .Values.persistence.accessModes }} + storageClassName: {{ .Values.persistence.storageClassName }} + resources: + requests: + storage: {{ .Values.persistence.size }} + {{- with .Values.persistence.selectorLabels }} + selector: + matchLabels: + {{- toYaml . | nindent 10 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml new file mode 100644 index 0000000..01c96c9 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml @@ -0,0 +1,20 @@ +{{- if .Values.testFramework.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +data: + run.sh: |- + @test "Test Health" { + url="http://{{ include "grafana.fullname" . 
}}/api/health" + + code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') + [ "$code" == "200" ] + } +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml new file mode 100644 index 0000000..1821772 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml @@ -0,0 +1,32 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: policy/v1beta1 +kind: PodSecurityPolicy +metadata: + name: {{ include "grafana.fullname" . }}-test + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +spec: + allowPrivilegeEscalation: true + privileged: false + hostNetwork: false + hostIPC: false + hostPID: false + fsGroup: + rule: RunAsAny + seLinux: + rule: RunAsAny + supplementalGroups: + rule: RunAsAny + runAsUser: + rule: RunAsAny + volumes: + - configMap + - downwardAPI + - emptyDir + - projected + - csi + - secret +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-role.yaml b/operations/deployment/helm/grafana/templates/tests/test-role.yaml new file mode 100644 index 0000000..cb4c782 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-role.yaml @@ -0,0 +1,17 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +rules: + - apiGroups: ['policy'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ include "grafana.fullname" . }}-test] +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml new file mode 100644 index 0000000..f40d791 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml @@ -0,0 +1,20 @@ +{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ include "grafana.fullname" . }}-test + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + labels: + {{- include "grafana.labels" . | nindent 4 }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ include "grafana.fullname" . }}-test +subjects: + - kind: ServiceAccount + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . 
}} +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml new file mode 100644 index 0000000..38fba35 --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "grafana.labels" . | nindent 4 }} + name: {{ include "grafana.serviceAccountNameTest" . }} + namespace: {{ include "grafana.namespace" . }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" +{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test.yaml b/operations/deployment/helm/grafana/templates/tests/test.yaml new file mode 100644 index 0000000..15067ae --- /dev/null +++ b/operations/deployment/helm/grafana/templates/tests/test.yaml @@ -0,0 +1,49 @@ +{{- if .Values.testFramework.enabled }} +{{- $root := . }} +apiVersion: v1 +kind: Pod +metadata: + name: {{ include "grafana.fullname" . }}-test + labels: + {{- include "grafana.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test-success + "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" + namespace: {{ include "grafana.namespace" . }} +spec: + serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }} + {{- with .Values.testFramework.securityContext }} + securityContext: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} + imagePullSecrets: + {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 4 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- tpl (toYaml .) $root | nindent 4 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 4 }} + {{- end }} + containers: + - name: {{ .Release.Name }}-test + image: "{{ .Values.global.imageRegistry | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}" + imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" + command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] + volumeMounts: + - mountPath: /tests + name: tests + readOnly: true + volumes: + - name: tests + configMap: + name: {{ include "grafana.fullname" . }}-test + restartPolicy: Never +{{- end }} diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml index 3b339df..07502cc 100644 --- a/operations/deployment/helm/grafana/values.yaml +++ b/operations/deployment/helm/grafana/values.yaml @@ -1,218 +1,1282 @@ -grafana: - replicas: 1 - +global: + # -- Overrides the Docker registry globally for all images + imageRegistry: null + + # To help compatibility with other charts which use global.imagePullSecrets. + # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). + # Can be tempalted. 
+ # global: + # imagePullSecrets: + # - name: pullSecret1 + # - name: pullSecret2 + # or + # global: + # imagePullSecrets: + # - pullSecret1 + # - pullSecret2 + imagePullSecrets: [] + +rbac: + create: true + ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) + # useExistingRole: name-of-some-role + # useExistingClusterRole: name-of-some-clusterRole + pspEnabled: false + pspUseAppArmor: false + namespaced: false + extraRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] + extraClusterRoleRules: [] + # - apiGroups: [] + # resources: [] + # verbs: [] +serviceAccount: + create: true + name: + nameTest: + ## ServiceAccount labels. + labels: {} +## Service account annotations. Can be templated. +# annotations: +# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here + autoMount: true + +replicas: 1 + +## Create a headless service for the deployment +headlessService: false + +## Create HorizontalPodAutoscaler object for deployment type +# +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + +## See `kubectl explain poddisruptionbudget.spec` for more +## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ +podDisruptionBudget: {} +# apiVersion: "" +# minAvailable: 1 +# maxUnavailable: 1 + +## See `kubectl explain deployment.spec.strategy` for more +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy +deploymentStrategy: + type: RollingUpdate + +readinessProbe: + httpGet: + path: /api/health + port: 3000 + +livenessProbe: + httpGet: + path: /api/health + port: 3000 + initialDelaySeconds: 60 + timeoutSeconds: 30 + failureThreshold: 10 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: "default-scheduler" + +image: + # -- The Docker registry + registry: docker.io + # -- Docker image repository + repository: grafana/grafana + # Overrides the Grafana image tag whose default is the chart appVersion + tag: "" + sha: "" + pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Can be templated. + ## + pullSecrets: [] + # - myRegistrKeySecretName + +testFramework: + enabled: true image: - repository: grafana/grafana - tag: 9.0.1 + # -- The Docker registry + registry: docker.io + repository: bats/bats + tag: "v1.4.1" + imagePullPolicy: IfNotPresent + securityContext: {} + +securityContext: + runAsNonRoot: true + runAsUser: 472 + runAsGroup: 472 + fsGroup: 472 + +containerSecurityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + +# Enable creating the grafana configmap +createConfigmap: true + +# Extra configmaps to mount in grafana pods +# Values are templated. +extraConfigmapMounts: [] + # - name: certs-configmap + # mountPath: /etc/grafana/ssl/ + # subPath: certificates.crt # (optional) + # configMap: certs-configmap + # readOnly: true + + +extraEmptyDirMounts: [] + # - name: provisioning-notifiers + # mountPath: /etc/grafana/provisioning/notifiers + + +# Apply extra labels to common labels. 
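+# A minimal illustrative sketch only; the label keys and values below are hypothetical:
+# extraLabels:
+#   team: observability
+#   environment: prod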
+extraLabels: {} + +## Assign a PriorityClassName to pods if set +# priorityClassName: + +downloadDashboardsImage: + # -- The Docker registry + registry: docker.io + repository: curlimages/curl + tag: 7.85.0 + sha: "" + pullPolicy: IfNotPresent + +downloadDashboards: + env: {} + envFromSecret: "" + resources: {} + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## Pod Annotations +# podAnnotations: {} + +## Pod Labels +# podLabels: {} + +podPortName: grafana +gossipPortName: gossip +## Deployment annotations +# annotations: {} + +## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). +## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. +## ref: http://kubernetes.io/docs/user-guide/services/ +## +service: + enabled: true + type: ClusterIP + port: 80 + targetPort: 3000 + # targetPort: 4181 To be used with a proxy extraContainer + ## Service annotations. Can be templated. + annotations: {} + labels: {} + portName: service + # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + +serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) + labels: {} + interval: 30s + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + metricRelabelings: [] + targetLabels: [] + +extraExposePorts: [] + # - name: keycloak + # port: 8080 + # targetPort: 8080 + +# overrides pod.spec.hostAliases in the grafana deployment's pods +hostAliases: [] + # - ip: "1.2.3.4" + # hostnames: + # - "my.host.com" + +ingress: + enabled: false + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + labels: {} + path: / + + # pathType is only for k8s >= 1.1= + pathType: Prefix + + hosts: + - chart-example.local + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
+ extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: use-annotation + + + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} +# limits: +# cpu: 100m +# memory: 128Mi +# requests: +# cpu: 100m +# memory: 128Mi + +## Node labels for pod assignment +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +# +nodeSelector: {} + +## Tolerations for pod assignment +## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Affinity for pod assignment (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Topology Spread Constraints +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +## +topologySpreadConstraints: [] + +## Additional init containers (evaluated as template) +## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ +## +extraInitContainers: [] + +## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod +extraContainers: "" +# extraContainers: | +# - name: proxy +# image: quay.io/gambol99/keycloak-proxy:latest +# args: +# - -provider=github +# - -client-id= +# - -client-secret= +# - -github-org= +# - -email-domain=* +# - -cookie-secret= +# - -http-address=http://0.0.0.0:4181 +# - -upstream-url=http://127.0.0.1:3000 +# ports: +# - name: proxy-web +# containerPort: 4181 + +## Volumes that can be used in init containers that will not be mounted to deployment pods +extraContainerVolumes: [] +# - name: volume-from-secret +# secret: +# secretName: secret-to-mount +# - name: empty-dir-volume +# emptyDir: {} + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + type: pvc + enabled: false + # storageClassName: default + accessModes: + - ReadWriteOnce + size: 10Gi + # annotations: {} + finalizers: + - kubernetes.io/pvc-protection + # selectorLabels: {} + ## Sub-directory of the PV to mount. Can be templated. + # subPath: "" + ## Name of an existing PVC. Can be templated. + # existingClaim: + ## Extra labels to apply to a PVC. + extraPvcLabels: {} + + ## If persistence is not enabled, this allows to mount the + ## local storage in-memory to improve performance + ## + inMemory: + enabled: false + ## The maximum usage on memory medium EmptyDir would be + ## the minimum value between the SizeLimit specified + ## here and the sum of memory limits of all containers in a pod + ## + # sizeLimit: 300Mi + +initChownData: + ## If false, data ownership will not be reset at startup + ## This allows the grafana-server to be run with an arbitrary user + ## + enabled: true + + ## initChownData container image + ## + image: + # -- The Docker registry + registry: docker.io + repository: library/busybox + tag: "1.31.1" sha: "" pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## - # pullSecrets: - # - myRegistrKeySecretName - - serviceAccount: - autoMount: true - ingress: + ## initChownData resource requests and limits + ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: {} + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + securityContext: + runAsNonRoot: false + runAsUser: 0 + seccompProfile: + type: RuntimeDefault + capabilities: + add: + - CHOWN + +# Administrator credentials when not using an existing secret (see below) +adminUser: admin +# adminPassword: strongpassword + +# Use an existing secret for the admin user. +admin: + ## Name of the secret. Can be templated. + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + +## Define command to be executed at startup by grafana container +## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) +## Default is "run.sh" as defined in grafana's Dockerfile +# command: +# - "sh" +# - "/run.sh" + +## Optionally define args if command is used +## Needed if using `hashicorp/envconsul` to manage secrets +## By default no arguments are set +# args: +# - "-secret" +# - "secret/grafana" +# - "./grafana" + +## Extra environment variables that will be pass onto deployment pods +## +## to provide grafana with access to CloudWatch on AWS EKS: +## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) +## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the +## same oidc eks provider as noted before (same as the existing line) +## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name +## +## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", +## +## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess +## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) +## +## env: +## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here +## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token +## AWS_REGION: us-east-1 +## +## 5. uncomment the EKS section in extraSecretMounts: below +## 6. uncomment the annotation section in the serviceAccount: above +## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn + +env: {} + +## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core +## Renders in container spec as: +## env: +## ... +## - name: +## valueFrom: +## +envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + +## The name of a secret in the same kubernetes namespace which contain values to be added to the environment +## This can be useful for auth tokens, etc. Value is templated. +envFromSecret: "" + +## Sensible environment variables that will be rendered as new secret object +## This can be useful for auth tokens, etc. 
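+## A short illustrative sketch (hypothetical keys, placeholder values); entries given here are
+## tpl-rendered and base64-encoded into the "<fullname>-env" Secret by templates/secret-env.yaml:
+## envRenderSecret:
+##   GF_DATABASE_PASSWORD: supersecret
+##   GF_SECURITY_SECRET_KEY: another-placeholder-secret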
+## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm +## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function +envRenderSecret: {} + +## The names of secrets in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. +## Name is templated. +envFromSecrets: [] +## - name: secret-name +## optional: true + +## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment +## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. +## Name is templated. +## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core +envFromConfigMaps: [] +## - name: configmap-name +## optional: true + +# Inject Kubernetes services as environment variables. +# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables +enableServiceLinks: true + +## Additional grafana server secret mounts +# Defines additional mounts with secrets. Secrets must be manually created in the namespace. +extraSecretMounts: [] + # - name: secret-files + # mountPath: /etc/secrets + # secretName: grafana-secret-files + # readOnly: true + # subPath: "" + # + # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) + # - name: aws-iam-token + # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount + # readOnly: true + # projected: + # defaultMode: 420 + # sources: + # - serviceAccountToken: + # audience: sts.amazonaws.com + # expirationSeconds: 86400 + # path: token + # + # for CSI e.g. Azure Key Vault use the following + # - name: secrets-store-inline + # mountPath: /run/secrets + # readOnly: true + # csi: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "akv-grafana-spc" + # nodePublishSecretRef: # Only required when using service principal mode + # name: grafana-akv-creds # Only required when using service principal mode + +## Additional grafana server volume mounts +# Defines additional volume mounts. +extraVolumeMounts: [] + # - name: extra-volume-0 + # mountPath: /mnt/volume0 + # readOnly: true + # existingClaim: volume-claim + # - name: extra-volume-1 + # mountPath: /mnt/volume1 + # readOnly: true + # hostPath: /usr/shared/ + # - name: grafana-secrets + # mountPath: /mnt/volume2 + # csi: true + # data: + # driver: secrets-store.csi.k8s.io + # readOnly: true + # volumeAttributes: + # secretProviderClass: "grafana-env-spc" + +## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request +lifecycleHooks: {} + # postStart: + # exec: + # command: [] + +## Pass the plugins you want installed as a list. +## +plugins: [] + # - digrich-bubblechart-panel + # - grafana-clock-panel + ## You can also use other plugin download URL, as long as they are valid zip files, + ## and specify the name of the plugin after the semicolon. 
Like this: + # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource + +## Configure grafana datasources +## ref: http://docs.grafana.org/administration/provisioning/#datasources +## +datasources: {} +# datasources.yaml: +# apiVersion: 1 +# datasources: +# - name: Prometheus +# type: prometheus +# url: http://prometheus-prometheus-server +# access: proxy +# isDefault: true +# - name: CloudWatch +# type: cloudwatch +# access: proxy +# uid: cloudwatch +# editable: false +# jsonData: +# authType: default +# defaultRegion: us-east-1 +# deleteDatasources: [] +# - name: Prometheus + +## Configure grafana alerting (can be templated) +## ref: http://docs.grafana.org/administration/provisioning/#alerting +## +alerting: {} + # rules.yaml: + # apiVersion: 1 + # groups: + # - orgId: 1 + # name: '{{ .Chart.Name }}_my_rule_group' + # folder: my_first_folder + # interval: 60s + # rules: + # - uid: my_id_1 + # title: my_first_rule + # condition: A + # data: + # - refId: A + # datasourceUid: '-100' + # model: + # conditions: + # - evaluator: + # params: + # - 3 + # type: gt + # operator: + # type: and + # query: + # params: + # - A + # reducer: + # type: last + # type: query + # datasource: + # type: __expr__ + # uid: '-100' + # expression: 1==0 + # intervalMs: 1000 + # maxDataPoints: 43200 + # refId: A + # type: math + # dashboardUid: my_dashboard + # panelId: 123 + # noDataState: Alerting + # for: 60s + # annotations: + # some_key: some_value + # labels: + # team: sre_team_1 + # contactpoints.yaml: + # secret: + # apiVersion: 1 + # contactPoints: + # - orgId: 1 + # name: cp_1 + # receivers: + # - uid: first_uid + # type: pagerduty + # settings: + # integrationKey: XXX + # severity: critical + # class: ping failure + # component: Grafana + # group: app-stack + # summary: | + # {{ `{{ include "default.message" . }}` }} + +## Configure notifiers +## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels +## +notifiers: {} +# notifiers.yaml: +# notifiers: +# - name: email-notifier +# type: email +# uid: email1 +# # either: +# org_id: 1 +# # or +# org_name: Main Org. +# is_default: true +# settings: +# addresses: an_email_address@example.com +# delete_notifiers: + +## Configure grafana dashboard providers +## ref: http://docs.grafana.org/administration/provisioning/#dashboards +## +## `path` must be /var/lib/grafana/dashboards/ +## +dashboardProviders: {} +# dashboardproviders.yaml: +# apiVersion: 1 +# providers: +# - name: 'default' +# orgId: 1 +# folder: '' +# type: file +# disableDeletion: false +# editable: true +# options: +# path: /var/lib/grafana/dashboards/default + +## Configure grafana dashboard to import +## NOTE: To use dashboards you must also enable/configure dashboardProviders +## ref: https://grafana.com/dashboards +## +## dashboards per provider, use provider name as key. 
+## +dashboards: {} + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + # local-dashboard-gitlab: + # url: https://example.com/repository/test-gitlab.json + # gitlabToken: '' + # local-dashboard-bitbucket: + # url: https://example.com/repository/test-bitbucket.json + # bearerToken: '' + # local-dashboard-azure: + # url: https://example.com/repository/test-azure.json + # basic: '' + # acceptHeader: '*/*' + +## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. +## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. +## ConfigMap data example: +## +## data: +## example-dashboard.json: | +## RAW_JSON +## +dashboardsConfigMaps: {} +# default: "" + +## Grafana's primary configuration +## NOTE: values in map will be converted to ini format +## ref: http://docs.grafana.org/installation/configuration/ +## +grafana.ini: + paths: + data: /var/lib/grafana/ + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + server: + domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" +## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: +## LDAP Authentication can be enabled with the following values on grafana.ini +## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + +## Grafana's LDAP configuration +## Templated by the template in _helpers.tpl +## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled +## ref: http://docs.grafana.org/installation/configuration/#auth-ldap +## ref: http://docs.grafana.org/installation/ldap/#configuration +ldap: + enabled: false + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. 
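+  # For example (illustrative secret name and file path), such a secret could be created with:
+  #   kubectl create secret generic grafana-ldap-toml --from-file=ldap-toml=./ldap.toml
+  # and then referenced below as existingSecret: grafana-ldap-toml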
+ existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + +## Grafana's SMTP configuration +## NOTE: To enable, grafana.ini must be configured with smtp.enabled +## ref: http://docs.grafana.org/installation/configuration/#smtp +smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. + existingSecret: "" + userKey: "user" + passwordKey: "password" + +## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders +## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards +sidecar: + image: + # -- The Docker registry + registry: quay.io + repository: kiwigrid/k8s-sidecar + tag: 1.25.2 + sha: "" + imagePullPolicy: IfNotPresent + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + securityContext: + allowPrivilegeEscalation: false + capabilities: + drop: + - ALL + seccompProfile: + type: RuntimeDefault + # skipTlsVerify Set to true to skip tls verification for kube api calls + # skipTlsVerify: true + enableUniqueFilenames: false + readinessProbe: {} + livenessProbe: {} + # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO + # logLevel: INFO + alerts: + enabled: false + # Additional environment variables for the alerts sidecar + env: {} + # Do not reprocess already processed unchanged resources on k8s API reconnect. + # ignoreAlreadyProcessed: true + # label that the configmaps with alert are marked with + label: grafana_alert + # value of label that the configmaps with alert are set to + labelValue: "" + # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. + # logLevel: INFO + # If specified, the sidecar will search for alert config-maps inside this namespace. + # Otherwise the namespace in which the sidecar is running will be used. + # It's also possible to specify ALL to search in all namespaces + searchNamespace: null + # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. + watchMethod: WATCH + # search in configmap, secret or both + resource: both + # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. + # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S + # watchServerTimeout: 3600 + # + # watchClientTimeout: is a client-side timeout, configuring your local socket. + # If you have a network outage dropping all packets with no RST/FIN, + # this is how long your client waits before realizing & dropping the connection. + # defaults to 66sec (sic!) + # watchClientTimeout: 60 + # + # Endpoint to send request to reload alerts + reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" + # Absolute path to shell script to execute after a alert got reloaded + script: null + skipReload: false + # This is needed if skipReload is true, to load any alerts defined at startup time. + # Deploy the alert sidecar as an initContainer. 
+ initAlerts: false
+ # Additional alert sidecar volume mounts
+ extraMounts: []
+ # Sets the size limit of the alert sidecar emptyDir volume
+ sizeLimit: {}
+ dashboards:
 enabled: false
- # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName
- # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress
- # ingressClassName: nginx
- # Values can be templated
- annotations:
- kubernetes.io/ingress.class: nginx
- cert-manager.io/cluster-issuer: letsencrypt-prod
- # cert-manager.io/cluster-issuer: letsencrypt-staging
-
- # https://cert-manager.io/docs/faq/acme/#:~:text=If%20you%20receive%20a%20tls,the%20actual%20certificate%20is%20issued
- cert-manager.io/issue-temporary-certificate: "true"
- acme.cert-manager.io/http01-edit-in-place: "true"
+ # Additional environment variables for the dashboards sidecar
+ env: {}
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
+ # ignoreAlreadyProcessed: true
+ SCProvider: true
+ # label that the configmaps with dashboards are marked with
+ label: grafana_dashboard
+ # value of label that the configmaps with dashboards are set to
+ labelValue: ""
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+ # logLevel: INFO
+ # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set)
+ folder: /tmp/dashboards
+ # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead
+ defaultFolderName: null
+ # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces.
+ # Otherwise the namespace in which the sidecar is running will be used.
+ # It's also possible to specify ALL to search in all namespaces.
+ searchNamespace: null
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+ watchMethod: WATCH
+ # search in configmap, secret or both
+ resource: both
+ # If specified, the sidecar will look for annotation with this name to create folder and put graph here.
+ # You can use this parameter together with `provider.foldersFromFilesStructure` to annotate configmaps and create folder structure.
+ folderAnnotation: null
+ # Endpoint to send request to reload dashboards
+ reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload"
+ # Absolute path to shell script to execute after a configmap got reloaded
+ script: null
+ skipReload: false
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+ # watchServerTimeout: 3600
+ #
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
+ # If you have a network outage dropping all packets with no RST/FIN,
+ # this is how long your client waits before realizing & dropping the connection.
+ # defaults to 66sec (sic!)
+ # watchClientTimeout: 60
+ #
+ # provider configuration that lets grafana manage the dashboards
+ provider:
+ # name of the provider, should be unique
+ name: sidecarProvider
+ # orgid as configured in grafana
+ orgid: 1
+ # folder in which the dashboards should be imported in grafana
+ folder: ''
+ # type of the provider
+ type: file
+ # disableDelete to activate an import-only behaviour
+ disableDelete: false
+ # allow updating provisioned dashboards from the UI
+ allowUiUpdates: false
+ # allow Grafana to replicate dashboard structure from filesystem
+ foldersFromFilesStructure: false
+ # Additional dashboard sidecar volume mounts
+ extraMounts: []
+ # Sets the size limit of the dashboard sidecar emptyDir volume
+ sizeLimit: {}
+ datasources:
+ enabled: false
+ # Additional environment variables for the datasources sidecar
+ env: {}
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
+ # ignoreAlreadyProcessed: true
+ # label that the configmaps with datasources are marked with
+ label: grafana_datasource
+ # value of label that the configmaps with datasources are set to
+ labelValue: ""
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+ # logLevel: INFO
+ # If specified, the sidecar will search for datasource config-maps inside this namespace.
+ # Otherwise the namespace in which the sidecar is running will be used.
+ # It's also possible to specify ALL to search in all namespaces
+ searchNamespace: null
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+ watchMethod: WATCH
+ # search in configmap, secret or both
+ resource: both
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+ # watchServerTimeout: 3600
+ #
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
+ # If you have a network outage dropping all packets with no RST/FIN,
+ # this is how long your client waits before realizing & dropping the connection.
+ # defaults to 66sec (sic!)
+ # watchClientTimeout: 60
+ #
+ # Endpoint to send request to reload datasources
+ reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload"
+ # Absolute path to shell script to execute after a datasource got reloaded
+ script: null
+ skipReload: false
+ # Deploy the datasource sidecar as an initContainer in addition to a container.
+ # This is needed if skipReload is true, to load any datasources defined at startup time.
+ initDatasources: false
+ # Sets the size limit of the datasource sidecar emptyDir volume
+ sizeLimit: {}
+ plugins:
+ enabled: false
+ # Additional environment variables for the plugins sidecar
+ env: {}
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
+ # ignoreAlreadyProcessed: true
+ # label that the configmaps with plugins are marked with
+ label: grafana_plugin
+ # value of label that the configmaps with plugins are set to
+ labelValue: ""
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+ # logLevel: INFO
+ # If specified, the sidecar will search for plugin config-maps inside this namespace.
+ # Otherwise the namespace in which the sidecar is running will be used.
+ # It's also possible to specify ALL to search in all namespaces
+ searchNamespace: null
+ # Method to use to detect ConfigMap changes.
With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+ watchMethod: WATCH
+ # search in configmap, secret or both
+ resource: both
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+ # watchServerTimeout: 3600
+ #
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
+ # If you have a network outage dropping all packets with no RST/FIN,
+ # this is how long your client waits before realizing & dropping the connection.
+ # defaults to 66sec (sic!)
+ # watchClientTimeout: 60
+ #
+ # Endpoint to send request to reload plugins
+ reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload"
+ # Absolute path to shell script to execute after a plugin got reloaded
+ script: null
+ skipReload: false
+ # Deploy the plugin sidecar as an initContainer in addition to a container.
+ # This is needed if skipReload is true, to load any plugins defined at startup time.
+ initPlugins: false
+ # Sets the size limit of the plugin sidecar emptyDir volume
+ sizeLimit: {}
+ notifiers:
+ enabled: false
+ # Additional environment variables for the notifiers sidecar
+ env: {}
+ # Do not reprocess already processed unchanged resources on k8s API reconnect.
+ # ignoreAlreadyProcessed: true
+ # label that the configmaps with notifiers are marked with
+ label: grafana_notifier
+ # value of label that the configmaps with notifiers are set to
+ labelValue: ""
+ # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL.
+ # logLevel: INFO
+ # If specified, the sidecar will search for notifier config-maps inside this namespace.
+ # Otherwise the namespace in which the sidecar is running will be used.
+ # It's also possible to specify ALL to search in all namespaces
+ searchNamespace: null
+ # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH request, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds.
+ watchMethod: WATCH
+ # search in configmap, secret or both
+ resource: both
+ # watchServerTimeout: request to the server, asking it to cleanly close the connection after that.
+ # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S
+ # watchServerTimeout: 3600
+ #
+ # watchClientTimeout: is a client-side timeout, configuring your local socket.
+ # If you have a network outage dropping all packets with no RST/FIN,
+ # this is how long your client waits before realizing & dropping the connection.
+ # defaults to 66sec (sic!)
+ # watchClientTimeout: 60
+ #
+ # Endpoint to send request to reload notifiers
+ reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload"
+ # Absolute path to shell script to execute after a notifier got reloaded
+ script: null
+ skipReload: false
+ # Deploy the notifier sidecar as an initContainer in addition to a container.
+ # This is needed if skipReload is true, to load any notifiers defined at startup time.
+ initNotifiers: false + # Sets the size limit of the notifier sidecar emptyDir volume + sizeLimit: {} + +## Override the deployment namespace +## +namespaceOverride: "" +## Number of old ReplicaSets to retain +## +revisionHistoryLimit: 10 + +## Add a seperate remote image renderer deployment/service +imageRenderer: + deploymentStrategy: {} + # Enable the image-renderer deployment & service + enabled: false + replicas: 1 + autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 5 + targetCPU: "60" + targetMemory: "" + behavior: {} + image: + # -- The Docker registry + registry: docker.io + # image-renderer Image repository + repository: grafana/grafana-image-renderer + # image-renderer Image tag + tag: latest + # image-renderer Image sha (optional) + sha: "" + # image-renderer ImagePullPolicy + pullPolicy: Always + # extra environment variables + env: + HTTP_HOST: "0.0.0.0" + # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 + # RENDERING_MODE: clustered + # IGNORE_HTTPS_ERRORS: true + + ## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. + ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core + ## Renders in container spec as: + ## env: + ## ... + ## - name: + ## valueFrom: + ## + envValueFrom: {} + # ENV_NAME: + # configMapKeyRef: + # name: configmap-name + # key: value_key + + # image-renderer deployment serviceAccount + serviceAccountName: "" + # image-renderer deployment securityContext + securityContext: {} + # image-renderer deployment container securityContext + containerSecurityContext: + seccompProfile: + type: RuntimeDefault + capabilities: + drop: ['ALL'] + allowPrivilegeEscalation: false + readOnlyRootFilesystem: true + ## image-renderer pod annotation + podAnnotations: {} + # image-renderer deployment Host Aliases + hostAliases: [] + # image-renderer deployment priority class + priorityClassName: '' + service: + # Enable the image-renderer service + enabled: true + # image-renderer service port name + portName: 'http' + # image-renderer service port used by both service and deployment + port: 8081 + targetPort: 8081 + # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" + appProtocol: "" + serviceMonitor: + ## If true, a ServiceMonitor CRD is created for a prometheus operator + ## https://github.com/coreos/prometheus-operator + ## + enabled: false + path: /metrics + # namespace: monitoring (defaults to use the namespace this chart is deployed to) labels: {} - path: / - - # pathType is only for k8s > 1.19 - pathType: Prefix - - hosts: - - grafana.dev.bitovi-staffing.com - - - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. - extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## Or for k8s > 1.19 - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: service - - - tls: - - secretName: grafana-tls - hosts: - - grafana.dev.bitovi-staffing.com - - # Administrator credentials when not using an existing secret (see below) - adminUser: admin - # adminPassword: set via values-secrets.yaml - - # Use an existing secret for the admin user. 
- admin: - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - - ## Configure grafana dashboard providers - ## ref: http://docs.grafana.org/administration/provisioning/#dashboards - ## - ## `path` must be /var/lib/grafana/dashboards/ - ## - dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'kubernetes' - orgId: 1 - folder: 'kubernetes' - type: file - disableDeletion: false - editable: true - options: - path: /var/lib/grafana/dashboards/kubernetes - ## Configure grafana dashboard to import - ## NOTE: To use dashboards you must also enable/configure dashboardProviders - ## ref: https://grafana.com/dashboards - ## - ## dashboards per provider, use provider name as key. + interval: 1m + scheme: http + tlsConfig: {} + scrapeTimeout: 30s + relabelings: [] + # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels + targetLabels: [] + # - targetLabel1 + # - targetLabel2 + # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana + grafanaProtocol: http + # In case a sub_path is used this needs to be added to the image renderer callback + grafanaSubPath: "" + # name of the image-renderer port on the pod + podPortName: http + # number of image-renderer replica sets to keep + revisionHistoryLimit: 10 + networkPolicy: + # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods + limitIngress: true + # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods + limitEgress: false + # Allow additional services to access image-renderer (eg. Prometheus operator when ServiceMonitor is enabled) + extraIngressSelectors: [] + resources: {} +# limits: +# cpu: 100m +# memory: 100Mi +# requests: +# cpu: 50m +# memory: 50Mi + ## Node labels for pod assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + # + nodeSelector: {} + + ## Tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## - dashboards: - # https://github.com/dotdc/grafana-dashboards-kubernetes - kubernetes: - k8s-addons-starboard-operator: - # k8s-addons-starboard-operator.json - gnetId: 16337 - datasource: Prometheus - k8s-system-api-server: - # k8s-system-api-server.json - gnetId: 15761 - datasource: Prometheus - k8s-system-coredns: - # k8s-system-coredns.json - gnetId: 15762 - datasource: Prometheus - k8s-views-global: - # k8s-views-global.json - gnetId: 15757 - datasource: Prometheus - k8s-views-namespaces: - # k8s-views-namespaces.json - gnetId: 15758 - datasource: Prometheus - k8s-views-nodes: - # k8s-views-nodes.json - gnetId: 15759 - datasource: Prometheus - k8s-views-pods: - # k8s-views-pods.json - gnetId: 15760 - datasource: Prometheus - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # token: '' - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # token: '' - # b64content: true - - ## Grafana's primary configuration - ## NOTE: values in map will be converted to ini format - ## ref: http://docs.grafana.org/installation/configuration/ - ## - grafana.ini: - paths: - data: /var/lib/grafana/data - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - 
provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net - ## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://api.github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: - ## LDAP Authentication can be enabled with the following values on grafana.ini - ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - - ## Grafana's LDAP configuration - ## Templated by the template in _helpers.tpl - ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled - ## ref: http://docs.grafana.org/installation/configuration/#auth-ldap - ## ref: http://docs.grafana.org/installation/ldap/#configuration - ldap: + tolerations: [] + + ## Affinity for pod assignment (evaluated as template) + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## + affinity: {} + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: "default-scheduler" + +networkPolicy: + ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + ## @param networkPolicy.allowExternal Don't require client label for connections + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to grafana port defined. + ## When true, grafana will accept connections from any source + ## (with the correct destination port). + ## + ingress: true + ## @param networkPolicy.ingress When true enables the creation + ## an ingress network policy + ## + allowExternal: true + ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed + ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the grafana. + ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. + ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + ## + ## + ## + ## + ## + ## + egress: + ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be + ## created allowing grafana to connect to external data sources from kubernetes cluster. enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. 
- existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - - ## Grafana's SMTP configuration - ## NOTE: To enable, grafana.ini must be configured with smtp.enabled - ## ref: http://docs.grafana.org/installation/configuration/#smtp - smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. - existingSecret: "" - userKey: "user" - passwordKey: "password" + ## + ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked + ## for all pods in the grafana namespace. + blockDNSResolution: false + ## + ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress + ports: [] + ## Add ports to the egress by specifying - port: + ## E.X. + ## - port: 80 + ## - port: 443 + ## + ## @param networkPolicy.egress.to Allow egress traffic to specific destinations + to: [] + ## Add destinations to the egress by specifying - ipBlock: + ## E.X. + ## to: + ## - namespaceSelector: + ## matchExpressions: + ## - {key: role, operator: In, values: [grafana]} + ## + ## + ## + ## + ## + +# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option +enableKubeBackwardCompatibility: false +useStatefulSet: false +# Create a dynamic manifests via values: +extraObjects: [] + # - apiVersion: "kubernetes-client.io/v1" + # kind: ExternalSecret + # metadata: + # name: grafana-secrets + # spec: + # backendType: gcpSecretsManager + # data: + # - key: grafana-admin-password + # name: adminPassword From 43bf300c297e6ecb57c47f8d86208946383c425c Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 21:23:58 +0530 Subject: [PATCH 27/99] =?UTF-8?q?Added=20env=20=C6=92ile?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- operations/deployment/helm/grafana/ENV_FILE | 2 ++ 1 file changed, 2 insertions(+) create mode 100644 operations/deployment/helm/grafana/ENV_FILE diff --git a/operations/deployment/helm/grafana/ENV_FILE b/operations/deployment/helm/grafana/ENV_FILE new file mode 100644 index 0000000..8ce79ba --- /dev/null +++ b/operations/deployment/helm/grafana/ENV_FILE @@ -0,0 +1,2 @@ +GF_SECURITY_ADMIN_USER=admin +GF_SECURITY_ADMIN_PASSWORD=grafana \ No newline at end of file From 67b29ba5186fb5c9fd1b7e02ceb0f2a1d0b35d3a Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 22:25:36 +0530 Subject: [PATCH 28/99] added aws-auth --- .../deployment/helm/aws-auth/Chart.yaml | 5 +++++ .../helm/aws-auth/bitops.config.yaml | 14 ++++++++++++ .../helm/aws-auth/example.bitops.config.yaml | 15 +++++++++++++ .../helm/aws-auth/templates/aws-auth.yaml | 22 +++++++++++++++++++ .../deployment/helm/aws-auth/values.yaml | 21 ++++++++++++++++++ 5 files changed, 77 insertions(+) create mode 100644 operations/deployment/helm/aws-auth/Chart.yaml create mode 100644 operations/deployment/helm/aws-auth/bitops.config.yaml create mode 100644 operations/deployment/helm/aws-auth/example.bitops.config.yaml create mode 100644 operations/deployment/helm/aws-auth/templates/aws-auth.yaml create mode 100644 operations/deployment/helm/aws-auth/values.yaml diff --git a/operations/deployment/helm/aws-auth/Chart.yaml 
b/operations/deployment/helm/aws-auth/Chart.yaml new file mode 100644 index 0000000..db7b2d3 --- /dev/null +++ b/operations/deployment/helm/aws-auth/Chart.yaml @@ -0,0 +1,5 @@ +apiVersion: v2 +appVersion: "1.0" +description: aws-auth configmap for EKS +name: aws-auth +version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/bitops.config.yaml b/operations/deployment/helm/aws-auth/bitops.config.yaml new file mode 100644 index 0000000..60d7548 --- /dev/null +++ b/operations/deployment/helm/aws-auth/bitops.config.yaml @@ -0,0 +1,14 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/example.bitops.config.yaml b/operations/deployment/helm/aws-auth/example.bitops.config.yaml new file mode 100644 index 0000000..eddb4b4 --- /dev/null +++ b/operations/deployment/helm/aws-auth/example.bitops.config.yaml @@ -0,0 +1,15 @@ +helm: + cli: + namespace: kube-system + timeout: 200s + debug: true + atomic: false + force: false + dry-run: false + options: + release-name: aws-auth + skip-deploy: false + k8s: + fetch: + kubeconfig: true + cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml new file mode 100644 index 0000000..e41dea6 --- /dev/null +++ b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml @@ -0,0 +1,22 @@ +{{- if .Values.configmap.enabled }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ .Values.configmap.name }} +data: +{{- if .Values.data.mapAccounts }} + mapAccounts: | + {{ .Values.data.mapAccounts| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapRoles }} + mapRoles: | + {{ .Values.data.mapRoles| nindent 4 | trim }} +{{- end }} + +{{- if .Values.data.mapUsers }} + mapUsers: | + {{ .Values.data.mapUsers| nindent 4 | trim }} +{{- end }} + +{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/values.yaml b/operations/deployment/helm/aws-auth/values.yaml new file mode 100644 index 0000000..90f836a --- /dev/null +++ b/operations/deployment/helm/aws-auth/values.yaml @@ -0,0 +1,21 @@ +configmap: + enabled: true + name: aws-auth +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file From ee915f4c4efbf2206e1b067afb6b0bc67b96f521 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 22:41:54 +0530 Subject: [PATCH 29/99] check with values file --- .../deployment/helm/aws-auth/Chart.yaml | 5 ----- .../helm/aws-auth/example.bitops.config.yaml | 15 ------------- .../helm/aws-auth/templates/aws-auth.yaml | 22 ------------------- .../deployment/helm/aws-auth/values.yaml | 21 ------------------ .../{aws-auth => grafana}/bitops.config.yaml | 2 +- 5 files changed, 1 insertion(+), 64 deletions(-) delete mode 100644 operations/deployment/helm/aws-auth/Chart.yaml delete mode 
100644 operations/deployment/helm/aws-auth/example.bitops.config.yaml delete mode 100644 operations/deployment/helm/aws-auth/templates/aws-auth.yaml delete mode 100644 operations/deployment/helm/aws-auth/values.yaml rename operations/deployment/helm/{aws-auth => grafana}/bitops.config.yaml (88%) diff --git a/operations/deployment/helm/aws-auth/Chart.yaml b/operations/deployment/helm/aws-auth/Chart.yaml deleted file mode 100644 index db7b2d3..0000000 --- a/operations/deployment/helm/aws-auth/Chart.yaml +++ /dev/null @@ -1,5 +0,0 @@ -apiVersion: v2 -appVersion: "1.0" -description: aws-auth configmap for EKS -name: aws-auth -version: 0.1.0 \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/example.bitops.config.yaml b/operations/deployment/helm/aws-auth/example.bitops.config.yaml deleted file mode 100644 index eddb4b4..0000000 --- a/operations/deployment/helm/aws-auth/example.bitops.config.yaml +++ /dev/null @@ -1,15 +0,0 @@ -helm: - cli: - namespace: kube-system - timeout: 200s - debug: true - atomic: false - force: false - dry-run: false - options: - release-name: aws-auth - skip-deploy: false - k8s: - fetch: - kubeconfig: true - cluster-name: prod-ekscluster \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml b/operations/deployment/helm/aws-auth/templates/aws-auth.yaml deleted file mode 100644 index e41dea6..0000000 --- a/operations/deployment/helm/aws-auth/templates/aws-auth.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.configmap.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ .Values.configmap.name }} -data: -{{- if .Values.data.mapAccounts }} - mapAccounts: | - {{ .Values.data.mapAccounts| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapRoles }} - mapRoles: | - {{ .Values.data.mapRoles| nindent 4 | trim }} -{{- end }} - -{{- if .Values.data.mapUsers }} - mapUsers: | - {{ .Values.data.mapUsers| nindent 4 | trim }} -{{- end }} - -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/values.yaml b/operations/deployment/helm/aws-auth/values.yaml deleted file mode 100644 index 90f836a..0000000 --- a/operations/deployment/helm/aws-auth/values.yaml +++ /dev/null @@ -1,21 +0,0 @@ -configmap: - enabled: true - name: aws-auth -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file diff --git a/operations/deployment/helm/aws-auth/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml similarity index 88% rename from operations/deployment/helm/aws-auth/bitops.config.yaml rename to operations/deployment/helm/grafana/bitops.config.yaml index 60d7548..0d5d6df 100644 --- a/operations/deployment/helm/aws-auth/bitops.config.yaml +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -7,7 +7,7 @@ helm: force: false dry-run: false options: - release-name: aws-auth + release-name: grafana skip-deploy: false k8s: fetch: From 4a15265c112e5219952a382fc16d0a05761a4657 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 22:51:47 +0530 Subject: [PATCH 30/99] Test with 
values mapping --- operations/deployment/helm/grafana/Chart.yaml | 15 ------ .../helm/grafana/ci/default-values.yaml | 1 - .../helm/grafana/ci/with-affinity-values.yaml | 16 ------ .../ci/with-dashboard-json-values.yaml | 53 ------------------- .../grafana/ci/with-dashboard-values.yaml | 19 ------- .../ci/with-extraconfigmapmounts-values.yaml | 7 --- .../ci/with-image-renderer-values.yaml | 19 ------- .../helm/grafana/ci/with-persistence.yaml | 3 -- .../grafana/dashboards/custom-dashboard.json | 1 - .../deployment/helm/grafana/values.yaml | 21 ++++++++ 10 files changed, 21 insertions(+), 134 deletions(-) delete mode 100644 operations/deployment/helm/grafana/ci/default-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-affinity-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-dashboard-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml delete mode 100644 operations/deployment/helm/grafana/ci/with-persistence.yaml delete mode 100644 operations/deployment/helm/grafana/dashboards/custom-dashboard.json diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml index adf084f..f71c5b1 100644 --- a/operations/deployment/helm/grafana/Chart.yaml +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -16,18 +16,3 @@ annotations: url: https://github.com/grafana/helm-charts - name: Upstream Project url: https://github.com/grafana/grafana -maintainers: - - name: zanhsieh - email: zanhsieh@gmail.com - - name: rtluckie - email: rluckie@cisco.com - - name: maorfr - email: maor.friedman@redhat.com - - name: Xtigyro - email: miroslav.hadzhiev@gmail.com - - name: torstenwalter - email: mail@torstenwalter.de -type: application -keywords: - - monitoring - - metric diff --git a/operations/deployment/helm/grafana/ci/default-values.yaml b/operations/deployment/helm/grafana/ci/default-values.yaml deleted file mode 100644 index fc2ba60..0000000 --- a/operations/deployment/helm/grafana/ci/default-values.yaml +++ /dev/null @@ -1 +0,0 @@ -# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
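The configmap/data block appended to operations/deployment/helm/grafana/values.yaml in this patch is the same mapping consumed by the aws-auth-style ConfigMap template introduced earlier in the series (templates/aws-auth.yaml). As a quick sanity check the chart can be rendered locally without installing anything; the sketch below is illustrative only, assuming the aws-auth-style template is still present under the grafana chart's templates/ directory at this point in the series and that the chart path matches your checkout. The release name grafana, the account ID and the role ARNs are the placeholder values from the patch, not real resources.

# Illustrative preview only (not part of any patch in this series).
# Render the chart locally and show just the ConfigMap template:
#
#   helm template grafana ./operations/deployment/helm/grafana \
#     --show-only templates/aws-auth.yaml
#
# With the values added in this patch, the output should look roughly like:
apiVersion: v1
kind: ConfigMap
metadata:
  name: grafana
data:
  mapAccounts: |
    - "000000000000"
  mapRoles: |
    - "groups":
      - "system:bootstrappers"
      - "system:nodes"
      "rolearn": "arn:aws:iam::000000000:role/prod-eksworker"
      "username": "system:node:{{EC2PrivateDNSName}}"
    - "groups":
      - "system:masters"
      "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111"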
diff --git a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml b/operations/deployment/helm/grafana/ci/with-affinity-values.yaml deleted file mode 100644 index f5b9b53..0000000 --- a/operations/deployment/helm/grafana/ci/with-affinity-values.yaml +++ /dev/null @@ -1,16 +0,0 @@ -affinity: - podAntiAffinity: - preferredDuringSchedulingIgnoredDuringExecution: - - podAffinityTerm: - labelSelector: - matchLabels: - app.kubernetes.io/instance: grafana-test - app.kubernetes.io/name: grafana - topologyKey: failure-domain.beta.kubernetes.io/zone - weight: 100 - requiredDuringSchedulingIgnoredDuringExecution: - - labelSelector: - matchLabels: - app.kubernetes.io/instance: grafana-test - app.kubernetes.io/name: grafana - topologyKey: kubernetes.io/hostname diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml deleted file mode 100644 index e0c4e41..0000000 --- a/operations/deployment/helm/grafana/ci/with-dashboard-json-values.yaml +++ /dev/null @@ -1,53 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - # An empty but valid dashboard - json: | - { - "__inputs": [], - "__requires": [ - { - "type": "grafana", - "id": "grafana", - "name": "Grafana", - "version": "6.3.5" - } - ], - "annotations": { - "list": [ - { - "builtIn": 1, - "datasource": "-- Grafana --", - "enable": true, - "hide": true, - "iconColor": "rgba(0, 211, 255, 1)", - "name": "Annotations & Alerts", - "type": "dashboard" - } - ] - }, - "editable": true, - "gnetId": null, - "graphTooltip": 0, - "id": null, - "links": [], - "panels": [], - "schemaVersion": 19, - "style": "dark", - "tags": [], - "templating": { - "list": [] - }, - "time": { - "from": "now-6h", - "to": "now" - }, - "timepicker": { - "refresh_intervals": ["5s"] - }, - "timezone": "", - "title": "Dummy Dashboard", - "uid": "IdcYQooWk", - "version": 1 - } - datasource: Prometheus diff --git a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml b/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml deleted file mode 100644 index 7b662c5..0000000 --- a/operations/deployment/helm/grafana/ci/with-dashboard-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -dashboards: - my-provider: - my-awesome-dashboard: - gnetId: 10000 - revision: 1 - datasource: Prometheus -dashboardProviders: - dashboardproviders.yaml: - apiVersion: 1 - providers: - - name: 'my-provider' - orgId: 1 - folder: '' - type: file - updateIntervalSeconds: 10 - disableDeletion: true - editable: true - options: - path: /var/lib/grafana/dashboards/my-provider diff --git a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml b/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml deleted file mode 100644 index 5cc44a0..0000000 --- a/operations/deployment/helm/grafana/ci/with-extraconfigmapmounts-values.yaml +++ /dev/null @@ -1,7 +0,0 @@ -extraConfigmapMounts: - - name: '{{ include "grafana.fullname" . }}' - configMap: '{{ include "grafana.fullname" . 
}}' - mountPath: /var/lib/grafana/dashboards/test-dashboard.json - # This is not a realistic test, but for this we only care about extraConfigmapMounts not being empty and pointing to an existing ConfigMap - subPath: grafana.ini - readOnly: true diff --git a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml b/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml deleted file mode 100644 index 32f3074..0000000 --- a/operations/deployment/helm/grafana/ci/with-image-renderer-values.yaml +++ /dev/null @@ -1,19 +0,0 @@ -podLabels: - customLableA: Aaaaa -imageRenderer: - enabled: true - env: - RENDERING_ARGS: --disable-gpu,--window-size=1280x758 - RENDERING_MODE: clustered - podLabels: - customLableB: Bbbbb - networkPolicy: - limitIngress: true - limitEgress: true - resources: - limits: - cpu: 1000m - memory: 1000Mi - requests: - cpu: 500m - memory: 50Mi diff --git a/operations/deployment/helm/grafana/ci/with-persistence.yaml b/operations/deployment/helm/grafana/ci/with-persistence.yaml deleted file mode 100644 index b92ca02..0000000 --- a/operations/deployment/helm/grafana/ci/with-persistence.yaml +++ /dev/null @@ -1,3 +0,0 @@ -persistence: - type: pvc - enabled: true diff --git a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json b/operations/deployment/helm/grafana/dashboards/custom-dashboard.json deleted file mode 100644 index 9e26dfe..0000000 --- a/operations/deployment/helm/grafana/dashboards/custom-dashboard.json +++ /dev/null @@ -1 +0,0 @@ -{} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml index 07502cc..dbd58d4 100644 --- a/operations/deployment/helm/grafana/values.yaml +++ b/operations/deployment/helm/grafana/values.yaml @@ -1280,3 +1280,24 @@ extraObjects: [] # data: # - key: grafana-admin-password # name: adminPassword +configmap: + enabled: true + name: grafana +data: + + # account number + mapAccounts: | + - "000000000000" + + # add eksworkers to nodes and bootstrappers + # ensure accounts match + # add sso and other users to masters as necessry + mapRoles: | + - "groups": + - "system:bootstrappers" + - "system:nodes" + "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" + "username": "system:node:{{EC2PrivateDNSName}}" + - "groups": + - "system:masters" + "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file From 39590a482b51c982155f4c8e6b748260582c7d93 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 23:19:21 +0530 Subject: [PATCH 31/99] Test with values mapping --- .../helm/grafana/templates/NOTES.txt | 55 - .../helm/grafana/templates/_helpers.tpl | 227 --- .../helm/grafana/templates/_pod.tpl | 1276 ----------------- .../helm/grafana/templates/clusterrole.yaml | 25 - .../grafana/templates/clusterrolebinding.yaml | 24 - .../helm/grafana/templates/configSecret.yaml | 43 - .../configmap-dashboard-provider.yaml | 29 - .../helm/grafana/templates/configmap.yaml | 144 -- .../templates/dashboards-json-configmap.yaml | 38 - .../helm/grafana/templates/deployment.yaml | 2 +- .../grafana/templates/extra-manifests.yaml | 4 - .../grafana/templates/headless-service.yaml | 22 - .../helm/grafana/templates/hpa.yaml | 52 - .../templates/image-renderer-deployment.yaml | 131 -- .../grafana/templates/image-renderer-hpa.yaml | 47 - .../image-renderer-network-policy.yaml | 79 - .../templates/image-renderer-service.yaml | 31 - .../image-renderer-servicemonitor.yaml | 48 - 
.../helm/grafana/templates/ingress.yaml | 78 - .../helm/grafana/templates/networkpolicy.yaml | 61 - .../templates/poddisruptionbudget.yaml | 22 - .../grafana/templates/podsecuritypolicy.yaml | 49 - .../helm/grafana/templates/pvc.yaml | 36 - .../helm/grafana/templates/role.yaml | 32 - .../helm/grafana/templates/rolebinding.yaml | 25 - .../helm/grafana/templates/secret-env.yaml | 14 - .../helm/grafana/templates/secret.yaml | 26 - .../helm/grafana/templates/service.yaml | 58 - .../grafana/templates/serviceaccount.yaml | 17 - .../grafana/templates/servicemonitor.yaml | 52 - .../helm/grafana/templates/statefulset.yaml | 56 - .../templates/tests/test-configmap.yaml | 20 - .../tests/test-podsecuritypolicy.yaml | 32 - .../grafana/templates/tests/test-role.yaml | 17 - .../templates/tests/test-rolebinding.yaml | 20 - .../templates/tests/test-serviceaccount.yaml | 12 - .../helm/grafana/templates/tests/test.yaml | 49 - 37 files changed, 1 insertion(+), 2952 deletions(-) delete mode 100644 operations/deployment/helm/grafana/templates/NOTES.txt delete mode 100644 operations/deployment/helm/grafana/templates/_helpers.tpl delete mode 100644 operations/deployment/helm/grafana/templates/_pod.tpl delete mode 100644 operations/deployment/helm/grafana/templates/clusterrole.yaml delete mode 100644 operations/deployment/helm/grafana/templates/clusterrolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configSecret.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml delete mode 100644 operations/deployment/helm/grafana/templates/configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/extra-manifests.yaml delete mode 100644 operations/deployment/helm/grafana/templates/headless-service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/hpa.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml delete mode 100644 operations/deployment/helm/grafana/templates/ingress.yaml delete mode 100644 operations/deployment/helm/grafana/templates/networkpolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml delete mode 100644 operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/pvc.yaml delete mode 100644 operations/deployment/helm/grafana/templates/role.yaml delete mode 100644 operations/deployment/helm/grafana/templates/rolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/secret-env.yaml delete mode 100644 operations/deployment/helm/grafana/templates/secret.yaml delete mode 100644 operations/deployment/helm/grafana/templates/service.yaml delete mode 100644 operations/deployment/helm/grafana/templates/serviceaccount.yaml delete mode 100644 operations/deployment/helm/grafana/templates/servicemonitor.yaml delete mode 100644 operations/deployment/helm/grafana/templates/statefulset.yaml delete mode 100644 
operations/deployment/helm/grafana/templates/tests/test-configmap.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-role.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml delete mode 100644 operations/deployment/helm/grafana/templates/tests/test.yaml diff --git a/operations/deployment/helm/grafana/templates/NOTES.txt b/operations/deployment/helm/grafana/templates/NOTES.txt deleted file mode 100644 index d86419f..0000000 --- a/operations/deployment/helm/grafana/templates/NOTES.txt +++ /dev/null @@ -1,55 +0,0 @@ -1. Get your '{{ .Values.adminUser }}' user password by running: - - kubectl get secret --namespace {{ include "grafana.namespace" . }} {{ .Values.admin.existingSecret | default (include "grafana.fullname" .) }} -o jsonpath="{.data.{{ .Values.admin.passwordKey | default "admin-password" }}}" | base64 --decode ; echo - - -2. The Grafana server can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: - - {{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . }}.svc.cluster.local -{{ if .Values.ingress.enabled }} - If you bind grafana to 80, please update values in values.yaml and reinstall: - ``` - securityContext: - runAsUser: 0 - runAsGroup: 0 - fsGroup: 0 - - command: - - "setcap" - - "'cap_net_bind_service=+ep'" - - "/usr/sbin/grafana-server &&" - - "sh" - - "/run.sh" - ``` - Details refer to https://grafana.com/docs/installation/configuration/#http-port. - Or grafana would always crash. - - From outside the cluster, the server URL(s) are: - {{- range .Values.ingress.hosts }} - http://{{ . }} - {{- end }} -{{- else }} - Get the Grafana URL to visit by running these commands in the same shell: - {{- if contains "NodePort" .Values.service.type }} - export NODE_PORT=$(kubectl get --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "grafana.fullname" . }}) - export NODE_IP=$(kubectl get nodes --namespace {{ include "grafana.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") - echo http://$NODE_IP:$NODE_PORT - {{- else if contains "LoadBalancer" .Values.service.type }} - NOTE: It may take a few minutes for the LoadBalancer IP to be available. - You can watch the status of by running 'kubectl get svc --namespace {{ include "grafana.namespace" . }} -w {{ include "grafana.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ include "grafana.namespace" . }} {{ include "grafana.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - http://$SERVICE_IP:{{ .Values.service.port -}} - {{- else if contains "ClusterIP" .Values.service.type }} - export POD_NAME=$(kubectl get pods --namespace {{ include "grafana.namespace" . }} -l "app.kubernetes.io/name={{ include "grafana.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") - kubectl --namespace {{ include "grafana.namespace" . }} port-forward $POD_NAME 3000 - {{- end }} -{{- end }} - -3. Login with the password from step 1 and the username: {{ .Values.adminUser }} - -{{- if not .Values.persistence.enabled }} -################################################################################# -###### WARNING: Persistence is disabled!!! 
You will lose your data when ##### -###### the Grafana pod is terminated. ##### -################################################################################# -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/_helpers.tpl b/operations/deployment/helm/grafana/templates/_helpers.tpl deleted file mode 100644 index ead2449..0000000 --- a/operations/deployment/helm/grafana/templates/_helpers.tpl +++ /dev/null @@ -1,227 +0,0 @@ -{{/* vim: set filetype=mustache: */}} -{{/* -Expand the name of the chart. -*/}} -{{- define "grafana.name" -}} -{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create a default fully qualified app name. -We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). -If release name contains chart name it will be used as a full name. -*/}} -{{- define "grafana.fullname" -}} -{{- if .Values.fullnameOverride }} -{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- $name := default .Chart.Name .Values.nameOverride }} -{{- if contains $name .Release.Name }} -{{- .Release.Name | trunc 63 | trimSuffix "-" }} -{{- else }} -{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} -{{- end }} -{{- end }} -{{- end }} - -{{/* -Create chart name and version as used by the chart label. -*/}} -{{- define "grafana.chart" -}} -{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} -{{- end }} - -{{/* -Create the name of the service account -*/}} -{{- define "grafana.serviceAccountName" -}} -{{- if .Values.serviceAccount.create }} -{{- default (include "grafana.fullname" .) .Values.serviceAccount.name }} -{{- else }} -{{- default "default" .Values.serviceAccount.name }} -{{- end }} -{{- end }} - -{{- define "grafana.serviceAccountNameTest" -}} -{{- if .Values.serviceAccount.create }} -{{- default (print (include "grafana.fullname" .) "-test") .Values.serviceAccount.nameTest }} -{{- else }} -{{- default "default" .Values.serviceAccount.nameTest }} -{{- end }} -{{- end }} - -{{/* -Allow the release namespace to be overridden for multi-namespace deployments in combined charts -*/}} -{{- define "grafana.namespace" -}} -{{- if .Values.namespaceOverride }} -{{- .Values.namespaceOverride }} -{{- else }} -{{- .Release.Namespace }} -{{- end }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "grafana.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . }} -{{ include "grafana.selectorLabels" . }} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- with .Values.extraLabels }} -{{ toYaml . }} -{{- end }} -{{- end }} - -{{/* -Selector labels -*/}} -{{- define "grafana.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }} -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Common labels -*/}} -{{- define "grafana.imageRenderer.labels" -}} -helm.sh/chart: {{ include "grafana.chart" . }} -{{ include "grafana.imageRenderer.selectorLabels" . 
}} -{{- if or .Chart.AppVersion .Values.image.tag }} -app.kubernetes.io/version: {{ mustRegexReplaceAllLiteral "@sha.*" .Values.image.tag "" | default .Chart.AppVersion | quote }} -{{- end }} -app.kubernetes.io/managed-by: {{ .Release.Service }} -{{- end }} - -{{/* -Selector labels ImageRenderer -*/}} -{{- define "grafana.imageRenderer.selectorLabels" -}} -app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer -app.kubernetes.io/instance: {{ .Release.Name }} -{{- end }} - -{{/* -Looks if there's an existing secret and reuse its password. If not it generates -new password and use it. -*/}} -{{- define "grafana.password" -}} -{{- $secret := (lookup "v1" "Secret" (include "grafana.namespace" .) (include "grafana.fullname" .) ) }} -{{- if $secret }} -{{- index $secret "data" "admin-password" }} -{{- else }} -{{- (randAlphaNum 40) | b64enc | quote }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for rbac. -*/}} -{{- define "grafana.rbac.apiVersion" -}} -{{- if $.Capabilities.APIVersions.Has "rbac.authorization.k8s.io/v1" }} -{{- print "rbac.authorization.k8s.io/v1" }} -{{- else }} -{{- print "rbac.authorization.k8s.io/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for ingress. -*/}} -{{- define "grafana.ingress.apiVersion" -}} -{{- if and ($.Capabilities.APIVersions.Has "networking.k8s.io/v1") (semverCompare ">= 1.19-0" .Capabilities.KubeVersion.Version) }} -{{- print "networking.k8s.io/v1" }} -{{- else if $.Capabilities.APIVersions.Has "networking.k8s.io/v1beta1" }} -{{- print "networking.k8s.io/v1beta1" }} -{{- else }} -{{- print "extensions/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for Horizontal Pod Autoscaler. -*/}} -{{- define "grafana.hpa.apiVersion" -}} -{{- if $.Capabilities.APIVersions.Has "autoscaling/v2/HorizontalPodAutoscaler" }} -{{- print "autoscaling/v2" }} -{{- else if $.Capabilities.APIVersions.Has "autoscaling/v2beta2/HorizontalPodAutoscaler" }} -{{- print "autoscaling/v2beta2" }} -{{- else }} -{{- print "autoscaling/v2beta1" }} -{{- end }} -{{- end }} - -{{/* -Return the appropriate apiVersion for podDisruptionBudget. -*/}} -{{- define "grafana.podDisruptionBudget.apiVersion" -}} -{{- if $.Values.podDisruptionBudget.apiVersion }} -{{- print $.Values.podDisruptionBudget.apiVersion }} -{{- else if $.Capabilities.APIVersions.Has "policy/v1/PodDisruptionBudget" }} -{{- print "policy/v1" }} -{{- else }} -{{- print "policy/v1beta1" }} -{{- end }} -{{- end }} - -{{/* -Return if ingress is stable. -*/}} -{{- define "grafana.ingress.isStable" -}} -{{- eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1" }} -{{- end }} - -{{/* -Return if ingress supports ingressClassName. -*/}} -{{- define "grafana.ingress.supportsIngressClassName" -}} -{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} -{{- end }} - -{{/* -Return if ingress supports pathType. -*/}} -{{- define "grafana.ingress.supportsPathType" -}} -{{- or (eq (include "grafana.ingress.isStable" .) "true") (and (eq (include "grafana.ingress.apiVersion" .) "networking.k8s.io/v1beta1") (semverCompare ">= 1.18-0" .Capabilities.KubeVersion.Version)) }} -{{- end }} - -{{/* -Formats imagePullSecrets. Input is (dict "root" . 
"imagePullSecrets" .{specific imagePullSecrets}) -*/}} -{{- define "grafana.imagePullSecrets" -}} -{{- $root := .root }} -{{- range (concat .root.Values.global.imagePullSecrets .imagePullSecrets) }} -{{- if eq (typeOf .) "map[string]interface {}" }} -- {{ toYaml (dict "name" (tpl .name $root)) | trim }} -{{- else }} -- name: {{ tpl . $root }} -{{- end }} -{{- end }} -{{- end }} - - -{{/* - Checks whether or not the configSecret secret has to be created - */}} -{{- define "grafana.shouldCreateConfigSecret" -}} -{{- $secretFound := false -}} -{{- range $key, $value := .Values.datasources }} - {{- if hasKey $value "secret" }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- range $key, $value := .Values.notifiers }} - {{- if hasKey $value "secret" }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- range $key, $value := .Values.alerting }} - {{- if (or (hasKey $value "secret") (hasKey $value "secretFile")) }} - {{- $secretFound = true}} - {{- end }} -{{- end }} -{{- $secretFound}} -{{- end -}} diff --git a/operations/deployment/helm/grafana/templates/_pod.tpl b/operations/deployment/helm/grafana/templates/_pod.tpl deleted file mode 100644 index f4b9fd3..0000000 --- a/operations/deployment/helm/grafana/templates/_pod.tpl +++ /dev/null @@ -1,1276 +0,0 @@ -{{- define "grafana.pod" -}} -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- $root := . -}} -{{- with .Values.schedulerName }} -schedulerName: "{{ . }}" -{{- end }} -serviceAccountName: {{ include "grafana.serviceAccountName" . }} -automountServiceAccountToken: {{ .Values.serviceAccount.autoMount }} -{{- with .Values.securityContext }} -securityContext: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.hostAliases }} -hostAliases: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.priorityClassName }} -priorityClassName: {{ . }} -{{- end }} -{{- if ( or .Values.persistence.enabled .Values.dashboards .Values.extraInitContainers (and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts) (and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources) (and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers)) }} -initContainers: -{{- end }} -{{- if ( and .Values.persistence.enabled .Values.initChownData.enabled ) }} - - name: init-chown-data - {{- $registry := .Values.global.imageRegistry | default .Values.initChownData.image.registry -}} - {{- if .Values.initChownData.image.sha }} - image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}@sha256:{{ .Values.initChownData.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.initChownData.image.repository }}:{{ .Values.initChownData.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.initChownData.image.pullPolicy }} - {{- with .Values.initChownData.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - command: - - chown - - -R - - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.runAsGroup }} - - /var/lib/grafana - {{- with .Values.initChownData.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . 
$root }} - {{- end }} -{{- end }} -{{- if .Values.dashboards }} - - name: download-dashboards - {{- $registry := .Values.global.imageRegistry | default .Values.downloadDashboardsImage.registry -}} - {{- if .Values.downloadDashboardsImage.sha }} - image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}@sha256:{{ .Values.downloadDashboardsImage.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.downloadDashboardsImage.repository }}:{{ .Values.downloadDashboardsImage.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.downloadDashboardsImage.pullPolicy }} - command: ["/bin/sh"] - args: [ "-c", "mkdir -p /var/lib/grafana/dashboards/default && /bin/sh -x /etc/grafana/download_dashboards.sh" ] - {{- with .Values.downloadDashboards.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - env: - {{- range $key, $value := .Values.downloadDashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- range $key, $value := .Values.downloadDashboards.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 10 }} - {{- end }} - {{- with .Values.downloadDashboards.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.downloadDashboards.envFromSecret }} - envFrom: - - secretRef: - name: {{ tpl . $root }} - {{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/download_dashboards.sh" - subPath: download_dashboards.sh - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . $root }} - {{- end }} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - {{- end }} -{{- end }} -{{- if and .Values.sidecar.alerts.enabled .Values.sidecar.alerts.initAlerts }} - - name: {{ include "grafana.name" . }}-init-sc-alerts - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.alerts.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: "LIST" - - name: LABEL - value: "{{ .Values.sidecar.alerts.label }}" - {{- with .Values.sidecar.alerts.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/alerting" - - name: RESOURCE - value: {{ quote .Values.sidecar.alerts.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.alerts.searchNamespace }} - - name: NAMESPACE - value: {{ . | join "," | quote }} - {{- end }} - {{- with .Values.sidecar.alerts.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: {{ quote . 
}} - {{- end }} - {{- with .Values.sidecar.alerts.script }} - - name: SCRIPT - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- with .Values.sidecar.alerts.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end }} -{{- if and .Values.sidecar.datasources.enabled .Values.sidecar.datasources.initDatasources }} - - name: {{ include "grafana.name" . }}-init-sc-datasources - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.datasources.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: "LIST" - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - {{- with .Values.sidecar.datasources.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: {{ quote .Values.sidecar.datasources.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- if .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (.Values.sidecar.datasources.searchNamespace | join ",") . }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end }} -{{- if and .Values.sidecar.notifiers.enabled .Values.sidecar.notifiers.initNotifiers }} - - name: {{ include "grafana.name" . 
}}-init-sc-notifiers - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.notifiers.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: LIST - - name: LABEL - value: "{{ .Values.sidecar.notifiers.label }}" - {{- with .Values.sidecar.notifiers.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/notifiers" - - name: RESOURCE - value: {{ quote .Values.sidecar.notifiers.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.notifiers.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} -{{- with .Values.extraInitContainers }} - {{- tpl (toYaml .) $root | nindent 2 }} -{{- end }} -{{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} -imagePullSecrets: - {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 2 }} -{{- end }} -{{- if not .Values.enableKubeBackwardCompatibility }} -enableServiceLinks: {{ .Values.enableServiceLinks }} -{{- end }} -containers: -{{- if and .Values.sidecar.alerts.enabled (not .Values.sidecar.alerts.initAlerts) }} - - name: {{ include "grafana.name" . 
}}-sc-alerts - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.alerts.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.alerts.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.alerts.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.alerts.label }}" - {{- with .Values.sidecar.alerts.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.alerts.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/alerting" - - name: RESOURCE - value: {{ quote .Values.sidecar.alerts.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.alerts.searchNamespace }} - - name: NAMESPACE - value: {{ . | join "," | quote }} - {{- end }} - {{- with .Values.sidecar.alerts.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: {{ quote . }} - {{- end }} - {{- with .Values.sidecar.alerts.script }} - - name: SCRIPT - value: {{ quote . }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.alerts.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.alerts.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.alerts.watchServerTimeout }} - {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.alerts.watchServerTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.alerts.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.alerts.watchClientTimeout }} - {{- if ne .Values.sidecar.alerts.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.alerts.watchClientTimeout with .Values.sidecar.alerts.watchMethod %s" .Values.sidecar.alerts.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.alerts.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . 
| nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- with .Values.sidecar.alerts.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end}} -{{- if .Values.sidecar.dashboards.enabled }} - - name: {{ include "grafana.name" . }}-sc-dashboard - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.dashboards.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.dashboards.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.dashboards.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.dashboards.label }}" - {{- with .Values.sidecar.dashboards.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.dashboards.logLevel }} - {{- end }} - - name: FOLDER - value: "{{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }}" - - name: RESOURCE - value: {{ quote .Values.sidecar.dashboards.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.dashboards.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.dashboards.folderAnnotation }} - - name: FOLDER_ANNOTATION - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.dashboards.script }} - - name: SCRIPT - value: "{{ . }}" - {{- end }} - {{- if not .Values.sidecar.dashboards.skipReload }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - - name: REQ_URL - value: {{ .Values.sidecar.dashboards.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.dashboards.watchServerTimeout }} - {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchServerTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.dashboards.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.dashboards.watchClientTimeout }} - {{- if ne .Values.sidecar.dashboards.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.dashboards.watchClientTimeout with .Values.sidecar.dashboards.watchMethod %s" .Values.sidecar.dashboards.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: {{ .Values.sidecar.dashboards.watchClientTimeout | quote }} - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - {{- with .Values.sidecar.dashboards.extraMounts }} - {{- toYaml . | trim | nindent 6 }} - {{- end }} -{{- end}} -{{- if and .Values.sidecar.datasources.enabled (not .Values.sidecar.datasources.initDatasources) }} - - name: {{ include "grafana.name" . }}-sc-datasources - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.datasources.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.datasources.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.datasources.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.datasources.label }}" - {{- with .Values.sidecar.datasources.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.datasources.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/datasources" - - name: RESOURCE - value: {{ quote .Values.sidecar.datasources.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.datasources.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. 
| join ",") $root }}" - {{- end }} - {{- if .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ .Values.sidecar.skipTlsVerify }}" - {{- end }} - {{- if .Values.sidecar.datasources.script }} - - name: SCRIPT - value: "{{ .Values.sidecar.datasources.script }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.datasources.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.datasources.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.datasources.watchServerTimeout }} - {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.datasources.watchServerTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.datasources.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.datasources.watchClientTimeout }} - {{- if ne .Values.sidecar.datasources.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.datasources.watchClientTimeout with .Values.sidecar.datasources.watchMethod %s" .Values.sidecar.datasources.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.datasources.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" -{{- end}} -{{- if .Values.sidecar.notifiers.enabled }} - - name: {{ include "grafana.name" . 
}}-sc-notifiers - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.notifiers.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.notifiers.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.notifiers.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.notifiers.label }}" - {{- with .Values.sidecar.notifiers.labelValue }} - - name: LABEL_VALUE - value: {{ quote . }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.notifiers.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/notifiers" - - name: RESOURCE - value: {{ quote .Values.sidecar.notifiers.resource }} - {{- if .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ .Values.sidecar.enableUniqueFilenames }}" - {{- end }} - {{- with .Values.sidecar.notifiers.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . }}" - {{- end }} - {{- if .Values.sidecar.notifiers.script }} - - name: SCRIPT - value: "{{ .Values.sidecar.notifiers.script }}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) 
}} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.notifiers.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.notifiers.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.notifiers.watchServerTimeout }} - {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchServerTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.notifiers.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.notifiers.watchClientTimeout }} - {{- if ne .Values.sidecar.notifiers.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.notifiers.watchClientTimeout with .Values.sidecar.notifiers.watchMethod %s" .Values.sidecar.notifiers.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.notifiers.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" -{{- end}} -{{- if .Values.sidecar.plugins.enabled }} - - name: {{ include "grafana.name" . }}-sc-plugins - {{- $registry := .Values.global.imageRegistry | default .Values.sidecar.image.registry -}} - {{- if .Values.sidecar.image.sha }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}@sha256:{{ .Values.sidecar.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.sidecar.image.repository }}:{{ .Values.sidecar.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.sidecar.imagePullPolicy }} - env: - {{- range $key, $value := .Values.sidecar.plugins.env }} - - name: "{{ $key }}" - value: "{{ $value }}" - {{- end }} - {{- if .Values.sidecar.plugins.ignoreAlreadyProcessed }} - - name: IGNORE_ALREADY_PROCESSED - value: "true" - {{- end }} - - name: METHOD - value: {{ .Values.sidecar.plugins.watchMethod }} - - name: LABEL - value: "{{ .Values.sidecar.plugins.label }}" - {{- if .Values.sidecar.plugins.labelValue }} - - name: LABEL_VALUE - value: {{ quote .Values.sidecar.plugins.labelValue }} - {{- end }} - {{- if or .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} - - name: LOG_LEVEL - value: {{ default .Values.sidecar.logLevel .Values.sidecar.plugins.logLevel }} - {{- end }} - - name: FOLDER - value: "/etc/grafana/provisioning/plugins" - - name: RESOURCE - value: {{ quote .Values.sidecar.plugins.resource }} - {{- with .Values.sidecar.enableUniqueFilenames }} - - name: UNIQUE_FILENAMES - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.plugins.searchNamespace }} - - name: NAMESPACE - value: "{{ tpl (. | join ",") $root }}" - {{- end }} - {{- with .Values.sidecar.plugins.script }} - - name: SCRIPT - value: "{{ . }}" - {{- end }} - {{- with .Values.sidecar.skipTlsVerify }} - - name: SKIP_TLS_VERIFY - value: "{{ . 
}}" - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_USERNAME - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: REQ_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if not .Values.sidecar.plugins.skipReload }} - - name: REQ_URL - value: {{ .Values.sidecar.plugins.reloadURL }} - - name: REQ_METHOD - value: POST - {{- end }} - {{- if .Values.sidecar.plugins.watchServerTimeout }} - {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.plugins.watchServerTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} - {{- end }} - - name: WATCH_SERVER_TIMEOUT - value: "{{ .Values.sidecar.plugins.watchServerTimeout }}" - {{- end }} - {{- if .Values.sidecar.plugins.watchClientTimeout }} - {{- if ne .Values.sidecar.plugins.watchMethod "WATCH" }} - {{- fail (printf "Cannot use .Values.sidecar.plugins.watchClientTimeout with .Values.sidecar.plugins.watchMethod %s" .Values.sidecar.plugins.watchMethod) }} - {{- end }} - - name: WATCH_CLIENT_TIMEOUT - value: "{{ .Values.sidecar.plugins.watchClientTimeout }}" - {{- end }} - {{- with .Values.sidecar.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.sidecar.securityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: sc-plugins-volume - mountPath: "/etc/grafana/provisioning/plugins" -{{- end}} - - name: {{ .Chart.Name }} - {{- $registry := .Values.global.imageRegistry | default .Values.image.registry -}} - {{- if .Values.image.sha }} - image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}@sha256:{{ .Values.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" - {{- end }} - imagePullPolicy: {{ .Values.image.pullPolicy }} - {{- if .Values.command }} - command: - {{- range .Values.command }} - - {{ . | quote }} - {{- end }} - {{- end }} - {{- if .Values.args }} - args: - {{- range .Values.args }} - - {{ . | quote }} - {{- end }} - {{- end }} - {{- with .Values.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 6 }} - {{- end }} - volumeMounts: - - name: config - mountPath: "/etc/grafana/grafana.ini" - subPath: grafana.ini - {{- if .Values.ldap.enabled }} - - name: ldap - mountPath: "/etc/grafana/ldap.toml" - subPath: ldap.toml - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ tpl .name $root }} - mountPath: {{ tpl .mountPath $root }} - subPath: {{ tpl (.subPath | default "") $root }} - readOnly: {{ .readOnly }} - {{- end }} - - name: storage - mountPath: "/var/lib/grafana" - {{- with .Values.persistence.subPath }} - subPath: {{ tpl . 
$root }} - {{- end }} - {{- with .Values.dashboards }} - {{- range $provider, $dashboards := . }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "json") (hasKey $value "file")) }} - - name: dashboards-{{ $provider }} - mountPath: "/var/lib/grafana/dashboards/{{ $provider }}/{{ $key }}.json" - subPath: "{{ $key }}.json" - {{- end }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.dashboardsConfigMaps }} - {{- range (keys . | sortAlpha) }} - - name: dashboards-{{ . }} - mountPath: "/var/lib/grafana/dashboards/{{ . }}" - {{- end }} - {{- end }} - {{- with .Values.datasources }} - {{- $datasources := . }} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $datasources .) "secret")) }} {{/*check if current datasource should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/datasources/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.notifiers }} - {{- $notifiers := . }} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $notifiers .) "secret")) }} {{/*check if current notifier should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/notifiers/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.alerting }} - {{- $alertingmap := .}} - {{- range (keys . | sortAlpha) }} - {{- if (or (hasKey (index $.Values.alerting .) "secret") (hasKey (index $.Values.alerting .) "secretFile")) }} {{/*check if current alerting entry should be handeled as secret */}} - - name: config-secret - mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" - subPath: {{ . | quote }} - {{- else }} - - name: config - mountPath: "/etc/grafana/provisioning/alerting/{{ . }}" - subPath: {{ . | quote }} - {{- end }} - {{- end }} - {{- end }} - {{- with .Values.dashboardProviders }} - {{- range (keys . | sortAlpha) }} - - name: config - mountPath: "/etc/grafana/provisioning/dashboards/{{ . }}" - subPath: {{ . 
| quote }} - {{- end }} - {{- end }} - {{- with .Values.sidecar.alerts.enabled }} - - name: sc-alerts-volume - mountPath: "/etc/grafana/provisioning/alerting" - {{- end}} - {{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - mountPath: {{ .Values.sidecar.dashboards.folder | quote }} - {{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - mountPath: "/etc/grafana/provisioning/dashboards/sc-dashboardproviders.yaml" - subPath: provider.yaml - {{- end}} - {{- end}} - {{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - mountPath: "/etc/grafana/provisioning/datasources" - {{- end}} - {{- if .Values.sidecar.plugins.enabled }} - - name: sc-plugins-volume - mountPath: "/etc/grafana/provisioning/plugins" - {{- end}} - {{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - mountPath: "/etc/grafana/provisioning/notifiers" - {{- end}} - {{- range .Values.extraSecretMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - readOnly: {{ .readOnly }} - subPath: {{ .subPath | default "" }} - {{- end }} - {{- range .Values.extraVolumeMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - subPath: {{ .subPath | default "" }} - readOnly: {{ .readOnly }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - mountPath: {{ .mountPath }} - {{- end }} - ports: - - name: {{ .Values.podPortName }} - containerPort: {{ .Values.service.targetPort }} - protocol: TCP - - name: {{ .Values.gossipPortName }}-tcp - containerPort: 9094 - protocol: TCP - - name: {{ .Values.gossipPortName }}-udp - containerPort: 9094 - protocol: UDP - env: - - name: POD_IP - valueFrom: - fieldRef: - fieldPath: status.podIP - {{- if and (not .Values.env.GF_SECURITY_ADMIN_USER) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_USER - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.userKey | default "admin-user" }} - {{- end }} - {{- if and (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - - name: GF_SECURITY_ADMIN_PASSWORD - valueFrom: - secretKeyRef: - name: {{ (tpl .Values.admin.existingSecret .) | default (include "grafana.fullname" .) }} - key: {{ .Values.admin.passwordKey | default "admin-password" }} - {{- end }} - {{- if .Values.plugins }} - - name: GF_INSTALL_PLUGINS - valueFrom: - configMapKeyRef: - name: {{ include "grafana.fullname" . }} - key: plugins - {{- end }} - {{- if .Values.smtp.existingSecret }} - - name: GF_SMTP_USER - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.userKey | default "user" }} - - name: GF_SMTP_PASSWORD - valueFrom: - secretKeyRef: - name: {{ .Values.smtp.existingSecret }} - key: {{ .Values.smtp.passwordKey | default "password" }} - {{- end }} - {{- if .Values.imageRenderer.enabled }} - - name: GF_RENDERING_SERVER_URL - value: http://{{ include "grafana.fullname" . }}-image-renderer.{{ include "grafana.namespace" . }}:{{ .Values.imageRenderer.service.port }}/render - - name: GF_RENDERING_CALLBACK_URL - value: {{ .Values.imageRenderer.grafanaProtocol }}://{{ include "grafana.fullname" . }}.{{ include "grafana.namespace" . 
}}:{{ .Values.service.port }}/{{ .Values.imageRenderer.grafanaSubPath }} - {{- end }} - - name: GF_PATHS_DATA - value: {{ (get .Values "grafana.ini").paths.data }} - - name: GF_PATHS_LOGS - value: {{ (get .Values "grafana.ini").paths.logs }} - - name: GF_PATHS_PLUGINS - value: {{ (get .Values "grafana.ini").paths.plugins }} - - name: GF_PATHS_PROVISIONING - value: {{ (get .Values "grafana.ini").paths.provisioning }} - {{- range $key, $value := .Values.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 10 }} - {{- end }} - {{- range $key, $value := .Values.env }} - - name: "{{ tpl $key $ }}" - value: "{{ tpl (print $value) $ }}" - {{- end }} - {{- if or .Values.envFromSecret (or .Values.envRenderSecret .Values.envFromSecrets) .Values.envFromConfigMaps }} - envFrom: - {{- if .Values.envFromSecret }} - - secretRef: - name: {{ tpl .Values.envFromSecret . }} - {{- end }} - {{- if .Values.envRenderSecret }} - - secretRef: - name: {{ include "grafana.fullname" . }}-env - {{- end }} - {{- range .Values.envFromSecrets }} - - secretRef: - name: {{ tpl .name $ }} - optional: {{ .optional | default false }} - {{- end }} - {{- range .Values.envFromConfigMaps }} - - configMapRef: - name: {{ tpl .name $ }} - optional: {{ .optional | default false }} - {{- end }} - {{- end }} - {{- with .Values.livenessProbe }} - livenessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.readinessProbe }} - readinessProbe: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.lifecycleHooks }} - lifecycle: - {{- tpl (toYaml .) $root | nindent 6 }} - {{- end }} - {{- with .Values.resources }} - resources: - {{- toYaml . | nindent 6 }} - {{- end }} -{{- with .Values.extraContainers }} - {{- tpl . $ | nindent 2 }} -{{- end }} -{{- with .Values.nodeSelector }} -nodeSelector: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.affinity }} -affinity: - {{- tpl (toYaml .) $root | nindent 2 }} -{{- end }} -{{- with .Values.topologySpreadConstraints }} -topologySpreadConstraints: - {{- toYaml . | nindent 2 }} -{{- end }} -{{- with .Values.tolerations }} -tolerations: - {{- toYaml . | nindent 2 }} -{{- end }} -volumes: - - name: config - configMap: - name: {{ include "grafana.fullname" . }} - {{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} - {{- if and .Values.createConfigmap $createConfigSecret }} - - name: config-secret - secret: - secretName: {{ include "grafana.fullname" . }}-config-secret - {{- end }} - {{- range .Values.extraConfigmapMounts }} - - name: {{ tpl .name $root }} - configMap: - name: {{ tpl .configMap $root }} - {{- with .items }} - items: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- end }} - {{- if .Values.dashboards }} - {{- range (keys .Values.dashboards | sortAlpha) }} - - name: dashboards-{{ . }} - configMap: - name: {{ include "grafana.fullname" $ }}-dashboards-{{ . }} - {{- end }} - {{- end }} - {{- if .Values.dashboardsConfigMaps }} - {{- range $provider, $name := .Values.dashboardsConfigMaps }} - - name: dashboards-{{ $provider }} - configMap: - name: {{ tpl $name $root }} - {{- end }} - {{- end }} - {{- if .Values.ldap.enabled }} - - name: ldap - secret: - {{- if .Values.ldap.existingSecret }} - secretName: {{ .Values.ldap.existingSecret }} - {{- else }} - secretName: {{ include "grafana.fullname" . 
}} - {{- end }} - items: - - key: ldap-toml - path: ldap.toml - {{- end }} - {{- if and .Values.persistence.enabled (eq .Values.persistence.type "pvc") }} - - name: storage - persistentVolumeClaim: - claimName: {{ tpl (.Values.persistence.existingClaim | default (include "grafana.fullname" .)) . }} - {{- else if and .Values.persistence.enabled (has .Values.persistence.type $sts) }} - {{/* nothing */}} - {{- else }} - - name: storage - {{- if .Values.persistence.inMemory.enabled }} - emptyDir: - medium: Memory - {{- with .Values.persistence.inMemory.sizeLimit }} - sizeLimit: {{ . }} - {{- end }} - {{- else }} - emptyDir: {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.alerts.enabled }} - - name: sc-alerts-volume - emptyDir: - {{- with .Values.sidecar.alerts.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.dashboards.enabled }} - - name: sc-dashboard-volume - emptyDir: - {{- with .Values.sidecar.dashboards.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- if .Values.sidecar.dashboards.SCProvider }} - - name: sc-dashboard-provider - configMap: - name: {{ include "grafana.fullname" . }}-config-dashboards - {{- end }} - {{- end }} - {{- if .Values.sidecar.datasources.enabled }} - - name: sc-datasources-volume - emptyDir: - {{- with .Values.sidecar.datasources.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.plugins.enabled }} - - name: sc-plugins-volume - emptyDir: - {{- with .Values.sidecar.plugins.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- if .Values.sidecar.notifiers.enabled }} - - name: sc-notifiers-volume - emptyDir: - {{- with .Values.sidecar.notifiers.sizeLimit }} - sizeLimit: {{ . }} - {{- else }} - {} - {{- end }} - {{- end }} - {{- range .Values.extraSecretMounts }} - {{- if .secretName }} - - name: {{ .name }} - secret: - secretName: {{ .secretName }} - defaultMode: {{ .defaultMode }} - {{- with .items }} - items: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- else if .projected }} - - name: {{ .name }} - projected: - {{- toYaml .projected | nindent 6 }} - {{- else if .csi }} - - name: {{ .name }} - csi: - {{- toYaml .csi | nindent 6 }} - {{- end }} - {{- end }} - {{- range .Values.extraVolumes }} - - name: {{ .name }} - {{- if .existingClaim }} - persistentVolumeClaim: - claimName: {{ .existingClaim }} - {{- else if .hostPath }} - hostPath: - {{ toYaml .hostPath | nindent 6 }} - {{- else if .csi }} - csi: - {{- toYaml .data | nindent 6 }} - {{- else if .configMap }} - configMap: - {{- toYaml .configMap | nindent 6 }} - {{- else }} - emptyDir: {} - {{- end }} - {{- end }} - {{- range .Values.extraEmptyDirMounts }} - - name: {{ .name }} - emptyDir: {} - {{- end }} - {{- with .Values.extraContainerVolumes }} - {{- tpl (toYaml .) $root | nindent 2 }} - {{- end }} -{{- end }} - diff --git a/operations/deployment/helm/grafana/templates/clusterrole.yaml b/operations/deployment/helm/grafana/templates/clusterrole.yaml deleted file mode 100644 index 3af4b62..0000000 --- a/operations/deployment/helm/grafana/templates/clusterrole.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) (not .Values.rbac.useExistingClusterRole) }} -kind: ClusterRole -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} - name: {{ include "grafana.fullname" . }}-clusterrole -{{- if or .Values.sidecar.dashboards.enabled .Values.rbac.extraClusterRoleRules .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} -rules: - {{- if or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.sidecar.alerts.enabled }} - - apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] - {{- end}} - {{- with .Values.rbac.extraClusterRoleRules }} - {{- toYaml . | nindent 2 }} - {{- end}} -{{- else }} -rules: [] -{{- end}} -{{- end}} diff --git a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml b/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml deleted file mode 100644 index bda9431..0000000 --- a/operations/deployment/helm/grafana/templates/clusterrolebinding.yaml +++ /dev/null @@ -1,24 +0,0 @@ -{{- if and .Values.rbac.create (or (not .Values.rbac.namespaced) .Values.rbac.extraClusterRoleRules) }} -kind: ClusterRoleBinding -apiVersion: rbac.authorization.k8s.io/v1 -metadata: - name: {{ include "grafana.fullname" . }}-clusterrolebinding - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -subjects: - - kind: ServiceAccount - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -roleRef: - kind: ClusterRole - {{- if .Values.rbac.useExistingClusterRole }} - name: {{ .Values.rbac.useExistingClusterRole }} - {{- else }} - name: {{ include "grafana.fullname" . }}-clusterrole - {{- end }} - apiGroup: rbac.authorization.k8s.io -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configSecret.yaml b/operations/deployment/helm/grafana/templates/configSecret.yaml deleted file mode 100644 index f8937cc..0000000 --- a/operations/deployment/helm/grafana/templates/configSecret.yaml +++ /dev/null @@ -1,43 +0,0 @@ -{{- $createConfigSecret := eq (include "grafana.shouldCreateConfigSecret" .) "true" -}} -{{- if and .Values.createConfigmap $createConfigSecret }} -{{- $files := .Files }} -{{- $root := . -}} -apiVersion: v1 -kind: Secret -metadata: - name: "{{ include "grafana.fullname" . }}-config-secret" - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . 
| nindent 4 }} - {{- end }} -data: -{{- range $key, $value := .Values.alerting }} - {{- if (hasKey $value "secretFile") }} - {{- $key | nindent 2 }}: - {{- toYaml ( $files.Get $value.secretFile ) | b64enc | nindent 4}} - {{/* as of https://helm.sh/docs/chart_template_guide/accessing_files/ this will only work if you fork this chart and add files to it*/}} - {{- end }} -{{- end }} -stringData: -{{- range $key, $value := .Values.datasources }} -{{- if (hasKey $value "secret") }} -{{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} -{{- end }} -{{- end }} -{{- range $key, $value := .Values.notifiers }} -{{- if (hasKey $value "secret") }} -{{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} -{{- end }} -{{- end }} -{{- range $key, $value := .Values.alerting }} -{{ if (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value.secret | nindent 4) $root }} - {{- end }} -{{- end }} -{{- end }} \ No newline at end of file diff --git a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml b/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml deleted file mode 100644 index 1f706a8..0000000 --- a/operations/deployment/helm/grafana/templates/configmap-dashboard-provider.yaml +++ /dev/null @@ -1,29 +0,0 @@ -{{- if and .Values.sidecar.dashboards.enabled .Values.sidecar.dashboards.SCProvider }} -apiVersion: v1 -kind: ConfigMap -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - name: {{ include "grafana.fullname" . }}-config-dashboards - namespace: {{ include "grafana.namespace" . }} -data: - provider.yaml: |- - apiVersion: 1 - providers: - - name: '{{ .Values.sidecar.dashboards.provider.name }}' - orgId: {{ .Values.sidecar.dashboards.provider.orgid }} - {{- if not .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - folder: '{{ .Values.sidecar.dashboards.provider.folder }}' - {{- end }} - type: {{ .Values.sidecar.dashboards.provider.type }} - disableDeletion: {{ .Values.sidecar.dashboards.provider.disableDelete }} - allowUiUpdates: {{ .Values.sidecar.dashboards.provider.allowUiUpdates }} - updateIntervalSeconds: {{ .Values.sidecar.dashboards.provider.updateIntervalSeconds | default 30 }} - options: - foldersFromFilesStructure: {{ .Values.sidecar.dashboards.provider.foldersFromFilesStructure }} - path: {{ .Values.sidecar.dashboards.folder }}{{- with .Values.sidecar.dashboards.defaultFolderName }}/{{ . }}{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/configmap.yaml b/operations/deployment/helm/grafana/templates/configmap.yaml deleted file mode 100644 index 7b837d9..0000000 --- a/operations/deployment/helm/grafana/templates/configmap.yaml +++ /dev/null @@ -1,144 +0,0 @@ -{{- if .Values.createConfigmap }} -{{- $files := .Files }} -{{- $root := . -}} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -data: - {{- with .Values.plugins }} - plugins: {{ join "," . 
}} - {{- end }} - grafana.ini: | - {{- range $elem, $elemVal := index .Values "grafana.ini" }} - {{- if not (kindIs "map" $elemVal) }} - {{- if kindIs "invalid" $elemVal }} - {{ $elem }} = - {{- else if kindIs "string" $elemVal }} - {{ $elem }} = {{ tpl $elemVal $ }} - {{- else }} - {{ $elem }} = {{ $elemVal }} - {{- end }} - {{- end }} - {{- end }} - {{- range $key, $value := index .Values "grafana.ini" }} - {{- if kindIs "map" $value }} - [{{ $key }}] - {{- range $elem, $elemVal := $value }} - {{- if kindIs "invalid" $elemVal }} - {{ $elem }} = - {{- else if kindIs "string" $elemVal }} - {{ $elem }} = {{ tpl $elemVal $ }} - {{- else }} - {{ $elem }} = {{ $elemVal }} - {{- end }} - {{- end }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.datasources }} - {{- if not (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.notifiers }} - {{- if not (hasKey $value "secret") }} - {{- $key | nindent 2 }}: | - {{- toYaml $value | nindent 4 }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.alerting }} - {{- if (hasKey $value "file") }} - {{- $key | nindent 2 }}: - {{- toYaml ( $files.Get $value.file ) | nindent 4}} - {{- else if (or (hasKey $value "secret") (hasKey $value "secretFile"))}} - {{/* will be stored inside secret generated by "configSecret.yaml"*/}} - {{- else }} - {{- $key | nindent 2 }}: | - {{- tpl (toYaml $value | nindent 4) $root }} - {{- end }} - {{- end }} - - {{- range $key, $value := .Values.dashboardProviders }} - {{- $key | nindent 2 }}: | - {{- toYaml $value | nindent 4 }} - {{- end }} - -{{- if .Values.dashboards }} - download_dashboards.sh: | - #!/usr/bin/env sh - set -euf - {{- if .Values.dashboardProviders }} - {{- range $key, $value := .Values.dashboardProviders }} - {{- range $value.providers }} - mkdir -p {{ .options.path }} - {{- end }} - {{- end }} - {{- end }} - {{ $dashboardProviders := .Values.dashboardProviders }} - {{- range $provider, $dashboards := .Values.dashboards }} - {{- range $key, $value := $dashboards }} - {{- if (or (hasKey $value "gnetId") (hasKey $value "url")) }} - curl -skf \ - --connect-timeout 60 \ - --max-time 60 \ - {{- if not $value.b64content }} - {{- if not $value.acceptHeader }} - -H "Accept: application/json" \ - {{- else }} - -H "Accept: {{ $value.acceptHeader }}" \ - {{- end }} - {{- if $value.token }} - -H "Authorization: token {{ $value.token }}" \ - {{- end }} - {{- if $value.bearerToken }} - -H "Authorization: Bearer {{ $value.bearerToken }}" \ - {{- end }} - {{- if $value.basic }} - -H "Authorization: Basic {{ $value.basic }}" \ - {{- end }} - {{- if $value.gitlabToken }} - -H "PRIVATE-TOKEN: {{ $value.gitlabToken }}" \ - {{- end }} - -H "Content-Type: application/json;charset=UTF-8" \ - {{- end }} - {{- $dpPath := "" -}} - {{- range $kd := (index $dashboardProviders "dashboardproviders.yaml").providers }} - {{- if eq $kd.name $provider }} - {{- $dpPath = $kd.options.path }} - {{- end }} - {{- end }} - {{- if $value.url }} - "{{ $value.url }}" \ - {{- else }} - "https://grafana.com/api/dashboards/{{ $value.gnetId }}/revisions/{{- if $value.revision -}}{{ $value.revision }}{{- else -}}1{{- end -}}/download" \ - {{- end }} - {{- if $value.datasource }} - {{- if kindIs "string" $value.datasource }} - | sed '/-- .* --/! 
s/"datasource":.*,/"datasource": "{{ $value.datasource }}",/g' \ - {{- end }} - {{- if kindIs "slice" $value.datasource }} - {{- range $value.datasource }} - | sed '/-- .* --/! s/${{"{"}}{{ .name }}}/{{ .value }}/g' \ - {{- end }} - {{- end }} - {{- end }} - {{- if $value.b64content }} - | base64 -d \ - {{- end }} - > "{{- if $dpPath -}}{{ $dpPath }}{{- else -}}/var/lib/grafana/dashboards/{{ $provider }}{{- end -}}/{{ $key }}.json" - {{ end }} - {{- end }} - {{- end }} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml b/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml deleted file mode 100644 index b96ce72..0000000 --- a/operations/deployment/helm/grafana/templates/dashboards-json-configmap.yaml +++ /dev/null @@ -1,38 +0,0 @@ -{{- if .Values.dashboards }} -{{ $files := .Files }} -{{- range $provider, $dashboards := .Values.dashboards }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" $ }}-dashboards-{{ $provider }} - namespace: {{ include "grafana.namespace" $ }} - labels: - {{- include "grafana.labels" $ | nindent 4 }} - dashboard-provider: {{ $provider }} - {{- if $.Values.sidecar.dashboards.enabled }} - {{ $.Values.sidecar.dashboards.label }}: {{ $.Values.sidecar.dashboards.labelValue | quote }} - {{- end }} -{{- if $dashboards }} -data: -{{- $dashboardFound := false }} -{{- range $key, $value := $dashboards }} -{{- if (or (hasKey $value "json") (hasKey $value "file")) }} -{{- $dashboardFound = true }} - {{- print $key | nindent 2 }}.json: - {{- if hasKey $value "json" }} - |- - {{- $value.json | nindent 6 }} - {{- end }} - {{- if hasKey $value "file" }} - {{- toYaml ( $files.Get $value.file ) | nindent 4}} - {{- end }} -{{- end }} -{{- end }} -{{- if not $dashboardFound }} - {} -{{- end }} -{{- end }} ---- -{{- end }} - -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/deployment.yaml b/operations/deployment/helm/grafana/templates/deployment.yaml index bfa26bb..9c96d35 100644 --- a/operations/deployment/helm/grafana/templates/deployment.yaml +++ b/operations/deployment/helm/grafana/templates/deployment.yaml @@ -3,7 +3,7 @@ apiVersion: apps/v1 kind: Deployment metadata: name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} + # namespace: {{ include "grafana.namespace" . }} labels: {{- include "grafana.labels" . | nindent 4 }} {{- with .Values.labels }} diff --git a/operations/deployment/helm/grafana/templates/extra-manifests.yaml b/operations/deployment/helm/grafana/templates/extra-manifests.yaml deleted file mode 100644 index a9bb3b6..0000000 --- a/operations/deployment/helm/grafana/templates/extra-manifests.yaml +++ /dev/null @@ -1,4 +0,0 @@ -{{ range .Values.extraObjects }} ---- -{{ tpl (toYaml .) $ }} -{{ end }} diff --git a/operations/deployment/helm/grafana/templates/headless-service.yaml b/operations/deployment/helm/grafana/templates/headless-service.yaml deleted file mode 100644 index 3028589..0000000 --- a/operations/deployment/helm/grafana/templates/headless-service.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if or .Values.headlessService (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)) }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }}-headless - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . 
| nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - clusterIP: None - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} - type: ClusterIP - ports: - - name: {{ .Values.gossipPortName }}-tcp - port: 9094 -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/hpa.yaml b/operations/deployment/helm/grafana/templates/hpa.yaml deleted file mode 100644 index 46bbcb4..0000000 --- a/operations/deployment/helm/grafana/templates/hpa.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if .Values.autoscaling.enabled }} -apiVersion: {{ include "grafana.hpa.apiVersion" . }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - app.kubernetes.io/name: {{ include "grafana.name" . }} - helm.sh/chart: {{ include "grafana.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - {{- if has .Values.persistence.type $sts }} - kind: StatefulSet - {{- else }} - kind: Deployment - {{- end }} - name: {{ include "grafana.fullname" . }} - minReplicas: {{ .Values.autoscaling.minReplicas }} - maxReplicas: {{ .Values.autoscaling.maxReplicas }} - metrics: - {{- if .Values.autoscaling.targetMemory }} - - type: Resource - resource: - name: memory - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.autoscaling.targetMemory }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetMemory }} - {{- end }} - {{- end }} - {{- if .Values.autoscaling.targetCPU }} - - type: Resource - resource: - name: cpu - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.autoscaling.targetCPU }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.autoscaling.targetCPU }} - {{- end }} - {{- end }} - {{- if .Values.autoscaling.behavior }} - behavior: {{ toYaml .Values.autoscaling.behavior | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml b/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml deleted file mode 100644 index ea97969..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-deployment.yaml +++ /dev/null @@ -1,131 +0,0 @@ -{{ if .Values.imageRenderer.enabled }} -{{- $root := . -}} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.imageRenderer.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and (not .Values.imageRenderer.autoscaling.enabled) (.Values.imageRenderer.replicas) }} - replicas: {{ .Values.imageRenderer.replicas }} - {{- end }} - revisionHistoryLimit: {{ .Values.imageRenderer.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - - {{- with .Values.imageRenderer.deploymentStrategy }} - strategy: - {{- toYaml . 
| trim | nindent 4 }} - {{- end }} - template: - metadata: - labels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 8 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - {{- with .Values.imageRenderer.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- with .Values.imageRenderer.schedulerName }} - schedulerName: "{{ . }}" - {{- end }} - {{- with .Values.imageRenderer.serviceAccountName }} - serviceAccountName: "{{ . }}" - {{- end }} - {{- with .Values.imageRenderer.securityContext }} - securityContext: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.hostAliases }} - hostAliases: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.priorityClassName }} - priorityClassName: {{ . }} - {{- end }} - {{- with .Values.imageRenderer.image.pullSecrets }} - imagePullSecrets: - {{- range . }} - - name: {{ tpl . $root }} - {{- end}} - {{- end }} - containers: - - name: {{ .Chart.Name }}-image-renderer - {{- $registry := .Values.global.imageRegistry | default .Values.imageRenderer.image.registry -}} - {{- if .Values.imageRenderer.image.sha }} - image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}@sha256:{{ .Values.imageRenderer.image.sha }}" - {{- else }} - image: "{{ $registry }}/{{ .Values.imageRenderer.image.repository }}:{{ .Values.imageRenderer.image.tag }}" - {{- end }} - imagePullPolicy: {{ .Values.imageRenderer.image.pullPolicy }} - {{- if .Values.imageRenderer.command }} - command: - {{- range .Values.imageRenderer.command }} - - {{ . }} - {{- end }} - {{- end}} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - containerPort: {{ .Values.imageRenderer.service.targetPort }} - protocol: TCP - livenessProbe: - httpGet: - path: / - port: {{ .Values.imageRenderer.service.portName }} - env: - - name: HTTP_PORT - value: {{ .Values.imageRenderer.service.targetPort | quote }} - {{- if .Values.imageRenderer.serviceMonitor.enabled }} - - name: ENABLE_METRICS - value: "true" - {{- end }} - {{- range $key, $value := .Values.imageRenderer.envValueFrom }} - - name: {{ $key | quote }} - valueFrom: - {{- tpl (toYaml $value) $ | nindent 16 }} - {{- end }} - {{- range $key, $value := .Values.imageRenderer.env }} - - name: {{ $key | quote }} - value: {{ $value | quote }} - {{- end }} - {{- with .Values.imageRenderer.containerSecurityContext }} - securityContext: - {{- toYaml . | nindent 12 }} - {{- end }} - volumeMounts: - - mountPath: /tmp - name: image-renderer-tmpfs - {{- with .Values.imageRenderer.resources }} - resources: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- with .Values.imageRenderer.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.affinity }} - affinity: - {{- tpl (toYaml .) $root | nindent 8 }} - {{- end }} - {{- with .Values.imageRenderer.tolerations }} - tolerations: - {{- toYaml . 
| nindent 8 }} - {{- end }} - volumes: - - name: image-renderer-tmpfs - emptyDir: {} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml b/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml deleted file mode 100644 index b0f0059..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-hpa.yaml +++ /dev/null @@ -1,47 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.autoscaling.enabled }} -apiVersion: {{ include "grafana.hpa.apiVersion" . }} -kind: HorizontalPodAutoscaler -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - app.kubernetes.io/name: {{ include "grafana.name" . }}-image-renderer - helm.sh/chart: {{ include "grafana.chart" . }} - app.kubernetes.io/managed-by: {{ .Release.Service }} - app.kubernetes.io/instance: {{ .Release.Name }} -spec: - scaleTargetRef: - apiVersion: apps/v1 - kind: Deployment - name: {{ include "grafana.fullname" . }}-image-renderer - minReplicas: {{ .Values.imageRenderer.autoscaling.minReplicas }} - maxReplicas: {{ .Values.imageRenderer.autoscaling.maxReplicas }} - metrics: - {{- if .Values.imageRenderer.autoscaling.targetMemory }} - - type: Resource - resource: - name: memory - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.imageRenderer.autoscaling.targetMemory }} - {{- end }} - {{- end }} - {{- if .Values.imageRenderer.autoscaling.targetCPU }} - - type: Resource - resource: - name: cpu - {{- if eq (include "grafana.hpa.apiVersion" .) "autoscaling/v2beta1" }} - targetAverageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} - {{- else }} - target: - type: Utilization - averageUtilization: {{ .Values.imageRenderer.autoscaling.targetCPU }} - {{- end }} - {{- end }} - {{- if .Values.imageRenderer.autoscaling.behavior }} - behavior: {{ toYaml .Values.imageRenderer.autoscaling.behavior | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml b/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml deleted file mode 100644 index d1a0eb3..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-network-policy.yaml +++ /dev/null @@ -1,79 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitIngress }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer-ingress - namespace: {{ include "grafana.namespace" . }} - annotations: - comment: Limit image-renderer ingress traffic from grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 6 }} - {{- end }} - - policyTypes: - - Ingress - ingress: - - ports: - - port: {{ .Values.imageRenderer.service.targetPort }} - protocol: TCP - from: - - namespaceSelector: - matchLabels: - kubernetes.io/metadata.name: {{ include "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 14 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 14 }} - {{- end }} - {{- with .Values.imageRenderer.networkPolicy.extraIngressSelectors -}} - {{ toYaml . 
| nindent 8 }} - {{- end }} -{{- end }} - -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.networkPolicy.limitEgress }} ---- -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer-egress - namespace: {{ include "grafana.namespace" . }} - annotations: - comment: Limit image-renderer egress traffic to grafana -spec: - podSelector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - {{- with .Values.imageRenderer.podLabels }} - {{- toYaml . | nindent 6 }} - {{- end }} - - policyTypes: - - Egress - egress: - # allow dns resolution - - ports: - - port: 53 - protocol: UDP - - port: 53 - protocol: TCP - # talk only to grafana - - ports: - - port: {{ .Values.service.targetPort }} - protocol: TCP - to: - - namespaceSelector: - matchLabels: - name: {{ include "grafana.namespace" . }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 14 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 14 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml b/operations/deployment/helm/grafana/templates/image-renderer-service.yaml deleted file mode 100644 index f8da127..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-service.yaml +++ /dev/null @@ -1,31 +0,0 @@ -{{- if and .Values.imageRenderer.enabled .Values.imageRenderer.service.enabled }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.service.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.imageRenderer.service.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - type: ClusterIP - {{- with .Values.imageRenderer.service.clusterIP }} - clusterIP: {{ . }} - {{- end }} - ports: - - name: {{ .Values.imageRenderer.service.portName }} - port: {{ .Values.imageRenderer.service.port }} - protocol: TCP - targetPort: {{ .Values.imageRenderer.service.targetPort }} - {{- with .Values.imageRenderer.appProtocol }} - appProtocol: {{ . }} - {{- end }} - selector: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml b/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml deleted file mode 100644 index 5d9f09d..0000000 --- a/operations/deployment/helm/grafana/templates/image-renderer-servicemonitor.yaml +++ /dev/null @@ -1,48 +0,0 @@ -{{- if .Values.imageRenderer.serviceMonitor.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "grafana.fullname" . }}-image-renderer - {{- if .Values.imageRenderer.serviceMonitor.namespace }} - namespace: {{ tpl .Values.imageRenderer.serviceMonitor.namespace . }} - {{- else }} - namespace: {{ include "grafana.namespace" . }} - {{- end }} - labels: - {{- include "grafana.imageRenderer.labels" . | nindent 4 }} - {{- with .Values.imageRenderer.serviceMonitor.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - endpoints: - - port: {{ .Values.imageRenderer.service.portName }} - {{- with .Values.imageRenderer.serviceMonitor.interval }} - interval: {{ . 
}} - {{- end }} - {{- with .Values.imageRenderer.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ . }} - {{- end }} - honorLabels: true - path: {{ .Values.imageRenderer.serviceMonitor.path }} - scheme: {{ .Values.imageRenderer.serviceMonitor.scheme }} - {{- with .Values.imageRenderer.serviceMonitor.tlsConfig }} - tlsConfig: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.imageRenderer.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - jobLabel: "{{ .Release.Name }}-image-renderer" - selector: - matchLabels: - {{- include "grafana.imageRenderer.selectorLabels" . | nindent 6 }} - namespaceSelector: - matchNames: - - {{ include "grafana.namespace" . }} - {{- with .Values.imageRenderer.serviceMonitor.targetLabels }} - targetLabels: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/ingress.yaml b/operations/deployment/helm/grafana/templates/ingress.yaml deleted file mode 100644 index 063cdfa..0000000 --- a/operations/deployment/helm/grafana/templates/ingress.yaml +++ /dev/null @@ -1,78 +0,0 @@ -{{- if .Values.ingress.enabled -}} -{{- $ingressApiIsStable := eq (include "grafana.ingress.isStable" .) "true" -}} -{{- $ingressSupportsIngressClassName := eq (include "grafana.ingress.supportsIngressClassName" .) "true" -}} -{{- $ingressSupportsPathType := eq (include "grafana.ingress.supportsPathType" .) "true" -}} -{{- $fullName := include "grafana.fullname" . -}} -{{- $servicePort := .Values.service.port -}} -{{- $ingressPath := .Values.ingress.path -}} -{{- $ingressPathType := .Values.ingress.pathType -}} -{{- $extraPaths := .Values.ingress.extraPaths -}} -apiVersion: {{ include "grafana.ingress.apiVersion" . }} -kind: Ingress -metadata: - name: {{ $fullName }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.ingress.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.ingress.annotations }} - annotations: - {{- range $key, $value := . }} - {{ $key }}: {{ tpl $value $ | quote }} - {{- end }} - {{- end }} -spec: - {{- if and $ingressSupportsIngressClassName .Values.ingress.ingressClassName }} - ingressClassName: {{ .Values.ingress.ingressClassName }} - {{- end -}} - {{- with .Values.ingress.tls }} - tls: - {{- tpl (toYaml .) $ | nindent 4 }} - {{- end }} - rules: - {{- if .Values.ingress.hosts }} - {{- range .Values.ingress.hosts }} - - host: {{ tpl . $ }} - http: - paths: - {{- with $extraPaths }} - {{- toYaml . | nindent 10 }} - {{- end }} - - path: {{ $ingressPath }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- end }} - {{- else }} - - http: - paths: - - backend: - {{- if $ingressApiIsStable }} - service: - name: {{ $fullName }} - port: - number: {{ $servicePort }} - {{- else }} - serviceName: {{ $fullName }} - servicePort: {{ $servicePort }} - {{- end }} - {{- with $ingressPath }} - path: {{ . 
}} - {{- end }} - {{- if $ingressSupportsPathType }} - pathType: {{ $ingressPathType }} - {{- end }} - {{- end -}} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/networkpolicy.yaml b/operations/deployment/helm/grafana/templates/networkpolicy.yaml deleted file mode 100644 index 4cd3ed6..0000000 --- a/operations/deployment/helm/grafana/templates/networkpolicy.yaml +++ /dev/null @@ -1,61 +0,0 @@ -{{- if .Values.networkPolicy.enabled }} -apiVersion: networking.k8s.io/v1 -kind: NetworkPolicy -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - policyTypes: - {{- if .Values.networkPolicy.ingress }} - - Ingress - {{- end }} - {{- if .Values.networkPolicy.egress.enabled }} - - Egress - {{- end }} - podSelector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - - {{- if .Values.networkPolicy.egress.enabled }} - egress: - {{- if not .Values.networkPolicy.egress.blockDNSResolution }} - - ports: - - port: 53 - protocol: UDP - {{- end }} - - ports: - {{ .Values.networkPolicy.egress.ports | toJson }} - {{- with .Values.networkPolicy.egress.to }} - to: - {{- toYaml . | nindent 12 }} - {{- end }} - {{- end }} - {{- if .Values.networkPolicy.ingress }} - ingress: - - ports: - - port: {{ .Values.service.targetPort }} - {{- if not .Values.networkPolicy.allowExternal }} - from: - - podSelector: - matchLabels: - {{ include "grafana.fullname" . }}-client: "true" - {{- with .Values.networkPolicy.explicitNamespacesSelector }} - - namespaceSelector: - {{- toYaml . | nindent 12 }} - {{- end }} - - podSelector: - matchLabels: - {{- include "grafana.labels" . | nindent 14 }} - role: read - {{- end }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml b/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml deleted file mode 100644 index 0525121..0000000 --- a/operations/deployment/helm/grafana/templates/poddisruptionbudget.yaml +++ /dev/null @@ -1,22 +0,0 @@ -{{- if .Values.podDisruptionBudget }} -apiVersion: {{ include "grafana.podDisruptionBudget.apiVersion" . }} -kind: PodDisruptionBudget -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- with .Values.podDisruptionBudget.minAvailable }} - minAvailable: {{ . }} - {{- end }} - {{- with .Values.podDisruptionBudget.maxUnavailable }} - maxUnavailable: {{ . }} - {{- end }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml deleted file mode 100644 index eed7af9..0000000 --- a/operations/deployment/helm/grafana/templates/podsecuritypolicy.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "grafana.fullname" . }} - labels: - {{- include "grafana.labels" . 
| nindent 4 }} - annotations: - seccomp.security.alpha.kubernetes.io/allowedProfileNames: 'docker/default,runtime/default' - seccomp.security.alpha.kubernetes.io/defaultProfileName: 'docker/default' - {{- if .Values.rbac.pspUseAppArmor }} - apparmor.security.beta.kubernetes.io/allowedProfileNames: 'runtime/default' - apparmor.security.beta.kubernetes.io/defaultProfileName: 'runtime/default' - {{- end }} -spec: - privileged: false - allowPrivilegeEscalation: false - requiredDropCapabilities: - # Default set from Docker, with DAC_OVERRIDE and CHOWN - - ALL - volumes: - - 'configMap' - - 'emptyDir' - - 'projected' - - 'csi' - - 'secret' - - 'downwardAPI' - - 'persistentVolumeClaim' - hostNetwork: false - hostIPC: false - hostPID: false - runAsUser: - rule: 'RunAsAny' - seLinux: - rule: 'RunAsAny' - supplementalGroups: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - fsGroup: - rule: 'MustRunAs' - ranges: - # Forbid adding the root group. - - min: 1 - max: 65535 - readOnlyRootFilesystem: false -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/pvc.yaml b/operations/deployment/helm/grafana/templates/pvc.yaml deleted file mode 100644 index eb8f87f..0000000 --- a/operations/deployment/helm/grafana/templates/pvc.yaml +++ /dev/null @@ -1,36 +0,0 @@ -{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.persistence.type "pvc")}} -apiVersion: v1 -kind: PersistentVolumeClaim -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.persistence.extraPvcLabels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.persistence.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.persistence.finalizers }} - finalizers: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - accessModes: - {{- range .Values.persistence.accessModes }} - - {{ . | quote }} - {{- end }} - resources: - requests: - storage: {{ .Values.persistence.size | quote }} - {{- with .Values.persistence.storageClassName }} - storageClassName: {{ . }} - {{- end }} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: - {{- toYaml . | nindent 6 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/role.yaml b/operations/deployment/helm/grafana/templates/role.yaml deleted file mode 100644 index 4b5edd9..0000000 --- a/operations/deployment/helm/grafana/templates/role.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and .Values.rbac.create (not .Values.rbac.useExistingRole) -}} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- if or .Values.rbac.pspEnabled (and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled .Values.rbac.extraRoleRules)) }} -rules: - {{- if and .Values.rbac.pspEnabled (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") }} - - apiGroups: ['extensions'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "grafana.fullname" . 
}}] - {{- end }} - {{- if and .Values.rbac.namespaced (or .Values.sidecar.dashboards.enabled .Values.sidecar.datasources.enabled .Values.sidecar.plugins.enabled) }} - - apiGroups: [""] # "" indicates the core API group - resources: ["configmaps", "secrets"] - verbs: ["get", "watch", "list"] - {{- end }} - {{- with .Values.rbac.extraRoleRules }} - {{- toYaml . | nindent 2 }} - {{- end}} -{{- else }} -rules: [] -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/rolebinding.yaml b/operations/deployment/helm/grafana/templates/rolebinding.yaml deleted file mode 100644 index 58f77c6..0000000 --- a/operations/deployment/helm/grafana/templates/rolebinding.yaml +++ /dev/null @@ -1,25 +0,0 @@ -{{- if .Values.rbac.create }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - {{- if .Values.rbac.useExistingRole }} - name: {{ .Values.rbac.useExistingRole }} - {{- else }} - name: {{ include "grafana.fullname" . }} - {{- end }} -subjects: -- kind: ServiceAccount - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret-env.yaml b/operations/deployment/helm/grafana/templates/secret-env.yaml deleted file mode 100644 index eb14aac..0000000 --- a/operations/deployment/helm/grafana/templates/secret-env.yaml +++ /dev/null @@ -1,14 +0,0 @@ -{{- if .Values.envRenderSecret }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "grafana.fullname" . }}-env - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} -type: Opaque -data: -{{- range $key, $val := .Values.envRenderSecret }} - {{ $key }}: {{ tpl ($val | toString) $ | b64enc | quote }} -{{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/secret.yaml b/operations/deployment/helm/grafana/templates/secret.yaml deleted file mode 100644 index 5cbd527..0000000 --- a/operations/deployment/helm/grafana/templates/secret.yaml +++ /dev/null @@ -1,26 +0,0 @@ -{{- if or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret)) }} -apiVersion: v1 -kind: Secret -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -type: Opaque -data: - {{- if and (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD) }} - admin-user: {{ .Values.adminUser | b64enc | quote }} - {{- if .Values.adminPassword }} - admin-password: {{ .Values.adminPassword | b64enc | quote }} - {{- else }} - admin-password: {{ include "grafana.password" . 
}} - {{- end }} - {{- end }} - {{- if not .Values.ldap.existingSecret }} - ldap-toml: {{ tpl .Values.ldap.config $ | b64enc | quote }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/service.yaml b/operations/deployment/helm/grafana/templates/service.yaml deleted file mode 100644 index 9102c1e..0000000 --- a/operations/deployment/helm/grafana/templates/service.yaml +++ /dev/null @@ -1,58 +0,0 @@ -{{- if .Values.service.enabled }} -{{- $root := . }} -apiVersion: v1 -kind: Service -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.service.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.service.annotations }} - annotations: - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} -spec: - {{- if (or (eq .Values.service.type "ClusterIP") (empty .Values.service.type)) }} - type: ClusterIP - {{- with .Values.service.clusterIP }} - clusterIP: {{ . }} - {{- end }} - {{- else if eq .Values.service.type "LoadBalancer" }} - type: {{ .Values.service.type }} - {{- with .Values.service.loadBalancerIP }} - loadBalancerIP: {{ . }} - {{- end }} - {{- with .Values.service.loadBalancerSourceRanges }} - loadBalancerSourceRanges: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- else }} - type: {{ .Values.service.type }} - {{- end }} - {{- with .Values.service.externalIPs }} - externalIPs: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.service.externalTrafficPolicy }} - externalTrafficPolicy: {{ . }} - {{- end }} - ports: - - name: {{ .Values.service.portName }} - port: {{ .Values.service.port }} - protocol: TCP - targetPort: {{ .Values.service.targetPort }} - {{- with .Values.service.appProtocol }} - appProtocol: {{ . }} - {{- end }} - {{- if (and (eq .Values.service.type "NodePort") (not (empty .Values.service.nodePort))) }} - nodePort: {{ .Values.service.nodePort }} - {{- end }} - {{- with .Values.extraExposePorts }} - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} - selector: - {{- include "grafana.selectorLabels" . | nindent 4 }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/serviceaccount.yaml b/operations/deployment/helm/grafana/templates/serviceaccount.yaml deleted file mode 100644 index 784e71b..0000000 --- a/operations/deployment/helm/grafana/templates/serviceaccount.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if .Values.serviceAccount.create }} -{{- $root := . -}} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.serviceAccount.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.serviceAccount.annotations }} - annotations: - {{- tpl (toYaml . | nindent 4) $root }} - {{- end }} - name: {{ include "grafana.serviceAccountName" . }} - namespace: {{ include "grafana.namespace" . }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/servicemonitor.yaml b/operations/deployment/helm/grafana/templates/servicemonitor.yaml deleted file mode 100644 index 0359013..0000000 --- a/operations/deployment/helm/grafana/templates/servicemonitor.yaml +++ /dev/null @@ -1,52 +0,0 @@ -{{- if .Values.serviceMonitor.enabled }} ---- -apiVersion: monitoring.coreos.com/v1 -kind: ServiceMonitor -metadata: - name: {{ include "grafana.fullname" . }} - {{- if .Values.serviceMonitor.namespace }} - namespace: {{ tpl .Values.serviceMonitor.namespace . 
}} - {{- else }} - namespace: {{ include "grafana.namespace" . }} - {{- end }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.serviceMonitor.labels }} - {{- tpl (toYaml . | nindent 4) $ }} - {{- end }} -spec: - endpoints: - - port: {{ .Values.service.portName }} - {{- with .Values.serviceMonitor.interval }} - interval: {{ . }} - {{- end }} - {{- with .Values.serviceMonitor.scrapeTimeout }} - scrapeTimeout: {{ . }} - {{- end }} - honorLabels: true - path: {{ .Values.serviceMonitor.path }} - scheme: {{ .Values.serviceMonitor.scheme }} - {{- with .Values.serviceMonitor.tlsConfig }} - tlsConfig: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.serviceMonitor.relabelings }} - relabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - {{- with .Values.serviceMonitor.metricRelabelings }} - metricRelabelings: - {{- toYaml . | nindent 6 }} - {{- end }} - jobLabel: "{{ .Release.Name }}" - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - namespaceSelector: - matchNames: - - {{ include "grafana.namespace" . }} - {{- with .Values.serviceMonitor.targetLabels }} - targetLabels: - {{- toYaml . | nindent 4 }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/statefulset.yaml b/operations/deployment/helm/grafana/templates/statefulset.yaml deleted file mode 100644 index e6c944a..0000000 --- a/operations/deployment/helm/grafana/templates/statefulset.yaml +++ /dev/null @@ -1,56 +0,0 @@ -{{- $sts := list "sts" "StatefulSet" "statefulset" -}} -{{- if (or (.Values.useStatefulSet) (and .Values.persistence.enabled (not .Values.persistence.existingClaim) (has .Values.persistence.type $sts)))}} -apiVersion: apps/v1 -kind: StatefulSet -metadata: - name: {{ include "grafana.fullname" . }} - namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - replicas: {{ .Values.replicas }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - serviceName: {{ include "grafana.fullname" . }}-headless - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} - {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- end }} - kubectl.kubernetes.io/default-container: {{ .Chart.Name }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- include "grafana.pod" . 
| nindent 6 }} - {{- if .Values.persistence.enabled}} - volumeClaimTemplates: - - metadata: - name: storage - spec: - accessModes: {{ .Values.persistence.accessModes }} - storageClassName: {{ .Values.persistence.storageClassName }} - resources: - requests: - storage: {{ .Values.persistence.size }} - {{- with .Values.persistence.selectorLabels }} - selector: - matchLabels: - {{- toYaml . | nindent 10 }} - {{- end }} - {{- end }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml b/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml deleted file mode 100644 index 01c96c9..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-configmap.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if .Values.testFramework.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -data: - run.sh: |- - @test "Test Health" { - url="http://{{ include "grafana.fullname" . }}/api/health" - - code=$(wget --server-response --spider --timeout 90 --tries 10 ${url} 2>&1 | awk '/^ HTTP/{print $2}') - [ "$code" == "200" ] - } -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml b/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml deleted file mode 100644 index 1821772..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-podsecuritypolicy.yaml +++ /dev/null @@ -1,32 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: policy/v1beta1 -kind: PodSecurityPolicy -metadata: - name: {{ include "grafana.fullname" . }}-test - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -spec: - allowPrivilegeEscalation: true - privileged: false - hostNetwork: false - hostIPC: false - hostPID: false - fsGroup: - rule: RunAsAny - seLinux: - rule: RunAsAny - supplementalGroups: - rule: RunAsAny - runAsUser: - rule: RunAsAny - volumes: - - configMap - - downwardAPI - - emptyDir - - projected - - csi - - secret -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-role.yaml b/operations/deployment/helm/grafana/templates/tests/test-role.yaml deleted file mode 100644 index cb4c782..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-role.yaml +++ /dev/null @@ -1,17 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: Role -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -rules: - - apiGroups: ['policy'] - resources: ['podsecuritypolicies'] - verbs: ['use'] - resourceNames: [{{ include "grafana.fullname" . 
}}-test] -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml b/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml deleted file mode 100644 index f40d791..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-rolebinding.yaml +++ /dev/null @@ -1,20 +0,0 @@ -{{- if and (.Capabilities.APIVersions.Has "policy/v1beta1/PodSecurityPolicy") .Values.testFramework.enabled .Values.rbac.pspEnabled }} -apiVersion: rbac.authorization.k8s.io/v1 -kind: RoleBinding -metadata: - name: {{ include "grafana.fullname" . }}-test - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - labels: - {{- include "grafana.labels" . | nindent 4 }} -roleRef: - apiGroup: rbac.authorization.k8s.io - kind: Role - name: {{ include "grafana.fullname" . }}-test -subjects: - - kind: ServiceAccount - name: {{ include "grafana.serviceAccountNameTest" . }} - namespace: {{ include "grafana.namespace" . }} -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml b/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml deleted file mode 100644 index 38fba35..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test-serviceaccount.yaml +++ /dev/null @@ -1,12 +0,0 @@ -{{- if and .Values.testFramework.enabled .Values.serviceAccount.create }} -apiVersion: v1 -kind: ServiceAccount -metadata: - labels: - {{- include "grafana.labels" . | nindent 4 }} - name: {{ include "grafana.serviceAccountNameTest" . }} - namespace: {{ include "grafana.namespace" . }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" -{{- end }} diff --git a/operations/deployment/helm/grafana/templates/tests/test.yaml b/operations/deployment/helm/grafana/templates/tests/test.yaml deleted file mode 100644 index 15067ae..0000000 --- a/operations/deployment/helm/grafana/templates/tests/test.yaml +++ /dev/null @@ -1,49 +0,0 @@ -{{- if .Values.testFramework.enabled }} -{{- $root := . }} -apiVersion: v1 -kind: Pod -metadata: - name: {{ include "grafana.fullname" . }}-test - labels: - {{- include "grafana.labels" . | nindent 4 }} - annotations: - "helm.sh/hook": test-success - "helm.sh/hook-delete-policy": "before-hook-creation,hook-succeeded" - namespace: {{ include "grafana.namespace" . }} -spec: - serviceAccountName: {{ include "grafana.serviceAccountNameTest" . }} - {{- with .Values.testFramework.securityContext }} - securityContext: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- if or .Values.image.pullSecrets .Values.global.imagePullSecrets }} - imagePullSecrets: - {{- include "grafana.imagePullSecrets" (dict "root" $root "imagePullSecrets" .Values.image.pullSecrets) | nindent 4 }} - {{- end }} - {{- with .Values.nodeSelector }} - nodeSelector: - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.affinity }} - affinity: - {{- tpl (toYaml .) $root | nindent 4 }} - {{- end }} - {{- with .Values.tolerations }} - tolerations: - {{- toYaml . 
| nindent 4 }} - {{- end }} - containers: - - name: {{ .Release.Name }}-test - image: "{{ .Values.global.imageRegistry | default .Values.testFramework.image.registry }}/{{ .Values.testFramework.image.repository }}:{{ .Values.testFramework.image.tag }}" - imagePullPolicy: "{{ .Values.testFramework.imagePullPolicy}}" - command: ["/opt/bats/bin/bats", "-t", "/tests/run.sh"] - volumeMounts: - - mountPath: /tests - name: tests - readOnly: true - volumes: - - name: tests - configMap: - name: {{ include "grafana.fullname" . }}-test - restartPolicy: Never -{{- end }} From 11d63c0e5295b3c78ae5ce6cb4cad4d1a5973ffa Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 23:24:25 +0530 Subject: [PATCH 32/99] Test with values mapping --- .../deployment/helm/grafana/.helmignore | 23 --------- .../helm/grafana/bitops.config.yaml | 2 +- .../helm/grafana/templates/deployment.yaml | 51 ------------------- 3 files changed, 1 insertion(+), 75 deletions(-) delete mode 100644 operations/deployment/helm/grafana/.helmignore delete mode 100644 operations/deployment/helm/grafana/templates/deployment.yaml diff --git a/operations/deployment/helm/grafana/.helmignore b/operations/deployment/helm/grafana/.helmignore deleted file mode 100644 index 8cade13..0000000 --- a/operations/deployment/helm/grafana/.helmignore +++ /dev/null @@ -1,23 +0,0 @@ -# Patterns to ignore when building packages. -# This supports shell glob matching, relative path matching, and -# negation (prefixed with !). Only one pattern per line. -.DS_Store -# Common VCS dirs -.git/ -.gitignore -.bzr/ -.bzrignore -.hg/ -.hgignore -.svn/ -# Common backup files -*.swp -*.bak -*.tmp -*~ -# Various IDEs -.vscode -.project -.idea/ -*.tmproj -OWNERS diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml index 0d5d6df..caf46cf 100644 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -1,6 +1,6 @@ helm: cli: - namespace: kube-system + # namespace: kube-system timeout: 200s debug: true atomic: false diff --git a/operations/deployment/helm/grafana/templates/deployment.yaml b/operations/deployment/helm/grafana/templates/deployment.yaml deleted file mode 100644 index 9c96d35..0000000 --- a/operations/deployment/helm/grafana/templates/deployment.yaml +++ /dev/null @@ -1,51 +0,0 @@ -{{- if (and (not .Values.useStatefulSet) (or (not .Values.persistence.enabled) (eq .Values.persistence.type "pvc"))) }} -apiVersion: apps/v1 -kind: Deployment -metadata: - name: {{ include "grafana.fullname" . }} - # namespace: {{ include "grafana.namespace" . }} - labels: - {{- include "grafana.labels" . | nindent 4 }} - {{- with .Values.labels }} - {{- toYaml . | nindent 4 }} - {{- end }} - {{- with .Values.annotations }} - annotations: - {{- toYaml . | nindent 4 }} - {{- end }} -spec: - {{- if and (not .Values.autoscaling.enabled) (.Values.replicas) }} - replicas: {{ .Values.replicas }} - {{- end }} - revisionHistoryLimit: {{ .Values.revisionHistoryLimit }} - selector: - matchLabels: - {{- include "grafana.selectorLabels" . | nindent 6 }} - {{- with .Values.deploymentStrategy }} - strategy: - {{- toYaml . | trim | nindent 4 }} - {{- end }} - template: - metadata: - labels: - {{- include "grafana.selectorLabels" . | nindent 8 }} - {{- with .Values.podLabels }} - {{- toYaml . | nindent 8 }} - {{- end }} - annotations: - checksum/config: {{ include (print $.Template.BasePath "/configmap.yaml") . 
| sha256sum }} - checksum/dashboards-json-config: {{ include (print $.Template.BasePath "/dashboards-json-configmap.yaml") . | sha256sum }} - checksum/sc-dashboard-provider-config: {{ include (print $.Template.BasePath "/configmap-dashboard-provider.yaml") . | sha256sum }} - {{- if and (or (and (not .Values.admin.existingSecret) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD__FILE) (not .Values.env.GF_SECURITY_ADMIN_PASSWORD)) (and .Values.ldap.enabled (not .Values.ldap.existingSecret))) (not .Values.env.GF_SECURITY_DISABLE_INITIAL_ADMIN_CREATION) }} - checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} - {{- end }} - {{- if .Values.envRenderSecret }} - checksum/secret-env: {{ include (print $.Template.BasePath "/secret-env.yaml") . | sha256sum }} - {{- end }} - kubectl.kubernetes.io/default-container: {{ .Chart.Name }} - {{- with .Values.podAnnotations }} - {{- toYaml . | nindent 8 }} - {{- end }} - spec: - {{- include "grafana.pod" . | nindent 6 }} -{{- end }} From cc32da2778b86573e05432acf62590d05ec2b315 Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Mon, 11 Dec 2023 23:27:20 +0530 Subject: [PATCH 33/99] uncomment namespace --- operations/deployment/helm/grafana/bitops.config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/operations/deployment/helm/grafana/bitops.config.yaml b/operations/deployment/helm/grafana/bitops.config.yaml index caf46cf..0d5d6df 100644 --- a/operations/deployment/helm/grafana/bitops.config.yaml +++ b/operations/deployment/helm/grafana/bitops.config.yaml @@ -1,6 +1,6 @@ helm: cli: - # namespace: kube-system + namespace: kube-system timeout: 200s debug: true atomic: false From 9695841bc43a02fa1b5eed2d8b40d3857b189dae Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Tue, 12 Dec 2023 20:21:02 +0530 Subject: [PATCH 34/99] Added grafana wrapper --- operations/deployment/helm/grafana/Chart.yaml | 25 +- .../deployment/helm/grafana/values.yaml | 1503 +++-------------- 2 files changed, 217 insertions(+), 1311 deletions(-) diff --git a/operations/deployment/helm/grafana/Chart.yaml b/operations/deployment/helm/grafana/Chart.yaml index f71c5b1..12e9ab9 100644 --- a/operations/deployment/helm/grafana/Chart.yaml +++ b/operations/deployment/helm/grafana/Chart.yaml @@ -1,18 +1,9 @@ apiVersion: v2 -name: grafana -version: 7.0.14 -appVersion: 10.2.2 -kubeVersion: "^1.8.0-0" -description: The leading tool for querying and visualizing time series and metrics. -home: https://grafana.com -icon: https://artifacthub.io/image/b4fed1a7-6c8f-4945-b99d-096efa3e4116 -sources: - - https://github.com/grafana/grafana - - https://github.com/grafana/helm-charts -annotations: - "artifacthub.io/license": AGPL-3.0-only - "artifacthub.io/links": | - - name: Chart Source - url: https://github.com/grafana/helm-charts - - name: Upstream Project - url: https://github.com/grafana/grafana +name: grafana chart wrapper +description: A Helm chart wrapper for grafana +version: 0.1.0 + +dependencies: +- name: grafana + version: 6.31.1 + repository: https://grafana.github.io/helm-charts/ \ No newline at end of file diff --git a/operations/deployment/helm/grafana/values.yaml b/operations/deployment/helm/grafana/values.yaml index dbd58d4..1d529db 100644 --- a/operations/deployment/helm/grafana/values.yaml +++ b/operations/deployment/helm/grafana/values.yaml @@ -1,1303 +1,218 @@ -global: - # -- Overrides the Docker registry globally for all images - imageRegistry: null - - # To help compatibility with other charts which use global.imagePullSecrets. 
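Note on the wrapper conversion in this patch: the chart's own metadata is replaced by a thin wrapper that pulls the upstream grafana chart in as a dependency (version 6.31.1 from https://grafana.github.io/helm-charts/). One caveat: Helm expects chart names to be lower-case alphanumerics and dashes, so the space-containing `name: grafana chart wrapper` may be rejected by `helm lint` or stricter tooling; a dash-separated name is safer. A minimal sketch of such a wrapper Chart.yaml (the hyphenated name is an illustrative assumption, not what the patch uses):

apiVersion: v2
name: grafana-wrapper            # illustrative; chart names should avoid spaces
description: A Helm chart wrapper for grafana
version: 0.1.0
dependencies:
  - name: grafana                # upstream chart
    version: 6.31.1
    repository: https://grafana.github.io/helm-charts/

The dependency archive still has to be fetched into charts/ (for example with `helm dependency update`) before the wrapper can be installed.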
- # Allow either an array of {name: pullSecret} maps (k8s-style), or an array of strings (more common helm-style). - # Can be tempalted. - # global: - # imagePullSecrets: - # - name: pullSecret1 - # - name: pullSecret2 - # or - # global: - # imagePullSecrets: - # - pullSecret1 - # - pullSecret2 - imagePullSecrets: [] - -rbac: - create: true - ## Use an existing ClusterRole/Role (depending on rbac.namespaced false/true) - # useExistingRole: name-of-some-role - # useExistingClusterRole: name-of-some-clusterRole - pspEnabled: false - pspUseAppArmor: false - namespaced: false - extraRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] - extraClusterRoleRules: [] - # - apiGroups: [] - # resources: [] - # verbs: [] -serviceAccount: - create: true - name: - nameTest: - ## ServiceAccount labels. - labels: {} -## Service account annotations. Can be templated. -# annotations: -# eks.amazonaws.com/role-arn: arn:aws:iam::123456789000:role/iam-role-name-here - autoMount: true - -replicas: 1 - -## Create a headless service for the deployment -headlessService: false - -## Create HorizontalPodAutoscaler object for deployment type -# -autoscaling: - enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - -## See `kubectl explain poddisruptionbudget.spec` for more -## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ -podDisruptionBudget: {} -# apiVersion: "" -# minAvailable: 1 -# maxUnavailable: 1 - -## See `kubectl explain deployment.spec.strategy` for more -## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy -deploymentStrategy: - type: RollingUpdate - -readinessProbe: - httpGet: - path: /api/health - port: 3000 - -livenessProbe: - httpGet: - path: /api/health - port: 3000 - initialDelaySeconds: 60 - timeoutSeconds: 30 - failureThreshold: 10 - -## Use an alternate scheduler, e.g. "stork". -## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ -## -# schedulerName: "default-scheduler" - -image: - # -- The Docker registry - registry: docker.io - # -- Docker image repository - repository: grafana/grafana - # Overrides the Grafana image tag whose default is the chart appVersion - tag: "" - sha: "" - pullPolicy: IfNotPresent - - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## Can be templated. - ## - pullSecrets: [] - # - myRegistrKeySecretName - -testFramework: - enabled: true - image: - # -- The Docker registry - registry: docker.io - repository: bats/bats - tag: "v1.4.1" - imagePullPolicy: IfNotPresent - securityContext: {} - -securityContext: - runAsNonRoot: true - runAsUser: 472 - runAsGroup: 472 - fsGroup: 472 - -containerSecurityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - -# Enable creating the grafana configmap -createConfigmap: true - -# Extra configmaps to mount in grafana pods -# Values are templated. -extraConfigmapMounts: [] - # - name: certs-configmap - # mountPath: /etc/grafana/ssl/ - # subPath: certificates.crt # (optional) - # configMap: certs-configmap - # readOnly: true - - -extraEmptyDirMounts: [] - # - name: provisioning-notifiers - # mountPath: /etc/grafana/provisioning/notifiers - - -# Apply extra labels to common labels. 
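The flat, chart-local settings removed in this hunk do not disappear; once grafana is consumed as a dependency they are set from the wrapper's values.yaml, nested under the subchart's name. A small illustration using keys that exist in the upstream chart (the particular values shown are examples, not the patch's final configuration):

grafana:
  replicas: 1
  rbac:
    create: true
  serviceAccount:
    autoMount: true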
-extraLabels: {} - -## Assign a PriorityClassName to pods if set -# priorityClassName: - -downloadDashboardsImage: - # -- The Docker registry - registry: docker.io - repository: curlimages/curl - tag: 7.85.0 - sha: "" - pullPolicy: IfNotPresent - -downloadDashboards: - env: {} - envFromSecret: "" - resources: {} - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## Pod Annotations -# podAnnotations: {} - -## Pod Labels -# podLabels: {} - -podPortName: grafana -gossipPortName: gossip -## Deployment annotations -# annotations: {} - -## Expose the grafana service to be accessed from outside the cluster (LoadBalancer service). -## or access it from within the cluster (ClusterIP service). Set the service type and the port to serve it. -## ref: http://kubernetes.io/docs/user-guide/services/ -## -service: - enabled: true - type: ClusterIP - port: 80 - targetPort: 3000 - # targetPort: 4181 To be used with a proxy extraContainer - ## Service annotations. Can be templated. - annotations: {} - labels: {} - portName: service - # Adds the appProtocol field to the service. This allows to work with istio protocol selection. Ex: "http" or "tcp" - appProtocol: "" - -serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) - labels: {} - interval: 30s - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - metricRelabelings: [] - targetLabels: [] - -extraExposePorts: [] - # - name: keycloak - # port: 8080 - # targetPort: 8080 - -# overrides pod.spec.hostAliases in the grafana deployment's pods -hostAliases: [] - # - ip: "1.2.3.4" - # hostnames: - # - "my.host.com" - -ingress: - enabled: false - # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName - # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress - # ingressClassName: nginx - # Values can be templated - annotations: {} - # kubernetes.io/ingress.class: nginx - # kubernetes.io/tls-acme: "true" - labels: {} - path: / - - # pathType is only for k8s >= 1.1= - pathType: Prefix - - hosts: - - chart-example.local - ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. 
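The same nesting applies to how the wrapped Grafana is exposed: the service and ingress options keep their upstream names, one level down. A sketch with placeholder values (the hostname is purely illustrative):

grafana:
  service:
    type: ClusterIP
    port: 80
  ingress:
    enabled: true
    hosts:
      - grafana.example.com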
- extraPaths: [] - # - path: /* - # backend: - # serviceName: ssl-redirect - # servicePort: use-annotation - ## Or for k8s > 1.19 - # - path: /* - # pathType: Prefix - # backend: - # service: - # name: ssl-redirect - # port: - # name: use-annotation - - - tls: [] - # - secretName: chart-example-tls - # hosts: - # - chart-example.local - -resources: {} -# limits: -# cpu: 100m -# memory: 128Mi -# requests: -# cpu: 100m -# memory: 128Mi - -## Node labels for pod assignment -## ref: https://kubernetes.io/docs/user-guide/node-selection/ -# -nodeSelector: {} - -## Tolerations for pod assignment -## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ -## -tolerations: [] - -## Affinity for pod assignment (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity -## -affinity: {} - -## Topology Spread Constraints -## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ -## -topologySpreadConstraints: [] - -## Additional init containers (evaluated as template) -## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ -## -extraInitContainers: [] - -## Enable an Specify container in extraContainers. This is meant to allow adding an authentication proxy to a grafana pod -extraContainers: "" -# extraContainers: | -# - name: proxy -# image: quay.io/gambol99/keycloak-proxy:latest -# args: -# - -provider=github -# - -client-id= -# - -client-secret= -# - -github-org= -# - -email-domain=* -# - -cookie-secret= -# - -http-address=http://0.0.0.0:4181 -# - -upstream-url=http://127.0.0.1:3000 -# ports: -# - name: proxy-web -# containerPort: 4181 - -## Volumes that can be used in init containers that will not be mounted to deployment pods -extraContainerVolumes: [] -# - name: volume-from-secret -# secret: -# secretName: secret-to-mount -# - name: empty-dir-volume -# emptyDir: {} - -## Enable persistence using Persistent Volume Claims -## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ -## -persistence: - type: pvc - enabled: false - # storageClassName: default - accessModes: - - ReadWriteOnce - size: 10Gi - # annotations: {} - finalizers: - - kubernetes.io/pvc-protection - # selectorLabels: {} - ## Sub-directory of the PV to mount. Can be templated. - # subPath: "" - ## Name of an existing PVC. Can be templated. - # existingClaim: - ## Extra labels to apply to a PVC. - extraPvcLabels: {} - - ## If persistence is not enabled, this allows to mount the - ## local storage in-memory to improve performance - ## - inMemory: - enabled: false - ## The maximum usage on memory medium EmptyDir would be - ## the minimum value between the SizeLimit specified - ## here and the sum of memory limits of all containers in a pod - ## - # sizeLimit: 300Mi - -initChownData: - ## If false, data ownership will not be reset at startup - ## This allows the grafana-server to be run with an arbitrary user - ## - enabled: true - - ## initChownData container image - ## +grafana: + replicas: 1 + image: - # -- The Docker registry - registry: docker.io - repository: library/busybox - tag: "1.31.1" + repository: grafana/grafana + tag: 9.0.1 sha: "" pullPolicy: IfNotPresent + + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
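Likewise for storage: if dashboards and settings should survive pod restarts, persistence is enabled under the grafana key in the wrapper's values.yaml. The storage class is cluster-specific and shown only as an example:

grafana:
  persistence:
    enabled: true
    size: 10Gi
    # storageClassName: gp2    # depends on the target cluster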
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistrKeySecretName + + serviceAccount: + autoMount: true - ## initChownData resource requests and limits - ## Ref: http://kubernetes.io/docs/user-guide/compute-resources/ - ## - resources: {} - # limits: - # cpu: 100m - # memory: 128Mi - # requests: - # cpu: 100m - # memory: 128Mi - securityContext: - runAsNonRoot: false - runAsUser: 0 - seccompProfile: - type: RuntimeDefault - capabilities: - add: - - CHOWN - -# Administrator credentials when not using an existing secret (see below) -adminUser: admin -# adminPassword: strongpassword - -# Use an existing secret for the admin user. -admin: - ## Name of the secret. Can be templated. - existingSecret: "" - userKey: admin-user - passwordKey: admin-password - -## Define command to be executed at startup by grafana container -## Needed if using `vault-env` to manage secrets (ref: https://banzaicloud.com/blog/inject-secrets-into-pods-vault/) -## Default is "run.sh" as defined in grafana's Dockerfile -# command: -# - "sh" -# - "/run.sh" - -## Optionally define args if command is used -## Needed if using `hashicorp/envconsul` to manage secrets -## By default no arguments are set -# args: -# - "-secret" -# - "secret/grafana" -# - "./grafana" - -## Extra environment variables that will be pass onto deployment pods -## -## to provide grafana with access to CloudWatch on AWS EKS: -## 1. create an iam role of type "Web identity" with provider oidc.eks.* (note the provider for later) -## 2. edit the "Trust relationships" of the role, add a line inside the StringEquals clause using the -## same oidc eks provider as noted before (same as the existing line) -## also, replace NAMESPACE and prometheus-operator-grafana with the service account namespace and name -## -## "oidc.eks.us-east-1.amazonaws.com/id/XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX:sub": "system:serviceaccount:NAMESPACE:prometheus-operator-grafana", -## -## 3. attach a policy to the role, you can use a built in policy called CloudWatchReadOnlyAccess -## 4. use the following env: (replace 123456789000 and iam-role-name-here with your aws account number and role name) -## -## env: -## AWS_ROLE_ARN: arn:aws:iam::123456789000:role/iam-role-name-here -## AWS_WEB_IDENTITY_TOKEN_FILE: /var/run/secrets/eks.amazonaws.com/serviceaccount/token -## AWS_REGION: us-east-1 -## -## 5. uncomment the EKS section in extraSecretMounts: below -## 6. uncomment the annotation section in the serviceAccount: above -## make sure to replace arn:aws:iam::123456789000:role/iam-role-name-here with your role arn - -env: {} - -## "valueFrom" environment variable references that will be added to deployment pods. Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core -## Renders in container spec as: -## env: -## ... -## - name: -## valueFrom: -## -envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key - -## The name of a secret in the same kubernetes namespace which contain values to be added to the environment -## This can be useful for auth tokens, etc. Value is templated. -envFromSecret: "" - -## Sensible environment variables that will be rendered as new secret object -## This can be useful for auth tokens, etc. 
-## If the secret values contains "{{", they'll need to be properly escaped so that they are not interpreted by Helm -## ref: https://helm.sh/docs/howto/charts_tips_and_tricks/#using-the-tpl-function -envRenderSecret: {} - -## The names of secrets in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the secret must be defined with an optional key. -## Name is templated. -envFromSecrets: [] -## - name: secret-name -## optional: true - -## The names of conifgmaps in the same kubernetes namespace which contain values to be added to the environment -## Each entry should contain a name key, and can optionally specify whether the configmap must be defined with an optional key. -## Name is templated. -## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.23/#configmapenvsource-v1-core -envFromConfigMaps: [] -## - name: configmap-name -## optional: true - -# Inject Kubernetes services as environment variables. -# See https://kubernetes.io/docs/concepts/services-networking/connect-applications-service/#environment-variables -enableServiceLinks: true - -## Additional grafana server secret mounts -# Defines additional mounts with secrets. Secrets must be manually created in the namespace. -extraSecretMounts: [] - # - name: secret-files - # mountPath: /etc/secrets - # secretName: grafana-secret-files - # readOnly: true - # subPath: "" - # - # for AWS EKS (cloudwatch) use the following (see also instruction in env: above) - # - name: aws-iam-token - # mountPath: /var/run/secrets/eks.amazonaws.com/serviceaccount - # readOnly: true - # projected: - # defaultMode: 420 - # sources: - # - serviceAccountToken: - # audience: sts.amazonaws.com - # expirationSeconds: 86400 - # path: token - # - # for CSI e.g. Azure Key Vault use the following - # - name: secrets-store-inline - # mountPath: /run/secrets - # readOnly: true - # csi: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "akv-grafana-spc" - # nodePublishSecretRef: # Only required when using service principal mode - # name: grafana-akv-creds # Only required when using service principal mode - -## Additional grafana server volume mounts -# Defines additional volume mounts. -extraVolumeMounts: [] - # - name: extra-volume-0 - # mountPath: /mnt/volume0 - # readOnly: true - # existingClaim: volume-claim - # - name: extra-volume-1 - # mountPath: /mnt/volume1 - # readOnly: true - # hostPath: /usr/shared/ - # - name: grafana-secrets - # mountPath: /mnt/volume2 - # csi: true - # data: - # driver: secrets-store.csi.k8s.io - # readOnly: true - # volumeAttributes: - # secretProviderClass: "grafana-env-spc" - -## Container Lifecycle Hooks. Execute a specific bash command or make an HTTP request -lifecycleHooks: {} - # postStart: - # exec: - # command: [] - -## Pass the plugins you want installed as a list. -## -plugins: [] - # - digrich-bubblechart-panel - # - grafana-clock-panel - ## You can also use other plugin download URL, as long as they are valid zip files, - ## and specify the name of the plugin after the semicolon. 
Like this: - # - https://grafana.com/api/plugins/marcusolsson-json-datasource/versions/1.3.2/download;marcusolsson-json-datasource - -## Configure grafana datasources -## ref: http://docs.grafana.org/administration/provisioning/#datasources -## -datasources: {} -# datasources.yaml: -# apiVersion: 1 -# datasources: -# - name: Prometheus -# type: prometheus -# url: http://prometheus-prometheus-server -# access: proxy -# isDefault: true -# - name: CloudWatch -# type: cloudwatch -# access: proxy -# uid: cloudwatch -# editable: false -# jsonData: -# authType: default -# defaultRegion: us-east-1 -# deleteDatasources: [] -# - name: Prometheus - -## Configure grafana alerting (can be templated) -## ref: http://docs.grafana.org/administration/provisioning/#alerting -## -alerting: {} - # rules.yaml: - # apiVersion: 1 - # groups: - # - orgId: 1 - # name: '{{ .Chart.Name }}_my_rule_group' - # folder: my_first_folder - # interval: 60s - # rules: - # - uid: my_id_1 - # title: my_first_rule - # condition: A - # data: - # - refId: A - # datasourceUid: '-100' - # model: - # conditions: - # - evaluator: - # params: - # - 3 - # type: gt - # operator: - # type: and - # query: - # params: - # - A - # reducer: - # type: last - # type: query - # datasource: - # type: __expr__ - # uid: '-100' - # expression: 1==0 - # intervalMs: 1000 - # maxDataPoints: 43200 - # refId: A - # type: math - # dashboardUid: my_dashboard - # panelId: 123 - # noDataState: Alerting - # for: 60s - # annotations: - # some_key: some_value - # labels: - # team: sre_team_1 - # contactpoints.yaml: - # secret: - # apiVersion: 1 - # contactPoints: - # - orgId: 1 - # name: cp_1 - # receivers: - # - uid: first_uid - # type: pagerduty - # settings: - # integrationKey: XXX - # severity: critical - # class: ping failure - # component: Grafana - # group: app-stack - # summary: | - # {{ `{{ include "default.message" . }}` }} - -## Configure notifiers -## ref: http://docs.grafana.org/administration/provisioning/#alert-notification-channels -## -notifiers: {} -# notifiers.yaml: -# notifiers: -# - name: email-notifier -# type: email -# uid: email1 -# # either: -# org_id: 1 -# # or -# org_name: Main Org. -# is_default: true -# settings: -# addresses: an_email_address@example.com -# delete_notifiers: - -## Configure grafana dashboard providers -## ref: http://docs.grafana.org/administration/provisioning/#dashboards -## -## `path` must be /var/lib/grafana/dashboards/ -## -dashboardProviders: {} -# dashboardproviders.yaml: -# apiVersion: 1 -# providers: -# - name: 'default' -# orgId: 1 -# folder: '' -# type: file -# disableDeletion: false -# editable: true -# options: -# path: /var/lib/grafana/dashboards/default - -## Configure grafana dashboard to import -## NOTE: To use dashboards you must also enable/configure dashboardProviders -## ref: https://grafana.com/dashboards -## -## dashboards per provider, use provider name as key. 
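Datasource provisioning works the same way through the wrapper: the Prometheus datasource that the removed values carry as a commented example translates to the wrapper's values.yaml roughly as follows (the service URL is a placeholder and must match the actual Prometheus release):

grafana:
  datasources:
    datasources.yaml:
      apiVersion: 1
      datasources:
        - name: Prometheus
          type: prometheus
          url: http://prometheus-prometheus-server
          access: proxy
          isDefault: true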
-## -dashboards: {} - # default: - # some-dashboard: - # json: | - # $RAW_JSON - # custom-dashboard: - # file: dashboards/custom-dashboard.json - # prometheus-stats: - # gnetId: 2 - # revision: 2 - # datasource: Prometheus - # local-dashboard: - # url: https://example.com/repository/test.json - # token: '' - # local-dashboard-base64: - # url: https://example.com/repository/test-b64.json - # token: '' - # b64content: true - # local-dashboard-gitlab: - # url: https://example.com/repository/test-gitlab.json - # gitlabToken: '' - # local-dashboard-bitbucket: - # url: https://example.com/repository/test-bitbucket.json - # bearerToken: '' - # local-dashboard-azure: - # url: https://example.com/repository/test-azure.json - # basic: '' - # acceptHeader: '*/*' - -## Reference to external ConfigMap per provider. Use provider name as key and ConfigMap name as value. -## A provider dashboards must be defined either by external ConfigMaps or in values.yaml, not in both. -## ConfigMap data example: -## -## data: -## example-dashboard.json: | -## RAW_JSON -## -dashboardsConfigMaps: {} -# default: "" - -## Grafana's primary configuration -## NOTE: values in map will be converted to ini format -## ref: http://docs.grafana.org/installation/configuration/ -## -grafana.ini: - paths: - data: /var/lib/grafana/ - logs: /var/log/grafana - plugins: /var/lib/grafana/plugins - provisioning: /etc/grafana/provisioning - analytics: - check_for_updates: true - log: - mode: console - grafana_net: - url: https://grafana.net - server: - domain: "{{ if (and .Values.ingress.enabled .Values.ingress.hosts) }}{{ .Values.ingress.hosts | first }}{{ else }}''{{ end }}" -## grafana Authentication can be enabled with the following values on grafana.ini - # server: - # The full public facing url you use in browser, used for redirects and emails - # root_url: - # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana - # auth.github: - # enabled: false - # allow_sign_up: false - # scopes: user:email,read:org - # auth_url: https://github.com/login/oauth/authorize - # token_url: https://github.com/login/oauth/access_token - # api_url: https://api.github.com/user - # team_ids: - # allowed_organizations: - # client_id: - # client_secret: -## LDAP Authentication can be enabled with the following values on grafana.ini -## NOTE: Grafana will fail to start if the value for ldap.toml is invalid - # auth.ldap: - # enabled: true - # allow_sign_up: true - # config_file: /etc/grafana/ldap.toml - -## Grafana's LDAP configuration -## Templated by the template in _helpers.tpl -## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled -## ref: http://docs.grafana.org/installation/configuration/#auth-ldap -## ref: http://docs.grafana.org/installation/ldap/#configuration -ldap: - enabled: false - # `existingSecret` is a reference to an existing secret containing the ldap configuration - # for Grafana in a key `ldap-toml`. 
- existingSecret: "" - # `config` is the content of `ldap.toml` that will be stored in the created secret - config: "" - # config: |- - # verbose_logging = true - - # [[servers]] - # host = "my-ldap-server" - # port = 636 - # use_ssl = true - # start_tls = false - # ssl_skip_verify = false - # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" - -## Grafana's SMTP configuration -## NOTE: To enable, grafana.ini must be configured with smtp.enabled -## ref: http://docs.grafana.org/installation/configuration/#smtp -smtp: - # `existingSecret` is a reference to an existing secret containing the smtp configuration - # for Grafana. - existingSecret: "" - userKey: "user" - passwordKey: "password" - -## Sidecars that collect the configmaps with specified label and stores the included files them into the respective folders -## Requires at least Grafana 5 to work and can't be used together with parameters dashboardProviders, datasources and dashboards -sidecar: - image: - # -- The Docker registry - registry: quay.io - repository: kiwigrid/k8s-sidecar - tag: 1.25.2 - sha: "" - imagePullPolicy: IfNotPresent - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - securityContext: - allowPrivilegeEscalation: false - capabilities: - drop: - - ALL - seccompProfile: - type: RuntimeDefault - # skipTlsVerify Set to true to skip tls verification for kube api calls - # skipTlsVerify: true - enableUniqueFilenames: false - readinessProbe: {} - livenessProbe: {} - # Log level default for all sidecars. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. Defaults to INFO - # logLevel: INFO - alerts: - enabled: false - # Additional environment variables for the alerts sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with alert are marked with - label: grafana_alert - # value of label that the configmaps with alert are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for alert config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/alerting/reload" - # Absolute path to shell script to execute after a alert got reloaded - script: null - skipReload: false - # This is needed if skipReload is true, to load any alerts defined at startup time. - # Deploy the alert sidecar as an initContainer. 
- initAlerts: false - # Additional alert sidecar volume mounts - extraMounts: [] - # Sets the size limit of the alert sidecar emptyDir volume - sizeLimit: {} - dashboards: - enabled: false - # Additional environment variables for the dashboards sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - SCProvider: true - # label that the configmaps with dashboards are marked with - label: grafana_dashboard - # value of label that the configmaps with dashboards are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # folder in the pod that should hold the collected dashboards (unless `defaultFolderName` is set) - folder: /tmp/dashboards - # The default folder name, it will create a subfolder under the `folder` and put dashboards in there instead - defaultFolderName: null - # Namespaces list. If specified, the sidecar will search for config-maps/secrets inside these namespaces. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces. - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # If specified, the sidecar will look for annotation with this name to create folder and put graph here. - # You can use this parameter together with `provider.foldersFromFilesStructure`to annotate configmaps and create folder structure. - folderAnnotation: null - # Endpoint to send request to reload alerts - reloadURL: "http://localhost:3000/api/admin/provisioning/dashboards/reload" - # Absolute path to shell script to execute after a configmap got reloaded - script: null - skipReload: false - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # provider configuration that lets grafana manage the dashboards - provider: - # name of the provider, should be unique - name: sidecarProvider - # orgid as configured in grafana - orgid: 1 - # folder in which the dashboards should be imported in grafana - folder: '' - # type of the provider - type: file - # disableDelete to activate a import-only behaviour - disableDelete: false - # allow updating provisioned dashboards from the UI - allowUiUpdates: false - # allow Grafana to replicate dashboard structure from filesystem - foldersFromFilesStructure: false - # Additional dashboard sidecar volume mounts - extraMounts: [] - # Sets the size limit of the dashboard sidecar emptyDir volume - sizeLimit: {} - datasources: - enabled: false - # Additional environment variables for the datasourcessidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. 
- # ignoreAlreadyProcessed: true - # label that the configmaps with datasources are marked with - label: grafana_datasource - # value of label that the configmaps with datasources are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for datasource config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload datasources - reloadURL: "http://localhost:3000/api/admin/provisioning/datasources/reload" - # Absolute path to shell script to execute after a datasource got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any datasources defined at startup time. - initDatasources: false - # Sets the size limit of the datasource sidecar emptyDir volume - sizeLimit: {} - plugins: - enabled: false - # Additional environment variables for the plugins sidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with plugins are marked with - label: grafana_plugin - # value of label that the configmaps with plugins are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for plugin config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) 
- # watchClientTimeout: 60 - # - # Endpoint to send request to reload plugins - reloadURL: "http://localhost:3000/api/admin/provisioning/plugins/reload" - # Absolute path to shell script to execute after a plugin got reloaded - script: null - skipReload: false - # Deploy the datasource sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any plugins defined at startup time. - initPlugins: false - # Sets the size limit of the plugin sidecar emptyDir volume - sizeLimit: {} - notifiers: - enabled: false - # Additional environment variables for the notifierssidecar - env: {} - # Do not reprocess already processed unchanged resources on k8s API reconnect. - # ignoreAlreadyProcessed: true - # label that the configmaps with notifiers are marked with - label: grafana_notifier - # value of label that the configmaps with notifiers are set to - labelValue: "" - # Log level. Can be one of: DEBUG, INFO, WARN, ERROR, CRITICAL. - # logLevel: INFO - # If specified, the sidecar will search for notifier config-maps inside this namespace. - # Otherwise the namespace in which the sidecar is running will be used. - # It's also possible to specify ALL to search in all namespaces - searchNamespace: null - # Method to use to detect ConfigMap changes. With WATCH the sidecar will do a WATCH requests, with SLEEP it will list all ConfigMaps, then sleep for 60 seconds. - watchMethod: WATCH - # search in configmap, secret or both - resource: both - # watchServerTimeout: request to the server, asking it to cleanly close the connection after that. - # defaults to 60sec; much higher values like 3600 seconds (1h) are feasible for non-Azure K8S - # watchServerTimeout: 3600 - # - # watchClientTimeout: is a client-side timeout, configuring your local socket. - # If you have a network outage dropping all packets with no RST/FIN, - # this is how long your client waits before realizing & dropping the connection. - # defaults to 66sec (sic!) - # watchClientTimeout: 60 - # - # Endpoint to send request to reload notifiers - reloadURL: "http://localhost:3000/api/admin/provisioning/notifications/reload" - # Absolute path to shell script to execute after a notifier got reloaded - script: null - skipReload: false - # Deploy the notifier sidecar as an initContainer in addition to a container. - # This is needed if skipReload is true, to load any notifiers defined at startup time. - initNotifiers: false - # Sets the size limit of the notifier sidecar emptyDir volume - sizeLimit: {} - -## Override the deployment namespace -## -namespaceOverride: "" - -## Number of old ReplicaSets to retain -## -revisionHistoryLimit: 10 - -## Add a seperate remote image renderer deployment/service -imageRenderer: - deploymentStrategy: {} - # Enable the image-renderer deployment & service - enabled: false - replicas: 1 - autoscaling: + ingress: enabled: false - minReplicas: 1 - maxReplicas: 5 - targetCPU: "60" - targetMemory: "" - behavior: {} - image: - # -- The Docker registry - registry: docker.io - # image-renderer Image repository - repository: grafana/grafana-image-renderer - # image-renderer Image tag - tag: latest - # image-renderer Image sha (optional) - sha: "" - # image-renderer ImagePullPolicy - pullPolicy: Always - # extra environment variables - env: - HTTP_HOST: "0.0.0.0" - # RENDERING_ARGS: --no-sandbox,--disable-gpu,--window-size=1280x758 - # RENDERING_MODE: clustered - # IGNORE_HTTPS_ERRORS: true - - ## "valueFrom" environment variable references that will be added to deployment pods. 
Name is templated. - ## ref: https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.19/#envvarsource-v1-core - ## Renders in container spec as: - ## env: - ## ... - ## - name: - ## valueFrom: - ## - envValueFrom: {} - # ENV_NAME: - # configMapKeyRef: - # name: configmap-name - # key: value_key + # For Kubernetes >= 1.18 you should specify the ingress-controller via the field ingressClassName + # See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#specifying-the-class-of-an-ingress + # ingressClassName: nginx + # Values can be templated + annotations: + kubernetes.io/ingress.class: nginx + cert-manager.io/cluster-issuer: letsencrypt-prod + # cert-manager.io/cluster-issuer: letsencrypt-staging + + # https://cert-manager.io/docs/faq/acme/#:~:text=If%20you%20receive%20a%20tls,the%20actual%20certificate%20is%20issued + cert-manager.io/issue-temporary-certificate: "true" + acme.cert-manager.io/http01-edit-in-place: "true" - # image-renderer deployment serviceAccount - serviceAccountName: "" - # image-renderer deployment securityContext - securityContext: {} - # image-renderer deployment container securityContext - containerSecurityContext: - seccompProfile: - type: RuntimeDefault - capabilities: - drop: ['ALL'] - allowPrivilegeEscalation: false - readOnlyRootFilesystem: true - ## image-renderer pod annotation - podAnnotations: {} - # image-renderer deployment Host Aliases - hostAliases: [] - # image-renderer deployment priority class - priorityClassName: '' - service: - # Enable the image-renderer service - enabled: true - # image-renderer service port name - portName: 'http' - # image-renderer service port used by both service and deployment - port: 8081 - targetPort: 8081 - # Adds the appProtocol field to the image-renderer service. This allows to work with istio protocol selection. Ex: "http" or "tcp" - appProtocol: "" - serviceMonitor: - ## If true, a ServiceMonitor CRD is created for a prometheus operator - ## https://github.com/coreos/prometheus-operator - ## - enabled: false - path: /metrics - # namespace: monitoring (defaults to use the namespace this chart is deployed to) labels: {} - interval: 1m - scheme: http - tlsConfig: {} - scrapeTimeout: 30s - relabelings: [] - # See: https://doc.crds.dev/github.com/prometheus-operator/kube-prometheus/monitoring.coreos.com/ServiceMonitor/v1@v0.11.0#spec-targetLabels - targetLabels: [] - # - targetLabel1 - # - targetLabel2 - # If https is enabled in Grafana, this needs to be set as 'https' to correctly configure the callback used in Grafana - grafanaProtocol: http - # In case a sub_path is used this needs to be added to the image renderer callback - grafanaSubPath: "" - # name of the image-renderer port on the pod - podPortName: http - # number of image-renderer replica sets to keep - revisionHistoryLimit: 10 - networkPolicy: - # Enable a NetworkPolicy to limit inbound traffic to only the created grafana pods - limitIngress: true - # Enable a NetworkPolicy to limit outbound traffic to only the created grafana pods - limitEgress: false - # Allow additional services to access image-renderer (eg. 
Prometheus operator when ServiceMonitor is enabled) - extraIngressSelectors: [] - resources: {} -# limits: -# cpu: 100m -# memory: 100Mi -# requests: -# cpu: 50m -# memory: 50Mi - ## Node labels for pod assignment - ## ref: https://kubernetes.io/docs/user-guide/node-selection/ - # - nodeSelector: {} - - ## Tolerations for pod assignment - ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ - ## - tolerations: [] - - ## Affinity for pod assignment (evaluated as template) - ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## - affinity: {} - - ## Use an alternate scheduler, e.g. "stork". - ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ - ## - # schedulerName: "default-scheduler" - -networkPolicy: - ## @param networkPolicy.enabled Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. - ## - enabled: false - ## @param networkPolicy.allowExternal Don't require client label for connections - ## The Policy model to apply. When set to false, only pods with the correct - ## client label will have network access to grafana port defined. - ## When true, grafana will accept connections from any source - ## (with the correct destination port). - ## - ingress: true - ## @param networkPolicy.ingress When true enables the creation - ## an ingress network policy - ## - allowExternal: true - ## @param networkPolicy.explicitNamespacesSelector A Kubernetes LabelSelector to explicitly select namespaces from which traffic could be allowed - ## If explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace - ## and that match other criteria, the ones that have the good label, can reach the grafana. - ## But sometimes, we want the grafana to be accessible to clients from other namespaces, in this case, we can use this - ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. - ## - ## Example: - ## explicitNamespacesSelector: - ## matchLabels: - ## role: frontend - ## matchExpressions: - ## - {key: role, operator: In, values: [frontend]} - ## - explicitNamespacesSelector: {} - ## + path: / + + # pathType is only for k8s > 1.19 + pathType: Prefix + + hosts: + - grafana.dev.bitovi-staffing.com + + + ## Extra paths to prepend to every host configuration. This is useful when working with annotation based services. + extraPaths: [] + # - path: /* + # backend: + # serviceName: ssl-redirect + # servicePort: use-annotation + ## Or for k8s > 1.19 + # - path: /* + # pathType: Prefix + # backend: + # service: + # name: ssl-redirect + # port: + # name: service + + + tls: + - secretName: grafana-tls + hosts: + - grafana.dev.bitovi-staffing.com + + # Administrator credentials when not using an existing secret (see below) + adminUser: admin + # adminPassword: set via values-secrets.yaml + + # Use an existing secret for the admin user. 
+ admin: + existingSecret: "" + userKey: admin-user + passwordKey: admin-password + + ## Configure grafana dashboard providers + ## ref: http://docs.grafana.org/administration/provisioning/#dashboards + ## + ## `path` must be /var/lib/grafana/dashboards/ + ## + dashboardProviders: + dashboardproviders.yaml: + apiVersion: 1 + providers: + - name: 'kubernetes' + orgId: 1 + folder: 'kubernetes' + type: file + disableDeletion: false + editable: true + options: + path: /var/lib/grafana/dashboards/kubernetes + ## Configure grafana dashboard to import + ## NOTE: To use dashboards you must also enable/configure dashboardProviders + ## ref: https://grafana.com/dashboards + ## + ## dashboards per provider, use provider name as key. ## - ## - ## - ## - ## - egress: - ## @param networkPolicy.egress.enabled When enabled, an egress network policy will be - ## created allowing grafana to connect to external data sources from kubernetes cluster. + dashboards: + # https://github.com/dotdc/grafana-dashboards-kubernetes + kubernetes: + k8s-addons-starboard-operator: + # k8s-addons-starboard-operator.json + gnetId: 16337 + datasource: Prometheus + k8s-system-api-server: + # k8s-system-api-server.json + gnetId: 15761 + datasource: Prometheus + k8s-system-coredns: + # k8s-system-coredns.json + gnetId: 15762 + datasource: Prometheus + k8s-views-global: + # k8s-views-global.json + gnetId: 15757 + datasource: Prometheus + k8s-views-namespaces: + # k8s-views-namespaces.json + gnetId: 15758 + datasource: Prometheus + k8s-views-nodes: + # k8s-views-nodes.json + gnetId: 15759 + datasource: Prometheus + k8s-views-pods: + # k8s-views-pods.json + gnetId: 15760 + datasource: Prometheus + # default: + # some-dashboard: + # json: | + # $RAW_JSON + # custom-dashboard: + # file: dashboards/custom-dashboard.json + # prometheus-stats: + # gnetId: 2 + # revision: 2 + # datasource: Prometheus + # local-dashboard: + # url: https://example.com/repository/test.json + # token: '' + # local-dashboard-base64: + # url: https://example.com/repository/test-b64.json + # token: '' + # b64content: true + + ## Grafana's primary configuration + ## NOTE: values in map will be converted to ini format + ## ref: http://docs.grafana.org/installation/configuration/ + ## + grafana.ini: + paths: + data: /var/lib/grafana/data + logs: /var/log/grafana + plugins: /var/lib/grafana/plugins + provisioning: /etc/grafana/provisioning + analytics: + check_for_updates: true + log: + mode: console + grafana_net: + url: https://grafana.net + ## grafana Authentication can be enabled with the following values on grafana.ini + # server: + # The full public facing url you use in browser, used for redirects and emails + # root_url: + # https://grafana.com/docs/grafana/latest/auth/github/#enable-github-in-grafana + # auth.github: + # enabled: false + # allow_sign_up: false + # scopes: user:email,read:org + # auth_url: https://github.com/login/oauth/authorize + # token_url: https://github.com/login/oauth/access_token + # api_url: https://api.github.com/user + # team_ids: + # allowed_organizations: + # client_id: + # client_secret: + ## LDAP Authentication can be enabled with the following values on grafana.ini + ## NOTE: Grafana will fail to start if the value for ldap.toml is invalid + # auth.ldap: + # enabled: true + # allow_sign_up: true + # config_file: /etc/grafana/ldap.toml + + ## Grafana's LDAP configuration + ## Templated by the template in _helpers.tpl + ## NOTE: To enable the grafana.ini must be configured with auth.ldap.enabled + ## ref: 
http://docs.grafana.org/installation/configuration/#auth-ldap + ## ref: http://docs.grafana.org/installation/ldap/#configuration + ldap: enabled: false - ## - ## @param networkPolicy.egress.blockDNSResolution When enabled, DNS resolution will be blocked - ## for all pods in the grafana namespace. - blockDNSResolution: false - ## - ## @param networkPolicy.egress.ports Add individual ports to be allowed by the egress - ports: [] - ## Add ports to the egress by specifying - port: - ## E.X. - ## - port: 80 - ## - port: 443 - ## - ## @param networkPolicy.egress.to Allow egress traffic to specific destinations - to: [] - ## Add destinations to the egress by specifying - ipBlock: - ## E.X. - ## to: - ## - namespaceSelector: - ## matchExpressions: - ## - {key: role, operator: In, values: [grafana]} - ## - ## - ## - ## - ## - -# Enable backward compatibility of kubernetes where version below 1.13 doesn't have the enableServiceLinks option -enableKubeBackwardCompatibility: false -useStatefulSet: false -# Create a dynamic manifests via values: -extraObjects: [] - # - apiVersion: "kubernetes-client.io/v1" - # kind: ExternalSecret - # metadata: - # name: grafana-secrets - # spec: - # backendType: gcpSecretsManager - # data: - # - key: grafana-admin-password - # name: adminPassword -configmap: - enabled: true - name: grafana -data: - - # account number - mapAccounts: | - - "000000000000" - - # add eksworkers to nodes and bootstrappers - # ensure accounts match - # add sso and other users to masters as necessry - mapRoles: | - - "groups": - - "system:bootstrappers" - - "system:nodes" - "rolearn": "arn:aws:iam::000000000:role/prod-eksworker" - "username": "system:node:{{EC2PrivateDNSName}}" - - "groups": - - "system:masters" - "rolearn": "arn:aws:iam::000000000:role/AWSReservedSSO_AdministratorAccess_1111111111" \ No newline at end of file + # `existingSecret` is a reference to an existing secret containing the ldap configuration + # for Grafana in a key `ldap-toml`. + existingSecret: "" + # `config` is the content of `ldap.toml` that will be stored in the created secret + config: "" + # config: |- + # verbose_logging = true + + # [[servers]] + # host = "my-ldap-server" + # port = 636 + # use_ssl = true + # start_tls = false + # ssl_skip_verify = false + # bind_dn = "uid=%s,ou=users,dc=myorg,dc=com" + + ## Grafana's SMTP configuration + ## NOTE: To enable, grafana.ini must be configured with smtp.enabled + ## ref: http://docs.grafana.org/installation/configuration/#smtp + smtp: + # `existingSecret` is a reference to an existing secret containing the smtp configuration + # for Grafana. 
+ existingSecret: "" + userKey: "user" + passwordKey: "password" \ No newline at end of file From 1b93d887c3a173c716f31f1f38bbea3f222ac02b Mon Sep 17 00:00:00 2001 From: karamveer28 Date: Tue, 12 Dec 2023 20:47:51 +0530 Subject: [PATCH 35/99] Added fluent chart --- .../deployment/helm/fluent-bit/.helmignore | 23 + .../deployment/helm/fluent-bit/Chart.yaml | 13 + .../deployment/helm/fluent-bit/README.md | 57 + .../helm/fluent-bit/ci/ci-values.yaml | 4 + .../fluent-bit/dashboards/fluent-bit.json | 1565 +++++++++++++++++ .../helm/fluent-bit/templates/NOTES.txt | 6 + .../helm/fluent-bit/templates/_helpers.tpl | 138 ++ .../helm/fluent-bit/templates/_pod.tpl | 151 ++ .../fluent-bit/templates/clusterrole.yaml | 45 + .../templates/clusterrolebinding.yaml | 16 + .../templates/configmap-dashboards.yaml | 21 + .../templates/configmap-luascripts.yaml | 13 + .../helm/fluent-bit/templates/configmap.yaml | 25 + .../helm/fluent-bit/templates/daemonset.yaml | 48 + .../helm/fluent-bit/templates/deployment.yaml | 51 + .../helm/fluent-bit/templates/hpa.yaml | 40 + .../helm/fluent-bit/templates/ingress.yaml | 63 + .../fluent-bit/templates/networkpolicy.yaml | 23 + .../helm/fluent-bit/templates/pdb.yaml | 21 + .../fluent-bit/templates/prometheusrule.yaml | 18 + .../helm/fluent-bit/templates/psp.yaml | 42 + .../helm/fluent-bit/templates/scc.yaml | 41 + .../helm/fluent-bit/templates/service.yaml | 49 + .../fluent-bit/templates/serviceaccount.yaml | 13 + .../fluent-bit/templates/servicemonitor.yaml | 51 + .../templates/tests/test-connection.yaml | 23 + .../helm/fluent-bit/templates/vpa.yaml | 39 + .../deployment/helm/fluent-bit/values.yaml | 496 ++++++ operations/deployment/helm/grafana/Chart.yaml | 9 - operations/deployment/helm/grafana/ENV_FILE | 2 - operations/deployment/helm/grafana/README.md | 757 -------- .../helm/grafana/bitops.config.yaml | 14 - .../deployment/helm/grafana/values.yaml | 218 --- 33 files changed, 3095 insertions(+), 1000 deletions(-) create mode 100644 operations/deployment/helm/fluent-bit/.helmignore create mode 100644 operations/deployment/helm/fluent-bit/Chart.yaml create mode 100644 operations/deployment/helm/fluent-bit/README.md create mode 100644 operations/deployment/helm/fluent-bit/ci/ci-values.yaml create mode 100644 operations/deployment/helm/fluent-bit/dashboards/fluent-bit.json create mode 100644 operations/deployment/helm/fluent-bit/templates/NOTES.txt create mode 100644 operations/deployment/helm/fluent-bit/templates/_helpers.tpl create mode 100644 operations/deployment/helm/fluent-bit/templates/_pod.tpl create mode 100644 operations/deployment/helm/fluent-bit/templates/clusterrole.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/clusterrolebinding.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/configmap-dashboards.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/configmap-luascripts.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/configmap.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/daemonset.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/deployment.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/hpa.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/ingress.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/networkpolicy.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/pdb.yaml create mode 100644 
operations/deployment/helm/fluent-bit/templates/prometheusrule.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/psp.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/scc.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/service.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/serviceaccount.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/servicemonitor.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/tests/test-connection.yaml create mode 100644 operations/deployment/helm/fluent-bit/templates/vpa.yaml create mode 100644 operations/deployment/helm/fluent-bit/values.yaml delete mode 100644 operations/deployment/helm/grafana/Chart.yaml delete mode 100644 operations/deployment/helm/grafana/ENV_FILE delete mode 100644 operations/deployment/helm/grafana/README.md delete mode 100644 operations/deployment/helm/grafana/bitops.config.yaml delete mode 100644 operations/deployment/helm/grafana/values.yaml diff --git a/operations/deployment/helm/fluent-bit/.helmignore b/operations/deployment/helm/fluent-bit/.helmignore new file mode 100644 index 0000000..0e8a0eb --- /dev/null +++ b/operations/deployment/helm/fluent-bit/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/operations/deployment/helm/fluent-bit/Chart.yaml b/operations/deployment/helm/fluent-bit/Chart.yaml new file mode 100644 index 0000000..b77c34d --- /dev/null +++ b/operations/deployment/helm/fluent-bit/Chart.yaml @@ -0,0 +1,13 @@ +apiVersion: v1 +name: fluent-bit +description: Fast and lightweight log processor and forwarder for Linux, OSX and BSD family operating systems. +keywords: + - logging + - fluent-bit + - fluentd +version: 0.40.0 +appVersion: 2.2.0 +icon: https://raw.githubusercontent.com/cncf/artwork/master/projects/fluentd/fluentbit/icon/fluentbit-icon-color.svg +home: https://fluentbit.io/ +sources: + - https://github.com/fluent/fluent-bit/ diff --git a/operations/deployment/helm/fluent-bit/README.md b/operations/deployment/helm/fluent-bit/README.md new file mode 100644 index 0000000..6920d3d --- /dev/null +++ b/operations/deployment/helm/fluent-bit/README.md @@ -0,0 +1,57 @@ +# Fluent Bit Helm chart + +[Fluent Bit](https://fluentbit.io) is a fast and lightweight log processor and forwarder for Linux, OSX and BSD family operating systems. + +## Installation + +To add the `fluent` helm repo, run: + +```sh +helm repo add fluent https://fluent.github.io/helm-charts +``` + +To install a release named `fluent-bit`, run: + +```sh +helm install fluent-bit fluent/fluent-bit +``` + +## Chart values + +```sh +helm show values fluent/fluent-bit +``` + +## Using Lua scripts +Fluent Bit allows us to build filters to modify the incoming records using custom [Lua scripts.](https://docs.fluentbit.io/manual/pipeline/filters/lua) + +### How to use Lua scripts with this Chart + +First, you should add your Lua scripts to `luaScripts` in values.yaml, for example: + +```yaml +luaScripts: + filter_example.lua: | + function filter_name(tag, timestamp, record) + -- put your lua code here.
+ end +``` + +After that, the Lua scripts will be ready to be used as filters. The next step is to add your Fluent Bit [filter](https://docs.fluentbit.io/manual/concepts/data-pipeline/filter) to `config.filters` in values.yaml, for example: + +```yaml +config: + filters: | + [FILTER] + Name lua + Match + script /fluent-bit/scripts/filter_example.lua + call filter_name +``` +Under the hood, the chart will: +- Create a configmap using `luaScripts`. +- Add a volumeMounts entry for each Lua script using the path `/fluent-bit/scripts/