# prometheus.yaml
# Prometheus custom resource for the OpenShift cluster monitoring stack.
# (GitHub page chrome and the copied line-number gutter from the web
# scrape were removed; only the manifest below is meaningful content.)
---
# Prometheus custom resource managed by the cluster-monitoring-operator.
# Declares the main in-cluster Prometheus pair ("k8s") in
# openshift-monitoring, plus three proxy sidecars and a Thanos sidecar.
# NOTE(review): indentation was reconstructed from a whitespace-stripped
# paste following the Prometheus CRD schema — verify against the
# upstream manifest before applying.
apiVersion: monitoring.coreos.com/v1
kind: Prometheus
metadata:
  labels:
    app.kubernetes.io/component: prometheus
    app.kubernetes.io/instance: k8s
    app.kubernetes.io/managed-by: cluster-monitoring-operator
    app.kubernetes.io/name: prometheus
    app.kubernetes.io/part-of: openshift-monitoring
    app.kubernetes.io/version: 2.49.1
  name: k8s
  namespace: openshift-monitoring
spec:
  # Extra alert relabeling rules, read from the alert-relabel-configs
  # secret when it exists (optional: true tolerates its absence).
  additionalAlertRelabelConfigs:
    key: config.yaml
    name: alert-relabel-configs
    optional: true
  additionalArgs:
    - name: scrape.timestamp-tolerance
      value: 15ms
  # Hard anti-affinity: the replicas must land on different hostnames.
  affinity:
    podAntiAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
        - labelSelector:
            matchLabels:
              app.kubernetes.io/component: prometheus
              app.kubernetes.io/instance: k8s
              app.kubernetes.io/name: prometheus
              app.kubernetes.io/part-of: openshift-monitoring
          namespaces:
            - openshift-monitoring
          topologyKey: kubernetes.io/hostname
  # Deliver alerts to the in-cluster Alertmanager over HTTPS, using the
  # pod's service-account token as the bearer credential.
  alerting:
    alertmanagers:
      - apiVersion: v2
        bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
        name: alertmanager-main
        namespace: openshift-monitoring
        port: web
        scheme: https
        tlsConfig:
          caFile: /etc/prometheus/configmaps/serving-certs-ca-bundle/service-ca.crt
          serverName: alertmanager-main
  configMaps:
    - serving-certs-ba-bundle
    - kubelet-serving-ca-bundle
    - metrics-client-ca
  containers:
    # oauth-proxy sidecar: exposes the UI on :9091 and requires an
    # OpenShift SAR ("get" on namespaces) before proxying to the
    # local-only Prometheus listener on :9090.
    - args:
        - -provider=openshift
        - -https-address=:9091
        - -http-address=
        - -email-domain=*
        - -upstream=http://localhost:9090
        - -openshift-service-account=prometheus-k8s
        - '-openshift-sar={"resource": "namespaces", "verb": "get"}'
        - '-openshift-delegate-urls={"/": {"resource": "namespaces", "verb": "get"}}'
        - -tls-cert=/etc/tls/private/tls.crt
        - -tls-key=/etc/tls/private/tls.key
        - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
        - -cookie-secret-file=/etc/proxy/secrets/session_secret
        - -openshift-ca=/etc/pki/tls/cert.pem
        - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
      # Proxy env vars are cleared on purpose: the sidecar must reach
      # its localhost upstream directly, never via a cluster proxy.
      env:
        - name: HTTP_PROXY
          value: ""
        - name: HTTPS_PROXY
          value: ""
        - name: NO_PROXY
          value: ""
      image: quay.io/openshift/oauth-proxy:latest
      name: prometheus-proxy
      ports:
        - containerPort: 9091
          name: web
      resources:
        requests:
          cpu: 1m
          memory: 20Mi
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
        - mountPath: /etc/tls/private
          name: secret-prometheus-k8s-tls
        - mountPath: /etc/proxy/secrets
          name: secret-prometheus-k8s-proxy
    # kube-rbac-proxy sidecar: RBAC-guards /metrics and /federate on
    # :9092, with client-cert verification against metrics-client-ca.
    - args:
        - --secure-listen-address=0.0.0.0:9092
        - --upstream=http://127.0.0.1:9090
        - --allow-paths=/metrics,/federate
        - --config-file=/etc/kube-rbac-proxy/config.yaml
        - --tls-cert-file=/etc/tls/private/tls.crt
        - --tls-private-key-file=/etc/tls/private/tls.key
        - --client-ca-file=/etc/tls/client/client-ca.crt
        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
      image: quay.io/brancz/kube-rbac-proxy:v0.15.0
      name: kube-rbac-proxy
      ports:
        - containerPort: 9092
          name: metrics
      resources:
        requests:
          cpu: 1m
          memory: 15Mi
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
        - mountPath: /etc/tls/private
          name: secret-prometheus-k8s-tls
        - mountPath: /etc/tls/client
          name: configmap-metrics-client-ca
          readOnly: true
        - mountPath: /etc/kube-rbac-proxy
          name: secret-kube-rbac-proxy
    # kube-rbac-proxy sidecar guarding the Thanos sidecar's /metrics
    # endpoint on :10903 (bound to the pod IP).
    - args:
        - --secure-listen-address=[$(POD_IP)]:10903
        - --upstream=http://127.0.0.1:10902
        - --tls-cert-file=/etc/tls/private/tls.crt
        - --tls-private-key-file=/etc/tls/private/tls.key
        - --client-ca-file=/etc/tls/client/client-ca.crt
        - --config-file=/etc/kube-rbac-proxy/config.yaml
        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
        - --allow-paths=/metrics
        - --logtostderr=true
      env:
        - name: POD_IP
          valueFrom:
            fieldRef:
              fieldPath: status.podIP
      image: quay.io/brancz/kube-rbac-proxy:v0.15.0
      name: kube-rbac-proxy-thanos
      ports:
        - containerPort: 10903
          name: thanos-proxy
      resources:
        requests:
          cpu: 1m
          memory: 10Mi
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
        - mountPath: /etc/tls/private
          name: secret-prometheus-k8s-thanos-sidecar-tls
        - mountPath: /etc/kube-rbac-proxy
          name: secret-kube-rbac-proxy
    # Patch for the operator-generated thanos-sidecar container:
    # TLS-secured gRPC server, HTTP endpoint kept local-only.
    - args:
        - sidecar
        - --prometheus.url=http://localhost:9090/
        - --tsdb.path=/prometheus
        - --http-address=127.0.0.1:10902
        - --grpc-server-tls-cert=/etc/tls/grpc/server.crt
        - --grpc-server-tls-key=/etc/tls/grpc/server.key
        - --grpc-server-tls-client-ca=/etc/tls/grpc/ca.crt
      name: thanos-sidecar
      resources:
        requests:
          cpu: 1m
          memory: 25Mi
      terminationMessagePolicy: FallbackToLogsOnError
      volumeMounts:
        - mountPath: /etc/tls/grpc
          name: secret-grpc-tls
    # Patch for the operator-generated prometheus container
    # (termination-message policy only).
    - name: prometheus
      terminationMessagePolicy: FallbackToLogsOnError
  enableFeatures: []
  externalLabels: {}
  externalURL: https://prometheus-k8s.openshift-monitoring.svc:9091
  image: quay.io/prometheus/prometheus:v2.49.1
  # Prometheus binds to localhost only; all external traffic enters
  # through the proxy sidecars declared above.
  listenLocal: true
  maximumStartupDurationSeconds: 3600
  nodeSelector:
    kubernetes.io/os: linux
  podMetadata:
    annotations:
      target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
    labels:
      app.kubernetes.io/component: prometheus
      app.kubernetes.io/instance: k8s
      app.kubernetes.io/name: prometheus
      app.kubernetes.io/part-of: openshift-monitoring
      app.kubernetes.io/version: 2.49.1
  # Monitor/probe/rule discovery is restricted to namespaces that carry
  # the openshift.io/cluster-monitoring=true label.
  podMonitorNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  podMonitorSelector: {}
  priorityClassName: system-cluster-critical
  probeNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  probeSelector: {}
  replicas: 2
  resources:
    requests:
      cpu: 70m
      memory: 1Gi
  ruleNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  ruleSelector: {}
  # NOTE(review): null selectors — ScrapeConfig CRD discovery appears
  # to be disabled here; confirm against prometheus-operator semantics.
  scrapeConfigNamespaceSelector: null
  scrapeConfigSelector: null
  secrets:
    - prometheus-k8s-tls
    - prometheus-k8s-proxy
    - prometheus-k8s-thanos-sidecar-tls
    - kube-rbac-proxy
    - metrics-client-certs
  securityContext:
    fsGroup: 65534
    runAsNonRoot: true
    runAsUser: 65534
  serviceAccountName: prometheus-k8s
  serviceMonitorNamespaceSelector:
    matchLabels:
      openshift.io/cluster-monitoring: "true"
  serviceMonitorSelector: {}
  thanos:
    image: quay.io/thanos/thanos:v0.33.0
    resources:
      requests:
        cpu: 1m
        memory: 100Mi
    version: 0.33.0
  version: 2.49.1
  web:
    httpConfig:
      headers:
        contentSecurityPolicy: frame-ancestors 'none'