-
Notifications
You must be signed in to change notification settings - Fork 354
/
daemonset.yaml
183 lines (183 loc) · 6.33 KB
/
daemonset.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
# DaemonSet running a prometheus node-exporter pod on every Linux node of an
# OpenShift cluster (namespace openshift-monitoring, managed by the
# cluster-monitoring-operator per the managed-by label). Each pod carries:
#   * node-exporter        — collects host metrics, listens on loopback only
#   * kube-rbac-proxy      — TLS/RBAC front-end exposing port 9100 on the host
#   * init-textfile (init) — pre-populates the textfile-collector directory
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app.kubernetes.io/component: exporter
    app.kubernetes.io/managed-by: cluster-monitoring-operator
    app.kubernetes.io/name: node-exporter
    app.kubernetes.io/part-of: openshift-monitoring
    app.kubernetes.io/version: 1.6.0
  name: node-exporter
  namespace: openshift-monitoring
spec:
  selector:
    matchLabels:
      app.kubernetes.io/component: exporter
      app.kubernetes.io/name: node-exporter
      app.kubernetes.io/part-of: openshift-monitoring
  template:
    metadata:
      annotations:
        # Makes `kubectl logs`/`exec` default to the node-exporter container.
        kubectl.kubernetes.io/default-container: node-exporter
        # OpenShift workload-partitioning hint; value is a JSON string, hence
        # the single quotes around it.
        target.workload.openshift.io/management: '{"effect": "PreferredDuringScheduling"}'
      labels:
        app.kubernetes.io/component: exporter
        app.kubernetes.io/managed-by: cluster-monitoring-operator
        app.kubernetes.io/name: node-exporter
        app.kubernetes.io/part-of: openshift-monitoring
        app.kubernetes.io/version: 1.6.0
    spec:
      automountServiceAccountToken: true
      containers:
        # Main exporter. Binds to 127.0.0.1 only — all external access goes
        # through the kube-rbac-proxy sidecar below (pod shares the host
        # network namespace, so loopback is reachable by the sidecar).
        - args:
            - --web.listen-address=127.0.0.1:9100
            # Host filesystems are mounted read-only under /host/* (see
            # volumeMounts) so the exporter reads node-level data.
            - --path.sysfs=/host/sys
            - --path.rootfs=/host/root
            - --path.udev.data=/host/root/run/udev/data
            - --no-collector.wifi
            # Skip pseudo/ephemeral mount points (container runtimes, kubelet
            # pod volumes) to cut metric cardinality.
            - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/k3s/containerd/.+|var/lib/docker/.+|var/lib/kubelet/pods/.+)($|/)
            # Same exclusion list for netclass and netdev: veth pairs,
            # 15-hex-digit generated names, OVN/bridge/tunnel/calico devices.
            - --collector.netclass.ignored-devices=^(veth.*|[a-f0-9]{15}|enP.*|ovn-k8s-mp[0-9]*|br-ex|br-int|br-ext|br[0-9]*|tun[0-9]*|cali[a-f0-9]*)$
            - --collector.netdev.device-exclude=^(veth.*|[a-f0-9]{15}|enP.*|ovn-k8s-mp[0-9]*|br-ex|br-int|br-ext|br[0-9]*|tun[0-9]*|cali[a-f0-9]*)$
            - --collector.cpu.info
            # Directory is seeded by the init-textfile init container via the
            # shared node-exporter-textfile emptyDir volume.
            - --collector.textfile.directory=/var/node_exporter/textfile
            - --no-collector.btrfs
          # Shell wrapper that caps GOMAXPROCS at min(4, host CPU count)
          # before exec'ing the exporter; "$0" "$@" forwards the args list
          # above into the exec'd binary.
          command:
            - /bin/sh
            - -c
            - |
              export GOMAXPROCS=4
              # We don't take CPU affinity into account as the container doesn't have integer CPU requests.
              # In case of error, fallback to the default value.
              NUM_CPUS=$(grep -c '^processor' "/proc/cpuinfo" 2>/dev/null || echo "0")
              if [ "$NUM_CPUS" -lt "$GOMAXPROCS" ]; then
                export GOMAXPROCS="$NUM_CPUS"
              fi
              echo "ts=$(date --iso-8601=seconds) num_cpus=$NUM_CPUS gomaxprocs=$GOMAXPROCS"
              exec /bin/node_exporter "$0" "$@"
          image: quay.io/prometheus/node-exporter:v1.6.0
          name: node-exporter
          resources:
            requests:
              cpu: 8m
              memory: 32Mi
          securityContext: {}
          terminationMessagePolicy: FallbackToLogsOnError
          volumeMounts:
            # HostToContainer propagation so mounts appearing on the host
            # after pod start are still visible to the exporter.
            - mountPath: /host/sys
              mountPropagation: HostToContainer
              name: sys
              readOnly: true
            - mountPath: /host/root
              mountPropagation: HostToContainer
              name: root
              readOnly: true
            # Read-only here; only the init container writes into this volume.
            - mountPath: /var/node_exporter/textfile
              name: node-exporter-textfile
              readOnly: true
          workingDir: /var/node_exporter/textfile
        # TLS/RBAC sidecar: terminates TLS on [pod IP]:9100 and proxies
        # authorized requests to the exporter on 127.0.0.1:9100.
        - args:
            - --logtostderr
            # Pod IP is injected via the IP env var (downward API below);
            # brackets keep the address valid for IPv6 pods.
            - --secure-listen-address=[$(IP)]:9100
            - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384,TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305,TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305
            - --upstream=http://127.0.0.1:9100/
            # Serving cert/key and client CA come from the secret/configMap
            # volumes mounted below.
            - --tls-cert-file=/etc/tls/private/tls.crt
            - --tls-private-key-file=/etc/tls/private/tls.key
            - --client-ca-file=/etc/tls/client/client-ca.crt
            - --config-file=/etc/kube-rbac-policy/config.yaml
          env:
            # Downward-API injection of the pod IP used in
            # --secure-listen-address above.
            - name: IP
              valueFrom:
                fieldRef:
                  fieldPath: status.podIP
          image: quay.io/brancz/kube-rbac-proxy:v0.14.1
          name: kube-rbac-proxy
          ports:
            # hostNetwork is true, so this is effectively the node's :9100.
            - containerPort: 9100
              hostPort: 9100
              name: https
          resources:
            requests:
              cpu: 1m
              memory: 15Mi
          # Locked-down sidecar: non-root, no capabilities, read-only root fs.
          securityContext:
            allowPrivilegeEscalation: false
            capabilities:
              drop:
                - ALL
            readOnlyRootFilesystem: true
            runAsGroup: 65532
            runAsNonRoot: true
            runAsUser: 65532
          terminationMessagePolicy: FallbackToLogsOnError
          volumeMounts:
            - mountPath: /etc/tls/private
              name: node-exporter-tls
              readOnly: false
            - mountPath: /etc/tls/client
              name: metrics-client-ca
              readOnly: false
            - mountPath: /etc/kube-rbac-policy
              name: node-exporter-kube-rbac-proxy-config
              readOnly: true
      # Host namespaces are required so the exporter can observe node-level
      # network devices and processes.
      hostNetwork: true
      hostPID: true
      initContainers:
        # Runs every executable under /node_exporter/collectors/init (if the
        # directory exists) to seed the textfile-collector directory; note it
        # runs privileged as root, unlike the main containers.
        - command:
            - /bin/sh
            - -c
            - '[[ ! -d /node_exporter/collectors/init ]] || find /node_exporter/collectors/init
              -perm /111 -type f -exec {} \;'
          env:
            - name: TMPDIR
              value: /tmp
          image: quay.io/prometheus/node-exporter:v1.6.0
          name: init-textfile
          resources:
            requests:
              cpu: 1m
              memory: 1Mi
          securityContext:
            privileged: true
            runAsUser: 0
          terminationMessagePolicy: FallbackToLogsOnError
          volumeMounts:
            # Writable here; the main container mounts the same volume
            # read-only.
            - mountPath: /var/node_exporter/textfile
              name: node-exporter-textfile
              readOnly: false
            - mountPath: /var/log/wtmp
              name: node-exporter-wtmp
              readOnly: true
          workingDir: /var/node_exporter/textfile
      nodeSelector:
        kubernetes.io/os: linux
      priorityClassName: system-cluster-critical
      securityContext: {}
      serviceAccountName: node-exporter
      # Bare Exists toleration: schedule onto every node regardless of taints.
      tolerations:
        - operator: Exists
      volumes:
        - hostPath:
            path: /sys
          name: sys
        - hostPath:
            path: /
          name: root
        # Shared scratch space between init-textfile (writer) and
        # node-exporter (reader).
        - emptyDir: {}
          name: node-exporter-textfile
        - name: node-exporter-tls
          secret:
            secretName: node-exporter-tls
        # type: File — pod creation fails if /var/log/wtmp is absent on the
        # host.
        - hostPath:
            path: /var/log/wtmp
            type: File
          name: node-exporter-wtmp
        - configMap:
            name: metrics-client-ca
          name: metrics-client-ca
        - name: node-exporter-kube-rbac-proxy-config
          secret:
            secretName: node-exporter-kube-rbac-proxy-config
  # Allow up to 10% of nodes to be updating the exporter simultaneously.
  updateStrategy:
    rollingUpdate:
      maxUnavailable: 10%
    type: RollingUpdate