-
Notifications
You must be signed in to change notification settings - Fork 82
/
daemonset.yaml
170 lines (162 loc) · 5.98 KB
/
daemonset.yaml
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
kind: DaemonSet
apiVersion: apps/v1
# name, namespace and labels are set at runtime
spec:
  template:
    spec:
      serviceAccountName: dns
      priorityClassName: system-node-critical
      containers:
      - name: dns
        # image is set at runtime
        imagePullPolicy: IfNotPresent
        terminationMessagePolicy: FallbackToLogsOnError
        command: [ "coredns" ]
        args: [ "-conf", "/etc/coredns/Corefile" ]
        volumeMounts:
        - name: config-volume
          mountPath: /etc/coredns
          readOnly: true
        ports:
        - containerPort: 5353
          name: dns
          protocol: UDP
        - containerPort: 5353
          name: dns-tcp
          protocol: TCP
        readinessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 10
          periodSeconds: 10
          successThreshold: 1
          failureThreshold: 3
          timeoutSeconds: 10
        livenessProbe:
          httpGet:
            path: /health
            port: 8080
            scheme: HTTP
          initialDelaySeconds: 60
          timeoutSeconds: 5
          successThreshold: 1
          failureThreshold: 5
        resources:
          limits:
            memory: 512Mi
          requests:
            cpu: 50m
            memory: 70Mi
      - name: kube-rbac-proxy
        # image is set at runtime
        args:
        - --logtostderr
        - --secure-listen-address=:9154
        - --tls-cipher-suites=TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256,TLS_RSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_ECDSA_WITH_AES_128_CBC_SHA256,TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA256
        - --upstream=http://127.0.0.1:9153/
        - --tls-cert-file=/etc/tls/private/tls.crt
        - --tls-private-key-file=/etc/tls/private/tls.key
        ports:
        - containerPort: 9154
          name: metrics
        resources:
          requests:
            cpu: 10m
            memory: 40Mi
        volumeMounts:
        - mountPath: /etc/tls/private
          name: metrics-tls
          readOnly: true
      - name: dns-node-resolver
        # image is set at runtime
        imagePullPolicy: IfNotPresent
        terminationMessagePolicy: FallbackToLogsOnError
        securityContext:
          privileged: true
        volumeMounts:
        - name: hosts-file
          mountPath: /etc/hosts
        # env NAMESERVER and CLUSTER_DOMAIN are set at runtime
        env:
        - name: SERVICES
          # Comma or space separated list of services
          # NOTE: For now, ensure these are relative names; for each relative name,
          # an alias with the CLUSTER_DOMAIN suffix will also be added.
          value: "image-registry.openshift-image-registry.svc"
        command:
        - /bin/bash
        - -c
        - |
          #!/bin/bash
          set -uo pipefail
          trap 'jobs -p | xargs kill || true; wait; exit 0' TERM
          OPENSHIFT_MARKER="openshift-generated-node-resolver"
          HOSTS_FILE="/etc/hosts"
          TEMP_FILE="/etc/hosts.tmp"
          IFS=', ' read -r -a services <<< "${SERVICES}"
          # Make a temporary file with the old hosts file's attributes.
          cp -f --attributes-only "${HOSTS_FILE}" "${TEMP_FILE}"
          while true; do
            declare -A svc_ips
            for svc in "${services[@]}"; do
              # Fetch service IP from cluster dns if present. We make several tries
              # to do it: IPv4, IPv6, IPv4 over TCP and IPv6 over TCP. The two last ones
              # are for deployments with Kuryr on older OpenStack (OSP13) - those do not
              # support UDP loadbalancers and require reaching DNS through TCP.
              cmds=('dig -t A @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"'
                    'dig -t AAAA @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"'
                    'dig -t A +tcp @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"'
                    'dig -t AAAA +tcp @"${NAMESERVER}" +short "${svc}.${CLUSTER_DOMAIN}"')
              for i in ${!cmds[*]}
              do
                ips=($(eval "${cmds[i]}"))
                if [[ "$?" -eq 0 && "${#ips[@]}" -ne 0 ]]; then
                  svc_ips["${svc}"]="${ips[@]}"
                  break
                fi
              done
            done
            # Update /etc/hosts only if we get valid service IPs
            # We will not update /etc/hosts when there is coredns service outage or api unavailability
            # Stale entries could exist in /etc/hosts if the service is deleted
            if [[ "${#svc_ips[@]}" -ne 0 ]]; then
              # Build a new hosts file from /etc/hosts with our custom entries filtered out
              grep -v "# ${OPENSHIFT_MARKER}" "${HOSTS_FILE}" > "${TEMP_FILE}"
              # Append resolver entries for services
              for svc in "${!svc_ips[@]}"; do
                for ip in ${svc_ips[${svc}]}; do
                  echo "${ip} ${svc} ${svc}.${CLUSTER_DOMAIN} # ${OPENSHIFT_MARKER}" >> "${TEMP_FILE}"
                done
              done
              # TODO: Update /etc/hosts atomically to avoid any inconsistent behavior
              # Replace /etc/hosts with our modified version if needed
              cmp "${TEMP_FILE}" "${HOSTS_FILE}" || cp -f "${TEMP_FILE}" "${HOSTS_FILE}"
              # TEMP_FILE is not removed to avoid file create/delete and attributes copy churn
            fi
            sleep 60 & wait
            unset svc_ips
          done
        resources:
          requests:
            cpu: 5m
      dnsPolicy: Default
      nodeSelector:
        kubernetes.io/os: linux
      volumes:
      - name: config-volume
        configMap:
          # Name is set at runtime
          items:
          - key: Corefile
            path: Corefile
      - name: hosts-file
        hostPath:
          path: /etc/hosts
          type: File
      - name: metrics-tls
        # secretName is set at runtime
      tolerations:
      # DNS needs to run everywhere. Tolerate all taints
      - operator: Exists