-
Notifications
You must be signed in to change notification settings - Fork 114
/
bootstrap_master.sh
executable file
·401 lines (353 loc) · 14.5 KB
/
bootstrap_master.sh
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
#!/usr/bin/env bash
# Bootstrap a Kubernetes master node for a Contiv/VPP (or Calico) vagrant cluster.
# Args: $1 = master hostname, $2 = master IP (used for stn/ipv6 scenarios),
#       $3 = backup-master flag ("true" for every master except the first).
# Relies on env vars exported by the Vagrantfile: dep_scenario, contiv_dir,
# image_tag, dep_env, and more further below.
set -ex
# Quote the expansion: the original `echo Args passed: [[ $@ ]]` let both the
# args and the literal `[[`/`]]` tokens undergo glob expansion.
echo "Args passed: [$*]"
# variable should be set to true unless the script is executed for the first master
backup_master="$3"
# Pull images if not present
if [ -f /vagrant/images.tar ]; then
  echo 'Found saved images at /vagrant/images.tar'
  docker load -i /vagrant/images.tar
elif [ "${dep_scenario}" != "calico" ]; then
  echo "Pulling Contiv-VPP plugin images..."
  sudo -E "${contiv_dir}/k8s/pull-images.sh" -b "${image_tag}"
fi
# --------------------------------------------------------
# ---> Build Contiv/VPP-vswitch Development Image <---
# --------------------------------------------------------
# Only for dev deployments: installs a minimal X stack + GoLand IDE and builds
# (or loads a cached copy of) the development vswitch image.
if [ "${dep_env}" = "dev" ]; then
  # wait for apt auto-update to finish so we don't get conflicts.
  # Run the pipeline directly as the loop condition — the original wrapped it
  # in backticks, executing grep's (empty) stdout as a command; the '[a]pt'
  # trick keeps the grep process itself out of the match and is quoted so the
  # bracket expression cannot glob against files in the cwd.
  while ps aux | grep -q '[a]pt'; do
    sleep 20
  done
  sudo -E apt-get install -y xorg \
    openbox
  echo "Downloading and installing Goland..."
  curl -sL "https://download.jetbrains.com/go/goland-${goland_version}.tar.gz" > /tmp/goland.tar.gz
  tar -xvzf /tmp/goland.tar.gz --directory /home/vagrant >/dev/null 2>&1
  if [ -f /vagrant/dev-contiv-vswitch.tar ]; then
    echo "Found saved dev image at /vagrant/dev-contiv-vswitch.tar"
    docker load -i /vagrant/dev-contiv-vswitch.tar
  else
    # keep the vagrant shared folder out of the docker build context
    echo "vagrant" >> "${contiv_dir}/.dockerignore"
    echo "Building development contivpp/vswitch image..."
    cd "${contiv_dir}/docker" && ./build-all.sh
  fi
fi
# --------------------------------------------------------
# ---> Create token and export it with kube master IP <---
# --------------------------------------------------------
# Only the first master generates the cluster-wide kubeadm token; backup
# masters reuse the value already appended to /vagrant/config/init.
if [ "$backup_master" != "true" ]; then
echo "Exporting Kube Master IP and Kubeadm Token..."
echo "export KUBEADM_TOKEN=$(kubeadm token generate)" >> /vagrant/config/init
fi
# Determine KUBE_MASTER_IP: for the default (non-stn, non-ipv6) case take the
# second address from 'hostname -I' (presumably the private vagrant NIC —
# TODO confirm against the Vagrantfile); otherwise the caller supplies it as $2.
if [ "${dep_scenario}" != 'nostn' ] && [ "${ip_version}" != 'ipv6' ]; then
echo "export KUBE_MASTER_IP=$(hostname -I | cut -f2 -d' ')" >> /vagrant/config/init
source /vagrant/config/init
# rewrite the 127.0.1.1 hostname alias so the k8s hostname resolves to the real node IP
sed 's/127\.0\.1\.1.*k8s.*/'"$KUBE_MASTER_IP"' '"$1"'/' -i /etc/hosts
# keep cluster-internal traffic away from any configured HTTP proxy
echo "export no_proxy='$1,$KUBE_MASTER_IP,localhost,127.0.0.1'" >> /etc/profile.d/envvar.sh
echo "export no_proxy='$1,$KUBE_MASTER_IP,localhost,127.0.0.1'" >> /home/vagrant/.profile
else
echo "export KUBE_MASTER_IP=$2" >> /vagrant/config/init
source /vagrant/config/init
sed 's/127\.0\.1\.1.*k8s.*/'"$2"' '"$1"'/' -i /etc/hosts
echo "export no_proxy='$1,$KUBE_MASTER_IP,localhost,127.0.0.1'" >> /etc/profile.d/envvar.sh
echo "export no_proxy='$1,$KUBE_MASTER_IP,localhost,127.0.0.1'" >> /home/vagrant/.profile
fi
# load the freshly written exports into this shell
source /etc/profile.d/envvar.sh
source /home/vagrant/.profile
# --------------------------------------------------------
# --------------> Kubeadm & Networking <------------------
# --------------------------------------------------------
# Based on kubernetes version, disable hugepages in Kubelet
# Initialize Kubernetes master
# Select service/pod CIDRs for the chosen deployment scenario:
#   ipv6            -> dedicated v6 ranges
#   contiv (not calico/calicovpp) -> wide 10.0.0.0/8 pod range
#   otherwise       -> keep the defaults below
service_cidr="10.96.0.0/12"
pod_network_cidr="10.10.0.0/16"
case "${ip_version}" in
  ipv6)
    pod_network_cidr="2001::/16"
    service_cidr="2096::/110"
    ;;
  *)
    if [ "${dep_scenario}" != 'calico' ] && [ "${dep_scenario}" != 'calicovpp' ]; then
      pod_network_cidr="10.0.0.0/8"
    fi
    ;;
esac
# Extract the k8s minor version (e.g. "13" from "1.13.0"): the location of
# kubelet's extra-args file changed between k8s 1.10 and 1.11.
split_k8s_version="$(cut -d "." -f 2 <<< "${k8s_version}")"
if [ $split_k8s_version -gt 10 ] ; then
# k8s >= 1.11: kubelet extra args live in /etc/default/kubelet.
# HugePages=false because VPP manages hugepages itself; --node-ip pins
# kubelet to KUBE_MASTER_IP.
if [ "${node_os_release}" == "16.04" ] ; then
sed -i '1s/.*/KUBELET_EXTRA_ARGS=--node-ip='"$KUBE_MASTER_IP"' --feature-gates HugePages=false/' /etc/default/kubelet
else
# non-16.04 Ubuntu uses systemd-resolved: point kubelet at the real resolv.conf
sed -i '1s/.*/KUBELET_EXTRA_ARGS=--node-ip='"$KUBE_MASTER_IP"' --feature-gates HugePages=false --resolv-conf=\/run\/systemd\/resolve\/resolv.conf/' /etc/default/kubelet
fi
systemctl daemon-reload
systemctl restart kubelet
if [ "${dep_scenario}" != 'calico' ] && [ "${dep_scenario}" != 'calicovpp' ]; then
if [ ${master_nodes} -gt 1 ]; then
# Multi-master (HA): drive kubeadm with a config file so the shared
# control-plane endpoint 10.20.0.100:6443 can be specified.
cat > kubeadm.cfg <<EOF
---
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: $KUBEADM_TOKEN
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: $KUBE_MASTER_IP
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: k8s-master
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "10.20.0.100:6443"
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v1.13.0
networking:
dnsDomain: cluster.local
podSubnet: "$pod_network_cidr"
serviceSubnet: $service_cidr
scheduler: {}
EOF
if [ "$backup_master" != "true" ]; then
# First master: init the control plane; the captured output is saved to the
# shared folder — its last token is the discovery CA cert hash used below.
echo "$(kubeadm init --config=kubeadm.cfg)" >> /vagrant/config/cert
else
# since master join ignores node-ip arg in kubelet config
# modify default route in order to suggest kubelet choosing the correct IP
ip route del `ip route | grep default`
ip route add default via 10.20.0.100
# copy certificates from the first master node
mkdir -p /etc/kubernetes/pki/etcd
cp /vagrant/certs/* /etc/kubernetes/pki/
mv /etc/kubernetes/pki/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /etc/kubernetes/pki/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
# last whitespace-separated field of the saved init output is the CA cert hash
hash=$(awk 'END {print $NF}' /vagrant/config/cert)
kubeadm join --token "${KUBEADM_TOKEN}" 10.20.0.100:6443 --discovery-token-ca-cert-hash "$hash" --experimental-control-plane
fi
else
# Single master, Contiv scenario: plain flag-driven kubeadm init.
echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
fi
else
# Calico / CalicoVPP scenario (no multi-master path here).
echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
fi
else
# k8s <= 1.10: inject the extra args into the kubeadm systemd drop-in instead.
sed -i '4 a Environment="KUBELET_EXTRA_ARGS=--node-ip='"$KUBE_MASTER_IP"' --feature-gates HugePages=false"' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
systemctl daemon-reload
systemctl restart kubelet
echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
fi
echo "Create folder to store kubernetes and network configuration"
mkdir -p /home/vagrant/.kube
# NOTE(review): 'cp -i' prompts before overwriting; with no tty attached an
# existing config would make this step fail — presumably the target never
# exists on a fresh VM; confirm.
sudo cp -i /etc/kubernetes/admin.conf /home/vagrant/.kube/config
sudo chown vagrant:vagrant -R /home/vagrant/.kube
sleep 2;
# Only the first master publishes its CA material; backup masters pick these
# files up from /vagrant/certs before running kubeadm join (see above).
if [ "$backup_master" != "true" ]; then
# copy the certs into shared folder
rm -rf /vagrant/certs
mkdir /vagrant/certs
cp /etc/kubernetes/pki/etcd/ca.crt /vagrant/certs/etcd-ca.crt
cp /etc/kubernetes/pki/etcd/ca.key /vagrant/certs/etcd-ca.key
cp /etc/kubernetes/admin.conf /vagrant/certs/
cp /etc/kubernetes/pki/front-proxy-ca.* /vagrant/certs/
cp /etc/kubernetes/pki/ca.* /vagrant/certs/
cp /etc/kubernetes/pki/sa.* /vagrant/certs/
fi
# Generate the NodeConfig CRD manifest (k8s/node-config/crd.yaml) for the
# master and every worker; in STN mode also install the steal-the-NIC daemon
# and set stn_config so helm steals interface enp0s8.
# Globals read: ip_version, dep_scenario, num_nodes, contiv_dir.
# Globals written: gw, counter, stn_config (consumed later by applyVPPnetwork).
applySTNScenario() {
gw="10.130.1.254";
if [ "${ip_version}" = "ipv6" ]; then
gw="fe10::2:100";
fi
if [ "${dep_scenario}" = "nostn" ]; then
# Generate node config for use with CRD
cat > ${contiv_dir}/k8s/node-config/crd.yaml <<EOL
# Configuration for node config in the cluster
apiVersion: nodeconfig.contiv.vpp/v1
kind: NodeConfig
metadata:
name: k8s-master
spec:
mainVPPInterface:
interfaceName: "GigabitEthernet0/8/0"
gateway: $gw
---
EOL
counter=1;
# append one NodeConfig document per worker (k8s-worker1 .. k8s-workerN)
until ((counter > "${num_nodes}"))
do
# Generate node config for use with CRD
cat <<EOL >> ${contiv_dir}/k8s/node-config/crd.yaml
# Configuration for node config in the cluster
apiVersion: nodeconfig.contiv.vpp/v1
kind: NodeConfig
metadata:
name: k8s-worker$counter
spec:
mainVPPInterface:
interfaceName: "GigabitEthernet0/8/0"
gateway: $gw
---
EOL
((counter++))
done
else
# STN scenario: fetch and run the official steal-the-NIC installer
curl -s https://raw.githubusercontent.com/contiv/vpp/master/k8s/stn-install.sh > /tmp/contiv-stn.sh
chmod +x /tmp/contiv-stn.sh
sudo /tmp/contiv-stn.sh
# For use without CRD
stn_config="--set contiv.stealInterface=enp0s8"
# Generate node config for use with CRD
# (note: unlike the nostn variant, no gateway field is emitted here)
cat > ${contiv_dir}/k8s/node-config/crd.yaml <<EOL
# Configuration for node config in the cluster
apiVersion: nodeconfig.contiv.vpp/v1
kind: NodeConfig
metadata:
name: k8s-master
spec:
mainVPPInterface:
interfaceName: "GigabitEthernet0/8/0"
---
EOL
counter=1;
# append one NodeConfig document per worker (k8s-worker1 .. k8s-workerN)
until ((counter > "${num_nodes}"))
do
# Generate node config for use with CRD
cat <<EOL >> ${contiv_dir}/k8s/node-config/crd.yaml
# Configuration for node config in the cluster
apiVersion: nodeconfig.contiv.vpp/v1
kind: NodeConfig
metadata:
name: k8s-worker$counter
spec:
mainVPPInterface:
interfaceName: "GigabitEthernet0/8/0"
---
EOL
((counter++))
done
fi
}
applyVPPnetwork() {
  # Render the Contiv/VPP helm chart into a manifest, apply it, and (unless
  # CRD is disabled) push the NodeConfig CRs generated by applySTNScenario.
  # Globals read: ip_version, helm_extra_opts, image_tag, crd_disabled,
  #               dep_scenario, stn_config, contiv_dir.
  if [[ $ip_version == "ipv6" ]]; then
    # Replace coredns's /etc/resolv.conf forward target with the IPv6 gateway.
    # Truncate with '>' — the original '>>' appended, so when this branch and
    # the no-overlay branch below both ran, the file held two configmap copies
    # with no '---' separator, i.e. invalid YAML for kubectl apply.
    kubectl get configmap coredns \
      -o yaml \
      -n kube-system \
      --export > coredns-config.yaml
    sed -i 's/\/etc\/resolv.conf/fe10::2:100/' coredns-config.yaml
    kubectl apply -f coredns-config.yaml -n kube-system
  fi
  if [[ $helm_extra_opts == *"contiv.useNoOverlay=true"* ]]; then
    # Disable coredns loop detection plugin ('>' for the same reason as above)
    kubectl get configmap coredns \
      -o yaml \
      -n kube-system \
      --export > coredns-config.yaml
    sed -i '/loop/d' coredns-config.yaml
    kubectl apply -f coredns-config.yaml -n kube-system
  fi
  helm_opts="${helm_extra_opts}"
  if [ "${image_tag}" != "latest" ]; then
    helm_opts="${helm_opts} --set vswitch.image.tag=${image_tag} --set cni.image.tag=${image_tag} --set ksr.image.tag=${image_tag} --set crd.image.tag=${image_tag}"
  fi
  if [ "${ip_version}" = "ipv6transport" ] || [ "${ip_version}" = "ipv6" ]; then
    helm_opts="$helm_opts --set contiv.ipamConfig.nodeInterconnectCIDR=fe10::2:0/119"
    helm_opts="$helm_opts --set contiv.ipamConfig.defaultGateway=fe10::2:100"
  fi
  if [ "${ip_version}" = "ipv6" ]; then
    if [ "${crd_disabled}" = "true" ]; then
      helm_opts="$helm_opts --set contiv.ipamConfig.podSubnetCIDR=2001::/48 --set contiv.ipamConfig.podSubnetOneNodePrefixLen=64"
      helm_opts="$helm_opts --set contiv.ipamConfig.vppHostSubnetCIDR=2002::/64 --set contiv.ipamConfig.vppHostSubnetOneNodePrefixLen=112"
      helm_opts="$helm_opts --set contiv.ipamConfig.vxlanCIDR=2005::/112"
    else
      helm_opts="$helm_opts --set contiv.ipamConfig.contivCIDR=fe10::/64"
    fi
    helm_opts="$helm_opts --set contiv.ipamConfig.serviceCIDR=2096::/110"
  else
    if [ "${crd_disabled}" = "false" ]; then
      helm_opts="$helm_opts --set contiv.ipamConfig.contivCIDR=10.128.0.0/14"
    fi
  fi
  if [ "${crd_disabled}" = "false" ]; then
    # Deploy contiv-vpp networking with CRD.
    # $helm_opts / $stn_config stay unquoted on purpose: they carry multiple
    # whitespace-separated --set flags that must word-split.
    helm template --name vagrant $helm_opts $stn_config --set contiv.routeServiceCIDRToVPP=true --set contiv.tapv2RxRingSize=1024 --set contiv.tapv2TxRingSize=1024 --set contiv.crdNodeConfigurationDisabled=false --set contiv.ipamConfig.nodeInterconnectCIDR="" "${contiv_dir}"/k8s/contiv-vpp -f "${contiv_dir}"/k8s/contiv-vpp/values.yaml,"${contiv_dir}"/k8s/contiv-vpp/values-latest.yaml > "${contiv_dir}"/k8s/contiv-vpp/manifest.yaml
    kubectl apply -f "${contiv_dir}"/k8s/contiv-vpp/manifest.yaml
    # Wait until crd agent is ready before pushing NodeConfig CRs
    crd_ready="";
    while [ "$crd_ready" != "1" ];
    do
      echo "Waiting for crd agent to come up...";
      crd_ready=$(kubectl get daemonset contiv-crd -n kube-system --template={{.status.numberReady}});
      sleep 5;
    done;
    kubectl apply -f "${contiv_dir}"/k8s/node-config/crd.yaml
  else
    if [ "${dep_scenario}" = "nostn" ] && [ "${ip_version}" = "ipv4" ]; then
      gateway_config="--set contiv.ipamConfig.defaultGateway=192.168.16.100"
    fi
    # Deploy contiv-vpp networking without CRD
    helm template --name vagrant $helm_opts $stn_config $gateway_config --set contiv.routeServiceCIDRToVPP=true --set contiv.tapv2RxRingSize=1024 --set contiv.tapv2TxRingSize=1024 "${contiv_dir}"/k8s/contiv-vpp -f "${contiv_dir}/"k8s/contiv-vpp/values.yaml,"${contiv_dir}"/k8s/contiv-vpp/values-latest.yaml > "${contiv_dir}"/k8s/contiv-vpp/manifest.yaml
    kubectl apply -f "${contiv_dir}"/k8s/contiv-vpp/manifest.yaml
  fi
  echo "Schedule Pods on master"
  kubectl taint nodes --all node-role.kubernetes.io/master-
  echo "Deploy contiv UI"
  kubectl apply -f "${contiv_dir}"/k8s/contiv-vpp-ui.yaml
}
applyCalicoNetwork() {
  # Install plain Calico v3.3 (Kubernetes datastore) and allow pods on master.
  local manifest
  echo "Deploy Calico"
  for manifest in \
    https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/rbac-kdd.yaml \
    https://docs.projectcalico.org/v3.3/getting-started/kubernetes/installation/hosted/kubernetes-datastore/calico-networking/1.7/calico.yaml
  do
    kubectl apply -f "$manifest"
  done
  echo "Schedule Pods on master"
  kubectl taint nodes --all node-role.kubernetes.io/master-
}
applyCalicoVPPNetwork() {
  # Deploy the Calico-on-VPP dataplane, install calicoctl, and push BGP config.
  # Globals read: contiv_dir.
  echo "Deploy CalicoVPP"
  kubectl apply -f "${contiv_dir}"/vagrant/calico-vpp/rbac-kdd.yaml
  kubectl apply -f "${contiv_dir}"/vagrant/calico-vpp/calico.yaml
  kubectl apply -f "${contiv_dir}"/vagrant/calico-vpp/calico-vpp.yaml
  echo "Label master with cni-type=calico"
  kubectl label nodes k8s-master cni-type=calico
  echo "Install calicoctl"
  wget --progress=bar:force https://github.com/projectcalico/calicoctl/releases/download/v3.3.2/calicoctl
  chmod +x calicoctl
  sudo mv calicoctl /usr/local/bin/
  # -p makes this idempotent: plain 'mkdir' errors out when /etc/calico
  # already exists (e.g. on re-provision)
  sudo mkdir -p /etc/calico/
  sudo cp "${contiv_dir}"/vagrant/calico-vpp/calicoctl.cfg /etc/calico/
  echo "Configure BGP"
  # the Calico datastore may not be reachable yet; retry until apply succeeds
  until sudo calicoctl apply -f "${contiv_dir}"/vagrant/calico-vpp/bgp.yaml
  do
    sleep 1
    echo "retry..."
  done
}
# Network deployment runs only on the first (non-backup) master; backup
# masters join a cluster whose CNI is already installed.
if [ "$backup_master" != "true" ]; then
  stn_config=""
  export stn_config
  applySTNScenario
  case "${dep_scenario}" in
    calico)
      export -f applyCalicoNetwork
      su vagrant -c "bash -c applyCalicoNetwork"
      ;;
    calicovpp)
      export stn_config="${stn_config} --set contiv.useNoOverlay=true --set contiv.ipamConfig.useExternalIPAM=true --set contiv.ipamConfig.podSubnetCIDR=10.10.0.0/16 --set vswitch.useNodeAffinity=true"
      export -f applyVPPnetwork
      su vagrant -c "bash -c applyVPPnetwork"
      export -f applyCalicoVPPNetwork
      su vagrant -c "bash -c applyCalicoVPPNetwork"
      ;;
    *)
      # nostn / stn
      export -f applyVPPnetwork
      su vagrant -c "bash -c applyVPPnetwork"
      ;;
  esac
fi