/
kuryr_kubernetes
1390 lines (1241 loc) · 42.9 KB
/
kuryr_kubernetes
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
#!/bin/bash
#
# lib/kuryr
# Utilities for kuryr-kubernetes devstack
# bind_for_kubelet
# Description: Creates an OVS internal port so that baremetal kubelet will be
# able to make both liveness and readiness http/tcp probes.
# Params:
# project - Id or name of the project used for kuryr devstack
# port - Port to open for K8s API, relevant only for OpenStack infra
# Dependencies:
# (none)
function ovs_bind_for_kubelet() {
local port_id
local port_mac
local port_ips
local port_subnets
local prefix
local project_id
local port_number
local security_group
local ifname
local service_subnet_cidr
local pod_subnet_gw
project_id="$1"
port_number="$2"
security_group=$(openstack security group list \
--project "$project_id" -c ID -c Name -f value | \
awk '{if ($2=="default") print $1}')
port_id=$(openstack port create \
--device-owner compute:kuryr \
--project "$project_id" \
--security-group "$security_group" \
--host "${HOSTNAME}" \
--network "${KURYR_NEUTRON_DEFAULT_POD_NET}" \
-f value -c id \
kubelet-"${HOSTNAME}")
# Need to enable Amphorae subnet access to the kubelet iface for API
# access
local use_octavia
use_octavia=$(trueorfalse True KURYR_K8S_LBAAS_USE_OCTAVIA)
if [[ "$use_octavia" == "True" ]]; then
openstack port set "$port_id" --security-group service_pod_access
fi
if [[ "$KURYR_SG_DRIVER" == "namespace" ]]; then
openstack port set "$port_id" --security-group allow_from_namespace
openstack port set "$port_id" --security-group allow_from_default
fi
ifname="kubelet${port_id}"
ifname="${ifname:0:14}"
service_subnet_cidr=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
-c cidr -f value)
pod_subnet_gw=$(openstack subnet show "$KURYR_NEUTRON_DEFAULT_POD_SUBNET" \
-c gateway_ip -f value)
port_mac=$(openstack port show "$port_id" -c mac_address -f value)
port_ips=($(openstack port show "$port_id" -f value -c fixed_ips | \
awk -F"'" '{print $2}'))
port_subnets=($(openstack port show "$port_id" -f value -c fixed_ips | \
awk -F"'" '{print $4}'))
sudo ovs-vsctl -- --may-exist add-port $OVS_BRIDGE "$ifname" \
-- set Interface "$ifname" type=internal \
-- set Interface "$ifname" external-ids:iface-status=active \
-- set Interface "$ifname" external-ids:attached-mac="$port_mac" \
-- set Interface "$ifname" external-ids:iface-id="$port_id"
sudo ip link set dev "$ifname" address "$port_mac"
sudo ip link set dev "$ifname" up
for ((i=0; i < ${#port_ips[@]}; i++)); do
prefix=$(openstack subnet show "${port_subnets[$i]}" \
-c cidr -f value | \
cut -f2 -d/)
sudo ip addr add "${port_ips[$i]}/${prefix}" dev "$ifname"
done
if [[ "$KURYR_SG_DRIVER" == "namespace" ]]; then
subnetpool_id=${KURYR_NEUTRON_DEFAULT_SUBNETPOOL_ID:-${SUBNETPOOL_V4_ID}}
subnetpool_cidr=$(openstack subnet pool show "${subnetpool_id}" \
-c prefixes -f value | cut -f2)
sudo ip route add "$subnetpool_cidr" via "$pod_subnet_gw" dev "$ifname"
else
sudo ip route add "$service_subnet_cidr" via "$pod_subnet_gw" dev "$ifname"
fi
if [ -n "$port_number" ]; then
# if openstack-INPUT chain doesn't exist we create it in INPUT (for
# local development envs since openstack-INPUT is usually only in gates)
sudo iptables -I openstack-INPUT 1 \
-p tcp -s 0.0.0.0/0 -d 0.0.0.0/0 --dport $port_number -j ACCEPT || \
sudo iptables -I INPUT 1 \
-p tcp -m conntrack --ctstate NEW \
-m tcp --dport "$port_number" \
-m comment --comment "kuryr-devstack: Access to OpenShift API" -j ACCEPT
fi
}
# get_container
# Description: Pulls a container from Dockerhub
# Params:
# image_name - the name of the image in docker hub
# version - The version of the image to pull. Defaults to 'latest'
function get_container {
local image
local image_name
local version
image_name="$1"
version="${2:-latest}"
if [ "$image_name" == "" ]; then
return 0
fi
image="${image_name}:${version}"
if [ -z "$(docker images -q "$image")" ]; then
docker pull "$image"
fi
}
# run_container
# Description: Runs a container and attaches devstack's logging to it
# Params:
# name - Name of the container to run
# args - arguments to run the container with
function run_container {
# Runs a detached container and uses devstack's run process to monitor
# its logs
local name
local docker_bin
docker_bin=$(which docker)
name="$1"
shift
args="$@"
$docker_bin create --name $name $args
run_process "$name" \
"$docker_bin start --attach $name"
}
# stop_container
# Description: stops a container and its devstack logging
# Params:
# name - Name of the container to stop
function stop_container {
local name
name="$1"
docker kill "$name"
docker rm "$name"
stop_process "$name"
}
# _allocation_range
# Description: Writes out tab separated usable ip range for a CIDR
# Params:
# cidr - The cidr to get the range for
# gateway_position - Whether to reserve at 'beginning' or at 'end'
function _allocation_range {
python - <<EOF "$@"
import sys
from ipaddress import ip_network
import six
n = ip_network(six.text_type(sys.argv[1]))
gateway_position = sys.argv[2]
if gateway_position == 'beginning':
beg_offset = 2
end_offset = 2
elif gateway_position == 'end':
beg_offset = 1
end_offset = 3
else:
raise ValueError('Disallowed gateway position %s' % gateway_position)
print("%s\\t%s" % (n[beg_offset], n[-end_offset]))
EOF
}
# create_k8s_icmp_sg_rules
# Description: Creates icmp sg rules for Kuryr-Kubernetes pods
# Params:
# sg_id - Kuryr's security group id
# direction - egress or ingress direction
function create_k8s_icmp_sg_rules {
local sg_id=$1
local direction="$2"
local project_id
project_id=$(get_or_create_project \
"$KURYR_NEUTRON_DEFAULT_PROJECT" default)
icmp_sg_rules=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
security group rule create \
--project "$project_id" \
--protocol icmp \
--"$direction" "$sg_id")
die_if_not_set $LINENO icmp_sg_rules \
"Failure creating icmp sg ${direction} rule for ${sg_id}"
}
# create_k8s_subnet
# Description: Creates a network and subnet for Kuryr-Kubernetes usage
# Params:
# project_id - Kuryr's project uuid
# net_name - Name of the network to create
# subnet_name - Name of the subnet to create
# subnetpool_id - uuid of the subnet pool to use
# router - name of the router to plug the subnet to
# split_allocation - Whether to allocate on all the subnet or only the
# latter half
function create_k8s_subnet {
# REVISIT(apuimedo): add support for IPv6
local project_id=$1
local net_name="$2"
local subnet_name="$3"
local subnetpool_id="$4"
local router="$5"
local subnet_params="--project $project_id "
local subnet_cidr
local split_allocation
split_allocation="${6:-False}"
subnet_params+="--ip-version 4 "
subnet_params+="--no-dhcp --gateway none "
subnet_params+="--subnet-pool $subnetpool_id "
local net_id
net_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
network create --project "$project_id" \
"$net_name" \
-c id -f value)
subnet_params+="--network $net_id $subnet_name"
local subnet_id
subnet_id=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet create $subnet_params \
--project "$project_id" \
-c id -f value)
die_if_not_set $LINENO subnet_id \
"Failure creating K8s ${subnet_name} IPv4 subnet for ${project_id}"
subnet_cidr=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$subnet_id" \
-c cidr -f value)
die_if_not_set $LINENO subnet_cidr \
"Failure getting K8s ${subnet_name} IPv4 subnet for $project_id"
# Since K8s has its own IPAM for services and allocates the first IP from
# service subnet CIDR to Kubernetes apiserver, we'll always put the router
# interface at the end of the range.
local router_ip
local allocation_start
local allocation_end
local allocation_subnet
router_ip=$(_cidr_range "$subnet_cidr" | cut -f2)
if [[ "$split_allocation" == "True" ]]; then
allocation_subnet=$(split_subnet "$subnet_cidr" | cut -f2)
allocation_start=$(_allocation_range "$allocation_subnet" end | cut -f1)
allocation_end=$(_allocation_range "$allocation_subnet" end | cut -f2)
else
allocation_start=$(_allocation_range "$subnet_cidr" end | cut -f1)
allocation_end=$(_allocation_range "$subnet_cidr" end | cut -f2)
fi
die_if_not_set $LINENO router_ip \
"Failed to determine K8s ${subnet_name} subnet router IP"
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" subnet set \
--gateway "$router_ip" --no-allocation-pool "$subnet_id" \
|| die $LINENO "Failed to update K8s ${subnet_name} subnet"
# Set a new allocation pool for the subnet so ports can be created again
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" subnet set \
--allocation-pool "start=${allocation_start},end=${allocation_end}" \
"$subnet_id" || die $LINENO "Failed to update K8s ${subnet_name} subnet"
openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
router add subnet "$router" "$subnet_id" \
|| die $LINENO \
"Failed to enable routing for K8s ${subnet_name} subnet"
}
# create_k8s_router_fake_service
# Description: Creates an endpoint-less kubernetes service to keep Kubernetes
# API server from allocating the service subnet router IP for
# another service
function create_k8s_router_fake_service {
local router_ip
local fake_svc_name
fake_svc_name='kuryr-svc-router'
router_ip=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
-f value -c gateway_ip)
create_k8s_fake_service $fake_svc_name $router_ip
}
# create_k8s_fake_service
# Description: Creates an endpoint-less kubernetes service to keep Kubernetes
# API server from allocating this IP for another service
function create_k8s_fake_service {
local fake_svc_name
local fake_svc_ip
fake_svc_name="$1"
fake_svc_ip="$2"
existing_svc_ip=$(/usr/local/bin/kubectl get svc --namespace kube-system -o jsonpath='{.items[?(@.metadata.name=='"\"${fake_svc_name}\""')].spec.clusterIP}')
if [[ "$existing_svc_ip" == "" ]]; then
# Create fake service so the clusterIP can't be reassigned
cat <<EOF | /usr/local/bin/kubectl create -f -
kind: Service
apiVersion: v1
metadata:
name: "${fake_svc_name}"
namespace: kube-system
spec:
type: ClusterIP
clusterIP: "${fake_svc_ip}"
ports:
- protocol: TCP
port: 80
EOF
fi
}
# build_kuryr_containers
# Description: Generates a Kuryr controller and Kuryr CNI docker images in
# the local docker registry as kuryr/controller:latest and
# kuryr/cni:latest respectively
function build_kuryr_containers() {
local cni_buildtool_args
local cni_daemon
local build_dir
local use_py3
local controller_dockerfile
cni_buildtool_args="--bin-dir ${1} --conf-dir ${2}"
cni_daemon=$3
build_dir="${DEST}/kuryr-kubernetes"
pushd "$build_dir"
use_py3=$(trueorfalse False KURYR_CONTAINERS_USE_PY3)
if [[ "$use_py3" == "True" ]]; then
cni_buildtool_args="${cni_buildtool_args} --dockerfile cni_py3.Dockerfile"
controller_dockerfile="controller_py3.Dockerfile"
else
controller_dockerfile="controller.Dockerfile"
fi
if [[ "$cni_daemon" == "False" ]]; then
cni_buildtool_args="${cni_buildtool_args} --no-daemon"
fi
# Build controller image
sudo docker build \
-t kuryr/controller -f "$controller_dockerfile" .
# Build CNI image
sudo "./tools/build_cni_daemonset_image" $cni_buildtool_args
popd
}
function indent() {
sed 's/^/ /';
}
function generate_kuryr_configmap() {
local output_dir
local controller_conf_path
local cni_conf_path
output_dir=$1
controller_conf_path=${2:-""}
cni_conf_path=${3:-$controller_conf_path}
mkdir -p "$output_dir"
rm -f ${output_dir}/config_map.yml
# kuryr-contoller config
cat >> "${output_dir}/config_map.yml" << EOF
apiVersion: v1
kind: ConfigMap
metadata:
name: kuryr-config
namespace: kube-system
data:
kuryr.conf: |
EOF
cat $controller_conf_path | indent >> "${output_dir}/config_map.yml"
# kuryr-cni config (different token_file location)
# token_file = /etc/kuryr/token
# ssl_ca_crt_file = /etc/kuryr/ca.crt
# ssl_verify_server_crt = true
cat >> "${output_dir}/config_map.yml" << EOF
kuryr-cni.conf: |
EOF
cat $cni_conf_path | indent >> "${output_dir}/config_map.yml"
}
function generate_kuryr_certificates_secret() {
local output_dir
local certs_bundle_path
output_dir=$1
certs_bundle_path=${2:-""}
mkdir -p "$output_dir"
rm -f ${output_dir}/certificates_secret.yml
CA_CERT=\"\" # It's a "" string that will be inserted into yaml file.
if [ $certs_bundle_path -a -f $certs_bundle_path ]; then
CA_CERT=$(base64 -w0 < "$certs_bundle_path")
fi
cat >> "${output_dir}/certificates_secret.yml" << EOF
apiVersion: v1
kind: Secret
metadata:
name: kuryr-certificates
namespace: kube-system
type: Opaque
data:
kuryr-ca-bundle.crt: $CA_CERT
EOF
}
function generate_kuryr_service_account() {
output_dir=$1
mkdir -p "$output_dir"
rm -f ${output_dir}/service_account.yml
cat >> "${output_dir}/service_account.yml" << EOF
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: kuryr-controller
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kuryr-controller
rules:
- apiGroups:
- ""
verbs: ["*"]
resources:
- endpoints
- ingress
- pods
- nodes
- services
- services/status
- namespaces
- apiGroups: ["oapi"]
resources:
- routes
verbs: ["*"]
- apiGroups:
- openstack.org
verbs: ["*"]
resources:
- kuryrnets
- kuryrnetpolicies
- apiGroups: ["networking.k8s.io"]
resources:
- networkpolicies
verbs:
- get
- list
- watch
- apiGroups: ["k8s.cni.cncf.io"]
resources:
- network-attachment-definitions
verbs:
- get
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1beta1
metadata:
name: kuryr-controller-global
subjects:
- kind: ServiceAccount
name: kuryr-controller
namespace: kube-system
roleRef:
kind: ClusterRole
name: kuryr-controller
apiGroup: rbac.authorization.k8s.io
EOF
}
function generate_controller_deployment() {
output_dir=$1
health_server_port=$2
controller_ha=$3
mkdir -p "$output_dir"
rm -f ${output_dir}/controller_deployment.yml
cat >> "${output_dir}/controller_deployment.yml" << EOF
apiVersion: apps/v1beta1
kind: Deployment
metadata:
labels:
name: kuryr-controller
name: kuryr-controller
namespace: kube-system
spec:
replicas: ${KURYR_CONTROLLER_REPLICAS:-1}
template:
metadata:
labels:
name: kuryr-controller
name: kuryr-controller
spec:
serviceAccountName: kuryr-controller
automountServiceAccountToken: true
hostNetwork: true
containers:
EOF
if [ "$controller_ha" == "True" ]; then
cat >> "${output_dir}/controller_deployment.yml" << EOF
- image: gcr.io/google_containers/leader-elector:0.5
name: leader-elector
args:
- "--election=kuryr-controller"
- "--http=0.0.0.0:${KURYR_CONTROLLER_HA_PORT:-16401}"
- "--election-namespace=kube-system"
- "--ttl=5s"
ports:
- containerPort: ${KURYR_CONTROLLER_HA_PORT:-16401}
protocol: TCP
EOF
fi
cat >> "${output_dir}/controller_deployment.yml" << EOF
- image: kuryr/controller:latest
imagePullPolicy: Never
name: controller
terminationMessagePath: "/dev/termination-log"
env:
- name: KUBERNETES_NODE_NAME
valueFrom:
fieldRef:
fieldPath: spec.nodeName
volumeMounts:
- name: config-volume
mountPath: "/etc/kuryr/kuryr.conf"
subPath: kuryr.conf
- name: certificates-volume
mountPath: "/etc/ssl/certs"
readOnly: true
readinessProbe:
httpGet:
path: /ready
port: ${health_server_port}
scheme: HTTP
timeoutSeconds: 5
livenessProbe:
httpGet:
path: /alive
port: ${health_server_port}
initialDelaySeconds: 15
EOF
cat >> "${output_dir}/controller_deployment.yml" << EOF
volumes:
- name: config-volume
configMap:
name: kuryr-config
- name: certificates-volume
secret:
secretName: kuryr-certificates
restartPolicy: Always
EOF
}
function generate_cni_daemon_set() {
output_dir=$1
cni_health_server_port=$2
cni_daemon=${3:-False}
cni_bin_dir=${4:-/opt/cni/bin}
cni_conf_dir=${5:-/etc/cni/net.d}
mkdir -p "$output_dir"
rm -f ${output_dir}/cni_ds.yml
cat >> "${output_dir}/cni_ds.yml" << EOF
apiVersion: extensions/v1beta1
kind: DaemonSet
metadata:
name: kuryr-cni-ds
namespace: kube-system
labels:
tier: node
app: kuryr
spec:
template:
metadata:
labels:
tier: node
app: kuryr
spec:
hostNetwork: true
tolerations:
- key: node-role.kubernetes.io/master
operator: Exists
effect: NoSchedule
serviceAccountName: kuryr-controller
containers:
- name: kuryr-cni
image: kuryr/cni:latest
imagePullPolicy: Never
command: [ "cni_ds_init" ]
env:
- name: KURYR_CNI_POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
securityContext:
privileged: true
volumeMounts:
- name: bin
mountPath: /opt/cni/bin
- name: net-conf
mountPath: /etc/cni/net.d
- name: config-volume
mountPath: /etc/kuryr/kuryr.conf
subPath: kuryr-cni.conf
- name: proc
mountPath: /host_proc
EOF
if [[ -n "$OVS_HOST_PATH" ]]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: openvswitch
mountPath: /var/run/openvswitch
EOF
fi
if [ "$cni_daemon" == "True" ]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
readinessProbe:
httpGet:
path: /ready
port: ${cni_health_server_port}
scheme: HTTP
initialDelaySeconds: 60
timeoutSeconds: 10
livenessProbe:
httpGet:
path: /alive
port: ${cni_health_server_port}
initialDelaySeconds: 60
EOF
fi
cat >> "${output_dir}/cni_ds.yml" << EOF
volumes:
- name: bin
hostPath:
path: ${cni_bin_dir}
- name: net-conf
hostPath:
path: ${cni_conf_dir}
- name: config-volume
configMap:
name: kuryr-config
- name: proc
hostPath:
path: /proc
EOF
if [[ -n "$OVS_HOST_PATH" ]]; then
cat >> "${output_dir}/cni_ds.yml" << EOF
- name: openvswitch
hostPath:
path: ${OVS_HOST_PATH}
EOF
fi
}
# install_openshift_binary
# Description: Fetches the configured binary release of OpenShift and
# installs it in the system
function install_openshift_binary {
mkdir -p "$OPENSHIFT_BIN"
curl -L "$OPENSHIFT_BINARY_URL" -o "${OPENSHIFT_BIN}/openshift.tar.gz" --retry 2
tar xzvf "${OPENSHIFT_BIN}/openshift.tar.gz" --strip 1 -C "$OPENSHIFT_BIN"
# Make openshift run from its untarred directory
cat << EOF | sudo tee /usr/local/bin/openshift
#!/bin/bash
cd ${OPENSHIFT_BIN}
exec ./openshift "\$@"
EOF
sudo chmod a+x /usr/local/bin/openshift
# Make oc easily available
cat << EOF | sudo tee /usr/local/bin/oc
#!/bin/bash
CURL_CA_BUNDLE=${OPENSHIFT_DATA_DIR}/ca.crt \
KUBECONFIG=${OPENSHIFT_DATA_DIR}/admin.kubeconfig \
${OPENSHIFT_BIN}/oc "\$@"
EOF
sudo chmod a+x /usr/local/bin/oc
# Make kubectl easily available
cat << EOF | sudo tee /usr/local/bin/kubectl
#!/bin/bash
CURL_CA_BUNDLE=${OPENSHIFT_DATA_DIR}/ca.crt \
KUBECONFIG=${OPENSHIFT_DATA_DIR}/admin.kubeconfig \
${OPENSHIFT_BIN}/kubectl "\$@"
EOF
sudo chmod a+x /usr/local/bin/kubectl
}
# run_openshift_master
# Description: Starts the openshift master
function run_openshift_master {
local cmd
local pod_subnet_cidr
local service_subnet_cidr
local portal_net
sudo install -d -o "$STACK_USER" "$OPENSHIFT_DATA_DIR"
pod_subnet_cidr=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$KURYR_NEUTRON_DEFAULT_POD_SUBNET" \
-c cidr -f value)
service_subnet_cidr=$(openstack --os-cloud devstack-admin \
--os-region "$REGION_NAME" \
subnet show "$KURYR_NEUTRON_DEFAULT_SERVICE_SUBNET" \
-c cidr -f value)
if is_service_enabled octavia; then
portal_net=$(split_subnet "$service_subnet_cidr" | cut -f1)
else
portal_net="$service_subnet_cidr"
fi
# Generate master config
"${OPENSHIFT_BIN}/openshift" start master \
"--etcd=http://${SERVICE_HOST}:${ETCD_PORT}" \
"--network-cidr=${pod_subnet_cidr}" \
"--portal-net=${portal_net}" \
"--listen=0.0.0.0:${OPENSHIFT_API_PORT}" \
"--master=${OPENSHIFT_API_URL}" \
"--write-config=${OPENSHIFT_DATA_DIR}"
# Enable externalIPs
sed -i 's/externalIPNetworkCIDRs: null/externalIPNetworkCIDRs: ["0.0.0.0\/0"]/' "${OPENSHIFT_DATA_DIR}/master-config.yaml"
# Reconfigure Kuryr-Kubernetes to use the certs generated
iniset "$KURYR_CONFIG" kubernetes ssl_client_crt_file "${OPENSHIFT_DATA_DIR}/admin.crt"
iniset "$KURYR_CONFIG" kubernetes ssl_client_key_file "${OPENSHIFT_DATA_DIR}/admin.key"
iniset "$KURYR_CONFIG" kubernetes ssl_ca_crt_file "${OPENSHIFT_DATA_DIR}/ca.crt"
sudo chown "${STACK_USER}:${STACK_USER}" -R "$OPENSHIFT_DATA_DIR"
# Generate kubelet kubeconfig
"${OPENSHIFT_BIN}/oc" adm create-kubeconfig \
"--client-key=${OPENSHIFT_DATA_DIR}/master.kubelet-client.key" \
"--client-certificate=${OPENSHIFT_DATA_DIR}/master.kubelet-client.crt" \
"--certificate-authority=${OPENSHIFT_DATA_DIR}/ca.crt" \
"--master=${OPENSHIFT_API_URL}" \
"--kubeconfig=${OPENSHIFT_DATA_DIR}/master.kubelet-client.kubeconfig"
cmd="/usr/local/bin/openshift start master \
--config=${OPENSHIFT_DATA_DIR}/master-config.yaml"
wait_for "etcd" "http://${SERVICE_HOST}:${ETCD_PORT}/v2/machines"
if [[ "$USE_SYSTEMD" = "True" ]]; then
# If systemd is being used, proceed as normal
run_process openshift-master "$cmd" root root
else
# If screen is being used, there is a possibility that the devstack
# environment is on a stable branch. Older versions of run_process have
# a different signature. Sudo is used as a workaround that works in
# both older and newer versions of devstack.
run_process openshift-master "sudo $cmd"
fi
}
# make_admin_cluster_admin
# Description: Gives the system:admin permissions over the cluster
function make_admin_cluster_admin {
wait_for "OpenShift API Server" "$OPENSHIFT_API_URL" \
"${OPENSHIFT_DATA_DIR}/ca.crt"
/usr/local/bin/oc adm policy add-cluster-role-to-user cluster-admin admin \
"--config=${OPENSHIFT_DATA_DIR}/openshift-master.kubeconfig"
}
# run_openshift_node
# Description: Starts the openshift node
function run_openshift_node {
local command
#install required CNI loopback driver
sudo mkdir -p "$CNI_BIN_DIR"
curl -L "$OPENSHIFT_CNI_BINARY_URL" | sudo tar -C "$CNI_BIN_DIR" -xzvf - ./loopback
command="/usr/local/bin/openshift start node \
--kubeconfig=${OPENSHIFT_DATA_DIR}/master.kubelet-client.kubeconfig \
--enable=kubelet,plugins \
--network-plugin=cni \
--listen=https://0.0.0.0:8442"
# Link master config necessary for bootstrapping
# TODO: This needs to be generated so we don't depend on it on multinode
mkdir -p "${OPENSHIFT_BIN}/openshift.local.config"
ln -fs "${OPENSHIFT_DATA_DIR}" "${OPENSHIFT_BIN}/openshift.local.config/master"
mkdir -p "${OPENSHIFT_DATA_DIR}/node"
ln -fs "${OPENSHIFT_DATA_DIR}/node" "${OPENSHIFT_BIN}/openshift.local.config/node"
# Link stack CNI to location expected by openshift node
sudo mkdir -p /etc/cni
sudo rm -fr /etc/cni/net.d
sudo rm -fr /opt/cni/bin
sudo ln -fs "${CNI_CONF_DIR}" /etc/cni/net.d
sudo mkdir -p /opt/cni
sudo ln -fs "${CNI_BIN_DIR}" /opt/cni/bin
if [[ "$USE_SYSTEMD" = "True" ]]; then
# If systemd is being used, proceed as normal
run_process openshift-node "$command" root root
else
# If screen is being used, there is a possibility that the devstack
# environment is on a stable branch. Older versions of run_process have
# a different signature. Sudo is used as a workaround that works in
# both older and newer versions of devstack.
run_process openshift-node "sudo $command"
fi
}
# lb_state
# Description: Returns the state of the load balancer
# Params:
# id - Id or name of the loadbalancer the state of which needs to be
# retrieved.
function lb_state {
local lb_id
lb_id="$1"
# Checks Neutron lbaas for the Load balancer state
if is_service_enabled octavia; then
openstack loadbalancer show "$lb_id" | \
awk '/provisioning_status/ {print $4}'
else
neutron lbaas-loadbalancer-show "$lb_id" | \
awk '/provisioning_status/ {print $4}'
fi
}
function wait_for_lb {
local lb_name
local curr_time
local time_diff
local start_time
lb_name="$1"
timeout=${2:-$KURYR_WAIT_TIMEOUT}
echo -n "Waiting for LB:$lb_name"
start_time=$(date +%s)
while [[ "$(lb_state "$lb_name")" != "ACTIVE" ]]; do
echo -n "Waiting till LB=$lb_name is ACTIVE."
curr_time=$(date +%s)
time_diff=$((curr_time - start_time))
[[ $time_diff -le $timeout ]] || die "Timed out waiting for $lb_name"
sleep 5
done
}
# create_load_balancer
# Description: Creates an OpenStack Load Balancer with either neutron LBaaS
# or Octavia
# Params:
# lb_name: Name to give to the load balancer.
# lb_vip_subnet: Id or name of the subnet where lb_vip should be
# allocated.
# project_id: Id of the project where the load balancer should be
# allocated.
# lb_vip: Virtual IP to give to the load balancer - optional.
function create_load_balancer {
local lb_name
local lb_vip_subnet
local lb_params
local project_id
lb_name="$1"
lb_vip_subnet="$2"
project_id="$3"
lb_params=" --name $lb_name "
if [ -z "$4" ]; then
echo -n "create_load_balancer LB=$lb_name, lb_vip not provided."
else
lb_params+=" --vip-address $4"
fi
if is_service_enabled octavia; then
lb_params+=" --project ${project_id} --vip-subnet-id $lb_vip_subnet"
openstack loadbalancer create $lb_params
else
lb_params+=" --tenant-id ${project_id} $lb_vip_subnet"
neutron lbaas-loadbalancer-create $lb_params
fi
}
# create_load_balancer_listener
# Description: Creates an OpenStack Load Balancer Listener for the specified
# Load Balancer with either neutron LBaaS or Octavia
# Params:
# name: Name to give to the load balancer listener.
# protocol: Whether it is HTTP, HTTPS, TCP, etc.
# port: The TCP port number to listen to.
# data_timeouts: Octavia's timeouts for client and server inactivity.
# lb: Id or name of the Load Balancer we want to add the Listener to.
# project_id: Id of the project where this listener belongs to.
function create_load_balancer_listener {
local name
local protocol
local port
local lb
local data_timeouts
local max_timeout
local project_id
name="$1"
protocol="$2"
port="$3"
lb="$4"
project_id="$5"
data_timeouts="$6"
max_timeout=1200
# Octavia needs the LB to be active for the listener
wait_for_lb $lb $max_timeout
if is_service_enabled octavia; then
openstack loadbalancer listener create --name "$name" \
--protocol "$protocol" \
--protocol-port "$port" \
--timeout-client-data "$data_timeouts" \
--timeout-member-data "$data_timeouts" \
"$lb"
else
neutron lbaas-listener-create --name "$name" \
--protocol "$protocol" \
--protocol-port "$port" \
--tenant-id "$project_id" \
--loadbalancer "$lb"
fi
}
# create_load_balancer_pool
# Description: Creates an OpenStack Load Balancer Pool for the specified
# Load Balancer listener with either neutron LBaaS or Octavia
# Params:
# name: Name to give to the load balancer listener.
# protocol: Whether it is HTTP, HTTPS, TCP, etc.
# algorithm: Load Balancing algorithm to use.
# listener: Id or name of the Load Balancer Listener we want to add the
# pool to.
# project_id: Id of the project where this pool belongs to.
# lb: Id or name of the Load Balancer we want to add the pool to
# (optional).
function create_load_balancer_pool {
local name
local protocol
local algorithm
local listener
local lb
local project_id
name="$1"
protocol="$2"
algorithm="$3"
listener="$4"
project_id="$5"
lb="$6"
# We must wait for the LB to be active before we can put a Pool for it
wait_for_lb $lb
if is_service_enabled octavia; then