From c1daf20afd44d2ca6cb430a8461679da90c6ec5c Mon Sep 17 00:00:00 2001
From: Lukas Macko
Date: Wed, 10 Apr 2019 10:24:54 +0200
Subject: [PATCH] Vagrant multimaster setup

Signed-off-by: Lukas Macko
---
 vagrant/README.md                         |   1 +
 vagrant/Vagrantfile                       |  48 ++++++++-
 vagrant/defaults                          |   1 +
 vagrant/provision/bootstrap_master.sh     | 123 ++++++++++++++++++----
 vagrant/provision/provision_every_node.sh |   2 +-
 vagrant/provision/provision_gateway.sh    |  55 ++++++++++
 6 files changed, 207 insertions(+), 23 deletions(-)

diff --git a/vagrant/README.md b/vagrant/README.md
index 6a0ade200b..640c70fdbb 100755
--- a/vagrant/README.md
+++ b/vagrant/README.md
@@ -30,6 +30,7 @@ Environment variable | Description | Default
 `K8S_MASTER_MEMORY` | Memory size for master node | `4096`
 `K8S_NODE_MEMORY` | Memory size for worker nodes | `4096`
 `K8S_NODES` | Number of worker nodes (except from the master) | `1`
+`K8S_MASTER_NODES` | Number of master nodes (Beware: the multimaster feature is experimental and does not work for all option combinations) | `1`
 `K8S_VERSION` | Kubernetes version to be installed | `1.12.3`
 `K8S_DEPLOYMENT_SCENARIO` | Contiv deployment scenario to be used: `nostn` (default) or [`stn`](../docs/setup/SINGLE_NIC_SETUP.md) or [`calico-vpp`](calico-vpp/README.md) | `nostn`
 `K8S_DEPLOYMENT_ENV` | Contiv deployment environment to be used: `prod` (production) or `dev` (development) | `prod`
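A quick way to exercise the new knob (a hypothetical invocation, assuming the defaults documented above and the `vagrant/defaults` wiring below):

```bash
# bring up one master + two backup masters + one worker;
# K8S_MASTER_NODES > 1 pins Kubernetes to 1.13.4 (see the Vagrantfile change below)
cd vagrant
K8S_MASTER_NODES=3 K8S_NODES=1 vagrant up
```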
diff --git a/vagrant/Vagrantfile b/vagrant/Vagrantfile
index 2c1791b11f..99f5ba35ef 100755
--- a/vagrant/Vagrantfile
+++ b/vagrant/Vagrantfile
@@ -54,6 +54,7 @@ node_os_release = ENV['K8S_NODE_OS_RELEASE'] || '16.04'
 node_cpus = ENV['K8S_NODE_CPUS'].to_i == 0 ? 4 : ENV['K8S_NODE_CPUS'].to_i
 node_memory = ENV['K8S_NODE_MEMORY'].to_i == 0 ? 4096 : ENV['K8S_NODE_MEMORY'].to_i
 num_nodes = ENV['K8S_NODES'].to_i == 0 ? 0 : ENV['K8S_NODES'].to_i
+master_nodes = ENV['K8S_MASTER_NODES'].to_i == 0 ? 0 : ENV['K8S_MASTER_NODES'].to_i
 master_cpus = ENV['K8S_MASTER_CPUS'].to_i == 0 ? 4 : ENV['K8S_MASTER_CPUS'].to_i
 master_memory = ENV['K8S_MASTER_MEMORY'].to_i == 0 ? 4096 : ENV['K8S_MASTER_MEMORY'].to_i
 dep_env = ENV['K8S_DEPLOYMENT_ENV']
@@ -85,13 +86,17 @@ if (ip_version == 'ipv6' && base_ip == '10.20.0.')
   base_prefix_len = '64'
 end
 
+if ( master_nodes > 1 )
+  k8s_version = '1.13.4'
+end
+
 env = { }
 # Do not modify env variables after this step
 VARIABLES = ['http_proxy', 'https_proxy', 'k8s_version', 'docker_version',
 'node_os', 'node_os_release', 'node_cpus', 'node_memory', 'num_nodes',
 'master_cpus', 'master_memory', 'dep_env', 'dep_scenario', 'base_ip',
 'base_prefix_len', 'provider', 'image_tag', 'go_version', 'goland_version',
 'helm_version', 'crd_disabled',
-'ip_version', 'contiv_dir', 'helm_extra_opts']
+'ip_version', 'contiv_dir', 'helm_extra_opts', 'master_nodes']
 
 VARIABLES.each do |x|
   env[x] = eval x
@@ -136,6 +141,10 @@ VAGRANTFILE_API_VERSION = "2"
   node_ips = num_nodes.times.collect { |n| base_ip + "#{n+10}" }
   node_names = num_nodes.times.collect { |n| "k8s-worker#{n+1}" }
 
+  m_nodes = master_nodes - 1
+  master_ips = m_nodes.times.collect { |n| base_ip + "#{n+3}" }
+  master_names = m_nodes.times.collect { |n| "k8s-master#{n+1}" }
+
   config.ssh.insert_key = false
 
   if Vagrant.has_plugin?("vagrant-cachier")
@@ -172,6 +181,10 @@ VAGRANTFILE_API_VERSION = "2"
         gw.vm.network :private_network, ip: "10.130.1.254", netmask: "255.255.254.0", virtualbox__intnet: "vpp", nic_type: "82540EM"
       else
         gw.vm.network :private_network, ip: "192.168.16.100", netmask: "255.255.255.0", virtualbox__intnet: "vpp", nic_type: "82540EM"
+        if master_nodes > 1
+          # in order to support multi-master, additionally connect the GW to the mgmt network
+          gw.vm.network :private_network, ip: "#{base_ip}100", netmask: "255.255.255.0", virtualbox__intnet: "true"
+        end
       end
     else
       gw.vm.network :private_network, ip: "192.168.16.100", virtualbox__intnet: "vpp", nic_type: "82540EM"
@@ -246,6 +259,39 @@ VAGRANTFILE_API_VERSION = "2"
     end
   end
 
+  # Configure backup master node(s)
+  # BEWARE: currently only the ipv4, nostn, crdDisabled setup with k8s 1.13.4 supports multi-master
+  m_nodes.times do |n|
+    node_name = master_names[n]
+    node_addr = master_ips[n]
+    config.vm.define node_name do |node|
+      node.vm.hostname = node_name
+      node.vm.network :private_network, auto_config: false, virtualbox__intnet: "vpp", nic_type: "82540EM"
+      node.vm.network :private_network, ip: node_addr, netmask: "#{base_prefix_len}", virtualbox__intnet: "true"
+
+      node.vm.provider "virtualbox" do |v|
+        v.customize ["modifyvm", :id, "--ioapic", "on"]
+        v.memory = master_memory
+        v.cpus = master_cpus
+        v.customize ["modifyvm", :id, "--uartmode1", "disconnected"]
+      end
+      node.vm.provision "shell" do |s|
+        s.path = "provision/provision_every_node.sh"
+        s.env = env
+        s.args = ["install"]
+      end
+      node.vm.provision "shell" do |s|
+        s.path = "provision/install_requirements_master.sh"
+        s.env = env
+      end
+      node.vm.provision "shell" do |s|
+        s.path = "provision/bootstrap_master.sh"
+        s.args = [node_name, node_addr, 'true']
+        s.env = env
+      end
+    end
+  end
+
   # Configure VBox Worker node(s)
   num_nodes.times do |n|
     node_name = node_names[n]
diff --git a/vagrant/defaults b/vagrant/defaults
index 6ada59af00..4266894a7c 100644
--- a/vagrant/defaults
+++ b/vagrant/defaults
@@ -3,6 +3,7 @@ export K8S_NODE_OS_RELEASE=${K8S_NODE_OS_RELEASE:-16.04}
 export K8S_NODE_CPUS=${K8S_NODE_CPUS:-4}
 export K8S_NODE_MEMORY=${K8S_NODE_MEMORY:-4096}
 export K8S_NODES=${K8S_NODES:-1}
+export K8S_MASTER_NODES=${K8S_MASTER_NODES:-1}
 export IP_VERSION=${IP_VERSION:-ipv4}
 export K8S_MASTER_CPUS=${K8S_MASTER_CPUS:-4}
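For orientation, the management-network addressing produced by the Vagrantfile above looks roughly like this (a sketch assuming the default `base_ip` of `10.20.0.`; the `.2` address for the first master is inferred from the HAProxy backend list below):

```bash
# k8s-master        10.20.0.2    first control-plane node (apiserver1 in the LB pool)
# k8s-master1..N-1  10.20.0.3+   backup masters:  base_ip + "#{n+3}"
# k8s-worker1..M    10.20.0.10+  worker nodes:    base_ip + "#{n+10}"
# gateway (HAProxy) 10.20.0.100  control-plane endpoint used by kubeadm join
```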
diff --git a/vagrant/provision/bootstrap_master.sh b/vagrant/provision/bootstrap_master.sh
index 9d024eb66d..031165ef42 100755
--- a/vagrant/provision/bootstrap_master.sh
+++ b/vagrant/provision/bootstrap_master.sh
@@ -3,6 +3,9 @@ set -ex
 
 echo Args passed: [[ $@ ]]
 
+# set to 'true' on every master except the first one
+backup_master="$3"
+
 # Pull images if not present
 if [ -f /vagrant/images.tar ]; then
   echo 'Found saved images at /vagrant/images.tar'
@@ -43,8 +46,10 @@ fi
 # --------------------------------------------------------
 # ---> Create token and export it with kube master IP <---
 # --------------------------------------------------------
-echo "Exporting Kube Master IP and Kubeadm Token..."
-echo "export KUBEADM_TOKEN=$(kubeadm token generate)" >> /vagrant/config/init
+if [ "$backup_master" != "true" ]; then
+  echo "Exporting Kube Master IP and Kubeadm Token..."
+  echo "export KUBEADM_TOKEN=$(kubeadm token generate)" >> /vagrant/config/init
+fi
 
 if [ "${dep_scenario}" != 'nostn' ] && [ "${ip_version}" != 'ipv6' ]; then
   echo "export KUBE_MASTER_IP=$(hostname -I | cut -f2 -d' ')" >> /vagrant/config/init
@@ -89,9 +94,71 @@ if [ $split_k8s_version -gt 10 ] ; then
   systemctl daemon-reload
   systemctl restart kubelet
   if [ "${dep_scenario}" != 'calico' ] && [ "${dep_scenario}" != 'calicovpp' ]; then
-    echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
+    if [ ${master_nodes} -gt 1 ]; then
+      cat > kubeadm.cfg <<EOF
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: InitConfiguration
+bootstrapTokens:
+- token: "${KUBEADM_TOKEN}"
+  ttl: 0s
+localAPIEndpoint:
+  advertiseAddress: "${KUBE_MASTER_IP}"
+---
+apiVersion: kubeadm.k8s.io/v1beta1
+kind: ClusterConfiguration
+kubernetesVersion: v${k8s_version}
+controlPlaneEndpoint: "10.20.0.100:6443"
+networking:
+  podSubnet: "${pod_network_cidr}"
+  serviceSubnet: "${service_cidr}"
+EOF
+      if [ "$backup_master" != "true" ]; then
+        echo "$(kubeadm init --config=kubeadm.cfg)" >> /vagrant/config/cert
+      else
+        # 'kubeadm join' for a control-plane node ignores the node-ip kubelet arg,
+        # so modify the default route to nudge kubelet into choosing the correct IP
+        ip route del `ip route | grep default`
+        ip route add default via 10.20.0.100
+
+        # copy certificates from the first master node
+        mkdir -p /etc/kubernetes/pki/etcd
+        cp /vagrant/certs/* /etc/kubernetes/pki/
+        mv /etc/kubernetes/pki/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
+        mv /etc/kubernetes/pki/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
+        hash=$(awk 'END {print $NF}' /vagrant/config/cert)
+        kubeadm join --token "${KUBEADM_TOKEN}" 10.20.0.100:6443 --discovery-token-ca-cert-hash "$hash" --experimental-control-plane
+      fi
+    else
+      echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
+    fi
   else
-    echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
+    echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
   fi
 else
   sed -i '4 a Environment="KUBELET_EXTRA_ARGS=--node-ip='"$KUBE_MASTER_IP"' --feature-gates HugePages=false"' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
@@ -106,6 +173,18 @@ sudo cp -i /etc/kubernetes/admin.conf /home/vagrant/.kube/config
 sudo chown vagrant:vagrant -R /home/vagrant/.kube
 sleep 2;
 
+if [ "$backup_master" != "true" ]; then
+  # copy the certs into the shared folder
+  rm -rf /vagrant/certs
+  mkdir /vagrant/certs
+  cp /etc/kubernetes/pki/etcd/ca.crt /vagrant/certs/etcd-ca.crt
+  cp /etc/kubernetes/pki/etcd/ca.key /vagrant/certs/etcd-ca.key
+  cp /etc/kubernetes/admin.conf /vagrant/certs/
+  cp /etc/kubernetes/pki/front-proxy-ca.* /vagrant/certs/
+  cp /etc/kubernetes/pki/ca.* /vagrant/certs/
+  cp /etc/kubernetes/pki/sa.* /vagrant/certs/
+fi
+
 applySTNScenario() {
   gw="10.130.1.254";
   if [ "${ip_version}" = "ipv6" ]; then
@@ -300,21 +379,23 @@ applyCalicoVPPNetwork() {
   done
 }
 
-stn_config=""
-export stn_config
-applySTNScenario
-
-if [ "${dep_scenario}" == 'calico' ]; then
-  export -f applyCalicoNetwork
-  su vagrant -c "bash -c applyCalicoNetwork"
-elif [ "${dep_scenario}" == 'calicovpp' ]; then
-  export stn_config="${stn_config} --set contiv.useNoOverlay=true --set contiv.ipamConfig.useExternalIPAM=true --set contiv.ipamConfig.podSubnetCIDR=10.10.0.0/16 --set vswitch.useNodeAffinity=true"
-  export -f applyVPPnetwork
-  su vagrant -c "bash -c applyVPPnetwork"
-  export -f applyCalicoVPPNetwork
-  su vagrant -c "bash -c applyCalicoVPPNetwork"
-else
-  # nostn / stn
-  export -f applyVPPnetwork
-  su vagrant -c "bash -c applyVPPnetwork"
+if [ "$backup_master" != "true" ]; then
+  stn_config=""
+  export stn_config
+  applySTNScenario
+
+  if [ "${dep_scenario}" == 'calico' ]; then
+    export -f applyCalicoNetwork
+    su vagrant -c "bash -c applyCalicoNetwork"
+  elif [ "${dep_scenario}" == 'calicovpp' ]; then
+    export stn_config="${stn_config} --set contiv.useNoOverlay=true --set contiv.ipamConfig.useExternalIPAM=true --set contiv.ipamConfig.podSubnetCIDR=10.10.0.0/16 --set vswitch.useNodeAffinity=true"
+    export -f applyVPPnetwork
+    su vagrant -c "bash -c applyVPPnetwork"
+    export -f applyCalicoVPPNetwork
+    su vagrant -c "bash -c applyCalicoVPPNetwork"
+  else
+    # nostn / stn
+    export -f applyVPPnetwork
+    su vagrant -c "bash -c applyVPPnetwork"
+  fi
 fi
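Once bootstrap_master.sh has run on all masters, the control-plane join can be sanity-checked from the first master (a hypothetical session; it assumes the first master keeps its usual `k8s-master` machine name, which this patch does not show):

```bash
vagrant ssh k8s-master
# every k8s-master* node should report Ready with the master role
kubectl get nodes
# one kube-apiserver static pod per control-plane node
kubectl -n kube-system get pods -l component=kube-apiserver -o wide
```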
diff --git a/vagrant/provision/provision_every_node.sh b/vagrant/provision/provision_every_node.sh
index ff4fd7c089..35d5321878 100755
--- a/vagrant/provision/provision_every_node.sh
+++ b/vagrant/provision/provision_every_node.sh
@@ -41,7 +41,7 @@ if [ "$1" = "install" ];then
   echo "Installing Kubernetes Components..."
   sudo -E apt-get install -qy kubelet=${k8s_version}-00 \
                               kubectl=${k8s_version}-00 \
-                              kubeadm=${k8s_version}-00
+                              kubeadm=${k8s_version}-00 kubernetes-cni=0.6.0-00
 
   echo "Installing Docker..."
   if [ "${node_os_release}" == "16.04" ] ; then
diff --git a/vagrant/provision/provision_gateway.sh b/vagrant/provision/provision_gateway.sh
index 979de7c6d5..0989bcb367 100755
--- a/vagrant/provision/provision_gateway.sh
+++ b/vagrant/provision/provision_gateway.sh
@@ -117,4 +117,59 @@ EOL
   sudo /vagrant/bird/run.sh
 fi
 
+if [ "${master_nodes}" -gt 1 ] ; then
+  echo "Installing HAProxy"
+  wget http://www.haproxy.org/download/1.9/src/haproxy-1.9.6.tar.gz
+  tar -xzf haproxy-1.9.6.tar.gz
+  cd haproxy-1.9.6
+  make TARGET=generic
+  make install
+  mkdir /etc/haproxy
+
+  cat > /etc/haproxy/haproxy.cfg <<EOF
+global
+    maxconn 4096
+
+defaults
+    mode tcp
+    timeout connect 5s
+    timeout client 50s
+    timeout server 50s
+
+frontend kube-apiserver
+    bind *:6443
+    default_backend apiservers
+
+backend apiservers
+EOF
+
+  counter=1
+  while ((counter <= "${master_nodes}"))
+  do
+    ip=$(( counter + 1 ))
+    echo "    server apiserver$counter 10.20.0.$ip:6443 check" >> /etc/haproxy/haproxy.cfg
+    ((counter++))
+  done
+
+  echo "Configuring the haproxy service"
+  sudo tee /etc/systemd/system/haproxy.service << EOF
+[Unit]
+Description=HA proxy for k8s
+[Service]
+ExecStart=/root/haproxy.sh
+[Install]
+WantedBy=default.target
+EOF
+
+  sudo tee /root/haproxy.sh << EOF
+#!/bin/bash
+haproxy -f /etc/haproxy/haproxy.cfg
+EOF
+
+  sudo chmod a+x /root/haproxy.sh
+
+  sudo systemctl start haproxy.service
+  sudo systemctl enable haproxy.service
+fi
+
 fi # end of ipv4 case
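Since backup masters join through the gateway's HAProxy at 10.20.0.100:6443, the load balancer itself is worth a smoke test (hypothetical commands; run the first two on the gateway VM, the last from any node on the mgmt network):

```bash
# the proxy service should be active and listening on 6443
systemctl status haproxy.service
ss -tlnp | grep 6443

# an apiserver should answer through the LB (k8s 1.13 serves /healthz anonymously)
curl -k https://10.20.0.100:6443/healthz
```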