Vagrant multimaster setup
Signed-off-by: Lukas Macko <lmacko@cisco.com>
lukasmacko committed Apr 10, 2019
1 parent a0d0486 commit c1daf20
Showing 6 changed files with 207 additions and 23 deletions.
1 change: 1 addition & 0 deletions vagrant/README.md
@@ -30,6 +30,7 @@ Environment variable | Description | Default
`K8S_MASTER_MEMORY` | Memory size for master node | `4096`
`K8S_NODE_MEMORY` | Memory size for worker nodes | `4096`
`K8S_NODES` | Number of worker nodes (excluding the master) | `1`
`K8S_MASTER_NODES` | Number of master nodes (Beware: the multi-master feature is experimental and doesn't work for all option combinations) | `1`
`K8S_VERSION` | Kubernetes version to be installed | `1.12.3`
`K8S_DEPLOYMENT_SCENARIO` | Contiv deployment scenario to be used: `nostn` (default) or [`stn`](../docs/setup/SINGLE_NIC_SETUP.md) or [`calico-vpp`](calico-vpp/README.md) | `nostn`
`K8S_DEPLOYMENT_ENV` | Contiv deployment environment to be used: `prod` (production) or `dev` (development) | `prod`
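For example, to bring up an experimental two-master cluster with one worker (a sketch, assuming the defaults above):

    cd vagrant
    K8S_MASTER_NODES=2 K8S_NODES=1 vagrant up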
48 changes: 47 additions & 1 deletion vagrant/Vagrantfile
@@ -54,6 +54,7 @@ node_os_release = ENV['K8S_NODE_OS_RELEASE'] || '16.04'
node_cpus = ENV['K8S_NODE_CPUS'].to_i == 0 ? 4 : ENV['K8S_NODE_CPUS'].to_i
node_memory = ENV['K8S_NODE_MEMORY'].to_i == 0 ? 4096 : ENV['K8S_NODE_MEMORY'].to_i
num_nodes = ENV['K8S_NODES'].to_i == 0 ? 0 : ENV['K8S_NODES'].to_i
master_nodes = ENV['K8S_MASTER_NODES'].to_i == 0 ? 0 : ENV['K8S_MASTER_NODES'].to_i
master_cpus = ENV['K8S_MASTER_CPUS'].to_i == 0 ? 4 : ENV['K8S_MASTER_CPUS'].to_i
master_memory = ENV['K8S_MASTER_MEMORY'].to_i == 0 ? 4096 : ENV['K8S_MASTER_MEMORY'].to_i
dep_env = ENV['K8S_DEPLOYMENT_ENV']
@@ -85,13 +86,17 @@ if (ip_version == 'ipv6' && base_ip == '10.20.0.')
base_prefix_len = '64'
end

if ( master_nodes > 1 )
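  # multi-master provisioning relies on kubeadm's --experimental-control-plane
  # join flow, so the Kubernetes version is pinned to 1.13.4 here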
k8s_version = '1.13.4'
end

env = { }
# Do not modify env variables after this step
VARIABLES = ['http_proxy', 'https_proxy', 'k8s_version', 'docker_version',
'node_os', 'node_os_release', 'node_cpus', 'node_memory', 'num_nodes', 'master_cpus',
'master_memory', 'dep_env', 'dep_scenario', 'base_ip', 'base_prefix_len', 'provider',
'image_tag', 'go_version', 'goland_version', 'helm_version', 'crd_disabled',
'ip_version', 'contiv_dir', 'helm_extra_opts', 'master_nodes']

VARIABLES.each do |x|
env[x] = eval x
@@ -136,6 +141,10 @@ VAGRANTFILE_API_VERSION = "2"

node_ips = num_nodes.times.collect { |n| base_ip + "#{n+10}" }
node_names = num_nodes.times.collect { |n| "k8s-worker#{n+1}" }
m_nodes = master_nodes - 1
master_ips = m_nodes.times.collect { |n| base_ip + "#{n+3}" }
master_names = m_nodes.times.collect { |n| "k8s-master#{n+1}" }
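# Worked example: with K8S_MASTER_NODES=3 and the default base_ip '10.20.0.',
# m_nodes is 2 and the backup masters become k8s-master1 (10.20.0.3) and
# k8s-master2 (10.20.0.4); the first master is provisioned separately.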

config.ssh.insert_key = false

if Vagrant.has_plugin?("vagrant-cachier")
@@ -172,6 +181,10 @@ VAGRANTFILE_API_VERSION = "2"
gw.vm.network :private_network, ip: "10.130.1.254", netmask: "255.255.254.0", virtualbox__intnet: "vpp", nic_type: "82540EM"
else
gw.vm.network :private_network, ip: "192.168.16.100", netmask: "255.255.255.0", virtualbox__intnet: "vpp", nic_type: "82540EM"
if master_nodes > 1
# for multi-master setups, additionally connect the GW to the mgmt network
gw.vm.network :private_network, ip: "#{base_ip}100", netmask: "255.255.255.0", virtualbox__intnet: "true"
end
end
else
gw.vm.network :private_network, ip: "192.168.16.100", virtualbox__intnet: "vpp", nic_type: "82540EM"
@@ -246,6 +259,39 @@ VAGRANTFILE_API_VERSION = "2"
end
end

# Configure backup master node(s)
# BEWARE: currently only the ipv4, nostn, crdDisabled setup with k8s 1.13.4 supports multi-master
m_nodes.times do |n|
node_name = master_names[n]
node_addr = master_ips[n]
config.vm.define node_name do |node|
node.vm.hostname = node_name
node.vm.network :private_network, auto_config: false, virtualbox__intnet: "vpp", nic_type: "82540EM"
node.vm.network :private_network, ip: node_addr, netmask: "#{base_prefix_len}", virtualbox__intnet: "true"

node.vm.provider "virtualbox" do |v|
v.customize ["modifyvm", :id, "--ioapic", "on"]
v.memory = master_memory
v.cpus = master_cpus
v.customize [ "modifyvm", :id, "--uartmode1", "disconnected" ]
end
node.vm.provision "shell" do |s|
s.path = "provision/provision_every_node.sh"
s.env = env
s.args = ["install"]
end
node.vm.provision "shell" do |s|
s.path = "provision/install_requirements_master.sh"
s.env = env
end
node.vm.provision "shell" do |s|
s.path = "provision/bootstrap_master.sh"
s.args = [node_name, node_addr, 'true']
s.env = env
end
end
end

# Configure VBox Worker node(s)
num_nodes.times do |n|
node_name = node_names[n]
1 change: 1 addition & 0 deletions vagrant/defaults
@@ -3,6 +3,7 @@ export K8S_NODE_OS_RELEASE=${K8S_NODE_OS_RELEASE:-16.04}
export K8S_NODE_CPUS=${K8S_NODE_CPUS:-4}
export K8S_NODE_MEMORY=${K8S_NODE_MEMORY:-4096}
export K8S_NODES=${K8S_NODES:-1}
export K8S_MASTER_NODES=${K8S_MASTER_NODES:-1}
export IP_VERSION=${IP_VERSION:-ipv4}

export K8S_MASTER_CPUS=${K8S_MASTER_CPUS:-4}
123 changes: 102 additions & 21 deletions vagrant/provision/bootstrap_master.sh
@@ -3,6 +3,9 @@ set -ex

echo Args passed: [[ $@ ]]

# backup_master is set to 'true' for every master except the first one
backup_master="$3"

# Pull images if not present
if [ -f /vagrant/images.tar ]; then
echo 'Found saved images at /vagrant/images.tar'
@@ -43,8 +46,10 @@ fi
# ---> Create token and export it with kube master IP <---
# --------------------------------------------------------

echo "Exporting Kube Master IP and Kubeadm Token..."
echo "export KUBEADM_TOKEN=$(kubeadm token generate)" >> /vagrant/config/init
if [ "$backup_master" != "true" ]; then
echo "Exporting Kube Master IP and Kubeadm Token..."
echo "export KUBEADM_TOKEN=$(kubeadm token generate)" >> /vagrant/config/init
fi

if [ "${dep_scenario}" != 'nostn' ] && [ "${ip_version}" != 'ipv6' ]; then
echo "export KUBE_MASTER_IP=$(hostname -I | cut -f2 -d' ')" >> /vagrant/config/init
@@ -89,9 +94,71 @@ if [ $split_k8s_version -gt 10 ] ; then
systemctl daemon-reload
systemctl restart kubelet
if [ "${dep_scenario}" != 'calico' ] && [ "${dep_scenario}" != 'calicovpp' ]; then
echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
if [ ${master_nodes} -gt 1 ]; then
cat > kubeadm.cfg <<EOF
---
apiVersion: kubeadm.k8s.io/v1beta1
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
token: $KUBEADM_TOKEN
usages:
- signing
- authentication
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: $KUBE_MASTER_IP
bindPort: 6443
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: k8s-master
taints:
- effect: NoSchedule
key: node-role.kubernetes.io/master
---
apiServer:
timeoutForControlPlane: 4m0s
apiVersion: kubeadm.k8s.io/v1beta1
certificatesDir: /etc/kubernetes/pki
clusterName: kubernetes
controlPlaneEndpoint: "10.20.0.100:6443"
controllerManager: {}
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/etcd
imageRepository: k8s.gcr.io
kind: ClusterConfiguration
kubernetesVersion: v$k8s_version
networking:
dnsDomain: cluster.local
podSubnet: "$pod_network_cidr"
serviceSubnet: $service_cidr
scheduler: {}
EOF
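# Note: controlPlaneEndpoint 10.20.0.100:6443 above is the HAproxy frontend that
# provision_gateway.sh brings up on the gateway VM; all masters are reached
# through it. The address is hard-coded, which is one reason the multi-master
# setup currently only works with the default ipv4 base IP.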
if [ "$backup_master" != "true" ]; then
echo "$(kubeadm init --config=kubeadm.cfg)" >> /vagrant/config/cert
else

# kubeadm join ignores the node-ip kubelet argument, so modify the default
# route to nudge kubelet into picking the IP on the mgmt network
ip route del `ip route | grep default`
ip route add default via 10.20.0.100

# copy certificates from the first master node
mkdir -p /etc/kubernetes/pki/etcd
cp /vagrant/certs/* /etc/kubernetes/pki/
mv /etc/kubernetes/pki/etcd-ca.crt /etc/kubernetes/pki/etcd/ca.crt
mv /etc/kubernetes/pki/etcd-ca.key /etc/kubernetes/pki/etcd/ca.key
hash=$(awk 'END {print $NF}' /vagrant/config/cert)
kubeadm join --token "${KUBEADM_TOKEN}" 10.20.0.100:6443 --discovery-token-ca-cert-hash "$hash" --experimental-control-plane
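# The hash scraped above is the --discovery-token-ca-cert-hash that kubeadm init
# printed on the first master. An alternative (per the upstream kubeadm docs)
# would be to derive it from the shared CA cert copied into /etc/kubernetes/pki:
#   openssl x509 -pubkey -in /etc/kubernetes/pki/ca.crt \
#     | openssl rsa -pubin -outform der 2>/dev/null \
#     | openssl dgst -sha256 -hex | sed 's/^.* //'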
fi
else
echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
fi
else
echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
echo "$(kubeadm init --token-ttl 0 --kubernetes-version=v"${k8s_version}" --pod-network-cidr="${pod_network_cidr}" --apiserver-advertise-address="${KUBE_MASTER_IP}" --service-cidr="${service_cidr}" --token="${KUBEADM_TOKEN}")" >> /vagrant/config/cert
fi
else
sed -i '4 a Environment="KUBELET_EXTRA_ARGS=--node-ip='"$KUBE_MASTER_IP"' --feature-gates HugePages=false"' /etc/systemd/system/kubelet.service.d/10-kubeadm.conf
@@ -106,6 +173,18 @@ sudo cp -i /etc/kubernetes/admin.conf /home/vagrant/.kube/config
sudo chown vagrant:vagrant -R /home/vagrant/.kube
sleep 2;

if [ "$backup_master" != "true" ]; then
# copy the certs into the shared folder so the backup masters can pick them up
rm -rf /vagrant/certs
mkdir /vagrant/certs
cp /etc/kubernetes/pki/etcd/ca.crt /vagrant/certs/etcd-ca.crt
cp /etc/kubernetes/pki/etcd/ca.key /vagrant/certs/etcd-ca.key
cp /etc/kubernetes/admin.conf /vagrant/certs/
cp /etc/kubernetes/pki/front-proxy-ca.* /vagrant/certs/
cp /etc/kubernetes/pki/ca.* /vagrant/certs/
cp /etc/kubernetes/pki/sa.* /vagrant/certs/
fi

applySTNScenario() {
gw="10.130.1.254";
if [ "${ip_version}" = "ipv6" ]; then
@@ -300,21 +379,23 @@ applyCalicoVPPNetwork() {
done
}

if [ "$backup_master" != "true" ]; then
stn_config=""
export stn_config
applySTNScenario

if [ "${dep_scenario}" == 'calico' ]; then
export -f applyCalicoNetwork
su vagrant -c "bash -c applyCalicoNetwork"
elif [ "${dep_scenario}" == 'calicovpp' ]; then
export stn_config="${stn_config} --set contiv.useNoOverlay=true --set contiv.ipamConfig.useExternalIPAM=true --set contiv.ipamConfig.podSubnetCIDR=10.10.0.0/16 --set vswitch.useNodeAffinity=true"
export -f applyVPPnetwork
su vagrant -c "bash -c applyVPPnetwork"
export -f applyCalicoVPPNetwork
su vagrant -c "bash -c applyCalicoVPPNetwork"
else
# nostn / stn
export -f applyVPPnetwork
su vagrant -c "bash -c applyVPPnetwork"
fi
fi
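Once provisioning finishes, a quick smoke test for the HA control plane (a sketch; node names assume the defaults used by this Vagrantfile):

    vagrant ssh k8s-master -c "kubectl get nodes -o wide"
    # expect k8s-master plus each backup master (k8s-master1, ...) to be listed
    # with the master role, alongside the workers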
2 changes: 1 addition & 1 deletion vagrant/provision/provision_every_node.sh
@@ -41,7 +41,7 @@ if [ "$1" = "install" ];then
echo "Installing Kubernetes Components..."
sudo -E apt-get install -qy kubelet=${k8s_version}-00 \
kubectl=${k8s_version}-00 \
kubeadm=${k8s_version}-00 kubernetes-cni=0.6.0-00

echo "Installing Docker..."
if [ "${node_os_release}" == "16.04" ] ; then
55 changes: 55 additions & 0 deletions vagrant/provision/provision_gateway.sh
@@ -117,4 +117,59 @@ EOL
sudo /vagrant/bird/run.sh
fi

if [ "${master_nodes}" -gt 1 ] ; then
echo "Installing HAproxy"
wget http://www.haproxy.org/download/1.9/src/haproxy-1.9.6.tar.gz
tar -xzf haproxy-1.9.6.tar.gz
cd haproxy-1.9.6
make TARGET=generic
make install
mkdir /etc/haproxy

cat > /etc/haproxy/haproxy.cfg <<EOF
frontend k8s-api
bind 10.20.0.100:6443
bind 127.0.0.1:6443
mode tcp
option tcplog
default_backend k8s-api
backend k8s-api
mode tcp
option tcplog
option tcp-check
balance roundrobin
default-server inter 10s downinter 5s rise 2 fall 2 slowstart 60s maxconn 250 maxqueue 256 weight 100
EOF

counter=1
until ((counter > "${master_nodes}"))
do
ip=$(( counter + 1 ))
echo " server apiserver$counter 10.20.0.$ip:6443 check">> /etc/haproxy/haproxy.cfg
((counter++))
done
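# e.g. with master_nodes=2 the loop above appends:
#   server apiserver1 10.20.0.2:6443 check
#   server apiserver2 10.20.0.3:6443 check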

echo "Configuring haproxy service"
sudo tee /etc/systemd/system/haproxy.service << EOF
[Unit]
Description=HA proxy for k8s
[Service]
ExecStart=/root/haproxy.sh
[Install]
WantedBy=default.target
EOF

sudo tee /root/haproxy.sh << EOF
#!/bin/bash
haproxy -f /etc/haproxy/haproxy.cfg
EOF

sudo chmod a+x /root/haproxy.sh

sudo systemctl start haproxy.service
sudo systemctl enable haproxy.service

fi

fi # end of ipv4 case
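To confirm the load balancer came up, a quick check inside the gateway VM (a sketch):

    sudo systemctl status haproxy.service --no-pager
    sudo ss -tlnp | grep 6443    # frontend should listen on 10.20.0.100 and 127.0.0.1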
