diff --git a/docs/vsphere.md b/docs/vsphere.md index 18c28cdfc..27fdd5c47 100644 --- a/docs/vsphere.md +++ b/docs/vsphere.md @@ -1,7 +1,9 @@ -# VMWware vSphere +# VMware vSphere To use the machine-controller to create machines on VMWare vsphere, you must first -create a template. Currently Ubuntu and Container Linux are supported. +create a template. + +Ubuntu & CoreOS: 1. Go into the VSphere WebUI, select your datacenter, right click onto it and choose "Deploy OVF Template" 2. Fill in the "URL" field with the appropriate url: @@ -12,3 +14,10 @@ create a template. Currently Ubuntu and Container Linux are supported. 5. Select the same network you want to use for your machines 6. Leave everyhting in the "Customize Template" and "Ready to complete" dialog as it is 7. Wait until the VM got fully imported and the "Snapshots" => "Create Snapshot" button is not grayed out anymore + +CentOS: + +1. Download the CentOS cloud image to your local workstation from here: `https://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud.qcow2` +1. Convert it to vmdk: `qemu-img convert -f qcow2 -O vmdk CentOS-7-x86_64-GenericCloud.qcow2 CentOS-7-x86_64-GenericCloud.vmdk` +1. Upload it to a Datastore of your vSphere installation +1. 
Create a new virtual machine that uses the uploaded vmdk as rootdisk diff --git a/pkg/userdata/centos/testdata/kubelet-v1.10-aws.golden b/pkg/userdata/centos/testdata/kubelet-v1.10-aws.golden index a5bfa5566..0bc6e136e 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.10-aws.golden +++ b/pkg/userdata/centos/testdata/kubelet-v1.10-aws.golden @@ -51,10 +51,14 @@ write_files: setenforce 0 || true - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system + # The normal way of setting it via cloud-init is broken: + # https://bugs.launchpad.net/cloud-init/+bug/1662542 + hostnamectl set-hostname node1 + yum install -y docker-1.13.1 \ ebtables \ ethtool \ @@ -87,8 +91,6 @@ write_files: curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh chmod +x /opt/bin/health-monitor.sh fi - - systemctl enable --now docker systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.11-aws.golden b/pkg/userdata/centos/testdata/kubelet-v1.11-aws.golden index 47f0f564a..72e512608 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.11-aws.golden +++ b/pkg/userdata/centos/testdata/kubelet-v1.11-aws.golden @@ -51,10 +51,14 @@ write_files: setenforce 0 || true - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system + # The normal way of setting it via cloud-init is broken: + # https://bugs.launchpad.net/cloud-init/+bug/1662542 + hostnamectl set-hostname node1 + yum install -y docker-1.13.1 \ ebtables \ ethtool \ @@ -87,8 +91,6 @@ write_files: curl -Lfo 
/opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh chmod +x /opt/bin/health-monitor.sh fi - - systemctl enable --now docker systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.12-aws.golden b/pkg/userdata/centos/testdata/kubelet-v1.12-aws.golden index b739f1d85..b3fb93a39 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.12-aws.golden +++ b/pkg/userdata/centos/testdata/kubelet-v1.12-aws.golden @@ -51,10 +51,14 @@ write_files: setenforce 0 || true - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system + # The normal way of setting it via cloud-init is broken: + # https://bugs.launchpad.net/cloud-init/+bug/1662542 + hostnamectl set-hostname node1 + yum install -y docker-1.13.1 \ ebtables \ ethtool \ @@ -87,8 +91,6 @@ write_files: curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh chmod +x /opt/bin/health-monitor.sh fi - - systemctl enable --now docker systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.12-vsphere.golden b/pkg/userdata/centos/testdata/kubelet-v1.12-vsphere.golden new file mode 100644 index 000000000..8e4e2ad49 --- /dev/null +++ b/pkg/userdata/centos/testdata/kubelet-v1.12-vsphere.golden @@ -0,0 +1,259 @@ +#cloud-config +hostname: node1 + +ssh_pwauth: no + +write_files: +- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + content: | + [Journal] + SystemMaxUse=5G + + +- path: "/etc/modules-load.d/k8s.conf" + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + 
ip_vs_sh + nf_conntrack_ipv4 + + +- path: "/etc/sysctl.d/k8s.conf" + content: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + + +- path: /etc/sysconfig/selinux + content: | + # This file controls the state of SELinux on the system. + # SELINUX= can take one of these three values: + # enforcing - SELinux security policy is enforced. + # permissive - SELinux prints warnings instead of enforcing. + # disabled - No SELinux policy is loaded. + SELINUX=permissive + # SELINUXTYPE= can take one of three two values: + # targeted - Targeted processes are protected, + # minimum - Modification of targeted policy. Only selected processes are protected. + # mls - Multi Level Security protection. + SELINUXTYPE=targeted + +- path: "/opt/bin/setup" + permissions: "0777" + content: | + #!/bin/bash + set -xeuo pipefail + + setenforce 0 || true + + # As we added some modules and don't want to reboot, restart the service + systemctl restart systemd-modules-load.service + sysctl --system + + # The normal way of setting it via cloud-init is broken: + # https://bugs.launchpad.net/cloud-init/+bug/1662542 + hostnamectl set-hostname node1 + + yum install -y docker-1.13.1 \ + ebtables \ + ethtool \ + nfs-utils \ + bash-completion \ + sudo \ + socat \ + wget \ + curl \ + ipvsadm \ + open-vm-tools + + #setup some common directories + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + + # cni + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz | tar -xvzC /opt/cni/bin -f - + fi + # kubelet + if [ ! 
-f /opt/bin/kubelet ]; then + curl -Lfo /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.12.0/bin/linux/amd64/kubelet + chmod +x /opt/bin/kubelet + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi + + systemctl enable --now vmtoolsd.service + systemctl enable --now docker + systemctl enable --now kubelet + systemctl enable --now --no-block kubelet-healthcheck.service + systemctl enable --now --no-block docker-healthcheck.service + +- path: "/opt/bin/supervise.sh" + permissions: "0755" + content: | + #!/bin/bash + set -xeuo pipefail + while ! "$@"; do + sleep 1 + done + +- path: "/etc/systemd/system/kubelet.service" + content: | + [Unit] + After=docker.service + Requires=docker.service + + Description=kubelet: The Kubernetes Node Agent + Documentation=https://kubernetes.io/docs/home/ + + [Service] + Restart=always + StartLimitInterval=0 + RestartSec=10 + + Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" + + ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/etc/kubernetes/kubelet.conf \ + --pod-manifest-path=/etc/kubernetes/manifests \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --authorization-mode=Webhook \ + --client-ca-file=/etc/kubernetes/pki/ca.crt \ + --rotate-certificates=true \ + --cert-dir=/etc/kubernetes/pki \ + --authentication-token-webhook=true \ + --cloud-provider=vsphere \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --read-only-port=0 \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --anonymous-auth=false \ + --protect-kernel-defaults=true \ + 
--cluster-dns= \ + --cluster-domain=cluster.local + + [Install] + WantedBy=multi-user.target + +- path: "/etc/systemd/system/kubelet.service.d/extras.conf" + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--cgroup-driver=systemd" + +- path: "/etc/kubernetes/cloud-config" + content: | + {config:true} + +- path: "/etc/kubernetes/bootstrap-kubelet.conf" + content: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFN
QkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + +- path: "/etc/kubernetes/pki/ca.crt" + content: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + 
U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + +- path: "/etc/systemd/system/setup.service" + permissions: "0644" + content: | + [Install] + WantedBy=multi-user.target + + [Unit] + Requires=network-online.target + After=network-online.target + + [Service] + Type=oneshot + RemainAfterExit=true + ExecStart=/opt/bin/supervise.sh /opt/bin/setup + +- path: "/etc/profile.d/opt-bin-path.sh" + permissions: "0644" + content: | + export PATH="/opt/bin:$PATH" + +- path: /etc/systemd/system/kubelet-healthcheck.service + permissions: "0644" + content: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + +- path: /etc/systemd/system/docker-healthcheck.service + permissions: "0644" + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + +runcmd: +- systemctl enable --now setup.service diff --git a/pkg/userdata/centos/testdata/kubelet-v1.9-aws.golden b/pkg/userdata/centos/testdata/kubelet-v1.9-aws.golden index 118b5435c..bfaa3c832 100644 --- a/pkg/userdata/centos/testdata/kubelet-v1.9-aws.golden +++ b/pkg/userdata/centos/testdata/kubelet-v1.9-aws.golden @@ -51,10 +51,14 @@ write_files: setenforce 0 || true - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system + # The normal way of setting it via cloud-init is broken: + # 
https://bugs.launchpad.net/cloud-init/+bug/1662542 + hostnamectl set-hostname node1 + yum install -y docker-1.13.1 \ ebtables \ ethtool \ @@ -87,8 +91,6 @@ write_files: curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh chmod +x /opt/bin/health-monitor.sh fi - - systemctl enable --now docker systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service diff --git a/pkg/userdata/centos/userdata.go b/pkg/userdata/centos/userdata.go index 20ffef00d..d2c011edd 100644 --- a/pkg/userdata/centos/userdata.go +++ b/pkg/userdata/centos/userdata.go @@ -177,10 +177,14 @@ write_files: setenforce 0 || true - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system + # The normal way of setting it via cloud-init is broken: + # https://bugs.launchpad.net/cloud-init/+bug/1662542 + hostnamectl set-hostname {{ .MachineSpec.Name }} + yum install -y docker-1.13.1 \ ebtables \ ethtool \ @@ -190,10 +194,14 @@ write_files: socat \ wget \ curl \ - ipvsadm + ipvsadm{{ if eq .CloudProvider "vsphere" }} \ + open-vm-tools{{ end }} {{ downloadBinariesScript .KubeletVersion true | indent 4 }} + {{- if eq .CloudProvider "vsphere" }} + systemctl enable --now vmtoolsd.service + {{ end -}} systemctl enable --now docker systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service diff --git a/pkg/userdata/centos/userdata_test.go b/pkg/userdata/centos/userdata_test.go index a2e79e5b7..d42e92756 100644 --- a/pkg/userdata/centos/userdata_test.go +++ b/pkg/userdata/centos/userdata_test.go @@ -58,9 +58,10 @@ func TestUserDataGeneration(t *testing.T) { t.Parallel() tests := []struct { - name string - spec clusterv1alpha1.MachineSpec - clusterDNSIPs []net.IP + name 
string + spec clusterv1alpha1.MachineSpec + clusterDNSIPs []net.IP + cloudProviderName *string }{ { name: "kubelet-v1.9-aws", @@ -98,9 +99,19 @@ func TestUserDataGeneration(t *testing.T) { }, }, }, + { + name: "kubelet-v1.12-vsphere", + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.12.0", + }, + }, + cloudProviderName: stringPtr("vsphere"), + }, } - cloudProvider := &fakeCloudConfigProvider{name: "aws", config: "{aws-config:true}", err: nil} + defaultCloudProvider := &fakeCloudConfigProvider{name: "aws", config: "{aws-config:true}", err: nil} kubeconfig := &clientcmdapi.Config{Clusters: map[string]*clientcmdapi.Cluster{ "": &clientcmdapi.Cluster{Server: "https://server:443", CertificateAuthorityData: []byte(pemCertificate)}}, AuthInfos: map[string]*clientcmdapi.AuthInfo{"": &clientcmdapi.AuthInfo{Token: "my-token"}}} @@ -110,6 +121,12 @@ func TestUserDataGeneration(t *testing.T) { emtpyProviderConfig := clusterv1alpha1.ProviderConfig{ Value: &runtime.RawExtension{}} test.spec.ProviderConfig = emtpyProviderConfig + var cloudProvider *fakeCloudConfigProvider + if test.cloudProviderName != nil { + cloudProvider = &fakeCloudConfigProvider{name: *test.cloudProviderName, config: "{config:true}", err: nil} + } else { + cloudProvider = defaultCloudProvider + } userdata, err := provider.UserData(test.spec, kubeconfig, cloudProvider, test.clusterDNSIPs) if err != nil { @@ -119,3 +136,7 @@ func TestUserDataGeneration(t *testing.T) { testhelper.CompareOutput(t, test.name, userdata, *update) } } + +func stringPtr(a string) *string { + return &a +} diff --git a/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.golden b/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.golden index b424ef78f..902db46ef 100644 --- a/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.golden +++ b/pkg/userdata/ubuntu/testdata/dist-upgrade-on-boot.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set 
-xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -173,7 +172,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.golden b/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.golden index 69439fc7b..cdbb8ee80 100644 --- a/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.golden +++ b/pkg/userdata/ubuntu/testdata/kubelet-version-without-v-prefix.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/multiple-dns-servers.golden b/pkg/userdata/ubuntu/testdata/multiple-dns-servers.golden index fa4ce22b0..4a5d7780e 100644 --- 
a/pkg/userdata/ubuntu/testdata/multiple-dns-servers.golden +++ b/pkg/userdata/ubuntu/testdata/multiple-dns-servers.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.golden b/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.golden index 41578f254..986f1c387 100644 --- a/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.golden +++ b/pkg/userdata/ubuntu/testdata/multiple-ssh-keys.golden @@ -110,10 +110,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -140,7 +140,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -174,7 +173,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git 
a/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.golden b/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.golden index 33ed8819d..9bb5dc95b 100644 --- a/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.golden +++ b/pkg/userdata/ubuntu/testdata/openstack-overwrite-cloud-config.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/openstack.golden b/pkg/userdata/ubuntu/testdata/openstack.golden index 747dc26eb..3b88048e8 100644 --- a/pkg/userdata/ubuntu/testdata/openstack.golden +++ b/pkg/userdata/ubuntu/testdata/openstack.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet 
systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/version-1.10.10.golden b/pkg/userdata/ubuntu/testdata/version-1.10.10.golden index fdfe9827e..d0a67e430 100644 --- a/pkg/userdata/ubuntu/testdata/version-1.10.10.golden +++ b/pkg/userdata/ubuntu/testdata/version-1.10.10.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/version-1.11.3.golden b/pkg/userdata/ubuntu/testdata/version-1.11.3.golden index 69439fc7b..cdbb8ee80 100644 --- a/pkg/userdata/ubuntu/testdata/version-1.11.3.golden +++ b/pkg/userdata/ubuntu/testdata/version-1.11.3.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable 
--now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/version-1.12.1.golden b/pkg/userdata/ubuntu/testdata/version-1.12.1.golden index e8395348e..d2729a8be 100644 --- a/pkg/userdata/ubuntu/testdata/version-1.12.1.golden +++ b/pkg/userdata/ubuntu/testdata/version-1.12.1.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/version-1.9.10.golden b/pkg/userdata/ubuntu/testdata/version-1.9.10.golden index 319dcf436..e5b4550fd 100644 --- a/pkg/userdata/ubuntu/testdata/version-1.9.10.golden +++ b/pkg/userdata/ubuntu/testdata/version-1.9.10.golden @@ -108,10 +108,10 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update @@ -138,7 +138,6 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ ipvsadm # If something failed during package installation but docker got installed, 
we need to put it on hold @@ -172,7 +171,7 @@ write_files: systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/testdata/vsphere.golden b/pkg/userdata/ubuntu/testdata/vsphere.golden new file mode 100644 index 000000000..348f8a06c --- /dev/null +++ b/pkg/userdata/ubuntu/testdata/vsphere.golden @@ -0,0 +1,346 @@ +#cloud-config +hostname: node1 + +ssh_pwauth: no + +ssh_authorized_keys: +- "ssh-rsa AAABBB" + +write_files: +- path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + content: | + [Journal] + SystemMaxUse=5G + + +- path: "/etc/modules-load.d/k8s.conf" + content: | + ip_vs + ip_vs_rr + ip_vs_wrr + ip_vs_sh + nf_conntrack_ipv4 + + +- path: "/etc/sysctl.d/k8s.conf" + content: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + + +- path: "/etc/apt/sources.list.d/docker.list" + permissions: "0644" + content: deb [arch=amd64] https://download.docker.com/linux/ubuntu bionic stable + +- path: "/opt/docker.asc" + permissions: "0400" + content: | + -----BEGIN PGP PUBLIC KEY BLOCK----- + + mQINBFit2ioBEADhWpZ8/wvZ6hUTiXOwQHXMAlaFHcPH9hAtr4F1y2+OYdbtMuth + lqqwp028AqyY+PRfVMtSYMbjuQuu5byyKR01BbqYhuS3jtqQmljZ/bJvXqnmiVXh + 38UuLa+z077PxyxQhu5BbqntTPQMfiyqEiU+BKbq2WmANUKQf+1AmZY/IruOXbnq + L4C1+gJ8vfmXQt99npCaxEjaNRVYfOS8QcixNzHUYnb6emjlANyEVlZzeqo7XKl7 + UrwV5inawTSzWNvtjEjj4nJL8NsLwscpLPQUhTQ+7BbQXAwAmeHCUTQIvvWXqw0N + cmhh4HgeQscQHYgOJjjDVfoY5MucvglbIgCqfzAHW9jxmRL4qbMZj+b1XoePEtht + ku4bIQN1X5P07fNWzlgaRL5Z4POXDDZTlIQ/El58j9kp4bnWRCJW0lya+f8ocodo + vZZ+Doi+fy4D5ZGrL4XEcIQP/Lv5uFyf+kQtl/94VFYVJOleAv8W92KdgDkhTcTD + G7c0tIkVEKNUq48b3aQ64NOZQW7fVjfoKwEZdOqPE72Pa45jrZzvUFxSpdiNk2tZ + 
XYukHjlxxEgBdC/J3cMMNRE1F4NCA3ApfV1Y7/hTeOnmDuDYwr9/obA8t016Yljj + q5rdkywPf4JF8mXUW5eCN1vAFHxeg9ZWemhBtQmGxXnw9M+z6hWwc6ahmwARAQAB + tCtEb2NrZXIgUmVsZWFzZSAoQ0UgZGViKSA8ZG9ja2VyQGRvY2tlci5jb20+iQI3 + BBMBCgAhBQJYrefAAhsvBQsJCAcDBRUKCQgLBRYCAwEAAh4BAheAAAoJEI2BgDwO + v82IsskP/iQZo68flDQmNvn8X5XTd6RRaUH33kXYXquT6NkHJciS7E2gTJmqvMqd + tI4mNYHCSEYxI5qrcYV5YqX9P6+Ko+vozo4nseUQLPH/ATQ4qL0Zok+1jkag3Lgk + jonyUf9bwtWxFp05HC3GMHPhhcUSexCxQLQvnFWXD2sWLKivHp2fT8QbRGeZ+d3m + 6fqcd5Fu7pxsqm0EUDK5NL+nPIgYhN+auTrhgzhK1CShfGccM/wfRlei9Utz6p9P + XRKIlWnXtT4qNGZNTN0tR+NLG/6Bqd8OYBaFAUcue/w1VW6JQ2VGYZHnZu9S8LMc + FYBa5Ig9PxwGQOgq6RDKDbV+PqTQT5EFMeR1mrjckk4DQJjbxeMZbiNMG5kGECA8 + g383P3elhn03WGbEEa4MNc3Z4+7c236QI3xWJfNPdUbXRaAwhy/6rTSFbzwKB0Jm + ebwzQfwjQY6f55MiI/RqDCyuPj3r3jyVRkK86pQKBAJwFHyqj9KaKXMZjfVnowLh + 9svIGfNbGHpucATqREvUHuQbNnqkCx8VVhtYkhDb9fEP2xBu5VvHbR+3nfVhMut5 + G34Ct5RS7Jt6LIfFdtcn8CaSas/l1HbiGeRgc70X/9aYx/V/CEJv0lIe8gP6uDoW + FPIZ7d6vH+Vro6xuWEGiuMaiznap2KhZmpkgfupyFmplh0s6knymuQINBFit2ioB + EADneL9S9m4vhU3blaRjVUUyJ7b/qTjcSylvCH5XUE6R2k+ckEZjfAMZPLpO+/tF + M2JIJMD4SifKuS3xck9KtZGCufGmcwiLQRzeHF7vJUKrLD5RTkNi23ydvWZgPjtx + Q+DTT1Zcn7BrQFY6FgnRoUVIxwtdw1bMY/89rsFgS5wwuMESd3Q2RYgb7EOFOpnu + w6da7WakWf4IhnF5nsNYGDVaIHzpiqCl+uTbf1epCjrOlIzkZ3Z3Yk5CM/TiFzPk + z2lLz89cpD8U+NtCsfagWWfjd2U3jDapgH+7nQnCEWpROtzaKHG6lA3pXdix5zG8 + eRc6/0IbUSWvfjKxLLPfNeCS2pCL3IeEI5nothEEYdQH6szpLog79xB9dVnJyKJb + VfxXnseoYqVrRz2VVbUI5Blwm6B40E3eGVfUQWiux54DspyVMMk41Mx7QJ3iynIa + 1N4ZAqVMAEruyXTRTxc9XW0tYhDMA/1GYvz0EmFpm8LzTHA6sFVtPm/ZlNCX6P1X + zJwrv7DSQKD6GGlBQUX+OeEJ8tTkkf8QTJSPUdh8P8YxDFS5EOGAvhhpMBYD42kQ + pqXjEC+XcycTvGI7impgv9PDY1RCC1zkBjKPa120rNhv/hkVk/YhuGoajoHyy4h7 + ZQopdcMtpN2dgmhEegny9JCSwxfQmQ0zK0g7m6SHiKMwjwARAQABiQQ+BBgBCAAJ + BQJYrdoqAhsCAikJEI2BgDwOv82IwV0gBBkBCAAGBQJYrdoqAAoJEH6gqcPyc/zY + 1WAP/2wJ+R0gE6qsce3rjaIz58PJmc8goKrir5hnElWhPgbq7cYIsW5qiFyLhkdp + YcMmhD9mRiPpQn6Ya2w3e3B8zfIVKipbMBnke/ytZ9M7qHmDCcjoiSmwEXN3wKYI + 
mD9VHONsl/CG1rU9Isw1jtB5g1YxuBA7M/m36XN6x2u+NtNMDB9P56yc4gfsZVES + KA9v+yY2/l45L8d/WUkUi0YXomn6hyBGI7JrBLq0CX37GEYP6O9rrKipfz73XfO7 + JIGzOKZlljb/D9RX/g7nRbCn+3EtH7xnk+TK/50euEKw8SMUg147sJTcpQmv6UzZ + cM4JgL0HbHVCojV4C/plELwMddALOFeYQzTif6sMRPf+3DSj8frbInjChC3yOLy0 + 6br92KFom17EIj2CAcoeq7UPhi2oouYBwPxh5ytdehJkoo+sN7RIWua6P2WSmon5 + U888cSylXC0+ADFdgLX9K2zrDVYUG1vo8CX0vzxFBaHwN6Px26fhIT1/hYUHQR1z + VfNDcyQmXqkOnZvvoMfz/Q0s9BhFJ/zU6AgQbIZE/hm1spsfgvtsD1frZfygXJ9f + irP+MSAI80xHSf91qSRZOj4Pl3ZJNbq4yYxv0b1pkMqeGdjdCYhLU+LZ4wbQmpCk + SVe2prlLureigXtmZfkqevRz7FrIZiu9ky8wnCAPwC7/zmS18rgP/17bOtL4/iIz + QhxAAoAMWVrGyJivSkjhSGx1uCojsWfsTAm11P7jsruIL61ZzMUVE2aM3Pmj5G+W + 9AcZ58Em+1WsVnAXdUR//bMmhyr8wL/G1YO1V3JEJTRdxsSxdYa4deGBBY/Adpsw + 24jxhOJR+lsJpqIUeb999+R8euDhRHG9eFO7DRu6weatUJ6suupoDTRWtr/4yGqe + dKxV3qQhNLSnaAzqW/1nA3iUB4k7kCaKZxhdhDbClf9P37qaRW467BLCVO/coL3y + Vm50dwdrNtKpMBh3ZpbB1uJvgi9mXtyBOMJ3v8RZeDzFiG8HdCtg9RvIt/AIFoHR + H3S+U79NT6i0KPzLImDfs8T7RlpyuMc4Ufs8ggyg9v3Ae6cN3eQyxcK3w0cbBwsh + /nQNfsA6uu+9H7NhbehBMhYnpNZyrHzCmzyXkauwRAqoCbGCNykTRwsur9gS41TQ + M8ssD1jFheOJf3hODnkKU+HKjvMROl1DK7zdmLdNzA1cvtZH/nCC9KPj1z8QC47S + xx+dTZSx4ONAhwbS/LN3PoKtn8LPjY9NP9uDWI+TWYquS2U+KHDrBDlsgozDbs/O + jCxcpDzNmXpWQHEtHU7649OXHP7UeNST1mCUCH5qdank0V1iejF6/CfTFU4MfcrG + YT90qFF93M3v01BbxP+EIY2/9tiIPbrd + =0YYh + -----END PGP PUBLIC KEY BLOCK----- + +- path: "/opt/bin/setup" + permissions: "0755" + content: | + #!/bin/bash + set -xeuo pipefail + + # As we added some modules and don't want to reboot, restart the service + systemctl restart systemd-modules-load.service + sysctl --system + + apt-key add /opt/docker.asc + apt-get update + + # Make sure we always disable swap - Otherwise the kubelet won't start'. 
+ systemctl mask swap.target + swapoff -a + export CR_PKG='docker.io=17.12.1-0ubuntu1' + + DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" install -y \ + curl \ + ca-certificates \ + ceph-common \ + cifs-utils \ + conntrack \ + e2fsprogs \ + ebtables \ + ethtool \ + glusterfs-client \ + iptables \ + jq \ + kmod \ + openssh-client \ + nfs-common \ + socat \ + util-linux \ + ${CR_PKG} \ + ipvsadm \ + open-vm-tools + + # If something failed during package installation but docker got installed, we need to put it on hold + apt-mark hold docker.io || true + apt-mark hold docker-ce || true + if [[ -e /var/run/reboot-required ]]; then + reboot + fi + + #setup some common directories + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + + # cni + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.6.0/cni-plugins-amd64-v0.6.0.tgz | tar -xvzC /opt/cni/bin -f - + fi + # kubelet + if [ ! -f /opt/bin/kubelet ]; then + curl -Lfo /opt/bin/kubelet https://storage.googleapis.com/kubernetes-release/release/v1.11.3/bin/linux/amd64/kubelet + chmod +x /opt/bin/kubelet + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi + + + systemctl enable --now docker + systemctl enable --now kubelet + systemctl enable --now --no-block kubelet-healthcheck.service + systemctl enable --now --no-block docker-healthcheck.service + +- path: "/opt/bin/supervise.sh" + permissions: "0755" + content: | + #!/bin/bash + set -xeuo pipefail + while ! 
"$@"; do + sleep 1 + done + +- path: "/etc/systemd/system/kubelet.service" + content: | + [Unit] + After=docker.service + Requires=docker.service + + Description=kubelet: The Kubernetes Node Agent + Documentation=https://kubernetes.io/docs/home/ + + [Service] + Restart=always + StartLimitInterval=0 + RestartSec=10 + + Environment="PATH=/opt/bin:/bin:/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin/" + + ExecStart=/opt/bin/kubelet $KUBELET_EXTRA_ARGS \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/etc/kubernetes/kubelet.conf \ + --pod-manifest-path=/etc/kubernetes/manifests \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --authorization-mode=Webhook \ + --client-ca-file=/etc/kubernetes/pki/ca.crt \ + --cadvisor-port=0 \ + --rotate-certificates=true \ + --cert-dir=/etc/kubernetes/pki \ + --authentication-token-webhook=true \ + --cloud-provider=vsphere \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --read-only-port=0 \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --anonymous-auth=false \ + --protect-kernel-defaults=true \ + --cluster-dns=10.10.10.10 \ + --cluster-domain=cluster.local + + [Install] + WantedBy=multi-user.target + +- path: "/etc/systemd/system/kubelet.service.d/extras.conf" + content: | + [Service] + Environment="KUBELET_EXTRA_ARGS=--resolv-conf=/run/systemd/resolve/resolv.conf" + +- path: "/etc/kubernetes/cloud-config" + content: | + custom + cloud + config + +- path: "/etc/kubernetes/bootstrap-kubelet.conf" + content: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpF
NGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + +- path: "/etc/kubernetes/pki/ca.crt" + content: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + +- path: "/etc/systemd/system/setup.service" + permissions: "0644" + 
content: | + [Install] + WantedBy=multi-user.target + + [Unit] + Requires=network-online.target + After=network-online.target + + [Service] + Type=oneshot + RemainAfterExit=true + ExecStart=/opt/bin/supervise.sh /opt/bin/setup + +- path: "/etc/profile.d/opt-bin-path.sh" + permissions: "0644" + content: | + export PATH="/opt/bin:$PATH" + +- path: /etc/systemd/system/docker.service.d/10-storage.conf + permissions: "0644" + content: | + [Service] + ExecStart= + ExecStart=/usr/bin/dockerd -H fd:// --storage-driver=overlay2 + +- path: /etc/systemd/system/kubelet-healthcheck.service + permissions: "0644" + content: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + +- path: /etc/systemd/system/docker-healthcheck.service + permissions: "0644" + content: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + +runcmd: +- systemctl enable --now setup.service diff --git a/pkg/userdata/ubuntu/userdata.go b/pkg/userdata/ubuntu/userdata.go index ed2e95956..b09d6e9d5 100644 --- a/pkg/userdata/ubuntu/userdata.go +++ b/pkg/userdata/ubuntu/userdata.go @@ -226,17 +226,17 @@ write_files: #!/bin/bash set -xeuo pipefail - # As we added some modules and don't want to reboot, restart the service + # As we added some modules and don't want to reboot, restart the service systemctl restart systemd-modules-load.service sysctl --system - + apt-key add /opt/docker.asc apt-get update # Make sure we always disable swap - Otherwise the kubelet won't start'. 
systemctl mask swap.target swapoff -a - + {{- if semverCompare "<1.12.0" .KubeletVersion }} export CR_PKG='docker.io=17.12.1-0ubuntu1' {{- else }} @@ -261,12 +261,12 @@ write_files: socat \ util-linux \ ${CR_PKG} \ - open-vm-tools \ - ipvsadm + ipvsadm{{ if eq .CloudProvider "vsphere" }} \ + open-vm-tools{{ end }} # If something failed during package installation but docker got installed, we need to put it on hold apt-mark hold docker.io || true - apt-mark hold docker-ce || true + apt-mark hold docker-ce || true {{- if .OSConfig.DistUpgradeOnBoot }} DEBIAN_FRONTEND=noninteractive apt-get -o Dpkg::Options::="--force-confdef" -o Dpkg::Options::="--force-confold" dist-upgrade -y @@ -278,7 +278,7 @@ write_files: {{ downloadBinariesScript .KubeletVersion true | indent 4 }} systemctl enable --now docker - systemctl enable --now kubelet + systemctl enable --now kubelet systemctl enable --now --no-block kubelet-healthcheck.service systemctl enable --now --no-block docker-healthcheck.service diff --git a/pkg/userdata/ubuntu/userdata_test.go b/pkg/userdata/ubuntu/userdata_test.go index a98abc6b6..58ae32f10 100644 --- a/pkg/userdata/ubuntu/userdata_test.go +++ b/pkg/userdata/ubuntu/userdata_test.go @@ -218,6 +218,24 @@ func TestProvider_UserData(t *testing.T) { kubernetesCACert: "CACert", osConfig: &Config{DistUpgradeOnBoot: false}, }, + { + name: "vsphere", + providerConfig: &providerconfig.Config{ + CloudProvider: "vsphere", + SSHPublicKeys: []string{"ssh-rsa AAABBB"}, + OverwriteCloudConfig: stringPtr("custom\ncloud\nconfig"), + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "v1.11.3", + }, + }, + ccProvider: &fakeCloudConfigProvider{name: "vsphere", config: "{vsphere-config:true}", err: nil}, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + kubernetesCACert: "CACert", + osConfig: &Config{DistUpgradeOnBoot: false}, + }, }...) 
for _, test := range tests { diff --git a/test/e2e/provisioning/all_e2e_test.go b/test/e2e/provisioning/all_e2e_test.go index 7067b3104..3d15babfa 100644 --- a/test/e2e/provisioning/all_e2e_test.go +++ b/test/e2e/provisioning/all_e2e_test.go @@ -146,8 +146,7 @@ func TestVsphereProvisioningE2E(t *testing.T) { t.Fatal("unable to run the test suite, VSPHERE_E2E_PASSWORD, VSPHERE_E2E_USERNAME, VSPHERE_E2E_CLUSTER or VSPHERE_E2E_ADDRESS environment variables cannot be empty") } - // Vsphere only supports Ubuntu and CoreOS - excludeSelector := &scenarioSelector{osName: []string{"centos"}} + excludeSelector := &scenarioSelector{} // act params := []string{fmt.Sprintf("<< VSPHERE_PASSWORD >>=%s", vsPassword),