diff --git a/Makefile b/Makefile index 3450cf31e..b6106bcb1 100644 --- a/Makefile +++ b/Makefile @@ -33,7 +33,7 @@ IMAGE_TAG = \ $(shell echo $$(git rev-parse HEAD && if [[ -n $$(git status --porcelain) ]]; then echo '-dirty'; fi)|tr -d ' ') IMAGE_NAME = $(REGISTRY)/$(REGISTRY_NAMESPACE)/machine-controller:$(IMAGE_TAG) -OS = centos coreos ubuntu sles rhel +OS = centos coreos ubuntu sles rhel flatcar USERDATA_BIN = $(patsubst %, machine-controller-userdata-%, $(OS)) .PHONY: all diff --git a/cmd/userdata/flatcar/main.go b/cmd/userdata/flatcar/main.go new file mode 100644 index 000000000..8fa00bd9e --- /dev/null +++ b/cmd/userdata/flatcar/main.go @@ -0,0 +1,47 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +// UserData plugin for flatcar. +// + +package main + +import ( + "flag" + + "k8s.io/klog" + + "github.com/kubermatic/machine-controller/pkg/userdata/convert" + "github.com/kubermatic/machine-controller/pkg/userdata/flatcar" + userdataplugin "github.com/kubermatic/machine-controller/pkg/userdata/plugin" +) + +func main() { + // Parse flags. + var debug bool + + flag.BoolVar(&debug, "debug", false, "Switch for enabling the plugin debugging") + flag.Parse() + + // Instantiate provider and start plugin. + var provider = &flatcar.Provider{} + var p = userdataplugin.New(convert.NewIgnition(provider), debug) + + if err := p.Run(); err != nil { + klog.Fatalf("error running flatcar plugin: %v", err) + } +} diff --git a/pkg/cloudprovider/provider/aws/provider.go b/pkg/cloudprovider/provider/aws/provider.go index 49cfff858..7c68bd148 100644 --- a/pkg/cloudprovider/provider/aws/provider.go +++ b/pkg/cloudprovider/provider/aws/provider.go @@ -118,6 +118,12 @@ var ( // The AWS marketplace ID from RedHat owner: "309956199498", }, + providerconfigtypes.OperatingSystemFlatcar: { + // Be as precise as possible - otherwise we might get a nightly dev build + description: "Flatcar Container Linux stable 2345.3.1 (HVM)", + // The AWS marketplace ID from AWS + owner: "075585003325", + }, } // cacheLock protects concurrent cache misses against a single key. This usually happens when multiple machines get created simultaneously @@ -236,6 +242,8 @@ func getDefaultRootDevicePath(os providerconfigtypes.OperatingSystem) (string, e return rootDevicePathCoreOSSLES, nil case providerconfigtypes.OperatingSystemRHEL: return rootDevicePathUbuntuCentOSRHEL, nil + case providerconfigtypes.OperatingSystemFlatcar: + return rootDevicePathCoreOSSLES, nil } return "", fmt.Errorf("no default root path found for %s operating system", os) @@ -488,8 +496,9 @@ func (p *provider) Create(machine *v1alpha1.Machine, data *cloudprovidertypes.Pr } } - if pc.OperatingSystem != providerconfigtypes.OperatingSystemCoreos { - // Gzip the userdata in case we don't use CoreOS. 
+ if pc.OperatingSystem != providerconfigtypes.OperatingSystemCoreos && + pc.OperatingSystem != providerconfigtypes.OperatingSystemFlatcar { + // Gzip the userdata in case we don't use CoreOS and Flatcar userdata, err = convert.GzipString(userdata) if err != nil { return nil, fmt.Errorf("failed to gzip the userdata") diff --git a/pkg/providerconfig/types/types.go b/pkg/providerconfig/types/types.go index 67a28121a..bad23ddc6 100644 --- a/pkg/providerconfig/types/types.go +++ b/pkg/providerconfig/types/types.go @@ -33,11 +33,12 @@ import ( type OperatingSystem string const ( - OperatingSystemCoreos OperatingSystem = "coreos" - OperatingSystemUbuntu OperatingSystem = "ubuntu" - OperatingSystemCentOS OperatingSystem = "centos" - OperatingSystemSLES OperatingSystem = "sles" - OperatingSystemRHEL OperatingSystem = "rhel" + OperatingSystemCoreos OperatingSystem = "coreos" + OperatingSystemUbuntu OperatingSystem = "ubuntu" + OperatingSystemCentOS OperatingSystem = "centos" + OperatingSystemSLES OperatingSystem = "sles" + OperatingSystemRHEL OperatingSystem = "rhel" + OperatingSystemFlatcar OperatingSystem = "flatcar" ) type CloudProvider string @@ -67,6 +68,7 @@ var ( OperatingSystemCentOS, OperatingSystemSLES, OperatingSystemRHEL, + OperatingSystemFlatcar, } // AllCloudProviders is a slice containing all supported cloud providers. diff --git a/pkg/userdata/flatcar/flatcar.go b/pkg/userdata/flatcar/flatcar.go new file mode 100644 index 000000000..f72aae612 --- /dev/null +++ b/pkg/userdata/flatcar/flatcar.go @@ -0,0 +1,54 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package flatcar + +import ( + "encoding/json" + + "k8s.io/apimachinery/pkg/runtime" +) + +// Config contains specific configuration for Flatcar. +type Config struct { + DisableAutoUpdate bool `json:"disableAutoUpdate"` + DisableLocksmithD bool `json:"disableLocksmithD"` + DisableUpdateEngine bool `json:"disableUpdateEngine"` +} + +// LoadConfig retrieves the Flatcar configuration from raw data. +func LoadConfig(r runtime.RawExtension) (*Config, error) { + cfg := Config{} + if len(r.Raw) == 0 { + return &cfg, nil + } + if err := json.Unmarshal(r.Raw, &cfg); err != nil { + return nil, err + } + return &cfg, nil +} + +// Spec return the configuration as raw data. +func (cfg *Config) Spec() (*runtime.RawExtension, error) { + ext := &runtime.RawExtension{} + b, err := json.Marshal(cfg) + if err != nil { + return nil, err + } + + ext.Raw = b + return ext, nil +} diff --git a/pkg/userdata/flatcar/provider.go b/pkg/userdata/flatcar/provider.go new file mode 100644 index 000000000..1687e0c1a --- /dev/null +++ b/pkg/userdata/flatcar/provider.go @@ -0,0 +1,377 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +// UserData plugin for Flatcar. +// + +package flatcar + +import ( + "bytes" + "fmt" + "strings" + "text/template" + + "github.com/Masterminds/semver" + + "github.com/kubermatic/machine-controller/pkg/apis/plugin" + providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + userdatahelper "github.com/kubermatic/machine-controller/pkg/userdata/helper" +) + +// Provider is a pkg/userdata/plugin.Provider implementation. +type Provider struct{} + +// UserData renders user-data template to string. +func (p Provider) UserData(req plugin.UserDataRequest) (string, error) { + + tmpl, err := template.New("user-data").Funcs(userdatahelper.TxtFuncMap()).Parse(userDataTemplate) + if err != nil { + return "", fmt.Errorf("failed to parse user-data template: %v", err) + } + + kubeletVersion, err := semver.NewVersion(req.MachineSpec.Versions.Kubelet) + if err != nil { + return "", fmt.Errorf("invalid kubelet version: %v", err) + } + + pconfig, err := providerconfigtypes.GetConfig(req.MachineSpec.ProviderSpec) + if err != nil { + return "", fmt.Errorf("failed to get provider config: %v", err) + } + + if pconfig.OverwriteCloudConfig != nil { + req.CloudConfig = *pconfig.OverwriteCloudConfig + } + + flatcarConfig, err := LoadConfig(pconfig.OperatingSystemSpec) + if err != nil { + return "", fmt.Errorf("failed to get flatcar config from provider config: %v", err) + } + + kubeconfigString, err := userdatahelper.StringifyKubeconfig(req.Kubeconfig) + if err != nil { + return "", err + } + + kubernetesCACert, err := userdatahelper.GetCACert(req.Kubeconfig) + if err != nil { + return "", fmt.Errorf("error extracting cacert: %v", err) + } + + // We need to reconfigure rkt to allow insecure registries in case the hyperkube image comes from an insecure registry + var insecureHyperkubeImage bool + for _, registry := range req.InsecureRegistries { + if strings.Contains(req.HyperkubeImage, registry) { + insecureHyperkubeImage = true + } + } + + if flatcarConfig.DisableAutoUpdate { + flatcarConfig.DisableLocksmithD = true + flatcarConfig.DisableUpdateEngine = true + } + + data := struct { + plugin.UserDataRequest + ProviderSpec *providerconfigtypes.Config + FlatcarConfig *Config + Kubeconfig string + KubernetesCACert string + KubeletVersion string + InsecureHyperkubeImage bool + }{ + UserDataRequest: req, + ProviderSpec: pconfig, + FlatcarConfig: flatcarConfig, + Kubeconfig: kubeconfigString, + KubernetesCACert: kubernetesCACert, + KubeletVersion: kubeletVersion.String(), + InsecureHyperkubeImage: insecureHyperkubeImage, + } + b := &bytes.Buffer{} + err = tmpl.Execute(b, data) + if err != nil { + return "", fmt.Errorf("failed to execute user-data template: %v", err) + } + return userdatahelper.CleanupTemplateOutput(b.String()) +} + +// UserData template. 
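+// The template below is a Container Linux Config: the plugin entry point in
+// cmd/userdata/flatcar wraps this provider in convert.NewIgnition, so machines
+// receive the Ignition-converted form of the rendered output (provider_test.go
+// round-trips the result through convert.ToIgnition for the same reason).
+// Which update units end up masked is driven by FlatcarConfig; UserData above
+// expands DisableAutoUpdate into DisableLocksmithD and DisableUpdateEngine.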
+const userDataTemplate = `passwd: +{{- if ne (len .ProviderSpec.SSHPublicKeys) 0 }} + users: + - name: core + ssh_authorized_keys: + {{range .ProviderSpec.SSHPublicKeys}}- {{.}} + {{end}} +{{- end }} + +{{- if .ProviderSpec.Network }} +networkd: + units: + - name: static-nic.network + contents: | + [Match] + # Because of difficulty predicting specific NIC names on different cloud providers, + # we only support static addressing on VSphere. There should be a single NIC attached + # that we will match by name prefix 'en' which denotes ethernet devices. + Name=en* + + [Network] + DHCP=no + Address={{ .ProviderSpec.Network.CIDR }} + Gateway={{ .ProviderSpec.Network.Gateway }} + {{range .ProviderSpec.Network.DNS.Servers}}DNS={{.}} + {{end}} +{{- end }} + +systemd: + units: +{{- if .FlatcarConfig.DisableUpdateEngine }} + - name: update-engine.service + mask: true +{{- end }} +{{- if .FlatcarConfig.DisableLocksmithD }} + - name: locksmithd.service + mask: true +{{- end }} + - name: docker.service + enabled: true + +{{- if .HTTPProxy }} + - name: update-engine.service + dropins: + - name: 50-proxy.conf + contents: | + [Service] + Environment=ALL_PROXY={{ .HTTPProxy }} +{{- end }} + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | +{{ containerRuntimeHealthCheckSystemdUnit | indent 10 }} + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | +{{ kubeletHealthCheckSystemdUnit | indent 10 }} + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment +{{- if .HTTPProxy }} + Environment=KUBELET_IMAGE=docker://{{ .HyperkubeImage }}:v{{ .KubeletVersion }} +{{- else }} + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v{{ .KubeletVersion }} +{{- end }} + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image{{if .InsecureHyperkubeImage }},http{{ end }} \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm 
--uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ +{{ if semverCompare ">=1.17.0" .KubeletVersion }}{{ print " kubelet \\\n" }}{{ end -}} +{{ kubeletFlags .KubeletVersion .CloudProviderName .MachineSpec.Name .DNSIPs .ExternalCloudProvider .PauseImage .MachineSpec.Taints | indent 10 }} + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: +{{- if .HTTPProxy }} + - path: /etc/environment + filesystem: root + mode: 0644 + contents: + inline: | +{{ proxyEnvironment .HTTPProxy .NoProxy | indent 10 }} +{{- end }} + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | +{{ journalDConfig | indent 10 }} + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | +{{ kubeletConfiguration "cluster.local" .DNSIPs | indent 10 }} + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | +{{ kernelModulesScript | indent 10 }} + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | +{{ kernelSettings | indent 10 }} + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | +{{ .Kubeconfig | indent 10 }} + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | +{{ .CloudConfig | indent 10 }} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | +{{ .KubernetesCACert | indent 10 }} +{{ if ne .CloudProviderName "aws" }} + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: '{{ .MachineSpec.Name }}' +{{- end }} + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | +{{ dockerConfig .InsecureRegistries .RegistryMirrors | indent 10 }} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail +{{ downloadBinariesScript .KubeletVersion false | indent 10 }}` diff --git a/pkg/userdata/flatcar/provider_test.go b/pkg/userdata/flatcar/provider_test.go new file mode 100644 index 000000000..5f3e6475c --- /dev/null +++ b/pkg/userdata/flatcar/provider_test.go @@ -0,0 +1,500 @@ +/* +Copyright 2019 The Machine Controller Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// +// UserData plugin for CentOS. +// + +package flatcar + +import ( + "encoding/json" + "flag" + "net" + "testing" + + clusterv1alpha1 "github.com/kubermatic/machine-controller/pkg/apis/cluster/v1alpha1" + "github.com/kubermatic/machine-controller/pkg/apis/plugin" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + clientcmdapi "k8s.io/client-go/tools/clientcmd/api" + + providerconfigtypes "github.com/kubermatic/machine-controller/pkg/providerconfig/types" + testhelper "github.com/kubermatic/machine-controller/pkg/test" + "github.com/kubermatic/machine-controller/pkg/userdata/cloud" + "github.com/kubermatic/machine-controller/pkg/userdata/convert" +) + +var ( + update = flag.Bool("update", false, "update testdata files") + + pemCertificate = `-----BEGIN CERTIFICATE----- +MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV +BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG +A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 +DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 +NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG +cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv +c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B +AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS +R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT +ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk +JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 +mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW +caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G +A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt +hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB +MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES +MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv +bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h +U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao +eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 +UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD +58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n +sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF +kPe6XoSbiLm/kxk32T0= +-----END CERTIFICATE-----` + + kubeconfig = &clientcmdapi.Config{ + Clusters: map[string]*clientcmdapi.Cluster{ + "": { + Server: "https://server:443", + CertificateAuthorityData: []byte(pemCertificate), + }, + }, + AuthInfos: map[string]*clientcmdapi.AuthInfo{ + "": { + Token: "my-token", + }, + }, + } +) + +// fakeCloudConfigProvider simulates cloud config provider for test. +type fakeCloudConfigProvider struct { + config string + name string + err error +} + +func (p *fakeCloudConfigProvider) GetCloudConfig(spec clusterv1alpha1.MachineSpec) (config string, name string, err error) { + return p.config, p.name, p.err +} + +// userDataTestCase contains the data for a table-driven test. 
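+// Each case below is rendered through Provider.UserData and compared against a
+// golden file of the same name under testdata/; pass the -update flag (defined
+// above) to the test binary to regenerate those files after template changes.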
+type userDataTestCase struct { + name string + spec clusterv1alpha1.MachineSpec + ccProvider cloud.ConfigProvider + osConfig *Config + providerSpec *providerconfigtypes.Config + DNSIPs []net.IP + externalCloudProvider bool + httpProxy string + noProxy string + insecureRegistries []string + registryMirrors []string + pauseImage string + hyperkubeImage string +} + +// TestUserDataGeneration runs the data generation for different +// environments. +func TestUserDataGeneration(t *testing.T) { + t.Parallel() + + tests := []userDataTestCase{ + { + name: "v1.9.2-disable-auto-update-aws", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "aws", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.9.2", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "aws", + config: "{aws-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + }, + { + name: "v1.9.2-disable-locksmith-aws", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "aws", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.9.2", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "aws", + config: "{aws-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableLocksmithD: true, + }, + }, + { + name: "v1.9.2-disable-update-engine-aws", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "aws", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.9.2", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "aws", + config: "{aws-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableUpdateEngine: true, + }, + }, + { + name: "v1.9.2-disable-auto-update-aws-external", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "aws", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.9.2", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "aws", + config: "{aws-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + externalCloudProvider: true, + }, + { + name: "v1.10.3-auto-update-openstack-multiple-dns", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "openstack", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.10.3", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "openstack", + config: "{openstack-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10"), net.ParseIP("10.10.10.11"), net.ParseIP("10.10.10.12")}, + osConfig: &Config{ + DisableAutoUpdate: false, + }, + }, + { + name: "auto-update-openstack-kubelet-v-version-prefix", + providerSpec: 
&providerconfigtypes.Config{ + CloudProvider: "openstack", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "v1.9.2", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "openstack", + config: "{openstack-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: false, + }, + }, + { + name: "v1.11.2-vsphere-static-ipconfig", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "vsphere", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + Network: &providerconfigtypes.NetworkConfig{ + CIDR: "192.168.81.4/24", + Gateway: "192.168.81.1", + DNS: providerconfigtypes.DNSConfig{ + Servers: []string{"8.8.8.8"}, + }, + }, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.11.2", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "vsphere", + config: "{vsphere-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + }, + { + name: "v1.12.0-vsphere-overwrite-cloudconfig", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "vsphere", + OverwriteCloudConfig: stringPtr("my\ncustom\ncloud-config"), + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + Network: &providerconfigtypes.NetworkConfig{ + CIDR: "192.168.81.4/24", + Gateway: "192.168.81.1", + DNS: providerconfigtypes.DNSConfig{ + Servers: []string{"8.8.8.8"}, + }, + }, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "v1.12.0", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "vsphere", + config: "{vsphere-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + }, + { + name: "v1.15.0-vsphere", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "vsphere", + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{ + Name: "node1", + }, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "1.15.0-beta.2", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "vsphere", + config: "{vsphere-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + }, + { + name: "v1.12.0-vsphere-proxy", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "vsphere", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + Network: &providerconfigtypes.NetworkConfig{ + CIDR: "192.168.81.4/24", + Gateway: "192.168.81.1", + DNS: providerconfigtypes.DNSConfig{ + Servers: []string{"8.8.8.8"}, + }, + }, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "v1.12.0", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "vsphere", + config: "{vsphere-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + httpProxy: "http://192.168.100.100:3128", + noProxy: "192.168.1.0", + insecureRegistries: []string{"192.168.100.100:5000", "10.0.0.1:5000"}, + pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", + 
hyperkubeImage: "192.168.100.100:5000/kubernetes/hyperkube", + }, + { + name: "v1.12.0-vsphere-mirrors", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "vsphere", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + Network: &providerconfigtypes.NetworkConfig{ + CIDR: "192.168.81.4/24", + Gateway: "192.168.81.1", + DNS: providerconfigtypes.DNSConfig{ + Servers: []string{"8.8.8.8"}, + }, + }, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "v1.12.0", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "vsphere", + config: "{vsphere-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + httpProxy: "http://192.168.100.100:3128", + noProxy: "192.168.1.0", + registryMirrors: []string{"https://registry.docker-cn.com"}, + pauseImage: "192.168.100.100:5000/kubernetes/pause:v3.1", + hyperkubeImage: "192.168.100.100:5000/kubernetes/hyperkube", + }, + { + name: "v1.17.0", + providerSpec: &providerconfigtypes.Config{ + CloudProvider: "vsphere", + SSHPublicKeys: []string{"ssh-rsa AAABBB", "ssh-rsa CCCDDD"}, + Network: &providerconfigtypes.NetworkConfig{ + CIDR: "192.168.81.4/24", + Gateway: "192.168.81.1", + DNS: providerconfigtypes.DNSConfig{ + Servers: []string{"8.8.8.8"}, + }, + }, + }, + spec: clusterv1alpha1.MachineSpec{ + ObjectMeta: metav1.ObjectMeta{Name: "node1"}, + Versions: clusterv1alpha1.MachineVersionInfo{ + Kubelet: "v1.17.0", + }, + }, + ccProvider: &fakeCloudConfigProvider{ + name: "vsphere", + config: "{vsphere-config:true}", + err: nil, + }, + DNSIPs: []net.IP{net.ParseIP("10.10.10.10")}, + osConfig: &Config{ + DisableAutoUpdate: true, + }, + }, + } + + for _, test := range tests { + t.Run(test.name, func(t *testing.T) { + rProviderSpec := test.providerSpec + osConfigByte, err := json.Marshal(test.osConfig) + if err != nil { + t.Fatal(err) + } + rProviderSpec.OperatingSystemSpec = runtime.RawExtension{ + Raw: osConfigByte, + } + + providerSpecRaw, err := json.Marshal(rProviderSpec) + if err != nil { + t.Fatal(err) + } + test.spec.ProviderSpec = clusterv1alpha1.ProviderSpec{ + Value: &runtime.RawExtension{ + Raw: providerSpecRaw, + }, + } + provider := Provider{} + + cloudConfig, cloudProviderName, err := test.ccProvider.GetCloudConfig(test.spec) + if err != nil { + t.Fatalf("failed to get cloud config: %v", err) + } + + req := plugin.UserDataRequest{ + MachineSpec: test.spec, + Kubeconfig: kubeconfig, + CloudConfig: cloudConfig, + CloudProviderName: cloudProviderName, + DNSIPs: test.DNSIPs, + ExternalCloudProvider: test.externalCloudProvider, + HTTPProxy: test.httpProxy, + NoProxy: test.noProxy, + InsecureRegistries: test.insecureRegistries, + RegistryMirrors: test.registryMirrors, + PauseImage: test.pauseImage, + HyperkubeImage: test.hyperkubeImage, + } + + s, err := provider.UserData(req) + if err != nil { + t.Fatal(err) + } + + // Check if we can convert it to ignition. + if _, err := convert.ToIgnition(s); err != nil { + t.Fatal(err) + } + goldenName := test.name + ".yaml" + testhelper.CompareOutput(t, goldenName, s, *update) + }) + } +} + +// stringPtr returns pointer to given string. 
+func stringPtr(str string) *string { + return &str +} diff --git a/pkg/userdata/flatcar/testdata/auto-update-openstack-kubelet-v-version-prefix.yaml b/pkg/userdata/flatcar/testdata/auto-update-openstack-kubelet-v-version-prefix.yaml new file mode 100644 index 000000000..c1926de97 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/auto-update-openstack-kubelet-v-version-prefix.yaml @@ -0,0 +1,354 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + + +systemd: + units: + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cadvisor-port=0 \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=openstack \ + --cloud-config=/etc/kubernetes/cloud-config \ + 
--hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {openstack-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.10.3-auto-update-openstack-multiple-dns.yaml b/pkg/userdata/flatcar/testdata/v1.10.3-auto-update-openstack-multiple-dns.yaml new file mode 100644 index 000000000..3c9f19a09 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.10.3-auto-update-openstack-multiple-dns.yaml @@ -0,0 +1,356 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + + +systemd: + units: + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + 
WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.10.3 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cadvisor-port=0 \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=openstack \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + - 10.10.10.11 + - 10.10.10.12 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + 
serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: 
/etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {openstack-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.11.2-vsphere-static-ipconfig.yaml b/pkg/userdata/flatcar/testdata/v1.11.2-vsphere-static-ipconfig.yaml new file mode 100644 index 000000000..606d6bc9d --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.11.2-vsphere-static-ipconfig.yaml @@ -0,0 +1,374 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + +networkd: + units: + - name: static-nic.network + contents: | + [Match] + # Because of difficulty predicting specific NIC names on different cloud providers, + # we only support static addressing on VSphere. There should be a single NIC attached + # that we will match by name prefix 'en' which denotes ethernet devices. + Name=en* + + [Network] + DHCP=no + Address=192.168.81.4/24 + Gateway=192.168.81.1 + DNS=8.8.8.8 + + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.11.2 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + 
ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cadvisor-port=0 \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=vsphere \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {vsphere-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-mirrors.yaml b/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-mirrors.yaml new file mode 100644 index 000000000..ce74e246e --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-mirrors.yaml @@ -0,0 +1,391 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + +networkd: + units: + - name: static-nic.network + contents: | + [Match] + # Because of difficulty predicting specific NIC names on different cloud providers, + # we only support static addressing on VSphere. There should be a single NIC attached + # that we will match by name prefix 'en' which denotes ethernet devices. 
+ Name=en* + + [Network] + DHCP=no + Address=192.168.81.4/24 + Gateway=192.168.81.1 + DNS=8.8.8.8 + + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + - name: update-engine.service + dropins: + - name: 50-proxy.conf + contents: | + [Service] + Environment=ALL_PROXY=http://192.168.100.100:3128 + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://192.168.100.100:5000/kubernetes/hyperkube:v1.12.0 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=vsphere \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + 
--pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + - path: /etc/environment + filesystem: root + mode: 0644 + contents: + inline: | + HTTP_PROXY=http://192.168.100.100:3128 + http_proxy=http://192.168.100.100:3128 + HTTPS_PROXY=http://192.168.100.100:3128 + https_proxy=http://192.168.100.100:3128 + NO_PROXY=192.168.1.0 + no_proxy=192.168.1.0 + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {vsphere-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"},"registry-mirrors":["https://registry.docker-cn.com"]} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-overwrite-cloudconfig.yaml b/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-overwrite-cloudconfig.yaml new file mode 100644 index 000000000..7b8c9f064 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-overwrite-cloudconfig.yaml @@ -0,0 +1,375 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + +networkd: + units: + - name: static-nic.network + contents: | + [Match] + # Because of difficulty predicting specific NIC names on different cloud providers, + # we only support static addressing on VSphere. There should be a single NIC attached + # that we will match by name prefix 'en' which denotes ethernet devices. 
+ Name=en* + + [Network] + DHCP=no + Address=192.168.81.4/24 + Gateway=192.168.81.1 + DNS=8.8.8.8 + + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.12.0 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=vsphere \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + 
Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + my + custom + cloud-config + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-proxy.yaml b/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-proxy.yaml new file mode 100644 index 000000000..b416c2ce3 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.12.0-vsphere-proxy.yaml @@ -0,0 +1,391 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + +networkd: + units: + - name: static-nic.network + contents: | + [Match] + # Because of difficulty predicting specific NIC names on different cloud providers, + # we only support static addressing on VSphere. There should be a single NIC attached + # that we will match by name prefix 'en' which denotes ethernet devices. 
+ Name=en* + + [Network] + DHCP=no + Address=192.168.81.4/24 + Gateway=192.168.81.1 + DNS=8.8.8.8 + + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + - name: update-engine.service + dropins: + - name: 50-proxy.conf + contents: | + [Service] + Environment=ALL_PROXY=http://192.168.100.100:3128 + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://192.168.100.100:5000/kubernetes/hyperkube:v1.12.0 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image,http \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=vsphere \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + 
--pod-infra-container-image=192.168.100.100:5000/kubernetes/pause:v3.1 \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + - path: /etc/environment + filesystem: root + mode: 0644 + contents: + inline: | + HTTP_PROXY=http://192.168.100.100:3128 + http_proxy=http://192.168.100.100:3128 + HTTPS_PROXY=http://192.168.100.100:3128 + https_proxy=http://192.168.100.100:3128 + NO_PROXY=192.168.1.0 + no_proxy=192.168.1.0 + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {vsphere-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"},"insecure-registries":["192.168.100.100:5000","10.0.0.1:5000"]} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.15.0-vsphere.yaml b/pkg/userdata/flatcar/testdata/v1.15.0-vsphere.yaml new file mode 100644 index 000000000..3f71ba2d8 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.15.0-vsphere.yaml @@ -0,0 +1,350 @@ +passwd: + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.15.0-beta.2 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=vsphere \ + 
--cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {vsphere-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.17.0.yaml b/pkg/userdata/flatcar/testdata/v1.17.0.yaml new file mode 100644 index 000000000..5bca5e332 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.17.0.yaml @@ -0,0 +1,373 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + +networkd: + units: + - name: static-nic.network + contents: | + [Match] + # Because of difficulty predicting specific NIC names on different cloud providers, + # we only support static addressing on VSphere. There should be a single NIC attached + # that we will match by name prefix 'en' which denotes ethernet devices. 
+ Name=en* + + [Network] + DHCP=no + Address=192.168.81.4/24 + Gateway=192.168.81.1 + DNS=8.8.8.8 + + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.17.0 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + kubelet \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=vsphere \ + --cloud-config=/etc/kubernetes/cloud-config \ + --hostname-override=node1 \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + 
RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {vsphere-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + - path: /etc/hostname + filesystem: root + mode: 0600 + contents: + inline: 'node1' + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.9.2-disable-auto-update-aws-external.yaml b/pkg/userdata/flatcar/testdata/v1.9.2-disable-auto-update-aws-external.yaml new file mode 100644 index 000000000..aea244473 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.9.2-disable-auto-update-aws-external.yaml @@ -0,0 +1,351 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + 
ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cadvisor-port=0 \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=external \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: 
/etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: 
root + mode: 0400 + contents: + inline: | + {aws-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.9.2-disable-auto-update-aws.yaml b/pkg/userdata/flatcar/testdata/v1.9.2-disable-auto-update-aws.yaml new file mode 100644 index 000000000..9528f047a --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.9.2-disable-auto-update-aws.yaml @@ -0,0 +1,352 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + + +systemd: + units: + - name: update-engine.service + mask: true + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + 
--network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cadvisor-port=0 \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=aws \ + --cloud-config=/etc/kubernetes/cloud-config \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {aws-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.9.2-disable-locksmith-aws.yaml b/pkg/userdata/flatcar/testdata/v1.9.2-disable-locksmith-aws.yaml new file mode 100644 index 000000000..f9002d4d0 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.9.2-disable-locksmith-aws.yaml @@ -0,0 +1,350 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + + +systemd: + units: + - name: locksmithd.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + 
Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + --cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cadvisor-port=0 \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=aws \ + --cloud-config=/etc/kubernetes/cloud-config \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: 
/opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {aws-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + 
mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! 
-x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/flatcar/testdata/v1.9.2-disable-update-engine-aws.yaml b/pkg/userdata/flatcar/testdata/v1.9.2-disable-update-engine-aws.yaml new file mode 100644 index 000000000..5463dc3a0 --- /dev/null +++ b/pkg/userdata/flatcar/testdata/v1.9.2-disable-update-engine-aws.yaml @@ -0,0 +1,350 @@ +passwd: + users: + - name: core + ssh_authorized_keys: + - ssh-rsa AAABBB + - ssh-rsa CCCDDD + + +systemd: + units: + - name: update-engine.service + mask: true + - name: docker.service + enabled: true + + - name: download-healthcheck-script.service + enabled: true + contents: | + [Unit] + Requires=network-online.target + After=network-online.target + [Service] + Type=oneshot + EnvironmentFile=-/etc/environment + ExecStart=/opt/bin/download.sh + [Install] + WantedBy=multi-user.target + + - name: docker-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=docker.service + After=docker.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh container-runtime + + [Install] + WantedBy=multi-user.target + + - name: kubelet-healthcheck.service + enabled: true + dropins: + - name: 40-docker.conf + contents: | + [Unit] + Requires=download-healthcheck-script.service + After=download-healthcheck-script.service + contents: | + [Unit] + Requires=kubelet.service + After=kubelet.service + + [Service] + ExecStart=/opt/bin/health-monitor.sh kubelet + + [Install] + WantedBy=multi-user.target + + + - name: kubelet.service + enabled: true + contents: | + [Unit] + Description=Kubernetes Kubelet + Requires=docker.service + After=docker.service + [Service] + TimeoutStartSec=5min + CPUAccounting=true + MemoryAccounting=true + EnvironmentFile=-/etc/environment + Environment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2 + Environment="RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \ + --inherit-env \ + --insecure-options=image \ + --volume=resolv,kind=host,source=/etc/resolv.conf \ + --mount volume=resolv,target=/etc/resolv.conf \ + --volume cni-bin,kind=host,source=/opt/cni/bin \ + --mount volume=cni-bin,target=/opt/cni/bin \ + --volume cni-conf,kind=host,source=/etc/cni/net.d \ + --mount volume=cni-conf,target=/etc/cni/net.d \ + --volume etc-kubernetes,kind=host,source=/etc/kubernetes \ + --mount volume=etc-kubernetes,target=/etc/kubernetes \ + --volume var-log,kind=host,source=/var/log \ + --mount volume=var-log,target=/var/log \ + --volume var-lib-calico,kind=host,source=/var/lib/calico \ + --mount volume=var-lib-calico,target=/var/lib/calico" + ExecStartPre=/bin/mkdir -p /var/lib/calico + ExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests + ExecStartPre=/bin/mkdir -p /etc/cni/net.d + ExecStartPre=/bin/mkdir -p /opt/cni/bin + ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ + ExecStartPre=/bin/bash /opt/load-kernel-modules.sh + ExecStart=/usr/lib/flatcar/kubelet-wrapper \ + --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \ + --kubeconfig=/var/lib/kubelet/kubeconfig \ + --config=/etc/kubernetes/kubelet.conf \ + --allow-privileged=true \ + --network-plugin=cni \ + 
--cni-conf-dir=/etc/cni/net.d \ + --cni-bin-dir=/opt/cni/bin \ + --cadvisor-port=0 \ + --cert-dir=/etc/kubernetes/pki \ + --cloud-provider=aws \ + --cloud-config=/etc/kubernetes/cloud-config \ + --dynamic-config-dir /etc/kubernetes/dynamic-config-dir \ + --exit-on-lock-contention \ + --lock-file=/tmp/kubelet.lock \ + --kube-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi \ + --system-reserved=cpu=100m,memory=100Mi,ephemeral-storage=1Gi + ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid + Restart=always + RestartSec=10 + [Install] + WantedBy=multi-user.target + + - name: docker.service + enabled: true + dropins: + - name: 10-environment.conf + contents: | + [Service] + EnvironmentFile=-/etc/environment + +storage: + files: + + - path: "/etc/systemd/journald.conf.d/max_disk_use.conf" + filesystem: root + mode: 0644 + contents: + inline: | + [Journal] + SystemMaxUse=5G + + + - path: "/etc/kubernetes/kubelet.conf" + filesystem: root + mode: 0644 + contents: + inline: | + apiVersion: kubelet.config.k8s.io/v1beta1 + authentication: + anonymous: + enabled: false + webhook: + cacheTTL: 0s + enabled: true + x509: + clientCAFile: /etc/kubernetes/pki/ca.crt + authorization: + mode: Webhook + webhook: + cacheAuthorizedTTL: 0s + cacheUnauthorizedTTL: 0s + cgroupDriver: systemd + clusterDNS: + - 10.10.10.10 + clusterDomain: cluster.local + cpuManagerReconcilePeriod: 0s + evictionPressureTransitionPeriod: 0s + featureGates: + RotateKubeletServerCertificate: true + fileCheckFrequency: 0s + httpCheckFrequency: 0s + imageMinimumGCAge: 0s + kind: KubeletConfiguration + nodeStatusReportFrequency: 0s + nodeStatusUpdateFrequency: 0s + protectKernelDefaults: true + rotateCertificates: true + runtimeRequestTimeout: 0s + serverTLSBootstrap: true + staticPodPath: /etc/kubernetes/manifests + streamingConnectionIdleTimeout: 0s + syncFrequency: 0s + volumeStatsAggPeriod: 0s + + + - path: /opt/load-kernel-modules.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/usr/bin/env bash + set -euo pipefail + + modprobe ip_vs + modprobe ip_vs_rr + modprobe ip_vs_wrr + modprobe ip_vs_sh + + if modinfo nf_conntrack_ipv4 &> /dev/null; then + modprobe nf_conntrack_ipv4 + else + modprobe nf_conntrack + fi + + + - path: /etc/sysctl.d/k8s.conf + filesystem: root + mode: 0644 + contents: + inline: | + net.bridge.bridge-nf-call-ip6tables = 1 + net.bridge.bridge-nf-call-iptables = 1 + kernel.panic_on_oops = 1 + kernel.panic = 10 + net.ipv4.ip_forward = 1 + vm.overcommit_memory = 1 + fs.inotify.max_user_watches = 1048576 + + + - path: /proc/sys/kernel/panic_on_oops + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /proc/sys/kernel/panic + filesystem: root + mode: 0644 + contents: + inline: | + 10 + + - path: /proc/sys/vm/overcommit_memory + filesystem: root + mode: 0644 + contents: + inline: | + 1 + + - path: /etc/kubernetes/bootstrap-kubelet.conf + filesystem: root + mode: 0400 + contents: + inline: | + apiVersion: v1 + clusters: + - cluster: + certificate-authority-data: 
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUVXakNDQTBLZ0F3SUJBZ0lKQUxmUmxXc0k4WVFITUEwR0NTcUdTSWIzRFFFQkJRVUFNSHN4Q3pBSkJnTlYKQkFZVEFsVlRNUXN3Q1FZRFZRUUlFd0pEUVRFV01CUUdBMVVFQnhNTlUyRnVJRVp5WVc1amFYTmpiekVVTUJJRwpBMVVFQ2hNTFFuSmhaR1pwZEhwcGJtTXhFakFRQmdOVkJBTVRDV3h2WTJGc2FHOXpkREVkTUJzR0NTcUdTSWIzCkRRRUpBUllPWW5KaFpFQmtZVzVuWVM1amIyMHdIaGNOTVRRd056RTFNakEwTmpBMVdoY05NVGN3TlRBME1qQTAKTmpBMVdqQjdNUXN3Q1FZRFZRUUdFd0pWVXpFTE1Ba0dBMVVFQ0JNQ1EwRXhGakFVQmdOVkJBY1REVk5oYmlCRwpjbUZ1WTJselkyOHhGREFTQmdOVkJBb1RDMEp5WVdSbWFYUjZhVzVqTVJJd0VBWURWUVFERXdsc2IyTmhiR2h2CmMzUXhIVEFiQmdrcWhraUc5dzBCQ1FFV0RtSnlZV1JBWkdGdVoyRXVZMjl0TUlJQklqQU5CZ2txaGtpRzl3MEIKQVFFRkFBT0NBUThBTUlJQkNnS0NBUUVBdDVmQWpwNGZUY2VrV1VUZnpzcDBreWloMU9ZYnNHTDBLWDFlUmJTUwpSOE9kMCs5UTYySHlueStHRndNVGI0QS9LVThtc3NvSHZjY2VTQUFid2ZieEZLLytzNTFUb2JxVW5PUlpyT29UClpqa1V5Z2J5WERTSzk5WUJiY1IxUGlwOHZ3TVRtNFhLdUx0Q2lnZUJCZGpqQVFkZ1VPMjhMRU5HbHNNbm1lWWsKSmZPRFZHblZtcjVMdGI5QU5BOElLeVRmc25ISjRpT0NTL1BsUGJVajJxN1lub1ZMcG9zVUJNbGdVYi9DeWtYMwptT29MYjR5SkpReUEvaVNUNlp4aUlFajM2RDR5V1o1bGc3WUpsK1VpaUJRSEdDblBkR3lpcHFWMDZleDBoZVlXCmNhaVc4TFdaU1VROTNqUStXVkNIOGhUN0RRTzFkbXN2VW1YbHEvSmVBbHdRL1FJREFRQUJvNEhnTUlIZE1CMEcKQTFVZERnUVdCQlJjQVJPdGhTNFA0VTd2VGZqQnlDNTY5UjdFNkRDQnJRWURWUjBqQklHbE1JR2lnQlJjQVJPdApoUzRQNFU3dlRmakJ5QzU2OVI3RTZLRi9wSDB3ZXpFTE1Ba0dBMVVFQmhNQ1ZWTXhDekFKQmdOVkJBZ1RBa05CCk1SWXdGQVlEVlFRSEV3MVRZVzRnUm5KaGJtTnBjMk52TVJRd0VnWURWUVFLRXd0Q2NtRmtabWwwZW1sdVl6RVMKTUJBR0ExVUVBeE1KYkc5allXeG9iM04wTVIwd0d3WUpLb1pJaHZjTkFRa0JGZzVpY21Ga1FHUmhibWRoTG1OdgpiWUlKQUxmUmxXc0k4WVFITUF3R0ExVWRFd1FGTUFNQkFmOHdEUVlKS29aSWh2Y05BUUVGQlFBRGdnRUJBRzZoClU5ZjlzTkgwLzZvQmJHR3kyRVZVMFVnSVRVUUlyRldvOXJGa3JXNWsvWGtEalFtKzNsempUMGlHUjRJeEUvQW8KZVU2c1FodWE3d3JXZUZFbjQ3R0w5OGxuQ3NKZEQ3b1pOaEZtUTk1VGIvTG5EVWpzNVlqOWJyUDBOV3pYZllVNApVSzJabklOSlJjSnBCOGlSQ2FDeEU4RGRjVUYwWHFJRXE2cEEyNzJzbm9MbWlYTE12Tmwza1lFZG0ramU2dm9ECjU4U05WRVVzenR6UXlYbUpFaENwd1ZJMEE2UUNqelhqK3F2cG13M1paSGk4SndYZWk4WlpCTFRTRkJraThaN24Kc0g5QkJIMzgvU3pVbUFONFFIU1B5MWdqcW0wME9BRThOYVlEa2gvYnpFNGQ3bUxHR01XcC9XRTNLUFN1ODJIRgprUGU2WG9TYmlMbS9reGszMlQwPQotLS0tLUVORCBDRVJUSUZJQ0FURS0tLS0t + server: https://server:443 + name: "" + contexts: [] + current-context: "" + kind: Config + preferences: {} + users: + - name: "" + user: + token: my-token + + + - path: /etc/kubernetes/cloud-config + filesystem: root + mode: 0400 + contents: + inline: | + {aws-config:true} + + - path: /etc/kubernetes/pki/ca.crt + filesystem: root + mode: 0644 + contents: + inline: | + -----BEGIN CERTIFICATE----- + MIIEWjCCA0KgAwIBAgIJALfRlWsI8YQHMA0GCSqGSIb3DQEBBQUAMHsxCzAJBgNV + BAYTAlVTMQswCQYDVQQIEwJDQTEWMBQGA1UEBxMNU2FuIEZyYW5jaXNjbzEUMBIG + A1UEChMLQnJhZGZpdHppbmMxEjAQBgNVBAMTCWxvY2FsaG9zdDEdMBsGCSqGSIb3 + DQEJARYOYnJhZEBkYW5nYS5jb20wHhcNMTQwNzE1MjA0NjA1WhcNMTcwNTA0MjA0 + NjA1WjB7MQswCQYDVQQGEwJVUzELMAkGA1UECBMCQ0ExFjAUBgNVBAcTDVNhbiBG + cmFuY2lzY28xFDASBgNVBAoTC0JyYWRmaXR6aW5jMRIwEAYDVQQDEwlsb2NhbGhv + c3QxHTAbBgkqhkiG9w0BCQEWDmJyYWRAZGFuZ2EuY29tMIIBIjANBgkqhkiG9w0B + AQEFAAOCAQ8AMIIBCgKCAQEAt5fAjp4fTcekWUTfzsp0kyih1OYbsGL0KX1eRbSS + R8Od0+9Q62Hyny+GFwMTb4A/KU8mssoHvcceSAAbwfbxFK/+s51TobqUnORZrOoT + ZjkUygbyXDSK99YBbcR1Pip8vwMTm4XKuLtCigeBBdjjAQdgUO28LENGlsMnmeYk + JfODVGnVmr5Ltb9ANA8IKyTfsnHJ4iOCS/PlPbUj2q7YnoVLposUBMlgUb/CykX3 + mOoLb4yJJQyA/iST6ZxiIEj36D4yWZ5lg7YJl+UiiBQHGCnPdGyipqV06ex0heYW + caiW8LWZSUQ93jQ+WVCH8hT7DQO1dmsvUmXlq/JeAlwQ/QIDAQABo4HgMIHdMB0G + A1UdDgQWBBRcAROthS4P4U7vTfjByC569R7E6DCBrQYDVR0jBIGlMIGigBRcAROt + hS4P4U7vTfjByC569R7E6KF/pH0wezELMAkGA1UEBhMCVVMxCzAJBgNVBAgTAkNB + 
MRYwFAYDVQQHEw1TYW4gRnJhbmNpc2NvMRQwEgYDVQQKEwtCcmFkZml0emluYzES + MBAGA1UEAxMJbG9jYWxob3N0MR0wGwYJKoZIhvcNAQkBFg5icmFkQGRhbmdhLmNv + bYIJALfRlWsI8YQHMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEBAG6h + U9f9sNH0/6oBbGGy2EVU0UgITUQIrFWo9rFkrW5k/XkDjQm+3lzjT0iGR4IxE/Ao + eU6sQhua7wrWeFEn47GL98lnCsJdD7oZNhFmQ95Tb/LnDUjs5Yj9brP0NWzXfYU4 + UK2ZnINJRcJpB8iRCaCxE8DdcUF0XqIEq6pA272snoLmiXLMvNl3kYEdm+je6voD + 58SNVEUsztzQyXmJEhCpwVI0A6QCjzXj+qvpmw3ZZHi8JwXei8ZZBLTSFBki8Z7n + sH9BBH38/SzUmAN4QHSPy1gjqm00OAE8NaYDkh/bzE4d7mLGGMWp/WE3KPSu82HF + kPe6XoSbiLm/kxk32T0= + -----END CERTIFICATE----- + + + - path: /etc/ssh/sshd_config + filesystem: root + mode: 0600 + user: + id: 0 + group: + id: 0 + contents: + inline: | + # Use most defaults for sshd configuration. + Subsystem sftp internal-sftp + ClientAliveInterval 180 + UseDNS no + UsePAM yes + PrintLastLog no # handled by PAM + PrintMotd no # handled by PAM + PasswordAuthentication no + ChallengeResponseAuthentication no + + - path: /etc/docker/daemon.json + filesystem: root + mode: 0644 + contents: + inline: | + {"exec-opts":["native.cgroupdriver=systemd"],"storage-driver":"overlay2","log-driver":"json-file","log-opts":{"max-size":"100m"}} + + - path: /opt/bin/download.sh + filesystem: root + mode: 0755 + contents: + inline: | + #!/bin/bash + set -xeuo pipefail + mkdir -p /opt/bin/ + mkdir -p /var/lib/calico + mkdir -p /etc/kubernetes/manifests + mkdir -p /etc/cni/net.d + mkdir -p /opt/cni/bin + if [ ! -f /opt/cni/bin/loopback ]; then + curl -L https://github.com/containernetworking/plugins/releases/download/v0.8.2/cni-plugins-linux-amd64-v0.8.2.tgz | tar -xvzC /opt/cni/bin -f - + fi + + if [[ ! -x /opt/bin/health-monitor.sh ]]; then + curl -Lfo /opt/bin/health-monitor.sh https://raw.githubusercontent.com/kubermatic/machine-controller/8b5b66e4910a6228dfaecccaa0a3b05ec4902f8e/pkg/userdata/scripts/health-monitor.sh + chmod +x /opt/bin/health-monitor.sh + fi diff --git a/pkg/userdata/manager/manager.go b/pkg/userdata/manager/manager.go index 347584c66..0e16326f3 100644 --- a/pkg/userdata/manager/manager.go +++ b/pkg/userdata/manager/manager.go @@ -49,6 +49,7 @@ var ( providerconfigtypes.OperatingSystemUbuntu, providerconfigtypes.OperatingSystemSLES, providerconfigtypes.OperatingSystemRHEL, + providerconfigtypes.OperatingSystemFlatcar, } ) diff --git a/test/e2e/provisioning/all_e2e_test.go b/test/e2e/provisioning/all_e2e_test.go index 29b146fcb..bb148a356 100644 --- a/test/e2e/provisioning/all_e2e_test.go +++ b/test/e2e/provisioning/all_e2e_test.go @@ -84,7 +84,7 @@ func TestKubevirtProvisioningE2E(t *testing.T) { t.Fatalf("Unable to run kubevirt tests, KUBEVIRT_E2E_TESTS_KUBECONFIG must be set") } - excludeSelector := &scenarioSelector{osName: []string{"sles"}} + excludeSelector := &scenarioSelector{osName: []string{"sles", "flatcar"}} params := []string{ fmt.Sprintf("<< KUBECONFIG >>=%s", kubevirtKubeconfig), } @@ -117,7 +117,7 @@ func TestOpenstackProvisioningE2E(t *testing.T) { fmt.Sprintf("<< NETWORK_NAME >>=%s", osNetwork), } - excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel", "flatcar"}} runScenarios(t, excludeSelector, params, OSManifest, fmt.Sprintf("os-%s", *testRunIdentifier)) } @@ -134,7 +134,7 @@ func TestDigitalOceanProvisioningE2E(t *testing.T) { t.Fatal("unable to run the test suite, DO_E2E_TESTS_TOKEN environement varialbe cannot be empty") } - excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel"}} + excludeSelector := 
&scenarioSelector{osName: []string{"sles", "rhel", "flatcar"}} // act params := []string{fmt.Sprintf("<< DIGITALOCEAN_TOKEN >>=%s", doToken)} runScenarios(t, excludeSelector, params, DOManifest, fmt.Sprintf("do-%s", *testRunIdentifier)) @@ -177,7 +177,7 @@ func TestAWSSLESProvisioningE2E(t *testing.T) { } // We would like to test SLES image only in this test as the other images are tested in TestAWSProvisioningE2E - excludeSelector := &scenarioSelector{osName: []string{"coreos", "ubuntu", "centos", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"coreos", "ubuntu", "centos", "rhel", "flatcar"}} runScenarios(t, excludeSelector, params, AWSManifest, fmt.Sprintf("aws-%s", *testRunIdentifier)) } @@ -222,7 +222,7 @@ func TestAzureProvisioningE2E(t *testing.T) { t.Fatal("unable to run the test suite, AZURE_TENANT_ID, AZURE_SUBSCRIPTION_ID, AZURE_CLIENT_ID and AZURE_CLIENT_SECRET environment variables cannot be empty") } - excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel", "flatcar"}} // act params := []string{ fmt.Sprintf("<< AZURE_TENANT_ID >>=%s", azureTenantID), @@ -246,7 +246,7 @@ func TestGCEProvisioningE2E(t *testing.T) { } // Act. GCE does not support CentOS. - excludeSelector := &scenarioSelector{osName: []string{"centos", "sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"centos", "sles", "rhel", "flatcar"}} params := []string{ fmt.Sprintf("<< GOOGLE_SERVICE_ACCOUNT >>=%s", googleServiceAccount), } @@ -265,7 +265,7 @@ func TestHetznerProvisioningE2E(t *testing.T) { } // Hetzner does not support coreos - excludeSelector := &scenarioSelector{osName: []string{"coreos", "sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"coreos", "sles", "rhel", "flatcar"}} // act params := []string{fmt.Sprintf("<< HETZNER_TOKEN >>=%s", hzToken)} @@ -288,7 +288,7 @@ func TestPacketProvisioningE2E(t *testing.T) { t.Fatal("unable to run the test suite, PACKET_PROJECT_ID environment variable cannot be empty") } - excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel", "flatcar"}} // act params := []string{ @@ -312,7 +312,7 @@ func TestAlibabaProvisioningE2E(t *testing.T) { t.Fatal("unable to run the test suite, ALIBABA_ACCESS_KEY_SECRET environment variable cannot be empty") } - excludeSelector := &scenarioSelector{osName: []string{"coreos", "rhel", "sles"}} + excludeSelector := &scenarioSelector{osName: []string{"coreos", "rhel", "sles", "flatcar"}} // act params := []string{ @@ -337,7 +337,7 @@ func TestLinodeProvisioningE2E(t *testing.T) { // we're shimming userdata through Linode stackscripts, and Linode's coreos does not support stackscripts // and the stackscript hasn't been verified for use with centos - excludeSelector := &scenarioSelector{osName: []string{"coreos", "centos", "sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"coreos", "centos", "sles", "rhel", "flatcar"}} // act params := []string{fmt.Sprintf("<< LINODE_TOKEN >>=%s", linodeToken)} @@ -370,7 +370,7 @@ func getVSphereTestParams(t *testing.T) []string { func TestVsphereProvisioningE2E(t *testing.T) { t.Parallel() - excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel", "flatcar"}} params := getVSphereTestParams(t) runScenarios(t, excludeSelector, params, VSPhereManifest, fmt.Sprintf("vs-%s", 
*testRunIdentifier)) @@ -381,7 +381,7 @@ func TestVsphereProvisioningE2E(t *testing.T) { func TestVsphereDatastoreClusterProvisioningE2E(t *testing.T) { t.Parallel() - excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel"}} + excludeSelector := &scenarioSelector{osName: []string{"sles", "rhel", "flatcar"}} params := getVSphereTestParams(t) runScenarios(t, excludeSelector, params, VSPhereDSCManifest, fmt.Sprintf("vs-dsc-%s", *testRunIdentifier)) diff --git a/test/e2e/provisioning/helper.go b/test/e2e/provisioning/helper.go index 86e119a2a..2183c6a28 100644 --- a/test/e2e/provisioning/helper.go +++ b/test/e2e/provisioning/helper.go @@ -44,6 +44,7 @@ var ( providerconfigtypes.OperatingSystemCentOS, providerconfigtypes.OperatingSystemSLES, providerconfigtypes.OperatingSystemRHEL, + providerconfigtypes.OperatingSystemFlatcar, } openStackImages = map[string]string{
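Note: the e2e hunks above add "flatcar" to each provider's exclude list so the new OS only runs on providers that actually ship a Flatcar image and userdata path. The real scenarioSelector and runScenarios implementations live in test/e2e/provisioning and are not part of this patch, so the types and helper below are assumptions; this is only a minimal sketch of how osName-based exclusion can filter generated scenarios, not the repository's actual code.

// Hypothetical sketch of osName-based scenario exclusion. The field and
// type names mirror the diff above but are re-declared here for a
// self-contained example and may differ from the real implementation.
package provisioning

type scenario struct {
	name   string
	osName string
}

type scenarioSelector struct {
	osName []string
}

// filterScenarios drops every scenario whose operating system appears in
// the selector's osName list, which is how adding "flatcar" to an
// excludeSelector would keep it out of a provider's test matrix.
func filterScenarios(all []scenario, exclude *scenarioSelector) []scenario {
	var kept []scenario
	for _, s := range all {
		excluded := false
		for _, os := range exclude.osName {
			if s.osName == os {
				excluded = true
				break
			}
		}
		if !excluded {
			kept = append(kept, s)
		}
	}
	return kept
}

Under this sketch, an exclude list of {"sles", "rhel", "flatcar"} would leave only the coreos, ubuntu, and centos scenarios to run for that provider, matching the intent of the edits above.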