diff --git a/pkg/cloudprovider/provider/vsphere/helper.go b/pkg/cloudprovider/provider/vsphere/helper.go index 1e48bfd72..9ec981ec2 100644 --- a/pkg/cloudprovider/provider/vsphere/helper.go +++ b/pkg/cloudprovider/provider/vsphere/helper.go @@ -33,60 +33,47 @@ const ( var errSnapshotNotFound = errors.New("no snapshot with given name found") -func createLinkClonedVM(vmName, vmImage, datacenter, clusterName, folder string, cpus int32, memoryMB int64, client *govmomi.Client, containerLinuxUserdata string) error { - f := find.NewFinder(client.Client, true) - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() - - dc, err := f.Datacenter(ctx, datacenter) +func createClonedVM(ctx context.Context, vmName string, config *Config, dc *object.Datacenter, f *find.Finder, containerLinuxUserdata string) (*object.VirtualMachine, error) { + templateVM, err := f.VirtualMachine(ctx, config.TemplateVMName) if err != nil { - return fmt.Errorf("failed to get datacenter: %v", err) - } - f.SetDatacenter(dc) - - templateVM, err := f.VirtualMachine(ctx, vmImage) - if err != nil { - return fmt.Errorf("failed to get virtualmachine: %v", err) + return nil, fmt.Errorf("failed to get template vm: %v", err) } glog.V(3).Infof("Template VM ref is %+v", templateVM) - datacenterFolders, err := dc.Folders(ctx) - if err != nil { - return fmt.Errorf("failed to get datacenter folders: %v", err) - } - // Find the target folder, if its include in the provider config. - targetVMFolder := datacenterFolders.VmFolder - if folder != "" { + // Find the target folder, if its included in the provider config. + var targetVMFolder *object.Folder + if config.Folder != "" { // If non-absolute folder name is used, e.g. 'duplicate-folder' it can match // multiple folders and thus fail. It will also gladly match a folder from // a different datacenter. It is therefore preferable to use absolute folder // paths, e.g. '/Datacenter/vm/nested/folder'. // The target folder must already exist. 
- targetVMFolder, err = f.Folder(ctx, folder) + targetVMFolder, err = f.Folder(ctx, config.Folder) + if err != nil { + return nil, fmt.Errorf("failed to get target folder: %v", err) + } + } else { + // Do not query datacenter folders unless required + datacenterFolders, err := dc.Folders(ctx) if err != nil { - return fmt.Errorf("failed to get target folder: %v", err) + return nil, fmt.Errorf("failed to get datacenter folders: %v", err) } + targetVMFolder = datacenterFolders.VmFolder } // Create snapshot of the template VM if not already snapshotted. snapshot, err := findSnapshot(ctx, templateVM, snapshotName) if err != nil { if err != errSnapshotNotFound { - return fmt.Errorf("failed to find snapshot: %v", err) + return nil, fmt.Errorf("failed to find snapshot: %v", err) } snapshot, err = createSnapshot(ctx, templateVM, snapshotName, snapshotDesc) if err != nil { - return fmt.Errorf("failed to create snapshot: %v", err) + return nil, fmt.Errorf("failed to create snapshot: %v", err) } } - clsComputeRes, err := f.ClusterComputeResource(ctx, clusterName) - if err != nil { - return fmt.Errorf("failed to get cluster %s: %v", clusterName, err) - } - glog.V(3).Infof("Cluster is %+v", clsComputeRes) - snapshotRef := snapshot.Reference() var vAppAconfig *types.VmConfigSpec @@ -97,14 +84,13 @@ func createLinkClonedVM(vmName, vmImage, datacenter, clusterName, folder string, // In order to overwrite them, we need to specify their numeric Key values, // which we'll extract from that template. 
var mvm mo.VirtualMachine - err = templateVM.Properties(ctx, templateVM.Reference(), []string{"config", "config.vAppConfig", "config.vAppConfig.property"}, &mvm) - if err != nil { - return fmt.Errorf("failed to extract vapp properties for coreos: %v", err) + if err := templateVM.Properties(ctx, templateVM.Reference(), []string{"config", "config.vAppConfig", "config.vAppConfig.property"}, &mvm); err != nil { + return nil, fmt.Errorf("failed to extract vapp properties for coreos: %v", err) } var propertySpecs []types.VAppPropertySpec if mvm.Config.VAppConfig.GetVmConfigInfo() == nil { - return fmt.Errorf("no vm config found in template '%s'. Make sure you import the correct OVA with the appropriate coreos settings", vmImage) + return nil, fmt.Errorf("no vm config found in template '%s'. Make sure you import the correct OVA with the appropriate coreos settings", config.TemplateVMName) } for _, item := range mvm.Config.VAppConfig.GetVmConfigInfo().Property { @@ -138,28 +124,54 @@ func createLinkClonedVM(vmName, vmImage, datacenter, clusterName, folder string, } diskUUIDEnabled := true - cloneSpec := &types.VirtualMachineCloneSpec{ - Config: &types.VirtualMachineConfigSpec{ - Flags: &types.VirtualMachineFlagInfo{ - DiskUuidEnabled: &diskUUIDEnabled, - }, - NumCPUs: cpus, - MemoryMB: memoryMB, - VAppConfig: vAppAconfig, + desiredConfig := types.VirtualMachineConfigSpec{ + Flags: &types.VirtualMachineFlagInfo{ + DiskUuidEnabled: &diskUUIDEnabled, }, - Snapshot: &snapshotRef, + NumCPUs: config.CPUs, + MemoryMB: config.MemoryMB, + VAppConfig: vAppAconfig, } - // Create a link cloned VM from the template VM's snapshot - clonedVMTask, err := templateVM.Clone(ctx, targetVMFolder, vmName, *cloneSpec) + // Create a cloned VM from the template VM's snapshot + clonedVMTask, err := templateVM.Clone(ctx, targetVMFolder, vmName, types.VirtualMachineCloneSpec{Snapshot: &snapshotRef}) if err != nil { - return fmt.Errorf("failed to clone template vm: %v", err) + return nil, 
fmt.Errorf("failed to clone template vm: %v", err) } - if _, err = clonedVMTask.WaitForResult(ctx, nil); err != nil { - return fmt.Errorf("error when waiting for result of clone task: %v", err) + if err := clonedVMTask.Wait(ctx); err != nil { + return nil, fmt.Errorf("error when waiting for result of clone task: %v", err) } - return nil + + virtualMachine, err := f.VirtualMachine(ctx, vmName) + if err != nil { + return nil, fmt.Errorf("failed to get virtual machine object after cloning: %v", err) + } + + reconfigureTask, err := virtualMachine.Reconfigure(ctx, desiredConfig) + if err != nil { + return nil, fmt.Errorf("failed to reconfigure vm: %v", err) + } + + if err := reconfigureTask.Wait(ctx); err != nil { + return nil, fmt.Errorf("error waiting for reconfigure task to finish: %v", err) + } + + // Update network if requested + if config.VMNetName != "" { + if err := updateNetworkForVM(ctx, virtualMachine, config.TemplateNetName, config.VMNetName); err != nil { + return nil, fmt.Errorf("couldn't set network for vm: %v", err) + } + } + + // Ubuntu won't boot with attached floppy device, because it tries to write to it + // which fails, because the floppy device does not contain a floppy disk + // Upstream issue: https://bugs.launchpad.net/cloud-images/+bug/1573095 + if err := removeFloppyDevice(ctx, virtualMachine); err != nil { + return nil, fmt.Errorf("failed to remove floppy device: %v", err) + } + + return virtualMachine, nil } func updateNetworkForVM(ctx context.Context, vm *object.VirtualMachine, currentNetName string, newNetName string) error { @@ -249,8 +261,7 @@ func createSnapshot(ctx context.Context, vm *object.VirtualMachine, snapshotName func findSnapshot(ctx context.Context, vm *object.VirtualMachine, name string) (object.Reference, error) { var moVirtualMachine mo.VirtualMachine - err := vm.Properties(ctx, vm.Reference(), []string{"snapshot"}, &moVirtualMachine) - if err != nil { + if err := vm.Properties(ctx, vm.Reference(), []string{"snapshot"}, 
&moVirtualMachine); err != nil { return nil, fmt.Errorf("failed to get vm properties: %v", err) } @@ -284,9 +295,7 @@ func addMatchingSnapshotToList(list *[]object.Reference, tree types.VirtualMachi } } -func uploadAndAttachISO(f *find.Finder, vmRef *object.VirtualMachine, localIsoFilePath, datastoreName string, client *govmomi.Client) error { - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() +func uploadAndAttachISO(ctx context.Context, f *find.Finder, vmRef *object.VirtualMachine, localIsoFilePath, datastoreName string) error { datastore, err := f.Datastore(ctx, datastoreName) if err != nil { @@ -295,8 +304,7 @@ func uploadAndAttachISO(f *find.Finder, vmRef *object.VirtualMachine, localIsoFi p := soap.DefaultUpload remoteIsoFilePath := fmt.Sprintf("%s/%s", vmRef.Name(), "cloud-init.iso") glog.V(3).Infof("Uploading userdata ISO to datastore %+v, destination iso is %s\n", datastore, remoteIsoFilePath) - err = datastore.UploadFile(ctx, localIsoFilePath, remoteIsoFilePath, &p) - if err != nil { + if err := datastore.UploadFile(ctx, localIsoFilePath, remoteIsoFilePath, &p); err != nil { return fmt.Errorf("failed to upload iso: %v", err) } glog.V(3).Infof("Uploaded ISO file %s", localIsoFilePath) @@ -335,8 +343,7 @@ func generateLocalUserdataISO(userdata, name string) (string, error) { return "", fmt.Errorf("failed to create local temp directory for userdata at %s: %v", userdataDir, err) } defer func() { - err := os.RemoveAll(userdataDir) - if err != nil { + if err := os.RemoveAll(userdataDir); err != nil { utilruntime.HandleError(fmt.Errorf("error cleaning up local userdata tempdir %s: %v", userdataDir, err)) } }() @@ -357,18 +364,15 @@ func generateLocalUserdataISO(userdata, name string) (string, error) { InstanceID: name, Hostname: name, } - err = metadataTmpl.Execute(metadata, templateContext) - if err != nil { + if err = metadataTmpl.Execute(metadata, templateContext); err != nil { return "", fmt.Errorf("failed to render metadata: %v", 
err) } - err = ioutil.WriteFile(userdataFilePath, []byte(userdata), 0644) - if err != nil { + if err := ioutil.WriteFile(userdataFilePath, []byte(userdata), 0644); err != nil { return "", fmt.Errorf("failed to locally write userdata file to %s: %v", userdataFilePath, err) } - err = ioutil.WriteFile(metadataFilePath, metadata.Bytes(), 0644) - if err != nil { + if err := ioutil.WriteFile(metadataFilePath, metadata.Bytes(), 0644); err != nil { return "", fmt.Errorf("failed to locally write metadata file to %s: %v", userdataFilePath, err) } @@ -386,16 +390,15 @@ func generateLocalUserdataISO(userdata, name string) (string, error) { } cmd := exec.Command(command, args...) - output, err := cmd.CombinedOutput() - if err != nil { + if output, err := cmd.CombinedOutput(); err != nil { return "", fmt.Errorf("error executing command `%s %s`: output: `%s`, error: `%v`", command, args, string(output), err) } return isoFilePath, nil } -func removeFloppyDevice(virtualMachine *object.VirtualMachine) error { - vmDevices, err := virtualMachine.Device(context.TODO()) +func removeFloppyDevice(ctx context.Context, virtualMachine *object.VirtualMachine) error { + vmDevices, err := virtualMachine.Device(ctx) if err != nil { return fmt.Errorf("failed to get device list: %v", err) } @@ -410,8 +413,7 @@ func removeFloppyDevice(virtualMachine *object.VirtualMachine) error { return fmt.Errorf("failed to find floppy: %v", err) } - err = virtualMachine.RemoveDevice(context.TODO(), false, floppyDevice) - if err != nil { + if err := virtualMachine.RemoveDevice(ctx, false, floppyDevice); err != nil { return fmt.Errorf("failed to remove floppy device: %v", err) } diff --git a/pkg/cloudprovider/provider/vsphere/provider.go b/pkg/cloudprovider/provider/vsphere/provider.go index 02a1ef9b4..adb343bd1 100644 --- a/pkg/cloudprovider/provider/vsphere/provider.go +++ b/pkg/cloudprovider/provider/vsphere/provider.go @@ -8,7 +8,6 @@ import ( "net/url" "os" "strings" - "time" "github.com/golang/glog" @@ 
-267,9 +266,11 @@ func (p *provider) getConfig(s v1alpha1.ProviderConfig) (*Config, *providerconfi } func (p *provider) Validate(spec v1alpha1.MachineSpec) error { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() config, _, _, err := p.getConfig(spec.ProviderConfig) if err != nil { - return err + return fmt.Errorf("failed to get config: %v", err) } if config.VMNetName != "" && config.TemplateNetName == "" { @@ -281,23 +282,25 @@ func (p *provider) Validate(spec v1alpha1.MachineSpec) error { return fmt.Errorf("failed to get vsphere client: '%v'", err) } defer func() { - if lerr := client.Logout(context.TODO()); lerr != nil { - utilruntime.HandleError(fmt.Errorf("vsphere client failed to logout: %s", lerr)) + if err := client.Logout(context.Background()); err != nil { + utilruntime.HandleError(fmt.Errorf("vsphere client failed to logout: %s", err)) } }() finder, err := getDatacenterFinder(config.Datacenter, client) if err != nil { - return err + return fmt.Errorf("failed to get datacenter %s: %v", config.Datacenter, err) } - _, err = finder.Datastore(context.TODO(), config.Datastore) - if err != nil { - return err + if _, err := finder.Datastore(ctx, config.Datastore); err != nil { + return fmt.Errorf("failed to get datastore %s: %v", config.Datastore, err) } - _, err = finder.ClusterComputeResource(context.TODO(), config.Cluster) - return err + if _, err := finder.ClusterComputeResource(ctx, config.Cluster); err != nil { + return fmt.Errorf("failed to get cluster: %s: %v", config.Cluster, err) + } + + return nil } func machineInvalidConfigurationTerminalError(err error) error { @@ -308,6 +311,9 @@ func machineInvalidConfigurationTerminalError(err error) error { } func (p *provider) Create(machine *v1alpha1.Machine, _ cloud.MachineUpdater, userdata string) (instance.Instance, error) { + ctx, cancel := context.WithCancel(context.Background()) + defer cancel() + config, pc, _, err := p.getConfig(machine.Spec.ProviderConfig) if err != nil { 
return nil, fmt.Errorf("failed to parse config: %v", err) @@ -318,8 +324,8 @@ func (p *provider) Create(machine *v1alpha1.Machine, _ cloud.MachineUpdater, use return nil, fmt.Errorf("failed to get vsphere client: '%v'", err) } defer func() { - if lerr := client.Logout(context.TODO()); lerr != nil { - utilruntime.HandleError(fmt.Errorf("vsphere client failed to logout: %s", lerr)) + if err := client.Logout(context.Background()); err != nil { + utilruntime.HandleError(fmt.Errorf("vsphere client failed to logout: %s", err)) } }() @@ -328,39 +334,27 @@ func (p *provider) Create(machine *v1alpha1.Machine, _ cloud.MachineUpdater, use containerLinuxUserdata = userdata } - if err = createLinkClonedVM(machine.Spec.Name, - config.TemplateVMName, - config.Datacenter, - config.Cluster, - config.Folder, - config.CPUs, - config.MemoryMB, - client, - containerLinuxUserdata); err != nil { - return nil, machineInvalidConfigurationTerminalError(fmt.Errorf("failed to create linked vm: '%v'", err)) - } - - finder, err := getDatacenterFinder(config.Datacenter, client) - if err != nil { - return nil, err - } - virtualMachine, err := finder.VirtualMachine(context.TODO(), machine.Spec.Name) + finder := find.NewFinder(client.Client, true) + dc, err := finder.Datacenter(ctx, config.Datacenter) if err != nil { - return nil, fmt.Errorf("failed to get virtual machine object: %v", err) + return nil, fmt.Errorf("failed to get datacenter: %v", err) } + finder.SetDatacenter(dc) - // Map networks - if config.VMNetName != "" { - err = updateNetworkForVM(context.TODO(), virtualMachine, config.TemplateNetName, config.VMNetName) - if err != nil { - return nil, fmt.Errorf("couldn't set network for vm: %v", err) - } + virtualMachine, err := createClonedVM(ctx, + machine.Spec.Name, + config, + dc, + finder, + containerLinuxUserdata) + if err != nil { + return nil, machineInvalidConfigurationTerminalError(fmt.Errorf("failed to create cloned vm: '%v'", err)) } if pc.OperatingSystem != 
providerconfig.OperatingSystemCoreos { localUserdataIsoFilePath, err := generateLocalUserdataISO(userdata, machine.Spec.Name) if err != nil { - return nil, err + return nil, fmt.Errorf("failed to generate local userdata iso: %v", err) } defer func() { @@ -370,30 +364,18 @@ } }() - err = uploadAndAttachISO(finder, virtualMachine, localUserdataIsoFilePath, config.Datastore, client) - if err != nil { + if err := uploadAndAttachISO(ctx, finder, virtualMachine, localUserdataIsoFilePath, config.Datastore); err != nil { return nil, machineInvalidConfigurationTerminalError(fmt.Errorf("failed to upload and attach userdata iso: %v", err)) } } - // Ubuntu wont boot with attached floppy device, because it tries to write to it - // which fails, because the floppy device does not contain a floppy disk - // Upstream issue: https://bugs.launchpad.net/cloud-images/+bug/1573095 - err = removeFloppyDevice(virtualMachine) - if err != nil { - return nil, fmt.Errorf("failed to remove floppy device: %v", err) - } - - powerOnTask, err := virtualMachine.PowerOn(context.TODO()) + powerOnTask, err := virtualMachine.PowerOn(ctx) if err != nil { return nil, fmt.Errorf("failed to power on machine: %v", err) } - powerOnTaskContext, cancel := context.WithTimeout(context.Background(), 30*time.Second) - defer cancel() - err = powerOnTask.Wait(powerOnTaskContext) - if err != nil { - return nil, fmt.Errorf("timed out waiting to power on vm %s: %v", virtualMachine.Name(), err) + if err := powerOnTask.Wait(ctx); err != nil { + return nil, fmt.Errorf("error when waiting for vm powerOn task: %v", err) } return Server{name: virtualMachine.Name(), status: instance.StatusRunning, id: virtualMachine.Reference().Value}, nil @@ -417,8 +399,8 @@ func (p *provider) Delete(machine *v1alpha1.Machine, _ cloud.MachineUpdater) err return fmt.Errorf("failed to get vsphere client: '%v'", err) } defer func() { - if lerr := 
client.Logout(context.TODO()); lerr != nil { - utilruntime.HandleError(fmt.Errorf("vsphere client failed to logout: %s", lerr)) + if err := client.Logout(context.TODO()); err != nil { + utilruntime.HandleError(fmt.Errorf("vsphere client failed to logout: %s", err)) } }() finder := find.NewFinder(client.Client, true) @@ -459,7 +441,7 @@ func (p *provider) Delete(machine *v1alpha1.Machine, _ cloud.MachineUpdater) err if err != nil { return fmt.Errorf("failed to destroy vm %s: %v", virtualMachine.Name(), err) } - if err = destroyTask.Wait(context.TODO()); err != nil { + if err := destroyTask.Wait(context.TODO()); err != nil { return fmt.Errorf("failed to destroy vm %s: %v", virtualMachine.Name(), err) } @@ -527,8 +509,7 @@ func (p *provider) Get(machine *v1alpha1.Machine) (instance.Instance, error) { if isGuestToolsRunning { var moVirtualMachine mo.VirtualMachine pc := property.DefaultCollector(client.Client) - err = pc.RetrieveOne(context.TODO(), virtualMachine.Reference(), []string{"guest"}, &moVirtualMachine) - if err != nil { + if err := pc.RetrieveOne(context.TODO(), virtualMachine.Reference(), []string{"guest"}, &moVirtualMachine); err != nil { return nil, fmt.Errorf("failed to retrieve guest info: %v", err) } diff --git a/pkg/userdata/coreos/testdata/auto-update-openstack-kubelet-v-version-prefix.golden b/pkg/userdata/coreos/testdata/auto-update-openstack-kubelet-v-version-prefix.golden index c35eb7b37..8a70c4c73 100644 --- a/pkg/userdata/coreos/testdata/auto-update-openstack-kubelet-v-version-prefix.golden +++ b/pkg/userdata/coreos/testdata/auto-update-openstack-kubelet-v-version-prefix.golden @@ -212,7 +212,7 @@ "name": "kubelet-healthcheck.service" }, { - "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image 
\\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=openstack \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", + "contents": "[Unit]\nDescription=Kubernetes 
Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=openstack \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n 
--protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", "enabled": true, "name": "kubelet.service" } diff --git a/pkg/userdata/coreos/testdata/v1.10.3-auto-update-openstack-multiple-dns.golden b/pkg/userdata/coreos/testdata/v1.10.3-auto-update-openstack-multiple-dns.golden index 8a27e766f..8b6ae4c88 100644 --- a/pkg/userdata/coreos/testdata/v1.10.3-auto-update-openstack-multiple-dns.golden +++ b/pkg/userdata/coreos/testdata/v1.10.3-auto-update-openstack-multiple-dns.golden @@ -212,7 +212,7 @@ "name": "kubelet-healthcheck.service" }, { - "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.10.3\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n 
--bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=openstack \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10,10.10.10.11,10.10.10.12 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.10.3\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p 
/var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=openstack \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10,10.10.10.11,10.10.10.12 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", "enabled": true, "name": "kubelet.service" } diff --git a/pkg/userdata/coreos/testdata/v1.11.2-vsphere-static-ipconfig.golden b/pkg/userdata/coreos/testdata/v1.11.2-vsphere-static-ipconfig.golden index 705489a8a..e006bba9e 100644 --- a/pkg/userdata/coreos/testdata/v1.11.2-vsphere-static-ipconfig.golden +++ b/pkg/userdata/coreos/testdata/v1.11.2-vsphere-static-ipconfig.golden @@ -227,7 +227,7 @@ "name": "kubelet-healthcheck.service" }, { - "contents": "[Unit]\nDescription=Kubernetes 
Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.11.2\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n 
--cluster-dns=10.10.10.10 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.11.2\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n 
--authentication-token-webhook=true \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", "enabled": true, "name": "kubelet.service" } diff --git a/pkg/userdata/coreos/testdata/v1.12.0-vsphere-overwrite-cloudconfig.golden b/pkg/userdata/coreos/testdata/v1.12.0-vsphere-overwrite-cloudconfig.golden index b472fee5a..db4e8e0db 100644 --- a/pkg/userdata/coreos/testdata/v1.12.0-vsphere-overwrite-cloudconfig.golden +++ b/pkg/userdata/coreos/testdata/v1.12.0-vsphere-overwrite-cloudconfig.golden @@ -216,7 +216,7 @@ "name": "kubelet-healthcheck.service" }, { - "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.12.0\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir 
-p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.12.0\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount 
volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=vsphere \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", "enabled": true, "name": "kubelet.service" } diff --git a/pkg/userdata/coreos/testdata/v1.9.2-disable-auto-update-aws.golden b/pkg/userdata/coreos/testdata/v1.9.2-disable-auto-update-aws.golden index 4bcea2819..e3ca97d4f 100644 --- a/pkg/userdata/coreos/testdata/v1.9.2-disable-auto-update-aws.golden +++ b/pkg/userdata/coreos/testdata/v1.9.2-disable-auto-update-aws.golden @@ -220,7 +220,7 @@ "name": "kubelet-healthcheck.service" }, { - "contents": "[Unit]\nDescription=Kubernetes 
Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n --cloud-provider=aws \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10 
\\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", + "contents": "[Unit]\nDescription=Kubernetes Kubelet\nRequires=docker.service\nAfter=docker.service\n[Service]\nTimeoutStartSec=5min\nEnvironment=KUBELET_IMAGE=docker://k8s.gcr.io/hyperkube-amd64:v1.9.2\nEnvironment=\"RKT_RUN_ARGS=--uuid-file-save=/var/cache/kubelet-pod.uuid \\\n --insecure-options=image \\\n --volume=resolv,kind=host,source=/etc/resolv.conf \\\n --mount volume=resolv,target=/etc/resolv.conf \\\n --volume cni-bin,kind=host,source=/opt/cni/bin \\\n --mount volume=cni-bin,target=/opt/cni/bin \\\n --volume cni-conf,kind=host,source=/etc/cni/net.d \\\n --mount volume=cni-conf,target=/etc/cni/net.d \\\n --volume etc-kubernetes,kind=host,source=/etc/kubernetes \\\n --mount volume=etc-kubernetes,target=/etc/kubernetes \\\n --volume var-log,kind=host,source=/var/log \\\n --mount volume=var-log,target=/var/log \\\n --volume var-lib-calico,kind=host,source=/var/lib/calico \\\n --mount volume=var-lib-calico,target=/var/lib/calico\"\nExecStartPre=/bin/mkdir -p /var/lib/calico\nExecStartPre=/bin/mkdir -p /etc/kubernetes/manifests\nExecStartPre=/bin/mkdir -p /etc/cni/net.d\nExecStartPre=/bin/mkdir -p /opt/cni/bin\nExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid\nExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/\nExecStart=/usr/lib/coreos/kubelet-wrapper \\\n --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf \\\n --kubeconfig=/etc/kubernetes/kubelet.conf \\\n --pod-manifest-path=/etc/kubernetes/manifests \\\n --allow-privileged=true \\\n --network-plugin=cni \\\n --cni-conf-dir=/etc/cni/net.d \\\n --cni-bin-dir=/opt/cni/bin \\\n --authorization-mode=Webhook \\\n --client-ca-file=/etc/kubernetes/pki/ca.crt \\\n --cadvisor-port=0 \\\n --rotate-certificates=true \\\n --cert-dir=/etc/kubernetes/pki \\\n --authentication-token-webhook=true \\\n 
--cloud-provider=aws \\\n --cloud-config=/etc/kubernetes/cloud-config \\\n --hostname-override=node1 \\\n --read-only-port=0 \\\n --exit-on-lock-contention \\\n --lock-file=/tmp/kubelet.lock \\\n --anonymous-auth=false \\\n --protect-kernel-defaults=true \\\n --cluster-dns=10.10.10.10 \\\n --cluster-domain=cluster.local\nExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid\nRestart=always\nRestartSec=10\n[Install]\nWantedBy=multi-user.target\n", "enabled": true, "name": "kubelet.service" } diff --git a/pkg/userdata/coreos/userdata.go b/pkg/userdata/coreos/userdata.go index 842ed6af4..6ce460d0b 100644 --- a/pkg/userdata/coreos/userdata.go +++ b/pkg/userdata/coreos/userdata.go @@ -235,6 +235,7 @@ systemd: ExecStartPre=/bin/mkdir -p /etc/cni/net.d ExecStartPre=/bin/mkdir -p /opt/cni/bin ExecStartPre=-/usr/bin/rkt rm --uuid-file=/var/cache/kubelet-pod.uuid + ExecStartPre=-/bin/rm -rf /var/lib/rkt/cas/tmp/ ExecStart=/usr/lib/coreos/kubelet-wrapper \ {{ kubeletFlags .KubeletVersion .CloudProvider .MachineSpec.Name .ClusterDNSIPs | indent 10 }} ExecStop=-/usr/bin/rkt stop --uuid-file=/var/cache/kubelet-pod.uuid