Skip to content

Commit fc32d2f

Browse files
Author: Kubernetes Submit Queue
Merge pull request kubernetes#61518 from msau42/automated-cherry-pick-of-#61373-upstream-release-1.10
Automatic merge from submit-queue. [1.10] Automated cherry pick of kubernetes#61373: Use inner volume name instead of outer volume name for subpath directory Cherry pick of kubernetes#61373 on release-1.10. kubernetes#61373: Use inner volume name instead of outer volume name for subpath directory **Release note**: ```release-note ACTION REQUIRED: In-place node upgrades to this release from versions 1.7.14, 1.8.9, and 1.9.4 are not supported if using subpath volumes with PVCs. Such pods should be drained from the node first. ```
2 parents 3a61a66 + dd7f2c6 commit fc32d2f

File tree

6 files changed

+103
-7
lines changed

6 files changed

+103
-7
lines changed

pkg/kubelet/container/runtime.go

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -470,6 +470,9 @@ type VolumeInfo struct {
470470
// Whether the volume permission is set to read-only or not
471471
// This value is passed from volume.spec
472472
ReadOnly bool
473+
// Inner volume spec name, which is the PV name if used, otherwise
474+
// it is the same as the outer volume spec name.
475+
InnerVolumeSpecName string
473476
}
474477

475478
type VolumeMap map[string]VolumeInfo

pkg/kubelet/kubelet_pods.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -235,7 +235,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
235235
hostPath, cleanupAction, err = mounter.PrepareSafeSubpath(mountutil.Subpath{
236236
VolumeMountIndex: i,
237237
Path: hostPath,
238-
VolumeName: mount.Name,
238+
VolumeName: vol.InnerVolumeSpecName,
239239
VolumePath: volumePath,
240240
PodDir: podDir,
241241
ContainerName: container.Name,

pkg/kubelet/volumemanager/volume_manager.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -255,9 +255,10 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co
255255
podVolumes := make(container.VolumeMap)
256256
for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
257257
podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{
258-
Mounter: mountedVolume.Mounter,
259-
BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
260-
ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
258+
Mounter: mountedVolume.Mounter,
259+
BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
260+
ReadOnly: mountedVolume.VolumeSpec.ReadOnly,
261+
InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName,
261262
}
262263
}
263264
return podVolumes

pkg/util/mount/mount_linux.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -815,9 +815,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error {
815815

816816
// This implementation is shared between Linux and NsEnterMounter
817817
func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error {
818-
glog.V(4).Infof("Cleaning up subpath mounts for %s", podDir)
819818
// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/*
820819
subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName)
820+
glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir)
821+
821822
containerDirs, err := ioutil.ReadDir(subPathDir)
822823
if err != nil {
823824
if os.IsNotExist(err) {

pkg/volume/util/operationexecutor/operation_generator.go

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -672,7 +672,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
672672

673673
// Remove all bind-mounts for subPaths
674674
podDir := path.Join(podsDir, string(volumeToUnmount.PodUID))
675-
if err := mounter.CleanSubPaths(podDir, volumeToUnmount.OuterVolumeSpecName); err != nil {
675+
if err := mounter.CleanSubPaths(podDir, volumeToUnmount.InnerVolumeSpecName); err != nil {
676676
return volumeToUnmount.GenerateError("error cleaning subPath mounts", err)
677677
}
678678

test/e2e/storage/subpath.go

Lines changed: 92 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -23,6 +23,7 @@ import (
2323

2424
"k8s.io/api/core/v1"
2525
apierrors "k8s.io/apimachinery/pkg/api/errors"
26+
apierrs "k8s.io/apimachinery/pkg/api/errors"
2627
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
2728
"k8s.io/apimachinery/pkg/fields"
2829
utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -60,7 +61,7 @@ var initVolSources = map[string]func() volSource{
6061
"hostPath": initHostpath,
6162
"hostPathSymlink": initHostpathSymlink,
6263
"emptyDir": initEmptydir,
63-
"gcePD": initGCEPD,
64+
"gcePDPVC": initGCEPD,
6465
"gcePDPartitioned": initGCEPDPartition,
6566
"nfs": initNFS,
6667
"nfsPVC": initNFSPVC,
@@ -307,6 +308,17 @@ var _ = utils.SIGDescribe("Subpath", func() {
307308

308309
testPodContainerRestart(f, pod, filePathInVolume, filePathInSubpath)
309310
})
311+
312+
It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
313+
testSubpathReconstruction(f, pod, false)
314+
})
315+
316+
It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
317+
if curVolType == "hostPath" || curVolType == "hostPathSymlink" {
318+
framework.Skipf("%s volume type does not support reconstruction, skipping", curVolType)
319+
}
320+
testSubpathReconstruction(f, pod, true)
321+
})
310322
})
311323
}
312324

@@ -549,6 +561,85 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod, fileInVolume,
549561
Expect(strings.TrimSpace(out)).To(BeEquivalentTo("test-after"))
550562
}
551563

564+
func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
565+
// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
566+
567+
// Change to busybox
568+
pod.Spec.Containers[0].Image = "busybox"
569+
pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
570+
pod.Spec.Containers[1].Image = "busybox"
571+
pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
572+
573+
By(fmt.Sprintf("Creating pod %s", pod.Name))
574+
pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
575+
Expect(err).ToNot(HaveOccurred())
576+
577+
err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute)
578+
Expect(err).ToNot(HaveOccurred())
579+
580+
pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
581+
Expect(err).ToNot(HaveOccurred())
582+
583+
nodeIP, err := framework.GetHostExternalAddress(f.ClientSet, pod)
584+
Expect(err).NotTo(HaveOccurred())
585+
nodeIP = nodeIP + ":22"
586+
587+
By("Expecting the volume mount to be found.")
588+
result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
589+
framework.LogSSHResult(result)
590+
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
591+
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
592+
593+
By("Expecting the subpath volume mount to be found.")
594+
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
595+
framework.LogSSHResult(result)
596+
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
597+
Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
598+
599+
By("Stopping the kubelet.")
600+
utils.KubeletCommand(utils.KStop, f.ClientSet, pod)
601+
defer func() {
602+
if err != nil {
603+
utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
604+
}
605+
}()
606+
607+
By(fmt.Sprintf("Deleting Pod %q", pod.Name))
608+
if forceDelete {
609+
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
610+
} else {
611+
err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
612+
}
613+
Expect(err).NotTo(HaveOccurred())
614+
615+
By("Starting the kubelet and waiting for pod to delete.")
616+
utils.KubeletCommand(utils.KStart, f.ClientSet, pod)
617+
err = f.WaitForPodTerminated(pod.Name, "")
618+
if !apierrs.IsNotFound(err) && err != nil {
619+
Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
620+
}
621+
622+
if forceDelete {
623+
// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
624+
// so wait some time to finish
625+
time.Sleep(30 * time.Second)
626+
}
627+
628+
By("Expecting the volume mount not to be found.")
629+
result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
630+
framework.LogSSHResult(result)
631+
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
632+
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
633+
framework.Logf("Volume unmounted on node %s", pod.Spec.NodeName)
634+
635+
By("Expecting the subpath volume mount not to be found.")
636+
result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
637+
framework.LogSSHResult(result)
638+
Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
639+
Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
640+
framework.Logf("Subpath volume unmounted on node %s", pod.Spec.NodeName)
641+
}
642+
552643
func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
553644
return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
554645
}

0 commit comments

Comments (0)