[1.9] Automated cherry pick of #61373: Use inner volume name instead of outer volume name for subpath directory #61519

Merged
3 changes: 3 additions & 0 deletions pkg/kubelet/container/runtime.go
@@ -454,6 +454,9 @@ type VolumeInfo struct {
 	// Whether the volume permission is set to read-only or not
 	// This value is passed from volume.spec
 	ReadOnly bool
+	// Inner volume spec name, which is the PV name if used, otherwise
+	// it is the same as the outer volume spec name.
+	InnerVolumeSpecName string
 }
 
 type VolumeMap map[string]VolumeInfo
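
For context, a minimal runnable sketch (not part of this PR; names and values are hypothetical) of how the two names can differ: the outer name is whatever the pod spec calls the volume, while the inner name is the bound PV's name when the volume goes through a PVC, and identical to the outer name otherwise.

```go
package main

import "fmt"

// VolumeInfo mirrors the struct above, trimmed to the field added here.
type VolumeInfo struct {
	InnerVolumeSpecName string
}

func main() {
	// Keyed by outer volume spec name (the name in pod.spec.volumes);
	// the entries are hypothetical.
	podVolumes := map[string]VolumeInfo{
		"data":    {InnerVolumeSpecName: "pvc-4d7a9c31"}, // PVC-backed: inner name is the PV name
		"scratch": {InnerVolumeSpecName: "scratch"},      // emptyDir: inner equals outer
	}
	for outer, info := range podVolumes {
		fmt.Printf("outer=%q inner=%q\n", outer, info.InnerVolumeSpecName)
	}
}
```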
2 changes: 1 addition & 1 deletion pkg/kubelet/kubelet_pods.go
@@ -238,7 +238,7 @@ func makeMounts(pod *v1.Pod, podDir string, container *v1.Container, hostName, h
 			hostPath, cleanupAction, err = mounter.PrepareSafeSubpath(mountutil.Subpath{
 				VolumeMountIndex: i,
 				Path:             hostPath,
-				VolumeName:       mount.Name,
+				VolumeName:       vol.InnerVolumeSpecName,
 				VolumePath:       volumePath,
 				PodDir:           podDir,
 				ContainerName:    container.Name,
7 changes: 4 additions & 3 deletions pkg/kubelet/volumemanager/volume_manager.go
@@ -255,9 +255,10 @@ func (vm *volumeManager) GetMountedVolumesForPod(podName types.UniquePodName) co
 	podVolumes := make(container.VolumeMap)
 	for _, mountedVolume := range vm.actualStateOfWorld.GetMountedVolumesForPod(podName) {
 		podVolumes[mountedVolume.OuterVolumeSpecName] = container.VolumeInfo{
-			Mounter:           mountedVolume.Mounter,
-			BlockVolumeMapper: mountedVolume.BlockVolumeMapper,
-			ReadOnly:          mountedVolume.VolumeSpec.ReadOnly,
+			Mounter:             mountedVolume.Mounter,
+			BlockVolumeMapper:   mountedVolume.BlockVolumeMapper,
+			ReadOnly:            mountedVolume.VolumeSpec.ReadOnly,
+			InnerVolumeSpecName: mountedVolume.InnerVolumeSpecName,
 		}
 	}
 	return podVolumes
3 changes: 2 additions & 1 deletion pkg/util/mount/mount_linux.go
@@ -778,9 +778,10 @@ func (mounter *Mounter) CleanSubPaths(podDir string, volumeName string) error {
 
 // This implementation is shared between Linux and NsEnterMounter
 func doCleanSubPaths(mounter Interface, podDir string, volumeName string) error {
-	glog.V(4).Infof("Cleaning up subpath mounts for %s", podDir)
 	// scan /var/lib/kubelet/pods/<uid>/volume-subpaths/<volume>/*
 	subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName)
+	glog.V(4).Infof("Cleaning up subpath mounts for %s", subPathDir)
+
 	containerDirs, err := ioutil.ReadDir(subPathDir)
 	if err != nil {
 		if os.IsNotExist(err) {
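
As a side note, the log line also moves below subPathDir so it reports the directory actually being scanned rather than the whole pod directory. A small sketch (hypothetical paths, mirroring the constant above) of how that directory is built:

```go
package main

import (
	"fmt"
	"path/filepath"
)

// Mirrors the constant used by doCleanSubPaths: subpath bind mounts live at
// <podDir>/volume-subpaths/<volume>/<container>/<subPathIndex>.
const containerSubPathDirectoryName = "volume-subpaths"

func main() {
	// Hypothetical values; podDir is normally /var/lib/kubelet/pods/<pod-uid>.
	podDir := "/var/lib/kubelet/pods/7c9e6a4b"
	volumeName := "pvc-4d7a9c31" // the inner volume spec name after this PR

	subPathDir := filepath.Join(podDir, containerSubPathDirectoryName, volumeName)
	fmt.Println(subPathDir)
	// Prints: /var/lib/kubelet/pods/7c9e6a4b/volume-subpaths/pvc-4d7a9c31
}
```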
2 changes: 1 addition & 1 deletion pkg/volume/util/operationexecutor/operation_generator.go
@@ -629,7 +629,7 @@ func (og *operationGenerator) GenerateUnmountVolumeFunc(
 
 	// Remove all bind-mounts for subPaths
 	podDir := path.Join(podsDir, string(volumeToUnmount.PodUID))
-	if err := mounter.CleanSubPaths(podDir, volumeToUnmount.OuterVolumeSpecName); err != nil {
+	if err := mounter.CleanSubPaths(podDir, volumeToUnmount.InnerVolumeSpecName); err != nil {
 		return volumeToUnmount.GenerateErrorDetailed("error cleaning subPath mounts", err)
 	}
 
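
This is the teardown half of the fix: PrepareSafeSubpath (in kubelet_pods.go above) creates the subpath directory under the inner name, and CleanSubPaths must remove it under the same name. A schematic sketch of the mismatch the old code risked; subPathDirFor is a hypothetical helper, and the reconstruction rationale is inferred from the PR title rather than stated in the hunks above:

```go
package main

import "fmt"

// Hypothetical helper (not a Kubernetes API): the directory that holds a
// volume's subpath bind mounts for one pod.
func subPathDirFor(podDir, volumeName string) string {
	return podDir + "/volume-subpaths/" + volumeName
}

func main() {
	podDir := "/var/lib/kubelet/pods/7c9e6a4b" // hypothetical pod dir
	outer := "data"                            // name in pod.spec.volumes
	inner := "pvc-4d7a9c31"                    // bound PV name

	// If setup keys the directory by one name and teardown by the other,
	// the bind mount is never cleaned up and leaks.
	created := subPathDirFor(podDir, outer)
	cleaned := subPathDirFor(podDir, inner)
	fmt.Println(created == cleaned) // false: cleanup misses the directory

	// With both sides using the inner name, setup and teardown agree even
	// for volumes reconstructed after a kubelet restart, where only the
	// inner name can be recovered.
	created = subPathDirFor(podDir, inner)
	fmt.Println(created == cleaned) // true
}
```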
99 changes: 98 additions & 1 deletion test/e2e/storage/subpath.go
@@ -23,6 +23,7 @@ import (
 
 	"k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	apierrs "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/fields"
 	utilerrors "k8s.io/apimachinery/pkg/util/errors"
@@ -59,13 +60,19 @@ var initVolSources = map[string]func() volSource{
 	"hostPath":         initHostpath,
 	"hostPathSymlink":  initHostpathSymlink,
 	"emptyDir":         initEmptydir,
-	"gcePD":            initGCEPD,
+	"gcePDPVC":         initGCEPD,
 	"gcePDPartitioned": initGCEPDPartition,
 	"nfs":              initNFS,
 	"nfsPVC":           initNFSPVC,
 	"gluster":          initGluster,
 }
 
+var forceReconstructionSkipTypes = map[string]interface{}{
+	"hostPath":        nil,
+	"hostPathSymlink": nil,
+	"gluster":         nil,
+}
+
 var _ = SIGDescribe("Subpath", func() {
 	var (
 		subPath string
@@ -306,6 +313,17 @@ var _ = SIGDescribe("Subpath", func() {
 
 				testPodContainerRestart(f, pod, filePathInVolume, filePathInSubpath)
 			})
+
+			It("should unmount if pod is gracefully deleted while kubelet is down [Disruptive][Slow]", func() {
+				testSubpathReconstruction(f, pod, false)
+			})
+
+			It("should unmount if pod is force deleted while kubelet is down [Disruptive][Slow]", func() {
+				if _, ok := forceReconstructionSkipTypes[curVolType]; ok {
+					framework.Skipf("%s volume type does not support reconstruction, skipping", curVolType)
+				}
+				testSubpathReconstruction(f, pod, true)
+			})
 		})
 	}
@@ -548,6 +566,85 @@ func testPodContainerRestart(f *framework.Framework, pod *v1.Pod, fileInVolume,
 	Expect(strings.TrimSpace(out)).To(BeEquivalentTo("test-after"))
 }
 
+func testSubpathReconstruction(f *framework.Framework, pod *v1.Pod, forceDelete bool) {
+	// This is mostly copied from TestVolumeUnmountsFromDeletedPodWithForceOption()
+
+	// Change to busybox
+	pod.Spec.Containers[0].Image = "busybox"
+	pod.Spec.Containers[0].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+	pod.Spec.Containers[1].Image = "busybox"
+	pod.Spec.Containers[1].Command = []string{"/bin/sh", "-ec", "sleep 100000"}
+
+	By(fmt.Sprintf("Creating pod %s", pod.Name))
+	pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Create(pod)
+	Expect(err).ToNot(HaveOccurred())
+
+	err = framework.WaitTimeoutForPodRunningInNamespace(f.ClientSet, pod.Name, pod.Namespace, time.Minute)
+	Expect(err).ToNot(HaveOccurred())
+
+	pod, err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(pod.Name, metav1.GetOptions{})
+	Expect(err).ToNot(HaveOccurred())
+
+	nodeIP, err := framework.GetHostExternalAddress(f.ClientSet, pod)
+	Expect(err).NotTo(HaveOccurred())
+	nodeIP = nodeIP + ":22"
+
+	By("Expecting the volume mount to be found.")
+	result, err := framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+
+	By("Expecting the subpath volume mount to be found.")
+	result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Code).To(BeZero(), fmt.Sprintf("Expected grep exit code of 0, got %d", result.Code))
+
+	By("Stopping the kubelet.")
+	kubeletCommand(kStop, f.ClientSet, pod)
+	defer func() {
+		if err != nil {
+			kubeletCommand(kStart, f.ClientSet, pod)
+		}
+	}()
+
+	By(fmt.Sprintf("Deleting Pod %q", pod.Name))
+	if forceDelete {
+		err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, metav1.NewDeleteOptions(0))
+	} else {
+		err = f.ClientSet.CoreV1().Pods(pod.Namespace).Delete(pod.Name, &metav1.DeleteOptions{})
+	}
+	Expect(err).NotTo(HaveOccurred())
+
+	By("Starting the kubelet and waiting for pod to delete.")
+	kubeletCommand(kStart, f.ClientSet, pod)
+	err = f.WaitForPodTerminated(pod.Name, "")
+	if !apierrs.IsNotFound(err) && err != nil {
+		Expect(err).NotTo(HaveOccurred(), "Expected pod to terminate.")
+	}
+
+	if forceDelete {
+		// With forceDelete, since pods are immediately deleted from API server, there is no way to be sure when volumes are torn down
+		// so wait some time to finish
+		time.Sleep(4 * time.Minute)
+	}
+
+	By("Expecting the volume mount not to be found.")
+	result, err = framework.SSH(fmt.Sprintf("mount | grep %s | grep -v volume-subpaths", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no mount found).")
+	framework.Logf("Volume unmounted on node %s", pod.Spec.NodeName)
+
+	By("Expecting the subpath volume mount not to be found.")
+	result, err = framework.SSH(fmt.Sprintf("cat /proc/self/mountinfo | grep volume-subpaths | grep %s", pod.UID), nodeIP, framework.TestContext.Provider)
+	framework.LogSSHResult(result)
+	Expect(err).NotTo(HaveOccurred(), "Encountered SSH error.")
+	Expect(result.Stdout).To(BeEmpty(), "Expected grep stdout to be empty (i.e. no subpath mount found).")
+	framework.Logf("Subpath volume unmounted on node %s", pod.Spec.NodeName)
+}
+
 func podContainerExec(pod *v1.Pod, containerIndex int, bashExec string) (string, error) {
 	return framework.RunKubectl("exec", fmt.Sprintf("--namespace=%s", pod.Namespace), pod.Name, "--container", pod.Spec.Containers[containerIndex].Name, "--", "/bin/sh", "-c", bashExec)
 }