From 1d38818326e5e98598833e21a08eb9bd1d258c3b Mon Sep 17 00:00:00 2001 From: David Ashpole Date: Fri, 10 Feb 2017 15:08:03 -0800 Subject: [PATCH] Revert "Merge pull request #41202 from dashpole/revert-41095-deletion_pod_lifecycle" This reverts commit ff87d13b2c9a820968d41034ed2c5b687bfacb98, reversing changes made to 46becf2c81e5911d79ce40009c6a2838194b3cf4. --- pkg/kubelet/BUILD | 1 + pkg/kubelet/kubelet.go | 3 +- pkg/kubelet/kubelet_pods.go | 31 +++++++++++++++ pkg/kubelet/kubelet_test.go | 4 +- pkg/kubelet/prober/BUILD | 1 + pkg/kubelet/prober/common_test.go | 3 +- pkg/kubelet/prober/worker_test.go | 3 +- pkg/kubelet/runonce_test.go | 4 +- pkg/kubelet/status/BUILD | 6 ++- pkg/kubelet/status/status_manager.go | 38 ++++++++++--------- pkg/kubelet/status/status_manager_test.go | 3 +- pkg/kubelet/status/testing/BUILD | 31 +++++++++++++++ .../testing/fake_pod_deletion_safety.go | 28 ++++++++++++++ pkg/kubelet/volumemanager/BUILD | 3 ++ pkg/kubelet/volumemanager/populator/BUILD | 1 + .../desired_state_of_world_populator.go | 27 +++++++++++-- pkg/kubelet/volumemanager/volume_manager.go | 3 ++ .../volumemanager/volume_manager_test.go | 4 ++ 18 files changed, 165 insertions(+), 29 deletions(-) create mode 100644 pkg/kubelet/status/testing/BUILD create mode 100644 pkg/kubelet/status/testing/fake_pod_deletion_safety.go diff --git a/pkg/kubelet/BUILD b/pkg/kubelet/BUILD index a17e77f31c6a..59b0f17878dd 100644 --- a/pkg/kubelet/BUILD +++ b/pkg/kubelet/BUILD @@ -182,6 +182,7 @@ go_test( "//pkg/kubelet/server/remotecommand:go_default_library", "//pkg/kubelet/server/stats:go_default_library", "//pkg/kubelet/status:go_default_library", + "//pkg/kubelet/status/testing:go_default_library", "//pkg/kubelet/types:go_default_library", "//pkg/kubelet/util/queue:go_default_library", "//pkg/kubelet/util/sliceutils:go_default_library", diff --git a/pkg/kubelet/kubelet.go b/pkg/kubelet/kubelet.go index 092e1068f5da..7f29fab7f258 100644 --- a/pkg/kubelet/kubelet.go +++ b/pkg/kubelet/kubelet.go @@ -690,7 +690,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub } klet.imageManager = imageManager - klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager) + klet.statusManager = status.NewManager(klet.kubeClient, klet.podManager, klet) klet.probeManager = prober.NewManager( klet.statusManager, @@ -715,6 +715,7 @@ func NewMainKubelet(kubeCfg *componentconfig.KubeletConfiguration, kubeDeps *Kub kubeCfg.EnableControllerAttachDetach, nodeName, klet.podManager, + klet.statusManager, klet.kubeClient, klet.volumePluginMgr, klet.containerRuntime, diff --git a/pkg/kubelet/kubelet_pods.go b/pkg/kubelet/kubelet_pods.go index 556d7a47e94f..e3e3e5c43652 100644 --- a/pkg/kubelet/kubelet_pods.go +++ b/pkg/kubelet/kubelet_pods.go @@ -730,6 +730,37 @@ func (kl *Kubelet) podIsTerminated(pod *v1.Pod) bool { return false } +// Returns true if all required node-level resources that a pod was consuming have been reclaimed by the kubelet. +// Reclaiming resources is a prerequisite to deleting a pod from the API server. 
+func (kl *Kubelet) OkToDeletePod(pod *v1.Pod) bool {
+	if pod.DeletionTimestamp == nil {
+		// We shouldn't delete pods whose DeletionTimestamp is not set
+		return false
+	}
+	if !notRunning(pod.Status.ContainerStatuses) {
+		// We shouldn't delete pods that still have running containers
+		glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
+		return false
+	}
+	if kl.podVolumesExist(pod.UID) && !kl.kubeletConfiguration.KeepTerminatedPodVolumes {
+		// We shouldn't delete pods whose volumes have not been cleaned up if we are not keeping terminated pod volumes
+		glog.V(3).Infof("Pod %q is terminated, but some volumes have not been cleaned up", format.Pod(pod))
+		return false
+	}
+	return true
+}
+
+// notRunning returns true if every status is terminated or waiting, or the status list
+// is empty.
+func notRunning(statuses []v1.ContainerStatus) bool {
+	for _, status := range statuses {
+		if status.State.Terminated == nil && status.State.Waiting == nil {
+			return false
+		}
+	}
+	return true
+}
+
 // filterOutTerminatedPods returns the given pods which the status manager
 // does not consider failed or succeeded.
 func (kl *Kubelet) filterOutTerminatedPods(pods []*v1.Pod) []*v1.Pod {
diff --git a/pkg/kubelet/kubelet_test.go b/pkg/kubelet/kubelet_test.go
index 284eb2b77265..b342c61235b5 100644
--- a/pkg/kubelet/kubelet_test.go
+++ b/pkg/kubelet/kubelet_test.go
@@ -60,6 +60,7 @@ import (
 	"k8s.io/kubernetes/pkg/kubelet/secret"
 	"k8s.io/kubernetes/pkg/kubelet/server/stats"
 	"k8s.io/kubernetes/pkg/kubelet/status"
+	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
 	"k8s.io/kubernetes/pkg/kubelet/util/queue"
 	kubeletvolume "k8s.io/kubernetes/pkg/kubelet/volumemanager"
@@ -177,7 +178,7 @@ func newTestKubeletWithImageList(
 	secretManager := secret.NewSimpleSecretManager(kubelet.kubeClient)
 	kubelet.secretManager = secretManager
 	kubelet.podManager = kubepod.NewBasicPodManager(fakeMirrorClient, kubelet.secretManager)
-	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager)
+	kubelet.statusManager = status.NewManager(fakeKubeClient, kubelet.podManager, &statustest.FakePodDeletionSafetyProvider{})
 	kubelet.containerRefManager = kubecontainer.NewRefManager()
 	diskSpaceManager, err := newDiskSpaceManager(mockCadvisor, DiskSpacePolicy{})
 	if err != nil {
@@ -259,6 +260,7 @@ func newTestKubeletWithImageList(
 		controllerAttachDetachEnabled,
 		kubelet.nodeName,
 		kubelet.podManager,
+		kubelet.statusManager,
 		fakeKubeClient,
 		kubelet.volumePluginMgr,
 		fakeRuntime,
diff --git a/pkg/kubelet/prober/BUILD b/pkg/kubelet/prober/BUILD
index b1713519dd62..7b0e93690814 100644
--- a/pkg/kubelet/prober/BUILD
+++ b/pkg/kubelet/prober/BUILD
@@ -56,6 +56,7 @@ go_test(
         "//pkg/kubelet/pod:go_default_library",
         "//pkg/kubelet/prober/results:go_default_library",
         "//pkg/kubelet/status:go_default_library",
+        "//pkg/kubelet/status/testing:go_default_library",
         "//pkg/probe:go_default_library",
         "//pkg/util/exec:go_default_library",
         "//vendor:github.com/golang/glog",
diff --git a/pkg/kubelet/prober/common_test.go b/pkg/kubelet/prober/common_test.go
index 6a3b41575026..6044cfe084ce 100644
--- a/pkg/kubelet/prober/common_test.go
+++ b/pkg/kubelet/prober/common_test.go
@@ -28,6 +28,7 @@ import (
 	kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
 	"k8s.io/kubernetes/pkg/kubelet/prober/results"
 	"k8s.io/kubernetes/pkg/kubelet/status"
+	statustest "k8s.io/kubernetes/pkg/kubelet/status/testing"
 	"k8s.io/kubernetes/pkg/probe"
"k8s.io/kubernetes/pkg/util/exec" ) @@ -102,7 +103,7 @@ func newTestManager() *manager { // Add test pod to pod manager, so that status manager can get the pod from pod manager if needed. podManager.AddPod(getTestPod()) m := NewManager( - status.NewManager(&fake.Clientset{}, podManager), + status.NewManager(&fake.Clientset{}, podManager, &statustest.FakePodDeletionSafetyProvider{}), results.NewManager(), nil, // runner refManager, diff --git a/pkg/kubelet/prober/worker_test.go b/pkg/kubelet/prober/worker_test.go index 6239dc0f2134..da4a523a1dc0 100644 --- a/pkg/kubelet/prober/worker_test.go +++ b/pkg/kubelet/prober/worker_test.go @@ -31,6 +31,7 @@ import ( kubepod "k8s.io/kubernetes/pkg/kubelet/pod" "k8s.io/kubernetes/pkg/kubelet/prober/results" "k8s.io/kubernetes/pkg/kubelet/status" + statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" "k8s.io/kubernetes/pkg/probe" "k8s.io/kubernetes/pkg/util/exec" ) @@ -117,7 +118,7 @@ func TestDoProbe(t *testing.T) { } // Clean up. - m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil)) + m.statusManager = status.NewManager(&fake.Clientset{}, kubepod.NewBasicPodManager(nil, nil), &statustest.FakePodDeletionSafetyProvider{}) resultsManager(m, probeType).Remove(testContainerID) } } diff --git a/pkg/kubelet/runonce_test.go b/pkg/kubelet/runonce_test.go index 0b33cfdde0b1..c5091007cf6b 100644 --- a/pkg/kubelet/runonce_test.go +++ b/pkg/kubelet/runonce_test.go @@ -44,6 +44,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/secret" "k8s.io/kubernetes/pkg/kubelet/server/stats" "k8s.io/kubernetes/pkg/kubelet/status" + statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" "k8s.io/kubernetes/pkg/kubelet/volumemanager" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -77,7 +78,7 @@ func TestRunOnce(t *testing.T) { cadvisor: cadvisor, nodeLister: testNodeLister{}, nodeInfo: testNodeInfo{}, - statusManager: status.NewManager(nil, podManager), + statusManager: status.NewManager(nil, podManager, &statustest.FakePodDeletionSafetyProvider{}), containerRefManager: kubecontainer.NewRefManager(), podManager: podManager, os: &containertest.FakeOS{}, @@ -102,6 +103,7 @@ func TestRunOnce(t *testing.T) { true, kb.nodeName, kb.podManager, + kb.statusManager, kb.kubeClient, kb.volumePluginMgr, fakeRuntime, diff --git a/pkg/kubelet/status/BUILD b/pkg/kubelet/status/BUILD index 21e6004ca1a5..a931c3eded90 100644 --- a/pkg/kubelet/status/BUILD +++ b/pkg/kubelet/status/BUILD @@ -51,6 +51,7 @@ go_test( "//pkg/kubelet/pod:go_default_library", "//pkg/kubelet/pod/testing:go_default_library", "//pkg/kubelet/secret:go_default_library", + "//pkg/kubelet/status/testing:go_default_library", "//pkg/kubelet/types:go_default_library", "//vendor:github.com/stretchr/testify/assert", "//vendor:k8s.io/apimachinery/pkg/api/errors", @@ -70,6 +71,9 @@ filegroup( filegroup( name = "all-srcs", - srcs = [":package-srcs"], + srcs = [ + ":package-srcs", + "//pkg/kubelet/status/testing:all-srcs", + ], tags = ["automanaged"], ) diff --git a/pkg/kubelet/status/status_manager.go b/pkg/kubelet/status/status_manager.go index efd4abc8f190..ab52734f2a38 100644 --- a/pkg/kubelet/status/status_manager.go +++ b/pkg/kubelet/status/status_manager.go @@ -67,6 +67,7 @@ type manager struct { // Map from (mirror) pod UID to latest status version successfully sent to the API server. // apiStatusVersions must only be accessed from the sync thread. 
 	apiStatusVersions map[types.UID]uint64
+	podDeletionSafety PodDeletionSafetyProvider
 }

 // PodStatusProvider knows how to provide status for a pod. It's intended to be used by other components
@@ -77,6 +78,12 @@ type PodStatusProvider interface {
 	GetPodStatus(uid types.UID) (v1.PodStatus, bool)
 }

+// PodDeletionSafetyProvider provides guarantees that a pod can be safely deleted.
+type PodDeletionSafetyProvider interface {
+	// OkToDeletePod returns true if the pod can safely be deleted.
+	OkToDeletePod(pod *v1.Pod) bool
+}
+
 // Manager is the Source of truth for kubelet pod status, and should be kept up-to-date with
 // the latest v1.PodStatus. It also syncs updates back to the API server.
 type Manager interface {
@@ -103,13 +110,14 @@ type Manager interface {

 const syncPeriod = 10 * time.Second

-func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager) Manager {
+func NewManager(kubeClient clientset.Interface, podManager kubepod.Manager, podDeletionSafety PodDeletionSafetyProvider) Manager {
 	return &manager{
 		kubeClient:        kubeClient,
 		podManager:        podManager,
 		podStatuses:       make(map[types.UID]versionedPodStatus),
 		podStatusChannel:  make(chan podStatusSyncRequest, 1000), // Buffer up to 1000 statuses
 		apiStatusVersions: make(map[types.UID]uint64),
+		podDeletionSafety: podDeletionSafety,
 	}
 }

@@ -385,7 +393,7 @@ func (m *manager) syncBatch() {
 			}
 			syncedUID = mirrorUID
 		}
-		if m.needsUpdate(syncedUID, status) {
+		if m.needsUpdate(syncedUID, status) || m.couldBeDeleted(uid, status.status) {
 			updatedStatuses = append(updatedStatuses, podStatusSyncRequest{uid, status})
 		} else if m.needsReconcile(uid, status.status) {
 			// Delete the apiStatusVersions here to force an update on the pod status
@@ -439,11 +447,7 @@ func (m *manager) syncPod(uid types.UID, status versionedPodStatus) {
 		// We don't handle graceful deletion of mirror pods.
 		return
 	}
-	if pod.DeletionTimestamp == nil {
-		return
-	}
-	if !notRunning(pod.Status.ContainerStatuses) {
-		glog.V(3).Infof("Pod %q is terminated, but some containers are still running", format.Pod(pod))
+	if !m.podDeletionSafety.OkToDeletePod(pod) {
 		return
 	}
 	deleteOptions := metav1.NewDeleteOptions(0)
@@ -468,6 +472,15 @@ func (m *manager) needsUpdate(uid types.UID, status versionedPodStatus) bool {
 	return !ok || latest < status.version
 }

+func (m *manager) couldBeDeleted(uid types.UID, status v1.PodStatus) bool {
+	// The pod could be a static pod, so we should translate first.
+	pod, ok := m.podManager.GetPodByUID(uid)
+	if !ok {
+		return false
+	}
+	return !kubepod.IsMirrorPod(pod) && m.podDeletionSafety.OkToDeletePod(pod)
+}
+
 // needsReconcile compares the given status with the status in the pod manager (which
 // in fact comes from apiserver), returns whether the status needs to be reconciled with
 // the apiserver. Now when pod status is inconsistent between apiserver and kubelet,
@@ -568,17 +581,6 @@ func normalizeStatus(pod *v1.Pod, status *v1.PodStatus) *v1.PodStatus {
 	return status
 }

-// notRunning returns true if every status is terminated or waiting, or the status list
-// is empty.
-func notRunning(statuses []v1.ContainerStatus) bool { - for _, status := range statuses { - if status.State.Terminated == nil && status.State.Waiting == nil { - return false - } - } - return true -} - func copyStatus(source *v1.PodStatus) (v1.PodStatus, error) { clone, err := api.Scheme.DeepCopy(source) if err != nil { diff --git a/pkg/kubelet/status/status_manager_test.go b/pkg/kubelet/status/status_manager_test.go index 7a53194fa7ef..9a1654c30697 100644 --- a/pkg/kubelet/status/status_manager_test.go +++ b/pkg/kubelet/status/status_manager_test.go @@ -39,6 +39,7 @@ import ( kubepod "k8s.io/kubernetes/pkg/kubelet/pod" podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" kubesecret "k8s.io/kubernetes/pkg/kubelet/secret" + statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" kubetypes "k8s.io/kubernetes/pkg/kubelet/types" ) @@ -74,7 +75,7 @@ func (m *manager) testSyncBatch() { func newTestManager(kubeClient clientset.Interface) *manager { podManager := kubepod.NewBasicPodManager(podtest.NewFakeMirrorClient(), kubesecret.NewFakeManager()) podManager.AddPod(getTestPod()) - return NewManager(kubeClient, podManager).(*manager) + return NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}).(*manager) } func generateRandomMessage() string { diff --git a/pkg/kubelet/status/testing/BUILD b/pkg/kubelet/status/testing/BUILD new file mode 100644 index 000000000000..93a4a9422611 --- /dev/null +++ b/pkg/kubelet/status/testing/BUILD @@ -0,0 +1,31 @@ +package(default_visibility = ["//visibility:public"]) + +licenses(["notice"]) + +load( + "@io_bazel_rules_go//go:def.bzl", + "go_library", +) + +go_library( + name = "go_default_library", + srcs = ["fake_pod_deletion_safety.go"], + tags = ["automanaged"], + deps = [ + "//pkg/api/v1:go_default_library", + "//pkg/kubelet/pod:go_default_library", + ], +) + +filegroup( + name = "package-srcs", + srcs = glob(["**"]), + tags = ["automanaged"], + visibility = ["//visibility:private"], +) + +filegroup( + name = "all-srcs", + srcs = [":package-srcs"], + tags = ["automanaged"], +) diff --git a/pkg/kubelet/status/testing/fake_pod_deletion_safety.go b/pkg/kubelet/status/testing/fake_pod_deletion_safety.go new file mode 100644 index 000000000000..c05382907f38 --- /dev/null +++ b/pkg/kubelet/status/testing/fake_pod_deletion_safety.go @@ -0,0 +1,28 @@ +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package testing + +import ( + "k8s.io/kubernetes/pkg/api/v1" + kubepod "k8s.io/kubernetes/pkg/kubelet/pod" +) + +type FakePodDeletionSafetyProvider struct{} + +func (f *FakePodDeletionSafetyProvider) OkToDeletePod(pod *v1.Pod) bool { + return !kubepod.IsMirrorPod(pod) && pod.DeletionTimestamp != nil +} diff --git a/pkg/kubelet/volumemanager/BUILD b/pkg/kubelet/volumemanager/BUILD index dd82d53fc250..a73b968f96e4 100644 --- a/pkg/kubelet/volumemanager/BUILD +++ b/pkg/kubelet/volumemanager/BUILD @@ -18,6 +18,7 @@ go_library( "//pkg/kubelet/config:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/pod:go_default_library", + "//pkg/kubelet/status:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/kubelet/volumemanager/populator:go_default_library", @@ -50,6 +51,8 @@ go_test( "//pkg/kubelet/pod:go_default_library", "//pkg/kubelet/pod/testing:go_default_library", "//pkg/kubelet/secret:go_default_library", + "//pkg/kubelet/status:go_default_library", + "//pkg/kubelet/status/testing:go_default_library", "//pkg/util/mount:go_default_library", "//pkg/volume:go_default_library", "//pkg/volume/testing:go_default_library", diff --git a/pkg/kubelet/volumemanager/populator/BUILD b/pkg/kubelet/volumemanager/populator/BUILD index ec4f55b93840..c844e3e68a37 100644 --- a/pkg/kubelet/volumemanager/populator/BUILD +++ b/pkg/kubelet/volumemanager/populator/BUILD @@ -17,6 +17,7 @@ go_library( "//pkg/client/clientset_generated/clientset:go_default_library", "//pkg/kubelet/container:go_default_library", "//pkg/kubelet/pod:go_default_library", + "//pkg/kubelet/status:go_default_library", "//pkg/kubelet/util/format:go_default_library", "//pkg/kubelet/volumemanager/cache:go_default_library", "//pkg/volume:go_default_library", diff --git a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go index 5e8d8d4137a3..5854fbbe5cf9 100644 --- a/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go +++ b/pkg/kubelet/volumemanager/populator/desired_state_of_world_populator.go @@ -35,6 +35,7 @@ import ( "k8s.io/kubernetes/pkg/client/clientset_generated/clientset" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" + "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/volume" @@ -70,6 +71,7 @@ func NewDesiredStateOfWorldPopulator( loopSleepDuration time.Duration, getPodStatusRetryDuration time.Duration, podManager pod.Manager, + podStatusProvider status.PodStatusProvider, desiredStateOfWorld cache.DesiredStateOfWorld, kubeContainerRuntime kubecontainer.Runtime, keepTerminatedPodVolumes bool) DesiredStateOfWorldPopulator { @@ -78,6 +80,7 @@ func NewDesiredStateOfWorldPopulator( loopSleepDuration: loopSleepDuration, getPodStatusRetryDuration: getPodStatusRetryDuration, podManager: podManager, + podStatusProvider: podStatusProvider, desiredStateOfWorld: desiredStateOfWorld, pods: processedPods{ processedPods: make(map[volumetypes.UniquePodName]bool)}, @@ -91,6 +94,7 @@ type desiredStateOfWorldPopulator struct { loopSleepDuration time.Duration getPodStatusRetryDuration time.Duration podManager pod.Manager + podStatusProvider status.PodStatusProvider desiredStateOfWorld cache.DesiredStateOfWorld pods processedPods kubeContainerRuntime kubecontainer.Runtime @@ 
-134,15 +138,30 @@ func (dswp *desiredStateOfWorldPopulator) populatorLoopFunc() func() { } } -func isPodTerminated(pod *v1.Pod) bool { - return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded +func (dswp *desiredStateOfWorldPopulator) isPodTerminated(pod *v1.Pod) bool { + podStatus, found := dswp.podStatusProvider.GetPodStatus(pod.UID) + if !found { + podStatus = pod.Status + } + return podStatus.Phase == v1.PodFailed || podStatus.Phase == v1.PodSucceeded || (pod.DeletionTimestamp != nil && notRunning(podStatus.ContainerStatuses)) +} + +// notRunning returns true if every status is terminated or waiting, or the status list +// is empty. +func notRunning(statuses []v1.ContainerStatus) bool { + for _, status := range statuses { + if status.State.Terminated == nil && status.State.Waiting == nil { + return false + } + } + return true } // Iterate through all pods and add to desired state of world if they don't // exist but should func (dswp *desiredStateOfWorldPopulator) findAndAddNewPods() { for _, pod := range dswp.podManager.GetPods() { - if isPodTerminated(pod) { + if dswp.isPodTerminated(pod) { // Do not (re)add volumes for terminated pods continue } @@ -160,7 +179,7 @@ func (dswp *desiredStateOfWorldPopulator) findAndRemoveDeletedPods() { pod, podExists := dswp.podManager.GetPodByUID(volumeToMount.Pod.UID) if podExists { // Skip running pods - if !isPodTerminated(pod) { + if !dswp.isPodTerminated(pod) { continue } if dswp.keepTerminatedPodVolumes { diff --git a/pkg/kubelet/volumemanager/volume_manager.go b/pkg/kubelet/volumemanager/volume_manager.go index fc330c4f83f8..c0f6850b5048 100644 --- a/pkg/kubelet/volumemanager/volume_manager.go +++ b/pkg/kubelet/volumemanager/volume_manager.go @@ -33,6 +33,7 @@ import ( "k8s.io/kubernetes/pkg/kubelet/container" kubecontainer "k8s.io/kubernetes/pkg/kubelet/container" "k8s.io/kubernetes/pkg/kubelet/pod" + "k8s.io/kubernetes/pkg/kubelet/status" "k8s.io/kubernetes/pkg/kubelet/util/format" "k8s.io/kubernetes/pkg/kubelet/volumemanager/cache" "k8s.io/kubernetes/pkg/kubelet/volumemanager/populator" @@ -151,6 +152,7 @@ func NewVolumeManager( controllerAttachDetachEnabled bool, nodeName k8stypes.NodeName, podManager pod.Manager, + podStatusProvider status.PodStatusProvider, kubeClient clientset.Interface, volumePluginMgr *volume.VolumePluginMgr, kubeContainerRuntime kubecontainer.Runtime, @@ -191,6 +193,7 @@ func NewVolumeManager( desiredStateOfWorldPopulatorLoopSleepPeriod, desiredStateOfWorldPopulatorGetPodStatusRetryDuration, podManager, + podStatusProvider, vm.desiredStateOfWorld, kubeContainerRuntime, keepTerminatedPodVolumes) diff --git a/pkg/kubelet/volumemanager/volume_manager_test.go b/pkg/kubelet/volumemanager/volume_manager_test.go index 88fdf0849b57..93b2e834fae3 100644 --- a/pkg/kubelet/volumemanager/volume_manager_test.go +++ b/pkg/kubelet/volumemanager/volume_manager_test.go @@ -36,6 +36,8 @@ import ( kubepod "k8s.io/kubernetes/pkg/kubelet/pod" podtest "k8s.io/kubernetes/pkg/kubelet/pod/testing" "k8s.io/kubernetes/pkg/kubelet/secret" + "k8s.io/kubernetes/pkg/kubelet/status" + statustest "k8s.io/kubernetes/pkg/kubelet/status/testing" "k8s.io/kubernetes/pkg/util/mount" "k8s.io/kubernetes/pkg/volume" volumetest "k8s.io/kubernetes/pkg/volume/testing" @@ -187,11 +189,13 @@ func newTestVolumeManager( fakeRecorder := &record.FakeRecorder{} plugMgr := &volume.VolumePluginMgr{} plugMgr.InitPlugins([]volume.VolumePlugin{plug}, volumetest.NewFakeVolumeHost(tmpDir, kubeClient, nil)) + statusManager := 
status.NewManager(kubeClient, podManager, &statustest.FakePodDeletionSafetyProvider{}) vm, err := NewVolumeManager( true, testHostname, podManager, + statusManager, kubeClient, plugMgr, &containertest.FakeRuntime{},
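
The wiring above is easier to see in isolation. Below is a minimal, self-contained Go sketch of the PodDeletionSafetyProvider pattern this patch threads from the Kubelet into the status manager: the status manager holds the interface and consults it before issuing the final API-server delete, while the Kubelet supplies the implementation that checks node-level resources. The Pod struct and its fields here are simplified stand-ins (not the real v1.Pod), and kubeletSafety only approximates the checks in OkToDeletePod; only the interface shape mirrors the patch.

package main

import (
	"fmt"
	"time"
)

// Pod is a simplified stand-in for v1.Pod plus the kubelet-side state the
// real checks consult (container statuses, mounted volumes).
type Pod struct {
	Name              string
	DeletionTimestamp *time.Time // nil until the API server marks the pod for deletion
	ContainersRunning bool       // stand-in for !notRunning(pod.Status.ContainerStatuses)
	VolumesInUse      bool       // stand-in for kl.podVolumesExist(pod.UID)
}

// PodDeletionSafetyProvider mirrors the interface added in
// pkg/kubelet/status/status_manager.go: the status manager asks it whether a
// pod's node-level resources have been reclaimed before deleting the pod
// from the API server.
type PodDeletionSafetyProvider interface {
	OkToDeletePod(pod *Pod) bool
}

// kubeletSafety approximates the Kubelet's OkToDeletePod in
// pkg/kubelet/kubelet_pods.go: a pod may be deleted only once it is marked
// for deletion, all containers have stopped, and its volumes are cleaned up.
type kubeletSafety struct {
	keepTerminatedPodVolumes bool
}

func (k kubeletSafety) OkToDeletePod(pod *Pod) bool {
	if pod.DeletionTimestamp == nil {
		return false // not marked for deletion by the API server
	}
	if pod.ContainersRunning {
		return false // containers still running; resources not reclaimed
	}
	if pod.VolumesInUse && !k.keepTerminatedPodVolumes {
		return false // volumes not yet unmounted/detached
	}
	return true
}

func main() {
	var provider PodDeletionSafetyProvider = kubeletSafety{}
	now := time.Now()
	pod := &Pod{Name: "nginx", DeletionTimestamp: &now}
	fmt.Println(provider.OkToDeletePod(pod)) // true: marked, stopped, volumes reclaimed
}

Injecting the provider as an interface rather than passing the Kubelet directly is what lets tests substitute a trivial implementation, which is the role the new pkg/kubelet/status/testing package's FakePodDeletionSafetyProvider plays throughout the updated tests.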