
Commit

Resolve the cherrypick conflict.
dchen1107 committed Jan 7, 2017
1 parent: 07f1799 · commit: e9ddb4b
Showing 6 changed files with 31 additions and 31 deletions.
pkg/kubelet/eviction/eviction_manager.go (3 changes: 2 additions & 1 deletion)
@@ -31,6 +31,7 @@ import (
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
"k8s.io/kubernetes/pkg/kubelet/qos"
"k8s.io/kubernetes/pkg/kubelet/server/stats"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/kubelet/util/format"
"k8s.io/kubernetes/pkg/util/clock"
"k8s.io/kubernetes/pkg/util/wait"
@@ -109,7 +110,7 @@ func (m *managerImpl) Admit(attrs *lifecycle.PodAdmitAttributes) lifecycle.PodAd
// the node has memory pressure, admit if not best-effort
if hasNodeCondition(m.nodeConditions, api.NodeMemoryPressure) {
notBestEffort := qos.BestEffort != qos.GetPodQOS(attrs.Pod)
- if notBestEffort || kubepod.IsCriticalPod(attrs.Pod) {
+ if notBestEffort || kubetypes.IsCriticalPod(attrs.Pod) {
return lifecycle.PodAdmitResult{Admit: true}
}
}
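
Note (not part of this commit): the sketch below paraphrases what the adjusted check amounts to once IsCriticalPod lives in pkg/kubelet/types. The helper name admitUnderMemoryPressure, the example pod, and the main wrapper are hypothetical; only the qos and kubetypes calls are taken from the diff above.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/kubelet/qos"
    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

// admitUnderMemoryPressure is a hypothetical standalone paraphrase of the
// check above: under node memory pressure, admit a pod when it is not
// best-effort, or when it carries the critical-pod annotation.
func admitUnderMemoryPressure(pod *api.Pod) bool {
    notBestEffort := qos.BestEffort != qos.GetPodQOS(pod)
    return notBestEffort || kubetypes.IsCriticalPod(pod)
}

func main() {
    // A pod whose container requests no resources is best-effort, so without
    // the critical-pod annotation it would be rejected under memory pressure.
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{Name: "example-pod"}, // hypothetical name
        Spec:       api.PodSpec{Containers: []api.Container{{Name: "main"}}},
    }
    fmt.Println(admitUnderMemoryPressure(pod)) // false

    pod.Annotations = map[string]string{kubetypes.CriticalPodAnnotationKey: ""}
    fmt.Println(admitUnderMemoryPressure(pod)) // true
}
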
pkg/kubelet/kubelet.go (6 changes: 3 additions & 3 deletions)
@@ -1911,10 +1911,10 @@ func (kl *Kubelet) HandlePodAdditions(pods []*api.Pod) {
start := kl.clock.Now()

// Pass critical pods through admission check first.
- var criticalPods []*v1.Pod
- var nonCriticalPods []*v1.Pod
+ var criticalPods []*api.Pod
+ var nonCriticalPods []*api.Pod
for _, p := range pods {
- if kubepod.IsCriticalPod(p) {
+ if kubetypes.IsCriticalPod(p) {
criticalPods = append(criticalPods, p)
} else {
nonCriticalPods = append(nonCriticalPods, p)
pkg/kubelet/kubelet_test.go (32 changes: 16 additions & 16 deletions)
@@ -467,12 +467,12 @@ func TestHandlePortConflicts(t *testing.T) {
func TestCriticalPrioritySorting(t *testing.T) {
testKubelet := newTestKubelet(t, false /* controllerAttachDetachEnabled */)
kl := testKubelet.kubelet
- nodes := []v1.Node{
- {ObjectMeta: v1.ObjectMeta{Name: testKubeletHostname},
- Status: v1.NodeStatus{Capacity: v1.ResourceList{}, Allocatable: v1.ResourceList{
- v1.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
- v1.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
- v1.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
+ nodes := []api.Node{
+ {ObjectMeta: api.ObjectMeta{Name: testKubeletHostname},
+ Status: api.NodeStatus{Capacity: api.ResourceList{}, Allocatable: api.ResourceList{
+ api.ResourceCPU: *resource.NewMilliQuantity(10, resource.DecimalSI),
+ api.ResourceMemory: *resource.NewQuantity(100, resource.BinarySI),
+ api.ResourcePods: *resource.NewQuantity(40, resource.DecimalSI),
}}},
}
kl.nodeLister = testNodeLister{nodes: nodes}
@@ -481,23 +481,23 @@ func TestCriticalPrioritySorting(t *testing.T) {
testKubelet.fakeCadvisor.On("ImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)

- spec := v1.PodSpec{NodeName: string(kl.nodeName),
- Containers: []v1.Container{{Resources: v1.ResourceRequirements{
- Requests: v1.ResourceList{
+ spec := api.PodSpec{NodeName: string(kl.nodeName),
+ Containers: []api.Container{{Resources: api.ResourceRequirements{
+ Requests: api.ResourceList{
"memory": resource.MustParse("90"),
},
}}}}
- pods := []*v1.Pod{
+ pods := []*api.Pod{
podWithUidNameNsSpec("000000000", "newpod", "foo", spec),
podWithUidNameNsSpec("987654321", "oldpod", "foo", spec),
podWithUidNameNsSpec("123456789", "middlepod", "foo", spec),
}

// Pods are not sorted by creation time.
startTime := time.Now()
- pods[0].CreationTimestamp = metav1.NewTime(startTime.Add(10 * time.Second))
- pods[1].CreationTimestamp = metav1.NewTime(startTime)
- pods[2].CreationTimestamp = metav1.NewTime(startTime.Add(1 * time.Second))
+ pods[0].CreationTimestamp = unversioned.NewTime(startTime.Add(10 * time.Second))
+ pods[1].CreationTimestamp = unversioned.NewTime(startTime)
+ pods[2].CreationTimestamp = unversioned.NewTime(startTime.Add(1 * time.Second))

// Make the middle and new pod critical, the middle pod should win
// even though it comes later in the list
@@ -507,7 +507,7 @@ func TestCriticalPrioritySorting(t *testing.T) {
pods[2].Annotations = critical

// The non-critical pod should be rejected
- notfittingPods := []*v1.Pod{pods[0], pods[1]}
+ notfittingPods := []*api.Pod{pods[0], pods[1]}
fittingPod := pods[2]

kl.HandlePodAdditions(pods)
@@ -516,13 +516,13 @@ func TestCriticalPrioritySorting(t *testing.T) {
for _, p := range notfittingPods {
status, found := kl.statusManager.GetPodStatus(p.UID)
require.True(t, found, "Status of pod %q is not found in the status map", p.UID)
- require.Equal(t, v1.PodFailed, status.Phase)
+ require.Equal(t, api.PodFailed, status.Phase)
}

// fittingPod should be Pending
status, found := kl.statusManager.GetPodStatus(fittingPod.UID)
require.True(t, found, "Status of pod %q is not found in the status map", fittingPod.UID)
- require.Equal(t, v1.PodPending, status.Phase)
+ require.Equal(t, api.PodPending, status.Phase)
}

// Tests that we handle host name conflicts correctly by setting the failed status in status map.
pkg/kubelet/pod/pod_manager.go (9 changes: 0 additions & 9 deletions)
@@ -21,7 +21,6 @@ import (

"k8s.io/kubernetes/pkg/api"
kubecontainer "k8s.io/kubernetes/pkg/kubelet/container"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
"k8s.io/kubernetes/pkg/types"
)

@@ -307,11 +306,3 @@ func (pm *basicManager) GetPodByMirrorPod(mirrorPod *api.Pod) (*api.Pod, bool) {
pod, ok := pm.podByFullName[kubecontainer.GetPodFullName(mirrorPod)]
return pod, ok
}
-
- // IsCriticalPod returns true if the pod bears the critical pod annotation
- // key. Both the rescheduler and the kubelet use this key to make admission
- // and scheduling decisions.
- func IsCriticalPod(pod *v1.Pod) bool {
- _, ok := pod.Annotations[kubetypes.CriticalPodAnnotationKey]
- return ok
- }
pkg/kubelet/qos/policy.go (4 changes: 2 additions & 2 deletions)
@@ -18,7 +18,7 @@ package qos

import (
"k8s.io/kubernetes/pkg/api"
kubepod "k8s.io/kubernetes/pkg/kubelet/pod"
kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

const (
@@ -44,7 +44,7 @@ const (
// and 1000. Containers with higher OOM scores are killed if the system runs out of memory.
// See https://lwn.net/Articles/391222/ for more information.
func GetContainerOOMScoreAdjust(pod *api.Pod, container *api.Container, memoryCapacity int64) int {
- if kubepod.IsCriticalPod(pod) {
+ if kubetypes.IsCriticalPod(pod) {
return CriticalPodOOMAdj
}

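Note (not part of this commit): a minimal sketch of how the relocated helper feeds the OOM-score policy above. A container of an annotated pod gets CriticalPodOOMAdj regardless of its QoS class; the pod name, namespace, and the 8 GiB capacity below are made up for illustration.

package main

import (
    "fmt"

    "k8s.io/kubernetes/pkg/api"
    "k8s.io/kubernetes/pkg/kubelet/qos"
    kubetypes "k8s.io/kubernetes/pkg/kubelet/types"
)

func main() {
    // With no resource requests this container would normally score as
    // best-effort, but the critical-pod annotation short-circuits the
    // QoS-based scoring in GetContainerOOMScoreAdjust.
    pod := &api.Pod{
        ObjectMeta: api.ObjectMeta{
            Name:        "example-critical-pod", // hypothetical name
            Namespace:   "kube-system",
            Annotations: map[string]string{kubetypes.CriticalPodAnnotationKey: ""},
        },
        Spec: api.PodSpec{Containers: []api.Container{{Name: "main"}}},
    }

    // 8 GiB is an arbitrary node memory capacity for the sketch.
    adj := qos.GetContainerOOMScoreAdjust(pod, &pod.Spec.Containers[0], 8<<30)
    fmt.Println(adj) // the value of qos.CriticalPodOOMAdj
}
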
pkg/kubelet/types/pod_update.go (8 changes: 8 additions & 0 deletions)
@@ -140,3 +140,11 @@ func (sp SyncPodType) String() string {
return "unknown"
}
}
+
+ // IsCriticalPod returns true if the pod bears the critical pod annotation
+ // key. Both the rescheduler and the kubelet use this key to make admission
+ // and scheduling decisions.
+ func IsCriticalPod(pod *api.Pod) bool {
+ _, ok := pod.Annotations[CriticalPodAnnotationKey]
+ return ok
+ }
