Commit

Merge pull request #24674 from gmarek/maxpods
Automatic merge from submit-queue

Enforce --max-pods in kubelet admission; previously it was enforced only by the scheduler

This is an ugly hack - I spent some time trying to understand what one NodeInfo type has in common with the other, but at some point decided I simply didn't have the time to untangle them.

Fixes #24262
Fixes #20263

cc @HaiyangDING @lavalamp
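
For context, the enforcement the title describes boils down to a single count comparison, now shared by the scheduler and the kubelet. Below is a minimal, runnable sketch of that logic; the names are illustrative stand-ins, not the real kubelet or scheduler identifiers, and the authoritative version is the check added to podFitsResourcesInternal in the diff below.

package main

import "fmt"

// podCountFits mirrors the check this PR moves into
// podFitsResourcesInternal: admitting one more pod must keep the
// node within its allocatable pod capacity.
func podCountFits(runningPods int, allowedPodNumber int64) bool {
	return int64(runningPods)+1 <= allowedPodNumber
}

func main() {
	// With the kubelet's default --max-pods of 110, the 110th pod
	// still fits, but the 111th is rejected.
	fmt.Println(podCountFits(109, 110)) // true
	fmt.Println(podCountFits(110, 110)) // false
}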
k8s-merge-robot committed Apr 23, 2016
2 parents 1f1eb62 + e0712f7 commit 2ec9080
Showing 2 changed files with 53 additions and 13 deletions.
54 changes: 48 additions & 6 deletions pkg/kubelet/kubelet_test.go
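Note: all three test fixtures in this file gain an explicit Allocatable pod count, as shown in the hunks below. Because kubelet admission now runs the pod-count predicate, a fake node with an empty status would reject every test pod; 110 matches the kubelet's default --max-pods.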
@@ -2366,10 +2366,24 @@ func TestHandlePortConflicts(t *testing.T) {
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 
 	kl.nodeLister = testNodeLister{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: kl.nodeName}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: kl.nodeName}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: kl.nodeName},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 
 	spec := api.PodSpec{NodeName: kl.nodeName, Containers: []api.Container{{Ports: []api.ContainerPort{{HostPort: 80}}}}}
@@ -2427,10 +2441,24 @@ func TestHandleHostNameConflicts(t *testing.T) {
 	testKubelet.fakeCadvisor.On("RootFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
 
 	kl.nodeLister = testNodeLister{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: "127.0.0.1"},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 
 	pods := []*api.Pod{
@@ -2486,10 +2514,24 @@ func TestHandleNodeSelector(t *testing.T) {
 	testKubelet := newTestKubelet(t)
 	kl := testKubelet.kubelet
 	kl.nodeLister = testNodeLister{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	kl.nodeInfo = testNodeInfo{nodes: []api.Node{
-		{ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}}},
+		{
+			ObjectMeta: api.ObjectMeta{Name: testKubeletHostname, Labels: map[string]string{"key": "B"}},
+			Status: api.NodeStatus{
+				Allocatable: api.ResourceList{
+					api.ResourcePods: *resource.NewQuantity(110, resource.DecimalSI),
+				},
+			},
+		},
 	}}
 	testKubelet.fakeCadvisor.On("MachineInfo").Return(&cadvisorapi.MachineInfo{}, nil)
 	testKubelet.fakeCadvisor.On("DockerImagesFsInfo").Return(cadvisorapiv2.FsInfo{}, nil)
12 changes: 5 additions & 7 deletions plugin/pkg/scheduler/algorithm/predicates/predicates.go
@@ -425,6 +425,10 @@ func podName(pod *api.Pod) string {
 func podFitsResourcesInternal(pod *api.Pod, nodeName string, nodeInfo *schedulercache.NodeInfo, info *api.Node) (bool, error) {
 	allocatable := info.Status.Allocatable
 	allowedPodNumber := allocatable.Pods().Value()
+	if int64(len(nodeInfo.Pods()))+1 > allowedPodNumber {
+		return false,
+			newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), allowedPodNumber)
+	}
 	podRequest := getResourceRequest(pod)
 	if podRequest.milliCPU == 0 && podRequest.memory == 0 {
 		return true, nil
@@ -451,13 +455,6 @@ func (r *NodeStatus) PodFitsResources(pod *api.Pod, nodeName string, nodeInfo *s
 	if err != nil {
 		return false, err
 	}
-	// TODO: move the following podNumber check to podFitsResourcesInternal when Kubelet allows podNumber check (See #20263).
-	allocatable := info.Status.Allocatable
-	allowedPodNumber := allocatable.Pods().Value()
-	if int64(len(nodeInfo.Pods()))+1 > allowedPodNumber {
-		return false,
-			newInsufficientResourceError(podCountResourceName, 1, int64(len(nodeInfo.Pods())), allowedPodNumber)
-	}
 	return podFitsResourcesInternal(pod, nodeName, nodeInfo, info)
 }
 
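The block deleted above, together with its TODO, is the same check that the first hunk relocates into podFitsResourcesInternal. PodFitsResources now delegates entirely to the shared helper, so the scheduler and the kubelet enforce the pod count at a single point, which is what resolves #20263.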
@@ -775,6 +772,7 @@ func RunGeneralPredicates(pod *api.Pod, nodeName string, nodeInfo *schedulercach
 	if !fit {
 		return fit, err
 	}
+
 	fit, err = PodFitsHost(pod, nodeName, nodeInfo)
 	if !fit {
 		return fit, err
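
The trailing hunk only adds a blank line inside RunGeneralPredicates, the function that chains the individual fit predicates and short-circuits on the first failure. Below is a minimal sketch of that fit/err chaining pattern, using simplified stand-in types rather than the real scheduler signatures.

package main

import (
	"errors"
	"fmt"
)

// predicate is a simplified stand-in for a scheduler fit predicate;
// the real ones also receive the pod, node name, and cached node info.
type predicate func() (bool, error)

// runAll mirrors the control flow of RunGeneralPredicates: evaluate
// each predicate in order and return as soon as one reports no fit.
func runAll(preds ...predicate) (bool, error) {
	for _, p := range preds {
		if fit, err := p(); !fit {
			return fit, err
		}
	}
	return true, nil
}

func main() {
	podFitsResources := func() (bool, error) { return true, nil }
	podFitsHost := func() (bool, error) {
		return false, errors.New("pod cannot run on this host")
	}
	fit, err := runAll(podFitsResources, podFitsHost)
	fmt.Println(fit, err) // false pod cannot run on this host
}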

2 comments on commit 2ec9080

@k8s-teamcity-mesosphere

TeamCity OSS :: Kubernetes Mesos :: 4 - Smoke Tests Build 22435 outcome was FAILURE
Summary: Tests failed: 1, passed: 0, ignored: 277 Build time: 00:20:47

Failed tests

null: Kubernetes e2e suite.[k8s.io] Kubectl client [k8s.io] Guestbook application should create and stop a working application [Conformance]: <no details available>

@k8s-teamcity-mesosphere

TeamCity OSS :: Kubernetes Mesos :: 4 - Smoke Tests Build 22434 outcome was FAILURE
Summary: Tests failed: 1, passed: 0, ignored: 277 Build time: 00:44:40

Failed tests

null: Kubernetes e2e suite.[k8s.io] Kubectl client [k8s.io] Guestbook application should create and stop a working application [Conformance]: <no details available>
