
Commit

Merge pull request #55267 from liggitt/automated-cherry-pick-of-#55262-upstream-release-1.7

Automatic merge from submit-queue.

Automated cherry pick of #55262

Cherry pick of #55262 on release-1.7.

#55262: Fix 'Schedulercache is corrupted' error
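
Context (a summary inferred from the diff below; it is not part of the original commit message): AddPod previously confirmed an assumed pod by clearing its expiry deadline but left the originally assumed pod object in podStates. If the pod ended up bound to a different node than the scheduler had assumed, a later UpdatePod compared the incoming pod against that stale entry, saw mismatched node names, and tripped the 'Schedulercache is corrupted' check. Storing the added pod in podStates keeps the cache consistent with the pod actually observed from the API server. A minimal sketch of the sequence the new test exercises, reusing helpers that appear in the cache_test.go diff below (not a standalone program; it assumes the schedulercache test package):

	// Sketch only: newSchedulerCache, makeBasePod and assumeAndFinishBinding are
	// the package-internal helpers visible in the test diff below.
	cache := newSchedulerCache(10*time.Second, time.Second, nil)
	assumed := makeBasePod("assumed-node-1", "test-1", "100m", "500", nil) // node the scheduler assumed
	bound := makeBasePod("actual-node", "test-1", "100m", "500", nil)      // node the pod was actually bound to
	updated := makeBasePod("actual-node", "test-1", "200m", "500", nil)    // later update to the bound pod

	_ = assumeAndFinishBinding(cache, assumed, time.Now()) // cache now holds the assumed pod
	_ = cache.AddPod(bound)                                // informer reports the real binding

	// Without the added cache.podStates[key].pod = pod, the cache still compares
	// updates against the assumed pod (node "assumed-node-1"), sees a node-name
	// mismatch, and reports "Schedulercache is corrupted"; with the fix the
	// update below succeeds.
	_ = cache.UpdatePod(bound, updated)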
Kubernetes Submit Queue committed Nov 14, 2017
2 parents 0381875 + 371ad3f commit 80ade7f
Showing 2 changed files with 69 additions and 0 deletions.
1 change: 1 addition & 0 deletions plugin/pkg/scheduler/schedulercache/cache.go
@@ -232,6 +232,7 @@ func (cache *schedulerCache) AddPod(pod *v1.Pod) error {
		}
		delete(cache.assumedPods, key)
		cache.podStates[key].deadline = nil
		cache.podStates[key].pod = pod
	case !ok:
		// Pod was expired. We should add it back.
		cache.addPod(pod)
68 changes: 68 additions & 0 deletions plugin/pkg/scheduler/schedulercache/cache_test.go
@@ -246,6 +246,74 @@ func TestAddPodWillConfirm(t *testing.T) {
	}
}

// TestAddPodWillReplaceAssumed tests that a pod being Add()ed will replace any assumed pod.
func TestAddPodWillReplaceAssumed(t *testing.T) {
	now := time.Now()
	ttl := 10 * time.Second

	assumedPod := func() *v1.Pod {
		return makeBasePod("assumed-node-1", "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
	}
	addedPod := func() *v1.Pod {
		return makeBasePod("actual-node", "test-1", "100m", "500", []v1.ContainerPort{{HostPort: 80}})
	}
	updatedPod := func() *v1.Pod {
		return makeBasePod("actual-node", "test-1", "200m", "500", []v1.ContainerPort{{HostPort: 90}})
	}

	tests := []struct {
		podsToAssume []*v1.Pod
		podsToAdd    []*v1.Pod
		podsToUpdate [][]*v1.Pod

		wNodeInfo map[string]*NodeInfo
	}{{
		podsToAssume: []*v1.Pod{assumedPod()},
		podsToAdd:    []*v1.Pod{addedPod()},
		podsToUpdate: [][]*v1.Pod{{addedPod(), updatedPod()}},
		wNodeInfo: map[string]*NodeInfo{
			"assumed-node": nil,
			"actual-node": {
				requestedResource: &Resource{
					MilliCPU: 200,
					Memory:   500,
				},
				nonzeroRequest: &Resource{
					MilliCPU: 200,
					Memory:   500,
				},
				allocatableResource: &Resource{},
				pods:      []*v1.Pod{updatedPod()},
				usedPorts: map[int]bool{90: true},
			},
		},
	}}

	for i, tt := range tests {
		cache := newSchedulerCache(ttl, time.Second, nil)
		for _, podToAssume := range tt.podsToAssume {
			if err := assumeAndFinishBinding(cache, podToAssume, now); err != nil {
				t.Fatalf("assumePod failed: %v", err)
			}
		}
		for _, podToAdd := range tt.podsToAdd {
			if err := cache.AddPod(podToAdd); err != nil {
				t.Fatalf("AddPod failed: %v", err)
			}
		}
		for _, podToUpdate := range tt.podsToUpdate {
			if err := cache.UpdatePod(podToUpdate[0], podToUpdate[1]); err != nil {
				t.Fatalf("UpdatePod failed: %v", err)
			}
		}
		for nodeName, expected := range tt.wNodeInfo {
			t.Log(nodeName)
			n := cache.nodes[nodeName]
			deepEqualWithoutGeneration(t, i, n, expected)
		}
	}
}

// TestAddPodAfterExpiration tests that a pod being Add()ed will be added back if expired.
func TestAddPodAfterExpiration(t *testing.T) {
	nodeName := "node"
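
To run just the new test locally (a standard go test invocation from the repository root of a release-1.7 checkout):

	go test ./plugin/pkg/scheduler/schedulercache/ -run TestAddPodWillReplaceAssumed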
