Merge pull request #120398 from aleksandra-malinowska/sts-restart-always
Make StatefulSet restart pods with phase Succeeded
k8s-ci-robot committed Sep 13, 2023
2 parents 7c98d3f + d7264d0 commit 3eca0a5
Showing 3 changed files with 65 additions and 7 deletions.
28 changes: 21 additions & 7 deletions pkg/controller/statefulset/stateful_set_control.go
@@ -375,13 +375,27 @@ func (ssc *defaultStatefulSetControl) processReplica(
replicas []*v1.Pod,
i int) (bool, error) {
logger := klog.FromContext(ctx)
// delete and recreate failed pods
if isFailed(replicas[i]) {
ssc.recorder.Eventf(set, v1.EventTypeWarning, "RecreatingFailedPod",
"StatefulSet %s/%s is recreating failed Pod %s",
set.Namespace,
set.Name,
replicas[i].Name)
// Delete and recreate pods which finished running.
//
// Note that pods with phase Succeeded will also trigger this event. This is
// because final pod phase of evicted or otherwise forcibly stopped pods
// (e.g. terminated on node reboot) is determined by the exit code of the
// container, not by the reason for pod termination. We should restart the pod
// regardless of the exit code.
if isFailed(replicas[i]) || isSucceeded(replicas[i]) {
if isFailed(replicas[i]) {
ssc.recorder.Eventf(set, v1.EventTypeWarning, "RecreatingFailedPod",
"StatefulSet %s/%s is recreating failed Pod %s",
set.Namespace,
set.Name,
replicas[i].Name)
} else {
ssc.recorder.Eventf(set, v1.EventTypeNormal, "RecreatingTerminatedPod",
"StatefulSet %s/%s is recreating terminated Pod %s",
set.Namespace,
set.Name,
replicas[i].Name)
}
if err := ssc.podControl.DeleteStatefulPod(set, replicas[i]); err != nil {
return true, err
}
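
To summarize the hunk above: the controller now treats both terminal phases the same way and deletes the pod so it can be recreated. A minimal standalone sketch of that decision, assuming only the k8s.io/api/core/v1 types (shouldRecreate is a hypothetical name, not a helper introduced by this PR):

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
)

// shouldRecreate mirrors the isFailed/isSucceeded checks above: a pod in either
// terminal phase (Failed or Succeeded) is deleted so a replacement can be created.
func shouldRecreate(pod *v1.Pod) bool {
	return pod.Status.Phase == v1.PodFailed || pod.Status.Phase == v1.PodSucceeded
}

func main() {
	succeeded := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodSucceeded}}
	running := &v1.Pod{Status: v1.PodStatus{Phase: v1.PodRunning}}
	fmt.Println(shouldRecreate(succeeded), shouldRecreate(running)) // true false
}

Inside the controller the split between isFailed and isSucceeded is kept only to emit different events: a Warning (RecreatingFailedPod) for failed pods and a Normal event (RecreatingTerminatedPod) for succeeded ones.
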
39 changes: 39 additions & 0 deletions pkg/controller/statefulset/stateful_set_control_test.go
@@ -170,6 +170,7 @@ func TestStatefulSetControl(t *testing.T) {
{ScalesDown, simpleSetFn},
{ReplacesPods, largeSetFn},
{RecreatesFailedPod, simpleSetFn},
{RecreatesSucceededPod, simpleSetFn},
{CreatePodFailure, simpleSetFn},
{UpdatePodFailure, simpleSetFn},
{UpdateSetStatusFailure, simpleSetFn},
@@ -435,6 +436,44 @@ func RecreatesFailedPod(t *testing.T, set *apps.StatefulSet, invariants invariantFunc) {
}
}

func RecreatesSucceededPod(t *testing.T, set *apps.StatefulSet, invariants invariantFunc) {
client := fake.NewSimpleClientset()
om, _, ssc := setupController(client)
selector, err := metav1.LabelSelectorAsSelector(set.Spec.Selector)
if err != nil {
t.Error(err)
}
pods, err := om.podsLister.Pods(set.Namespace).List(selector)
if err != nil {
t.Error(err)
}
if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
t.Errorf("Error updating StatefulSet %s", err)
}
if err := invariants(set, om); err != nil {
t.Error(err)
}
pods, err = om.podsLister.Pods(set.Namespace).List(selector)
if err != nil {
t.Error(err)
}
pods[0].Status.Phase = v1.PodSucceeded
_ = om.podsIndexer.Update(pods[0])
if _, err := ssc.UpdateStatefulSet(context.TODO(), set, pods); err != nil {
t.Errorf("Error updating StatefulSet %s", err)
}
if err := invariants(set, om); err != nil {
t.Error(err)
}
pods, err = om.podsLister.Pods(set.Namespace).List(selector)
if err != nil {
t.Error(err)
}
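// isCreated reports whether the pod already has a status phase. A freshly
// recreated replacement pod has no phase yet, so a "created" pod at this
// ordinal means the old Succeeded pod was never deleted and recreated
// (mirroring the assertion in RecreatesFailedPod).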
if isCreated(pods[0]) {
t.Error("StatefulSet did not recreate succeeded Pod")
}
}

func CreatePodFailure(t *testing.T, set *apps.StatefulSet, invariants invariantFunc) {
client := fake.NewSimpleClientset(set)
om, _, ssc := setupController(client)
5 changes: 5 additions & 0 deletions pkg/controller/statefulset/stateful_set_utils.go
@@ -426,6 +426,11 @@ func isFailed(pod *v1.Pod) bool {
return pod.Status.Phase == v1.PodFailed
}

// isSucceeded returns true if pod has a Phase of PodSucceeded
func isSucceeded(pod *v1.Pod) bool {
return pod.Status.Phase == v1.PodSucceeded
}

// isTerminating returns true if pod's DeletionTimestamp has been set
func isTerminating(pod *v1.Pod) bool {
return pod.DeletionTimestamp != nil
