Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Teach Kubelet about Pod Ready++ #64344

Merged
merged 2 commits into from
Jun 5, 2018
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
15 changes: 12 additions & 3 deletions pkg/api/v1/pod/util.go
Original file line number Diff line number Diff line change
Expand Up @@ -261,9 +261,18 @@ func GetPodCondition(status *v1.PodStatus, conditionType v1.PodConditionType) (i
if status == nil {
return -1, nil
}
for i := range status.Conditions {
if status.Conditions[i].Type == conditionType {
return i, &status.Conditions[i]
return GetPodConditionFromList(status.Conditions, conditionType)
}

// GetPodConditionFromList extracts the provided condition type from the given list of
// conditions. It returns the index of the matching condition and a pointer to it;
// it returns -1 and nil if the condition is not present (or the list is nil).
func GetPodConditionFromList(conditions []v1.PodCondition, conditionType v1.PodConditionType) (int, *v1.PodCondition) {
	// A nil slice trivially contains no conditions.
	if conditions == nil {
		return -1, nil
	}
	// Iterate by index so the returned pointer aliases the caller's slice
	// element rather than a loop-variable copy.
	for i := range conditions {
		if conditions[i].Type == conditionType {
			return i, &conditions[i]
		}
	}
	return -1, nil
Expand Down
7 changes: 7 additions & 0 deletions pkg/kubelet/kubelet.go
Original file line number Diff line number Diff line change
Expand Up @@ -2042,11 +2042,18 @@ func (kl *Kubelet) HandlePodRemoves(pods []*v1.Pod) {
// HandlePodReconcile is the callback in the SyncHandler interface for pods
// that should be reconciled.
func (kl *Kubelet) HandlePodReconcile(pods []*v1.Pod) {
start := kl.clock.Now()
for _, pod := range pods {
// Update the pod in pod manager, status manager will do periodically reconcile according
// to the pod manager.
kl.podManager.UpdatePod(pod)

// Reconcile Pod "Ready" condition if necessary. Trigger sync pod for reconciliation.
if status.NeedToReconcilePodReadiness(pod) {
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Question for the authors, why did we only trigger the pod worker (which is what drives status manager) here?

A reconcile is triggered when status is mutated from the API server - was this done to try and minimize the number of status sync events for a running pod?

Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I'm not sure @freehan is paying attention to k/k any more :(

mirrorPod, _ := kl.podManager.GetMirrorPodByPod(pod)
kl.dispatchWork(pod, kubetypes.SyncPodSync, mirrorPod, start)
}

// After an evicted pod is synced, all dead containers in the pod can be removed.
if eviction.PodIsEvicted(pod.Status) {
if podStatus, err := kl.podCache.Get(pod.UID); err == nil {
Expand Down
8 changes: 7 additions & 1 deletion pkg/kubelet/kubelet_pods.go
Original file line number Diff line number Diff line change
Expand Up @@ -1354,7 +1354,7 @@ func (kl *Kubelet) generateAPIPodStatus(pod *v1.Pod, podStatus *kubecontainer.Po
}
kl.probeManager.UpdatePodStatus(pod.UID, s)
s.Conditions = append(s.Conditions, status.GeneratePodInitializedCondition(spec, s.InitContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.ContainerStatuses, s.Phase))
s.Conditions = append(s.Conditions, status.GeneratePodReadyCondition(spec, s.Conditions, s.ContainerStatuses, s.Phase))
// Status manager will take care of the LastTransitionTimestamp, either preserve
// the timestamp from apiserver, or set a new one. When kubelet sees the pod,
// `PodScheduled` condition must be true.
Expand Down Expand Up @@ -1407,6 +1407,12 @@ func (kl *Kubelet) convertStatusToAPIStatus(pod *v1.Pod, podStatus *kubecontaine
true,
)

// Preserves conditions not controlled by kubelet
for _, c := range pod.Status.Conditions {
if !kubetypes.PodConditionByKubelet(c.Type) {
apiPodStatus.Conditions = append(apiPodStatus.Conditions, c)
}
}
return &apiPodStatus
}

Expand Down
32 changes: 29 additions & 3 deletions pkg/kubelet/status/generate.go
Original file line number Diff line number Diff line change
Expand Up @@ -29,11 +29,13 @@ const (
PodCompleted = "PodCompleted"
ContainersNotReady = "ContainersNotReady"
ContainersNotInitialized = "ContainersNotInitialized"
ReadinessGatesNotReady = "ReadinessGatesNotReady"
)

// GeneratePodReadyCondition returns ready condition if all containers in a pod are ready, else it
// returns an unready condition.
func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
// GeneratePodReadyCondition returns "Ready" condition of a pod.
// The status of "Ready" condition is "True", if all containers in a pod are ready
// AND all matching conditions specified in the ReadinessGates have status equal to "True".
func GeneratePodReadyCondition(spec *v1.PodSpec, conditions []v1.PodCondition, containerStatuses []v1.ContainerStatus, podPhase v1.PodPhase) v1.PodCondition {
// Find if all containers are ready or not.
if containerStatuses == nil {
return v1.PodCondition{
Expand Down Expand Up @@ -63,6 +65,7 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.Containe
}
}

// Generate message for containers in unknown condition.
unreadyMessages := []string{}
if len(unknownContainers) > 0 {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("containers with unknown status: %s", unknownContainers))
Expand All @@ -80,6 +83,29 @@ func GeneratePodReadyCondition(spec *v1.PodSpec, containerStatuses []v1.Containe
}
}

// Evaluate corresponding conditions specified in readiness gate
// Generate message if any readiness gate is not satisfied.
unreadyMessages = []string{}
for _, rg := range spec.ReadinessGates {
_, c := podutil.GetPodConditionFromList(conditions, rg.ConditionType)
if c == nil {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("corresponding condition of pod readiness gate %q does not exist.", string(rg.ConditionType)))
} else if c.Status != v1.ConditionTrue {
unreadyMessages = append(unreadyMessages, fmt.Sprintf("the status of pod readiness gate %q is not \"True\", but %v", string(rg.ConditionType), c.Status))
}
}

// Set "Ready" condition to "False" if any readiness gate is not ready.
if len(unreadyMessages) != 0 {
unreadyMessage := strings.Join(unreadyMessages, ", ")
return v1.PodCondition{
Type: v1.PodReady,
Status: v1.ConditionFalse,
Reason: ReadinessGatesNotReady,
Message: unreadyMessage,
}
}

return v1.PodCondition{
Type: v1.PodReady,
Status: v1.ConditionTrue,
Expand Down
143 changes: 122 additions & 21 deletions pkg/kubelet/status/generate_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,31 +27,35 @@ import (
func TestGeneratePodReadyCondition(t *testing.T) {
tests := []struct {
spec *v1.PodSpec
conditions []v1.PodCondition
containerStatuses []v1.ContainerStatus
podPhase v1.PodPhase
expected v1.PodCondition
expectReady v1.PodCondition
}{
{
spec: nil,
conditions: nil,
containerStatuses: nil,
podPhase: v1.PodRunning,
expected: getReadyCondition(false, UnknownContainerStatuses, ""),
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, UnknownContainerStatuses, ""),
},
{
spec: &v1.PodSpec{},
conditions: nil,
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expected: getReadyCondition(true, "", ""),
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
spec: &v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
},
conditions: nil,
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expected: getReadyCondition(false, ContainersNotReady, "containers with unknown status: [1234]"),
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [1234]"),
},
{
spec: &v1.PodSpec{
Expand All @@ -60,12 +64,13 @@ func TestGeneratePodReadyCondition(t *testing.T) {
{Name: "5678"},
},
},
conditions: nil,
containerStatuses: []v1.ContainerStatus{
getReadyStatus("1234"),
getReadyStatus("5678"),
},
podPhase: v1.PodRunning,
expected: getReadyCondition(true, "", ""),
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
spec: &v1.PodSpec{
Expand All @@ -74,11 +79,12 @@ func TestGeneratePodReadyCondition(t *testing.T) {
{Name: "5678"},
},
},
conditions: nil,
containerStatuses: []v1.ContainerStatus{
getReadyStatus("1234"),
},
podPhase: v1.PodRunning,
expected: getReadyCondition(false, ContainersNotReady, "containers with unknown status: [5678]"),
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unknown status: [5678]"),
},
{
spec: &v1.PodSpec{
Expand All @@ -87,31 +93,130 @@ func TestGeneratePodReadyCondition(t *testing.T) {
{Name: "5678"},
},
},
conditions: nil,
containerStatuses: []v1.ContainerStatus{
getReadyStatus("1234"),
getNotReadyStatus("5678"),
},
podPhase: v1.PodRunning,
expected: getReadyCondition(false, ContainersNotReady, "containers with unready status: [5678]"),
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [5678]"),
},
{
spec: &v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
},
conditions: nil,
containerStatuses: []v1.ContainerStatus{
getNotReadyStatus("1234"),
},
podPhase: v1.PodSucceeded,
expected: getReadyCondition(false, PodCompleted, ""),
podPhase: v1.PodSucceeded,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, PodCompleted, ""),
},
{
spec: &v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
},
},
conditions: nil,
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `corresponding condition of pod readiness gate "gate1" does not exist.`),
},
{
spec: &v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
},
},
conditions: []v1.PodCondition{
getPodCondition("gate1", v1.ConditionFalse, "", ""),
},
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `the status of pod readiness gate "gate1" is not "True", but False`),
},
{
spec: &v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
},
},
conditions: []v1.PodCondition{
getPodCondition("gate1", v1.ConditionTrue, "", ""),
},
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
spec: &v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
{ConditionType: v1.PodConditionType("gate2")},
},
},
conditions: []v1.PodCondition{
getPodCondition("gate1", v1.ConditionTrue, "", ""),
},
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `corresponding condition of pod readiness gate "gate2" does not exist.`),
},
{
spec: &v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
{ConditionType: v1.PodConditionType("gate2")},
},
},
conditions: []v1.PodCondition{
getPodCondition("gate1", v1.ConditionTrue, "", ""),
getPodCondition("gate2", v1.ConditionFalse, "", ""),
},
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ReadinessGatesNotReady, `the status of pod readiness gate "gate2" is not "True", but False`),
},
{
spec: &v1.PodSpec{
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
{ConditionType: v1.PodConditionType("gate2")},
},
},
conditions: []v1.PodCondition{
getPodCondition("gate1", v1.ConditionTrue, "", ""),
getPodCondition("gate2", v1.ConditionTrue, "", ""),
},
containerStatuses: []v1.ContainerStatus{},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionTrue, "", ""),
},
{
spec: &v1.PodSpec{
Containers: []v1.Container{
{Name: "1234"},
},
ReadinessGates: []v1.PodReadinessGate{
{ConditionType: v1.PodConditionType("gate1")},
},
},
conditions: []v1.PodCondition{
getPodCondition("gate1", v1.ConditionTrue, "", ""),
},
containerStatuses: []v1.ContainerStatus{getNotReadyStatus("1234")},
podPhase: v1.PodRunning,
expectReady: getPodCondition(v1.PodReady, v1.ConditionFalse, ContainersNotReady, "containers with unready status: [1234]"),
},
}

for i, test := range tests {
condition := GeneratePodReadyCondition(test.spec, test.containerStatuses, test.podPhase)
if !reflect.DeepEqual(condition, test.expected) {
t.Errorf("On test case %v, expected:\n%+v\ngot\n%+v\n", i, test.expected, condition)
ready := GeneratePodReadyCondition(test.spec, test.conditions, test.containerStatuses, test.podPhase)
if !reflect.DeepEqual(ready, test.expectReady) {
t.Errorf("On test case %v, expectReady:\n%+v\ngot\n%+v\n", i, test.expectReady, ready)
}
}
}
Expand Down Expand Up @@ -220,13 +325,9 @@ func TestGeneratePodInitializedCondition(t *testing.T) {
}
}

func getReadyCondition(ready bool, reason, message string) v1.PodCondition {
status := v1.ConditionFalse
if ready {
status = v1.ConditionTrue
}
func getPodCondition(conditionType v1.PodConditionType, status v1.ConditionStatus, reason, message string) v1.PodCondition {
return v1.PodCondition{
Type: v1.PodReady,
Type: conditionType,
Status: status,
Reason: reason,
Message: message,
Expand Down
27 changes: 20 additions & 7 deletions pkg/kubelet/status/status_manager.go
Original file line number Diff line number Diff line change
Expand Up @@ -227,21 +227,20 @@ func (m *manager) SetContainerReadiness(podUID types.UID, containerID kubecontai
containerStatus.Ready = ready

// Update pod condition.
readyConditionIndex := -1
podReadyConditionIndex := -1
for i, condition := range status.Conditions {
if condition.Type == v1.PodReady {
readyConditionIndex = i
podReadyConditionIndex = i
break
}
}
readyCondition := GeneratePodReadyCondition(&pod.Spec, status.ContainerStatuses, status.Phase)
if readyConditionIndex != -1 {
status.Conditions[readyConditionIndex] = readyCondition
podReady := GeneratePodReadyCondition(&pod.Spec, status.Conditions, status.ContainerStatuses, status.Phase)
if podReadyConditionIndex != -1 {
status.Conditions[podReadyConditionIndex] = podReady
} else {
glog.Warningf("PodStatus missing PodReady condition: %+v", status)
status.Conditions = append(status.Conditions, readyCondition)
status.Conditions = append(status.Conditions, podReady)
}

m.updateStatusInternal(pod, status, false)
}

Expand Down Expand Up @@ -652,3 +651,17 @@ func mergePodStatus(oldPodStatus, newPodStatus v1.PodStatus) v1.PodStatus {
newPodStatus.Conditions = podConditions
return newPodStatus
}

// NeedToReconcilePodReadiness reports whether the pod's "Ready" condition needs to be
// reconciled: the pod declares readiness gates, a "Ready" condition is already recorded
// in its status, and that recorded status differs from the freshly computed one.
func NeedToReconcilePodReadiness(pod *v1.Pod) bool {
	// Pods without readiness gates never need this extra reconciliation.
	if len(pod.Spec.ReadinessGates) == 0 {
		return false
	}
	// Recompute the "Ready" condition from the current spec and status.
	computed := GeneratePodReadyCondition(&pod.Spec, pod.Status.Conditions, pod.Status.ContainerStatuses, pod.Status.Phase)
	idx, existing := podutil.GetPodConditionFromList(pod.Status.Conditions, v1.PodReady)
	// Reconcile only when a "Ready" condition is present and out of date.
	return idx >= 0 && existing.Status != computed.Status
}