Refactor Pod await logic #590
```diff
@@ -497,7 +497,7 @@ func Test_Apps_Deployment(t *testing.T) {
 			object: deploymentProgressing(inputNamespace, deploymentInputName, revision1),
 			subErrors: []string{
 				"Minimum number of live Pods was not attained",
-				`1 Pods failed to run because: [ImagePullBackOff] Back-off pulling image "sdkjlsdlkj"`,
+				`containers with unready status: [nginx] -- Back-off pulling image "sdkjlsdlkj"`,
 			}},
 		},
 		{
```

> **Review comment:** Not a huge thing but as a general rule I think we should keep the …
```diff
@@ -523,7 +523,7 @@ func Test_Apps_Deployment(t *testing.T) {
 			object: deploymentProgressing(inputNamespace, deploymentInputName, revision2),
 			subErrors: []string{
 				"Minimum number of live Pods was not attained",
-				`1 Pods failed to run because: [ImagePullBackOff] Back-off pulling image "sdkjlsdlkj"`,
+				`containers with unready status: [nginx] -- Back-off pulling image "sdkjlsdlkj"`,
 			}},
 		},
 		{
```
```diff
@@ -7,8 +7,10 @@ import (

 	"github.com/golang/glog"
 	"github.com/pkg/errors"
+	"github.com/pulumi/pulumi-kubernetes/pkg/await/states"
+	"github.com/pulumi/pulumi-kubernetes/pkg/clients"
 	"github.com/pulumi/pulumi-kubernetes/pkg/kinds"
+	"github.com/pulumi/pulumi-kubernetes/pkg/logging"
 	"github.com/pulumi/pulumi-kubernetes/pkg/metadata"
 	"github.com/pulumi/pulumi-kubernetes/pkg/openapi"
 	"github.com/pulumi/pulumi/pkg/diag"
```
```diff
@@ -240,13 +242,9 @@ func (sia *statefulsetInitAwaiter) await(
 				subErrors: sia.errorMessages(),
 			}
 		case <-aggregateErrorTicker:
-			scheduleErrors, containerErrors := sia.aggregatePodErrors()
-			for _, message := range scheduleErrors {
-				sia.config.logStatus(diag.Warning, message)
-			}
-
-			for _, message := range containerErrors {
-				sia.config.logStatus(diag.Warning, message)
+			messages := sia.aggregatePodErrors()
+			for _, message := range messages {
+				sia.config.logMessage(message)
 			}
 		case event := <-statefulsetWatcher.ResultChan():
 			sia.processStatefulSetEvent(event)
```
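The ticker case now logs a single `logging.Messages` list instead of two parallel string slices. The `logging` package itself is not part of this diff; based on the `message.S` and `message.Severity` fields the PR accesses, a minimal sketch of its shape might look like the following (anything beyond `Message`/`Messages` is an assumption):

```go
// Hypothetical minimal sketch of the logging package used in this PR,
// inferred from the fields the diff accesses (message.S, message.Severity).
// The actual pulumi-kubernetes implementation may differ.
package logging

import "github.com/pulumi/pulumi/pkg/diag"

// Message pairs a display string with the severity to log it at, so one
// list can replace the separate schedule/container error slices used before.
type Message struct {
	S        string
	Severity diag.Severity
}

// Messages is a list of Message values.
type Messages []Message

// WarningMessage is an assumed convenience constructor for warning-severity
// messages like the aggregated Pod errors.
func WarningMessage(s string) Message {
	return Message{S: s, Severity: diag.Warning}
}
```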
```diff
@@ -392,45 +390,25 @@ func (sia *statefulsetInitAwaiter) processPodEvent(event watch.Event) {
 	sia.pods[podName] = pod
 }

-func (sia *statefulsetInitAwaiter) aggregatePodErrors() ([]string, []string) {
-	scheduleErrorCounts := map[string]int{}
-	containerErrorCounts := map[string]int{}
-	for _, pod := range sia.pods {
+func (sia *statefulsetInitAwaiter) aggregatePodErrors() logging.Messages {
+	var messages logging.Messages
+	for _, unstructuredPod := range sia.pods {
 		// Filter down to only Pods owned by the active StatefulSet.
-		if !isOwnedBy(pod, sia.statefulset) {
+		if !isOwnedBy(unstructuredPod, sia.statefulset) {
 			continue
 		}

 		// Check the pod for errors.
-		checker := makePodChecker()
-		checker.check(pod)
-
-		for reason, message := range checker.podScheduledErrors {
-			message = fmt.Sprintf("[%s] %s", reason, message)
-			scheduleErrorCounts[message] = scheduleErrorCounts[message] + 1
-		}
-
-		for reason, messages := range checker.containerErrors {
-			for _, message := range messages {
-				message = fmt.Sprintf("[%s] %s", reason, message)
-				containerErrorCounts[message] = containerErrorCounts[message] + 1
-			}
+		checker := states.NewPodChecker()
+		pod, err := clients.PodFromUnstructured(unstructuredPod)
+		if err != nil {
+			glog.V(3).Infof("Failed to unmarshal Pod event: %v", err)
+			return nil
 		}
+		messages = append(messages, checker.Update(pod).Warnings()...)
 	}

-	scheduleErrors := make([]string, 0)
-	for message, count := range scheduleErrorCounts {
-		message = fmt.Sprintf("%d Pods failed to schedule because: %s", count, message)
-		scheduleErrors = append(scheduleErrors, message)
-	}
-
-	containerErrors := make([]string, 0)
-	for message, count := range containerErrorCounts {
-		message = fmt.Sprintf("%d Pods failed to run because: %s", count, message)
-		containerErrors = append(containerErrors, message)
-	}
-
-	return scheduleErrors, containerErrors
+	return messages
 }

 func (sia *statefulsetInitAwaiter) errorMessages() []string {
```

> **Review comment:** Not a big deal, but an alternative design you might consider here is to have it take a lambda as an argument instead of passing an allocated list back, and then allocating another list, again, in response, which is what we're doing in a lot of the callsites.
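The reviewer's suggestion above — pass a callback rather than return an allocated slice that callers immediately re-iterate — could look roughly like the sketch below. The `forEachPodError` name and shape are hypothetical; it reuses only the helpers already shown in this hunk:

```go
// Hypothetical visitor-style variant of aggregatePodErrors, along the lines
// suggested in review: the caller supplies a function that receives each
// message, so no intermediate slice needs to be allocated and copied.
func (sia *statefulsetInitAwaiter) forEachPodError(visit func(logging.Message)) {
	for _, unstructuredPod := range sia.pods {
		// Filter down to only Pods owned by the active StatefulSet.
		if !isOwnedBy(unstructuredPod, sia.statefulset) {
			continue
		}
		pod, err := clients.PodFromUnstructured(unstructuredPod)
		if err != nil {
			glog.V(3).Infof("Failed to unmarshal Pod event: %v", err)
			return
		}
		for _, message := range states.NewPodChecker().Update(pod).Warnings() {
			visit(message)
		}
	}
}
```

With that shape, the ticker case reduces to `sia.forEachPodError(sia.config.logMessage)`, and `errorMessages` can append `message.S` inside its visitor instead of looping over a returned list.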
```diff
@@ -445,9 +423,10 @@ func (sia *statefulsetInitAwaiter) errorMessages() []string {
 			".status.currentRevision does not match .status.updateRevision")
 	}

-	scheduleErrors, containerErrors := sia.aggregatePodErrors()
-	messages = append(messages, scheduleErrors...)
-	messages = append(messages, containerErrors...)
+	errorMessages := sia.aggregatePodErrors()
+	for _, message := range errorMessages {
+		messages = append(messages, message.S)
+	}

 	return messages
 }
```
```diff
@@ -55,6 +55,10 @@ func (cac *createAwaitConfig) logStatus(sev diag.Severity, message string) {
 	cac.logger.LogMessage(sev, message)
 }

+func (cac *createAwaitConfig) logMessage(message logging.Message) {
+	cac.logger.LogMessage(message.Severity, message.S)
+}
+
 // updateAwaitConfig specifies on which conditions we are to consider a resource "fully updated",
 // i.e., the spec of the API object has changed and the controllers have reached a steady state. For
 // example, we might consider a `Deployment` "fully updated" only when the previous generation of
```

> **Review comment:** Why not just have …
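For comparison, a hedged sketch of what the new helper changes at a call site, reusing the `WarningMessage` constructor assumed in the earlier `logging` sketch (the function and its contents are illustrative, not from the PR):

```go
// Hypothetical call-site comparison; cac is a *createAwaitConfig as in the
// diff above, and logging.WarningMessage is the constructor assumed earlier.
func logPodWarning(cac *createAwaitConfig) {
	// Before: every call site chooses a severity explicitly.
	cac.logStatus(diag.Warning, "Minimum number of live Pods was not attained")

	// After: the Message carries its own severity, decided where it was built.
	cac.logMessage(logging.WarningMessage("Minimum number of live Pods was not attained"))
}
```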
```diff
@@ -173,9 +177,9 @@ var awaiters = map[string]awaitSpec{
 		awaitCreation: untilCoreV1PersistentVolumeClaimBound,
 	},
 	coreV1Pod: {
-		// NOTE: Because we replace the Pod in most situations, we do not require special logic for
-		// the update path.
-		awaitCreation: func(c createAwaitConfig) error { return makePodInitAwaiter(c).Await() },
+		awaitCreation: awaitPodInit,
+		awaitRead:     awaitPodRead,
+		awaitUpdate:   awaitPodUpdate,
 		awaitDeletion: untilCoreV1PodDeleted,
 	},
 	coreV1ReplicationController: {
```

> **Review comment:** Is this comment no longer true?

> **Reply:** It's true in most cases, but IIRC, it wasn't working quite right as I was testing individual Pod updates. Seemed like it made sense to standardize since the change was trivial.
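The named helpers replacing the inline lambda are defined elsewhere in the PR and not shown in this hunk. A sketch of their presumed shape follows; only `awaitPodInit`'s body is corroborated by the removed lambda, while the read/update variants and the `updateAwaitConfig` embedding are assumptions based on the naming pattern:

```go
// Presumed definitions of the named Pod await helpers referenced in the map
// above. awaitPodInit mirrors the removed inline lambda; the other two
// bodies are assumptions, not confirmed by this diff.
func awaitPodInit(c createAwaitConfig) error {
	return makePodInitAwaiter(c).Await()
}

func awaitPodRead(c createAwaitConfig) error {
	// Assumed: a Read method on the pod awaiter for the new awaitRead path.
	return makePodInitAwaiter(c).Read()
}

func awaitPodUpdate(u updateAwaitConfig) error {
	// Assumed: updateAwaitConfig embeds createAwaitConfig, so updates reuse
	// the same await logic now that Pod updates are handled uniformly.
	return makePodInitAwaiter(u.createAwaitConfig).Await()
}
```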
> **Review comment:** Nice.