Adds event information when waiting for a pod
**What type of PR is this?**
/kind feature

**What does this PR do / why we need it**:

This PR:
  - Adds more information when deploying via "WaitAndGetPod". We watch
  the events as they happen and look for warning events that have
  occurred at least 5 times, then surface them alongside the spinner to
  help the user determine what's taking so long to deploy (the
  filtering rule is sketched just below this list).
  - Collects the failed-event information and outputs it as a table
  when the push fails.
  - Changes how we output errors.
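
The filtering rule itself is small: keep any Warning-type event whose count has reached the threshold. A minimal sketch with simplified stand-in types — the real implementation against client-go is in `pkg/occlient/occlient.go` below:

```go
// Simplified stand-in for corev1.Event; illustrative only.
type event struct {
	Name, Type, Reason, Message string
	Count                       int32
}

// collectNoisyWarnings keeps every warning that has fired at least
// `threshold` times, keyed by name so newer counts overwrite older ones.
func collectNoisyWarnings(events []event, threshold int32) map[string]event {
	noisy := make(map[string]event)
	for _, e := range events {
		if e.Type == "Warning" && e.Count >= threshold {
			noisy[e.Name] = e
		}
	}
	return noisy
}
```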

Example outputs:

```sh
▶ odo push -f
Validation
 ✓  Checking component [122ms]

Configuration changes
 ✓  Initializing component
 ✓  Creating component [733ms]

Pushing to component nodejs-nodejs-ex-dorv of type local
 ✗  Waiting for component to start [5s]

ERROR:
waited 5s but was unable to find a running pod matching selector: 'deploymentconfig=nodejs-nodejs-ex-dorv-app'
```

```sh
~/openshift/nodejs-ex  master ✗
▶ odo push -f
Validation
 ✓  Checking component [134ms]

Configuration changes
 ✓  Initializing component
 ✓  Creating component [417ms]

Pushing to component nodejs-nodejs-ex-dorv of type local
 ✗  Waiting for component to start [5s] [WARNING x5: FailedScheduling, use `-v` for more information.]

ERROR:
waited 5s but was unable to find a running pod matching selector: 'deploymentconfig=nodejs-nodejs-ex-dorv-app'
For more information to help determine the cause of the error, re-run with '-v'.
See below for a list of failed events that occurred 5 or more times during deployment:
+----------------------------------------------------+-------+------------------+-------------------------------------+
|                        NAME                        | COUNT |      REASON      |               MESSAGE               |
+----------------------------------------------------+-------+------------------+-------------------------------------+
| nodejs-nodejs-ex-dorv-app-1-r46cg.15f66f7d73693e5c |    15 | FailedScheduling | persistentvolumeclaim               |
|                                                    |       |                  | "nodejs-nodejs-ex-dorv-app-s2idata" |
|                                                    |       |                  | not found                           |
| nodejs-nodejs-ex-dorv-app-1-vn8vd.15f66f8482ee88f6 |     5 | FailedScheduling | persistentvolumeclaim               |
|                                                    |       |                  | "nodejs-nodejs-ex-dorv-app-s2idata" |
|                                                    |       |                  | not found                           |
+----------------------------------------------------+-------+------------------+-------------------------------------+
```

**Which issue(s) this PR fixes**:

Fixes redhat-developer#2244

**How to test changes / Special notes to the reviewer**:

Run a test application like so:

```sh
git clone https://github.com/openshift/nodejs-ex
odo create nodejs
odo preference set PushTimeout 5
watch -n 1 oc delete pvc --all
odo push -f
```

The `watch` loop keeps deleting the PVC, so the pod can never be scheduled; FailedScheduling warnings then accumulate past the threshold and trigger the new spinner warning and the failure table.
cdrage committed Feb 25, 2020
1 parent 949bcb3 commit fab45c7
Showing 5 changed files with 238 additions and 91 deletions.
9 changes: 9 additions & 0 deletions pkg/log/fidget/spinner.go
@@ -91,7 +91,16 @@ func (s *Spinner) SetPrefix(prefix string) {
// SetSuffix sets the suffix to print after the spinner
func (s *Spinner) SetSuffix(suffix string) {
s.mu.Lock()

// Awful hack to "clear" the line if the new suffix is shorter than the previous one...
if len(suffix) < len(s.suffix) {
spacingLength := len(s.prefix) + len(s.suffix)
fmt.Fprintf(s.writer, "\r%*s", spacingLength, "")
}
s.suffix = suffix

// Make sure we go back to the original line...
fmt.Fprint(s.writer, "\r")
s.mu.Unlock()
}
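
To make the clearing behaviour concrete, here is the same trick as a standalone, runnable sketch (not part of the commit; names are illustrative):

```go
// Overwriting a long terminal line with a shorter one leaves stale
// characters behind unless the old width is blanked out first.
package main

import (
	"fmt"
	"time"
)

func rewriteLine(oldLine, newLine string) {
	if len(newLine) < len(oldLine) {
		// Pad with spaces to the old width, exactly like SetSuffix does.
		fmt.Printf("\r%*s", len(oldLine), "")
	}
	fmt.Print("\r" + newLine)
}

func main() {
	long := "Waiting for component to start [WARNING x5: FailedScheduling]"
	short := "Waiting for component to start"
	fmt.Print(long)
	time.Sleep(time.Second)
	rewriteLine(long, short)
	fmt.Println()
}
```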

85 changes: 43 additions & 42 deletions pkg/log/status.go
@@ -43,9 +43,10 @@ const prefixSpacing = " "
// Status is used to track ongoing status in a CLI, with a nice loading spinner
// when attached to a terminal
type Status struct {
spinner *fidget.Spinner
status string
warningStatus string
writer io.Writer
}

// NewStatus creates a new default Status
@@ -58,43 +59,6 @@ func NewStatus(w io.Writer) *Status {
return s
}

// StatusFriendlyWriter is used to wrap another Writer to make it toggle the
// status spinner before and after writes so that they do not collide
type StatusFriendlyWriter struct {
status *Status
inner io.Writer
}

var _ io.Writer = &StatusFriendlyWriter{}

func (ww *StatusFriendlyWriter) Write(p []byte) (n int, err error) {
ww.status.spinner.Stop()
_, err = ww.inner.Write([]byte("\r"))
if err != nil {
return n, err
}
n, err = ww.inner.Write(p)
ww.status.spinner.Start()
return n, err
}

// WrapWriter returns a StatusFriendlyWriter for w
func (s *Status) WrapWriter(w io.Writer) io.Writer {
return &StatusFriendlyWriter{
status: s,
inner: w,
}
}

// MaybeWrapWriter returns a StatusFriendlyWriter for w IFF w and spinner's
// output are a terminal, otherwise it returns w
func (s *Status) MaybeWrapWriter(w io.Writer) io.Writer {
if IsTerminal(s.writer) && IsTerminal(w) {
return s.WrapWriter(w)
}
return w
}

// IsTerminal returns true if the writer w is a terminal
// This function is modified when running on Windows, as Go's
// built-in "IsTerminal" only works on UNIX-based systems:
@@ -108,6 +72,23 @@ func IsTerminal(w io.Writer) bool {
return false
}

// WarningStatus puts a warning status within the spinner and then updates the current status
func (s *Status) WarningStatus(status string) {
s.warningStatus = status
s.updateStatus()
}

// updateStatus updates the spinner suffix and makes sure that if the
// previous status was longer, the leftover characters are cleared.
func (s *Status) updateStatus() {
if s.warningStatus != "" {
yellow := color.New(color.FgYellow).SprintFunc()
s.spinner.SetSuffix(fmt.Sprintf(suffixSpacing+"%s [%s %s]", s.status, yellow(getWarningString()), yellow(s.warningStatus)))
} else {
s.spinner.SetSuffix(fmt.Sprintf(suffixSpacing+"%s", s.status))
}
}
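
Callers interact with this exactly as `occlient` does below: start a spinner, then push a warning into it while it keeps spinning. A hypothetical call site:

```go
spinner := log.Spinner("Waiting for component to start")
defer spinner.End(false)
// ...later, when a noisy warning event is seen:
spinner.WarningStatus("WARNING x5: FailedScheduling, use `-v` for more information.")
```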

// Start starts a new phase of the status, if attached to a terminal
// there will be a loading spinner with this status
func (s *Status) Start(status string, debug bool) {
@@ -149,11 +130,17 @@ func (s *Status) End(success bool) {

if !IsJSON() {
if success {
// Clear the warning (unneeded now)
s.WarningStatus("")
green := color.New(color.FgGreen).SprintFunc()
fmt.Fprintf(s.writer, prefixSpacing+"%s"+suffixSpacing+"%s [%s]\n", green(getSuccessString()), s.status, s.spinner.TimeSpent())
} else {
red := color.New(color.FgRed).SprintFunc()
fmt.Fprintf(s.writer, prefixSpacing+"%s"+suffixSpacing+"%s [%s]\n", red(getErrString()), s.status, s.spinner.TimeSpent())
if s.warningStatus != "" {
fmt.Fprintf(s.writer, prefixSpacing+"%s"+suffixSpacing+"%s [%s] [%s]\n", red(getErrString()), s.status, s.spinner.TimeSpent(), s.warningStatus)
} else {
fmt.Fprintf(s.writer, prefixSpacing+"%s"+suffixSpacing+"%s [%s]\n", red(getErrString()), s.status, s.spinner.TimeSpent())
}
}
}

@@ -191,14 +178,20 @@ func Successf(format string, a ...interface{}) {
}
}

// Warningf will output in an appropriate "warning" manner
func Warningf(format string, a ...interface{}) {
yellow := color.New(color.FgYellow).SprintFunc()
if !IsJSON() {
fmt.Fprintf(GetStderr(), " %s%s%s\n", yellow(getWarningString()), suffixSpacing, fmt.Sprintf(format, a...))
}
}

// Swarningf (like Sprintf) will return a string in the "warning" manner
func Swarningf(format string, a ...interface{}) string {
yellow := color.New(color.FgYellow).SprintFunc()
return fmt.Sprintf(" %s%s%s", yellow(getWarningString()), suffixSpacing, fmt.Sprintf(format, a...))
}

// Warning will output in an appropriate "warning" manner
func Warning(a ...interface{}) {
yellow := color.New(color.FgYellow).SprintFunc()
@@ -223,6 +216,14 @@ func Error(a ...interface{}) {
}
}

// FinalError will output a final error in an appropriate "error" manner
func FinalError(a ...interface{}) {
red := color.New(color.FgRed).SprintFunc()
if !IsJSON() {
fmt.Fprintf(GetStderr(), "%s%s", red("\nERROR:\n"), red(fmt.Sprintln(a...)))
}
}
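
`FinalError` is what produces the trailing `ERROR:` block in the example outputs above. A hypothetical call site (the actual CLI wiring is in the remaining changed files of this commit; `doPush` is a stand-in name):

```go
// Hypothetical wiring; doPush stands in for odo's push logic.
if err := doPush(); err != nil {
	log.FinalError(err)
	os.Exit(1)
}
```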

// Italic will simply print out information on a new italic line
func Italic(a ...interface{}) {
italic := color.New(color.Italic).SprintFunc()
83 changes: 76 additions & 7 deletions pkg/occlient/occlient.go
@@ -16,6 +16,8 @@ import (
"strings"
"time"

"github.com/olekukonko/tablewriter"

"github.com/fatih/color"
"github.com/golang/glog"
"github.com/pkg/errors"
@@ -88,6 +90,7 @@ type CreateArgs struct {
}

const (
// failedEventCount is how many times a warning event must occur before it is surfaced to the user
failedEventCount = 5
OcUpdateTimeout = 5 * time.Minute
OcBuildTimeout = 5 * time.Minute
OpenShiftNameSpace = "openshift"
@@ -1801,8 +1804,8 @@ func (c *Client) WaitAndGetPod(selector string, desiredPhase corev1.PodPhase, wa
}

glog.V(4).Infof("Waiting for %s pod", selector)
spinner := log.Spinner(waitMessage)
defer spinner.End(false)

w, err := c.kubeClient.CoreV1().Pods(c.Namespace).Watch(metav1.ListOptions{
LabelSelector: selector,
@@ -1812,10 +1815,10 @@
}
defer w.Stop()

// First, we start a goroutine watching for the pod status
podChannel := make(chan *corev1.Pod)
watchErrorChannel := make(chan error)

go func(spinny *log.Status) {
loop:
for {
val, ok := <-w.ResultChan()
@@ -1827,7 +1830,6 @@ func (c *Client) WaitAndGetPod(selector string, desiredPhase corev1.PodPhase, wa
glog.V(4).Infof("Status of %s pod is %s", e.Name, e.Status.Phase)
switch e.Status.Phase {
case desiredPhase:
glog.V(4).Infof("Pod %s is %v", e.Name, desiredPhase)
podChannel <- e
break loop
@@ -1842,15 +1844,82 @@ func (c *Client) WaitAndGetPod(selector string, desiredPhase corev1.PodPhase, wa
}
close(podChannel)
close(watchErrorChannel)
}(spinner)

// Secondly, we start a goroutine watching for events related to the pod, updating our pod status accordingly.
eventWatcher, err := c.kubeClient.CoreV1().Events(c.Namespace).Watch(metav1.ListOptions{})
if err != nil {
return nil, errors.Wrapf(err, "unable to watch for events")
}
defer eventWatcher.Stop()
eventChannel := make(chan *corev1.Event)
watchEventChannel := make(chan error)
failedEvents := make(map[string]corev1.Event)

go func(failedEvents *map[string]corev1.Event, spinny *log.Status) {
loop:
for {
val, ok := <-eventWatcher.ResultChan()
if !ok {
watchEventChannel <- errors.New("watch channel was closed")
break loop
}
if e, ok := val.Object.(*corev1.Event); ok {

// Log the information of *all* warning events in order to help the user determine what happened or stalled.

// If there are multiple failed events, we should show them.
if e.Type == "Warning" {

if e.Count >= failedEventCount {
(*failedEvents)[e.Name] = *e
glog.V(4).Infof("Warning Event: Count: %d, Reason: %s, Message: %s", e.Count, e.Reason, e.Message)
// Change the spinner message to show the warning
spinny.WarningStatus(fmt.Sprintf("WARNING x%d: %s, use `-v` for more information.", e.Count, e.Reason))
}

}

} else {
watchEventChannel <- errors.New("unable to convert object to event")
break loop
}
}
close(eventChannel)
close(watchEventChannel)
}(&failedEvents, spinner)

select {
case val := <-podChannel:
spinner.End(true)
return val, nil
case err := <-watchErrorChannel:
return nil, err
case err := <-watchEventChannel:
return nil, errors.Wrap(err, "error with watch event")
case <-time.After(pushTimeout):
return nil, errors.Errorf("waited %s but couldn't find running pod matching selector: '%s'", pushTimeout, selector)

// Create a useful error if there are any failed events
errorMessage := fmt.Sprintf(`waited %s but was unable to find a running pod matching selector: '%s'`, pushTimeout, selector)

if len(failedEvents) != 0 {

tableString := &strings.Builder{}
table := tablewriter.NewWriter(tableString)
table.SetHeader([]string{"Name", "Count", "Reason", "Message"})
for name, event := range failedEvents {
table.Append([]string{name, strconv.Itoa(int(event.Count)), event.Reason, event.Message})
}

table.Render()

errorMessage = fmt.Sprintf(`waited %s but was unable to find a running pod matching selector: '%s'
For more information to help determine the cause of the error, re-run with '-v'.
See below for a list of failed events that occurred %d or more times during deployment:
%s`, pushTimeout, selector, failedEventCount, tableString)
}

return nil, errors.New(errorMessage)
}
}
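
Stripped of the Kubernetes types, the shape of `WaitAndGetPod` is two producer goroutines racing a timeout through a `select`. A self-contained sketch of just that pattern (names are illustrative, not part of the commit):

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// waitFor mirrors the select at the end of WaitAndGetPod: the first of
// a result, a watch error, or the timeout wins.
func waitFor(results <-chan string, errs <-chan error, timeout time.Duration) (string, error) {
	select {
	case r := <-results:
		return r, nil
	case err := <-errs:
		return "", err
	case <-time.After(timeout):
		return "", errors.New("timed out waiting for a result")
	}
}

func main() {
	results := make(chan string)
	errs := make(chan error)
	go func() {
		time.Sleep(100 * time.Millisecond) // simulate the pod going Running
		results <- "pod is Running"
	}()
	fmt.Println(waitFor(results, errs, time.Second))
}
```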

