Skip to content

Commit

Permalink
Prevent logs -f from repeating
Browse files Browse the repository at this point in the history
The retryWatcher in client-go seems to be broken and results
in the logs being restarted from the same spot each time despite
the resource version being set.

Using a plain list-watcher Watch call instead fixes the issue.

Signed-off-by: John Schnake <jschnake@vmware.com>
  • Loading branch information
johnSchnake committed Jun 26, 2021
1 parent d0258d6 commit cf5bbd4
Showing 1 changed file with 2 additions and 11 deletions.
13 changes: 2 additions & 11 deletions pkg/client/logs.go
Expand Up @@ -29,7 +29,6 @@ import (
"k8s.io/apimachinery/pkg/watch"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/tools/cache"
watchtool "k8s.io/client-go/tools/watch"
)

const (
Expand Down Expand Up @@ -188,17 +187,9 @@ func watchPodsToStreamLogs(client kubernetes.Interface, cfg *LogConfig, podCh ch
},
}

// You must get an initial resource version for the watcher; the retry watcher can't simply
// start at "now". It will err if given "", or "0" and API instructs users to not assume
// numerical or sequential access.
initVersionObj, err := client.CoreV1().Pods(cfg.Namespace).List(context.TODO(), listOptions)
watcher, err := lw.Watch(listOptions)
if err != nil {
return errors.Wrap(err, "failed to obtain initial resource version for retry watcher")
}

watcher, err := watchtool.NewRetryWatcher(initVersionObj.GetResourceVersion(), lw)
if err != nil {
return errors.Wrap(err, "failed to create retry watcher")
return errors.Wrap(err, "failed to start watching pod logs")
}
ch := watcher.ResultChan()

Expand Down

0 comments on commit cf5bbd4

Please sign in to comment.