Skip to content
This repository was archived by the owner on Feb 3, 2023. It is now read-only.
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion README.md
Original file line number Diff line number Diff line change
Expand Up @@ -23,9 +23,9 @@ The node-drainer container takes as argument the parameters below.
| count | The number of nodes to drain. | `1` |
| max-unscheduled-pods | The maximum number of unscheduled pods on the cluster beyond which the drain will fail. | `0` |
| eviction-timeout | The timeout in seconds for pods eviction during node drain. | `300` |
| poll-interval | The poll interval in seconds to check pods deletion on drain. | `5` |
| dev | Enable dev mode for logging. | `false` |
| v | Logs verbosity. 0 => panic, 1 => error, 2 => warning, 3 => info, 4 => debug | `3` |
| asg-poll-interval | AutoScaling Groups polling interval (used to generate custom metrics about ASGs). | `30` |


## Supervision
Expand Down
5 changes: 4 additions & 1 deletion main.go
Original file line number Diff line number Diff line change
Expand Up @@ -25,6 +25,7 @@ func main() {
fSelector map[string]string
fEvictionGlobalTimeout int
fOlderThan time.Duration
fPollInterval int
fCount int
fMaxUnscheduledPods int
fKubeConfig string
Expand All @@ -33,10 +34,11 @@ func main() {
flag.BoolVar(&fEnableDevLogs, "dev", false, "Enable dev mode for logging.")
flag.IntVar(&fLogVerbosity, "v", 3, "Logs verbosity. 0 => panic, 1 => error, 2 => warning, 3 => info, 4 => debug")
flag.Var(cliflag.NewMapStringString(&fSelector), "l", "Selector to list the nodes to drain on labels separated by commas (e.g. `-l foo=bar,bar=baz`).")
flag.IntVar(&fEvictionGlobalTimeout, "eviction-timeout", 300, "The timeout in seconds for pods eviction during node drain.")
flag.DurationVar(&fOlderThan, "older-than", time.Hour*8, "The minimum lifespan that a node must have to be drained.")
flag.IntVar(&fCount, "count", 1, "The number of nodes to drain.")
flag.IntVar(&fMaxUnscheduledPods, "max-unscheduled-pods", 0, "The maximum number of unscheduled pods on the cluster beyond which the drain will fail.")
flag.IntVar(&fEvictionGlobalTimeout, "eviction-timeout", 300, "The timeout in seconds for pods eviction during node drain.")
flag.IntVar(&fPollInterval, "poll-interval", 5, "The poll interval in seconds to check pods deletion on drain.")
flag.StringVar(&fKubeConfig, "kubeconfig", "", "(optional) absolute path to the kubeconfig file")
flag.Parse()

Expand Down Expand Up @@ -73,6 +75,7 @@ func main() {
// Perform node drains
d := drainer.New(drainer.Configuration{
EvictionGlobalTimeout: fEvictionGlobalTimeout,
PollInterval: time.Second * time.Duration(fPollInterval),
Cli: clientset,
Log: log,
})
Expand Down
5 changes: 2 additions & 3 deletions pkg/drainer/drainer.go
Original file line number Diff line number Diff line change
Expand Up @@ -27,8 +27,6 @@ const (
evictionKind = "Eviction"
// evictionSubresource represents the kind of evictions object as pod's subresource
evictionSubresource = "pods/eviction"
// The delete pod polling interval
pollInterval = time.Second
)

// ErrNoPodToEvict indicates that there's no pod to evict on the node.
Expand All @@ -37,6 +35,7 @@ var ErrNoPodToEvict = errors.New("no pod to evict")
// Configuration wraps Drainer configuration
type Configuration struct {
EvictionGlobalTimeout int
PollInterval time.Duration
Cli *kubernetes.Clientset
Log logr.Logger
}
Expand Down Expand Up @@ -379,7 +378,7 @@ func deleteTimeout(pods []corev1.Pod) time.Duration {
// waitForDelete polls pods to check for their deletion.
// This code is largely inspired by the kubectl CLI source code.
func (d *Drainer) waitForDelete(ctx context.Context, pods []corev1.Pod) ([]corev1.Pod, error) {
err := wait.PollImmediate(pollInterval, deleteTimeout(pods), func() (bool, error) {
err := wait.PollImmediate(d.PollInterval, deleteTimeout(pods), func() (bool, error) {
pendingPods := []corev1.Pod{}
for i, pod := range pods {
p, err := d.Cli.CoreV1().Pods(pod.Namespace).Get(ctx, pod.Name, metav1.GetOptions{})
Expand Down