diff --git a/cluster-autoscaler/Makefile b/cluster-autoscaler/Makefile
index b7296d2430..1fa99a0bc9 100644
--- a/cluster-autoscaler/Makefile
+++ b/cluster-autoscaler/Makefile
@@ -21,7 +21,7 @@ ifndef REGISTRY
 	$(ERR)
 endif
 	docker build --pull -t ${REGISTRY}/cluster-autoscaler:${TAG} .
-	gcloud docker push ${REGISTRY}/cluster-autoscaler:${TAG}
+	gcloud docker -- push ${REGISTRY}/cluster-autoscaler:${TAG}
 
 clean:
 	rm -f cluster-autoscaler
diff --git a/cluster-autoscaler/clusterstate/clusterstate.go b/cluster-autoscaler/clusterstate/clusterstate.go
index 21ce38d036..882e612d6b 100644
--- a/cluster-autoscaler/clusterstate/clusterstate.go
+++ b/cluster-autoscaler/clusterstate/clusterstate.go
@@ -40,7 +40,7 @@ const (
 
 	// MaxStatusSettingDelayAfterCreation is the maximum time for node to set its initial status after the
 	// node is registered.
-	MaxStatusSettingDelayAfterCreation = time.Minute
+	MaxStatusSettingDelayAfterCreation = 2 * time.Minute
 )
 
 // ScaleUpRequest contains information about the requested node group scale up.
@@ -615,6 +615,16 @@ func isNodeNotStarted(node *apiv1.Node) bool {
 			condition.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < MaxStatusSettingDelayAfterCreation {
 			return true
 		}
+		if condition.Type == apiv1.NodeOutOfDisk &&
+			condition.Status == apiv1.ConditionTrue &&
+			condition.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < MaxStatusSettingDelayAfterCreation {
+			return true
+		}
+		if condition.Type == apiv1.NodeNetworkUnavailable &&
+			condition.Status == apiv1.ConditionTrue &&
+			condition.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < MaxStatusSettingDelayAfterCreation {
+			return true
+		}
 	}
 	return false
 }
diff --git a/cluster-autoscaler/core/scale_up.go b/cluster-autoscaler/core/scale_up.go
index 59f2be57e6..3e773a7457 100644
--- a/cluster-autoscaler/core/scale_up.go
+++ b/cluster-autoscaler/core/scale_up.go
@@ -59,6 +59,7 @@ func ScaleUp(context *AutoscalingContext, unschedulablePods []*apiv1.Pod, nodes
 			upcomingNodes = append(upcomingNodes, nodeTemplate)
 		}
 	}
+	glog.V(4).Infof("Upcoming %d nodes", len(upcomingNodes))
 
 	podsRemainUnschedulable := make(map[*apiv1.Pod]bool)
 	expansionOptions := make([]expander.Option, 0)
@@ -119,12 +120,16 @@ func ScaleUp(context *AutoscalingContext, unschedulablePods []*apiv1.Pod, nodes
 			}
 			if option.NodeCount > 0 {
 				expansionOptions = append(expansionOptions, option)
+			} else {
+				glog.V(2).Infof("No need for any nodes in %s", nodeGroup.Id())
 			}
+		} else {
+			glog.V(4).Infof("No pod can fit to %s", nodeGroup.Id())
 		}
 	}
 
 	if len(expansionOptions) == 0 {
-		glog.V(1).Info("No node group can help with pending pods.")
+		glog.V(1).Info("No expansion options")
 		for pod, unschedulable := range podsRemainUnschedulable {
 			if unschedulable {
 				context.Recorder.Event(pod, apiv1.EventTypeNormal, "NotTriggerScaleUp",
diff --git a/cluster-autoscaler/core/static_autoscaler.go b/cluster-autoscaler/core/static_autoscaler.go
index 7f228ec1cc..3a0b389886 100644
--- a/cluster-autoscaler/core/static_autoscaler.go
+++ b/cluster-autoscaler/core/static_autoscaler.go
@@ -187,12 +187,16 @@ func (a *StaticAutoscaler) RunOnce(currentTime time.Time) {
 		// in the describe situation.
 		schedulablePodsPresent := false
 		if a.VerifyUnschedulablePods {
+
+			glog.V(4).Infof("Filtering out schedulables")
 			newUnschedulablePodsToHelp := FilterOutSchedulable(unschedulablePodsToHelp, readyNodes, allScheduled, a.PredicateChecker)
 			if len(newUnschedulablePodsToHelp) != len(unschedulablePodsToHelp) {
 				glog.V(2).Info("Schedulable pods present")
 				schedulablePodsPresent = true
+			} else {
+				glog.V(4).Info("No schedulable pods")
 			}
 			unschedulablePodsToHelp = newUnschedulablePodsToHelp
 		}
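
Note: the clusterstate.go hunk above repeats the same timing check once per condition type. A minimal sketch of how those three near-identical checks could be driven from one table is given below; it is not part of the patch. The import path and the NodeReady entry are assumptions inferred from the surrounding context, not taken verbatim from the repository.

// Sketch only, assuming the import path used by cluster-autoscaler at this time
// and that the pre-existing check in isNodeNotStarted tests NodeReady == False.
package clusterstate

import (
	"time"

	apiv1 "k8s.io/kubernetes/pkg/api/v1"
)

// MaxStatusSettingDelayAfterCreation mirrors the constant changed in the patch.
const MaxStatusSettingDelayAfterCreation = 2 * time.Minute

// notStartedConditions maps each tracked condition type to the status that
// indicates the node has not finished starting up yet.
var notStartedConditions = map[apiv1.NodeConditionType]apiv1.ConditionStatus{
	apiv1.NodeReady:              apiv1.ConditionFalse, // assumed to match the pre-existing check
	apiv1.NodeOutOfDisk:          apiv1.ConditionTrue,
	apiv1.NodeNetworkUnavailable: apiv1.ConditionTrue,
}

// isNodeNotStarted reports whether any tracked condition moved to its
// "not started" status within MaxStatusSettingDelayAfterCreation of node creation.
func isNodeNotStarted(node *apiv1.Node) bool {
	for _, condition := range node.Status.Conditions {
		wantStatus, tracked := notStartedConditions[condition.Type]
		if tracked && condition.Status == wantStatus &&
			condition.LastTransitionTime.Time.Sub(node.CreationTimestamp.Time) < MaxStatusSettingDelayAfterCreation {
			return true
		}
	}
	return false
}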