4 changes: 2 additions & 2 deletions cluster-autoscaler/Makefile
@@ -15,9 +15,9 @@ test-unit: clean deps build
 	$(ENVVAR) godep go test --test.short -race ./... $(FLAGS)
 
 release: build
 
 ifndef REGISTRY
-$(error REGISTRY is undefined)
+ERR = $(error REGISTRY is undefined)
+	$(ERR)
 endif
 	docker build -t ${REGISTRY}/cluster-autoscaler:${TAG} .
 	gcloud docker push ${REGISTRY}/cluster-autoscaler:${TAG}
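Context for the Makefile change: a bare $(error ...) inside a top-level ifndef is expanded while the Makefile is parsed, so it aborts every invocation, not just make release. Assigning the error to a recursively expanded variable and expanding that variable on a recipe line defers the failure until the release target actually runs, which appears to be the point of this change. A minimal sketch of the idiom (REQUIRED_VAR is a hypothetical placeholder, not part of the PR):

# ERR uses recursive assignment (=), so $(error ...) is not evaluated here;
# it fires only when $(ERR) is expanded inside a recipe.
ifndef REQUIRED_VAR
ERR = $(error REQUIRED_VAR is undefined)
endif

build:
	@echo "build does not need REQUIRED_VAR"

release:
	$(ERR)
	@echo "releasing with REQUIRED_VAR=$(REQUIRED_VAR)"

With REQUIRED_VAR unset, make build still succeeds and only make release aborts; with it set, ERR is never defined and $(ERR) expands to an empty recipe line, which make skips.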
12 changes: 6 additions & 6 deletions cluster-autoscaler/cluster_autoscaler.go
@@ -90,7 +90,7 @@ func main() {
 				continue
 			}
 
-			pods, err := unschedulablePodLister.List()
+			allPods, err := unschedulablePodLister.List()
 			if err != nil {
 				glog.Errorf("Failed to list unscheduled pods: %v", err)
 				continue
@@ -99,16 +99,16 @@ func main() {
 			// We need to reset all pods that have been marked as unschedulable not after
 			// the newest node became available for the scheduler.
 			allNodesAvailableTime := GetAllNodesAvailableTime(nodes)
-			resetOldPods(kubeClient, pods, allNodesAvailableTime)
+			podsToReset, unschedulablePods := SlicePodsByPodScheduledTime(allPods, allNodesAvailableTime)
+			ResetPodScheduledCondition(kubeClient, podsToReset)
 
 			// From now on we only care about unschedulable pods that were marked after the newest
 			// node became available for the scheduler.
-			pods = filterOldPods(pods, allNodesAvailableTime)
-			if len(pods) == 0 {
+			if len(unschedulablePods) == 0 {
 				glog.V(1).Info("No unschedulable pods")
 				continue
 			}
-			for _, pod := range pods {
+			for _, pod := range unschedulablePods {
 				glog.V(1).Infof("Pod %s/%s is unschedulable", pod.Namespace, pod.Name)
 			}
 
@@ -144,7 +144,7 @@ func main() {
 				continue
 			}
 
-			for _, pod := range pods {
+			for _, pod := range unschedulablePods {
 				err = predicateChecker.CheckPredicates(pod, nodeInfo)
 				if err == nil {
 					migHelpsSomePods = true
2 changes: 2 additions & 0 deletions cluster-autoscaler/deploy/deploy.sh
@@ -17,6 +17,8 @@
 # Usage:
 # REGISTRY=<my_reg> MIG_LINK=<my_mig> [MIN=1] [MAX=4] [VERSION=v1.1] deploy.sh
 
+set -e
+
 ROOT=$(dirname "${BASH_SOURCE}")/..
 
 if [ -z "$REGISTRY" ]; then
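The set -e addition makes the shell abort the deployment on the first failing command instead of carrying on against a half-applied state. A minimal illustration (the commands are placeholders, not taken from deploy.sh):

#!/bin/bash
set -e                  # exit immediately when any command returns non-zero

false                   # a failing command stops the script right here
echo "never reached"    # without set -e, execution would continue to this line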
36 changes: 17 additions & 19 deletions cluster-autoscaler/utils.go
@@ -147,40 +147,38 @@ func GetAllNodesAvailableTime(nodes []*kube_api.Node) time.Time {
 	return result.Add(1 * time.Minute)
 }
 
-// Returns pods for which PodScheduled condition have LastTransitionTime after
-// the threshold.
+// SlicePodsByPodScheduledTime slices the given pod array into pods whose PodScheduled
+// condition was updated after the threshold and the rest.
 // Each pod must be in condition "Scheduled: False; Reason: Unschedulable"
-// NOTE: This function must be in sync with resetOldPods.
-func filterOldPods(pods []*kube_api.Pod, threshold time.Time) []*kube_api.Pod {
-	var result []*kube_api.Pod
+func SlicePodsByPodScheduledTime(pods []*kube_api.Pod, threshold time.Time) (oldPods []*kube_api.Pod, newPods []*kube_api.Pod) {
 	for _, pod := range pods {
 		_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
-		if condition != nil && condition.LastTransitionTime.After(threshold) {
-			result = append(result, pod)
+		if condition != nil {
+			if condition.LastTransitionTime.After(threshold) {
+				newPods = append(newPods, pod)
+			} else {
+				oldPods = append(oldPods, pod)
+			}
 		}
 	}
-	return result
+	return

> Contributor: Remove
> Contributor Author: As discussed offline this is required.

 }
 
-// Resets pod condition PodScheduled to "unknown" for all the pods with LastTransitionTime
+// ResetPodScheduledCondition resets pod condition PodScheduled to "unknown" for all the pods with LastTransitionTime
 // not after the threshold time.
-// NOTE: This function must be in sync with resetOldPods.
-func resetOldPods(kubeClient *kube_client.Client, pods []*kube_api.Pod, threshold time.Time) {
+func ResetPodScheduledCondition(kubeClient *kube_client.Client, pods []*kube_api.Pod) {
 	for _, pod := range pods {
-		_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
-		if condition != nil && !condition.LastTransitionTime.After(threshold) {
-			glog.V(4).Infof("Reseting pod condition for %s/%s, last transition: %s",
-				pod.Namespace, pod.Name, condition.LastTransitionTime.Time.String())
-			if err := resetPodScheduledCondition(kubeClient, pod); err != nil {
-				glog.Errorf("Error during reseting pod condition for %s/%s: %v", pod.Namespace, pod.Name, err)
-			}
+		if err := resetPodScheduledConditionForPod(kubeClient, pod); err != nil {
+			glog.Errorf("Error during resetting pod condition for %s/%s: %v", pod.Namespace, pod.Name, err)
 		}
 	}
 }
 
-func resetPodScheduledCondition(kubeClient *kube_client.Client, pod *kube_api.Pod) error {
+func resetPodScheduledConditionForPod(kubeClient *kube_client.Client, pod *kube_api.Pod) error {
 	_, condition := kube_api.GetPodCondition(&pod.Status, kube_api.PodScheduled)
 	if condition != nil {
+		glog.V(4).Infof("Resetting pod condition for %s/%s, last transition: %s",
+			pod.Namespace, pod.Name, condition.LastTransitionTime.Time.String())
 		condition.Status = kube_api.ConditionUnknown
 		condition.LastTransitionTime = kube_api_unversioned.Now()
 		_, err := kubeClient.Pods(pod.Namespace).UpdateStatus(pod)
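The new SlicePodsByPodScheduledTime uses named result parameters with a bare return; the inline thread above appears to concern that return, which Go requires as the terminating statement of any function with result parameters, so it cannot simply be removed. Building both slices in one pass also drops the old NOTE obligation to keep filterOldPods and resetOldPods in sync. A self-contained sketch of the same partition pattern, with a simplified pod type standing in for kube_api.Pod:

package main

import (
	"fmt"
	"time"
)

// pod is a simplified stand-in for kube_api.Pod; it keeps only the
// LastTransitionTime of its PodScheduled condition.
type pod struct {
	name               string
	lastTransitionTime time.Time
}

// slicePodsByScheduledTime partitions pods in one pass, mirroring
// SlicePodsByPodScheduledTime: named results plus a bare return.
func slicePodsByScheduledTime(pods []pod, threshold time.Time) (oldPods, newPods []pod) {
	for _, p := range pods {
		if p.lastTransitionTime.After(threshold) {
			newPods = append(newPods, p)
		} else {
			oldPods = append(oldPods, p)
		}
	}
	return // bare return: the named results oldPods and newPods are returned
}

func main() {
	threshold := time.Now()
	pods := []pod{
		{"stale-pod", threshold.Add(-2 * time.Minute)},
		{"fresh-pod", threshold.Add(30 * time.Second)},
	}
	oldPods, newPods := slicePodsByScheduledTime(pods, threshold)
	fmt.Println("to reset:", oldPods)      // condition changed before the threshold
	fmt.Println("still pending:", newPods) // condition changed after the threshold
}

In the PR itself, the first slice is handed to ResetPodScheduledCondition so the scheduler re-evaluates those pods, while the second drives the scale-up check in main().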