conditions.go (forked from openshift/kubernetes)
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package unversioned

import (
	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/apis/extensions"
	"k8s.io/kubernetes/pkg/util/wait"
)
// ControllerHasDesiredReplicas returns a condition that will be true if and only if
// the desired replica count for a controller's ReplicaSelector equals the Replicas count.
func ControllerHasDesiredReplicas(c Interface, controller *api.ReplicationController) wait.ConditionFunc {
	// If we're given a controller where the status lags the spec, it either means that the controller is stale,
	// or that the rc manager hasn't noticed the update yet. Polling status.Replicas is not safe in the latter case.
	desiredGeneration := controller.Generation

	return func() (bool, error) {
		ctrl, err := c.ReplicationControllers(controller.Namespace).Get(controller.Name)
		if err != nil {
			return false, err
		}
		// There's a chance a concurrent update modifies the Spec.Replicas causing this check to pass,
		// or, after this check has passed, a modification causes the rc manager to create more pods.
		// This will not be an issue once we've implemented graceful delete for rcs, but till then
		// concurrent stop operations on the same rc might have unintended side effects.
		return ctrl.Status.ObservedGeneration >= desiredGeneration && ctrl.Status.Replicas == ctrl.Spec.Replicas, nil
	}
}
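// Usage sketch (illustrative only, not part of the original file; the client, rc, interval,
// and timeout names below are assumptions): a ConditionFunc such as the one returned above
// is typically polled until it reports true or a timeout expires, e.g. with wait.Poll:
//
//	cond := ControllerHasDesiredReplicas(client, rc)
//	if err := wait.Poll(2*time.Second, 1*time.Minute, cond); err != nil {
//		// the controller did not reach its desired replica count in time
//	}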
// JobHasDesiredParallelism returns a condition that will be true if the desired parallelism count
// for a job equals the current active count, or is less by an appropriate successful/unsuccessful count.
func JobHasDesiredParallelism(c Interface, job *extensions.Job) wait.ConditionFunc {
	return func() (bool, error) {
		job, err := c.Extensions().Jobs(job.Namespace).Get(job.Name)
		if err != nil {
			return false, err
		}
		// desired parallelism can be the exact number of active pods, in which case return immediately
		if job.Status.Active == *job.Spec.Parallelism {
			return true, nil
		}
		// otherwise, the active and succeeded pods together must account for all requested completions
		progress := *job.Spec.Completions - job.Status.Active - job.Status.Succeeded
		return progress == 0, nil
	}
}
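// Usage sketch for the job condition (again illustrative, with assumed client, job, interval,
// and timeout values):
//
//	if err := wait.Poll(2*time.Second, 1*time.Minute, JobHasDesiredParallelism(client, job)); err != nil {
//		// the job did not reach its desired parallelism in time
//	}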