
Commit

Merge pull request #283 from ironcladlou/degraded-propagation
Bug 1698562: status: introduce ingresscontroller degraded condition
openshift-merge-robot committed Aug 7, 2019
2 parents bbe2742 + e5f283b commit df1eba3
Showing 6 changed files with 112 additions and 129 deletions.
3 changes: 2 additions & 1 deletion assets/router/deployment.yaml
@@ -4,6 +4,7 @@ kind: Deployment
apiVersion: apps/v1
# name and namespace are set at runtime.
spec:
  progressDeadlineSeconds: 600
  template:
    spec:
      serviceAccountName: router
@@ -41,7 +42,7 @@ spec:
        readinessProbe:
          initialDelaySeconds: 10
          httpGet:
            path: /healthz
            path: /healthz/ready
            port: 1936
        resources:
          requests:
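
Note: the new progressDeadlineSeconds: 600 field makes the Deployment controller mark the router Deployment's Progressing condition False with reason ProgressDeadlineExceeded once a rollout stalls for ten minutes; that is the signal the new degraded logic in this commit consumes. A minimal sketch (not part of this commit) of the condition shape the operator then looks for:

package main

import (
	"fmt"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func main() {
	// The condition the Deployment controller records when the rollout
	// stalls past progressDeadlineSeconds; computeIngressDegradedCondition
	// (pkg/operator/controller/ingress/status.go below) checks for this shape.
	stalled := appsv1.DeploymentCondition{
		Type:    appsv1.DeploymentProgressing,
		Status:  corev1.ConditionFalse,
		Reason:  "ProgressDeadlineExceeded",
		Message: "deployment exceeded its progress deadline",
	}
	fmt.Printf("%s=%s (%s): %s\n", stalled.Type, stalled.Status, stalled.Reason, stalled.Message)
}
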
2 changes: 2 additions & 0 deletions hack/run-local.sh
@@ -2,6 +2,8 @@

set -euo pipefail

oc scale --replicas 0 -n openshift-cluster-version deployments/cluster-version-operator
oc scale --replicas 0 -n openshift-ingress-operator deployments ingress-operator

IMAGE=$(oc get -n openshift-ingress-operator deployments/ingress-operator -o json | jq -r '.spec.template.spec.containers[0].env[] | select(.name=="IMAGE").value')
RELEASE_VERSION=$(oc get clusterversion/version -o json | jq -r '.status.desired.version')
8 changes: 4 additions & 4 deletions pkg/manifests/bindata.go

Some generated files are not rendered by default.

18 changes: 18 additions & 0 deletions pkg/operator/controller/ingress/status.go
@@ -37,6 +37,7 @@ func (r *reconciler) syncIngressControllerStatus(ic *operatorv1.IngressControlle
	updated.Status.Conditions = mergeConditions(updated.Status.Conditions, computeIngressAvailableCondition(deployment))
	updated.Status.Conditions = mergeConditions(updated.Status.Conditions, computeLoadBalancerStatus(ic, service, operandEvents)...)
	updated.Status.Conditions = mergeConditions(updated.Status.Conditions, computeDNSStatus(ic, wildcardRecord, dnsConfig)...)
	updated.Status.Conditions = mergeConditions(updated.Status.Conditions, computeIngressDegradedCondition(deployment))

	if !ingressStatusesEqual(updated.Status, ic.Status) {
		if err := r.client.Status().Update(context.TODO(), updated); err != nil {
@@ -108,6 +109,23 @@ func computeIngressAvailableCondition(deployment *appsv1.Deployment) operatorv1.
	}
}

func computeIngressDegradedCondition(deployment *appsv1.Deployment) operatorv1.OperatorCondition {
	for _, cond := range deployment.Status.Conditions {
		if cond.Type == appsv1.DeploymentProgressing && cond.Status == corev1.ConditionFalse && cond.Reason == "ProgressDeadlineExceeded" {
			return operatorv1.OperatorCondition{
				Type:    operatorv1.OperatorStatusTypeDegraded,
				Status:  operatorv1.ConditionTrue,
				Reason:  "DeploymentFailed",
				Message: fmt.Sprintf("The deployment failed (reason: %s) with message: %s", cond.Reason, cond.Message),
			}
		}
	}
	return operatorv1.OperatorCondition{
		Type:   operatorv1.OperatorStatusTypeDegraded,
		Status: operatorv1.ConditionFalse,
	}
}

// ingressStatusesEqual compares two IngressControllerStatus values. Returns true
// if the provided values should be considered equal for the purpose of determining
// whether an update is necessary, false otherwise.
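
A minimal test sketch (not included in this commit) showing how computeIngressDegradedCondition reacts to a Deployment that exceeded its progress deadline; the test name and its placement alongside status.go in the ingress package are assumptions:

package ingress

import (
	"testing"

	operatorv1 "github.com/openshift/api/operator/v1"

	appsv1 "k8s.io/api/apps/v1"
	corev1 "k8s.io/api/core/v1"
)

func TestComputeIngressDegradedCondition(t *testing.T) {
	// A router Deployment whose rollout stalled past progressDeadlineSeconds.
	deployment := &appsv1.Deployment{
		Status: appsv1.DeploymentStatus{
			Conditions: []appsv1.DeploymentCondition{
				{
					Type:    appsv1.DeploymentProgressing,
					Status:  corev1.ConditionFalse,
					Reason:  "ProgressDeadlineExceeded",
					Message: "deployment exceeded its progress deadline",
				},
			},
		},
	}

	cond := computeIngressDegradedCondition(deployment)
	if cond.Type != operatorv1.OperatorStatusTypeDegraded || cond.Status != operatorv1.ConditionTrue {
		t.Fatalf("expected Degraded=True, got %s=%s", cond.Type, cond.Status)
	}
}
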
129 changes: 67 additions & 62 deletions pkg/operator/controller/status/controller.go
@@ -21,6 +21,7 @@ import (
	"k8s.io/apimachinery/pkg/api/errors"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	utilclock "k8s.io/apimachinery/pkg/util/clock"

	"sigs.k8s.io/controller-runtime/pkg/cache"
	"sigs.k8s.io/controller-runtime/pkg/client"
@@ -43,6 +44,9 @@ const (

var log = logf.Logger.WithName(controllerName)

// clock is to enable unit testing
var clock utilclock.Clock = utilclock.RealClock{}

// New creates the status controller. This is the controller that handles all
// the logic for creating the ClusterOperator operator and updating its status.
//
@@ -139,8 +143,10 @@ func (r *reconciler) Reconcile(request reconcile.Request) (reconcile.Result, err
	allIngressesAvailable := checkAllIngressesAvailable(state.IngressControllers)

	co.Status.Versions = r.computeOperatorStatusVersions(oldStatus.Versions, allIngressesAvailable)
	co.Status.Conditions = r.computeOperatorStatusConditions(oldStatus.Conditions,
		state.Namespace, allIngressesAvailable, oldStatus.Versions, co.Status.Versions)

	co.Status.Conditions = mergeConditions(co.Status.Conditions, computeOperatorAvailableCondition(allIngressesAvailable))
	co.Status.Conditions = mergeConditions(co.Status.Conditions, computeOperatorProgressingCondition(allIngressesAvailable, oldStatus.Versions, co.Status.Versions, r.OperatorReleaseVersion, r.IngressControllerImage))
	co.Status.Conditions = mergeConditions(co.Status.Conditions, computeOperatorDegradedCondition(state.IngressControllers))

	if !operatorStatusesEqual(*oldStatus, co.Status) {
		if err := r.client.Status().Update(context.TODO(), co); err != nil {
@@ -236,31 +242,6 @@ func (r *reconciler) computeOperatorStatusVersions(oldVersions []configv1.Operan
	}
}

// computeOperatorStatusConditions computes the operator's current state.
func (r *reconciler) computeOperatorStatusConditions(oldConditions []configv1.ClusterOperatorStatusCondition,
	ns *corev1.Namespace, allIngressesAvailable bool,
	oldVersions, curVersions []configv1.OperandVersion) []configv1.ClusterOperatorStatusCondition {
	var oldDegradedCondition, oldProgressingCondition, oldAvailableCondition *configv1.ClusterOperatorStatusCondition
	for i := range oldConditions {
		switch oldConditions[i].Type {
		case configv1.OperatorDegraded:
			oldDegradedCondition = &oldConditions[i]
		case configv1.OperatorProgressing:
			oldProgressingCondition = &oldConditions[i]
		case configv1.OperatorAvailable:
			oldAvailableCondition = &oldConditions[i]
		}
	}

	conditions := []configv1.ClusterOperatorStatusCondition{
		computeOperatorDegradedCondition(oldDegradedCondition, ns),
		r.computeOperatorProgressingCondition(oldProgressingCondition, allIngressesAvailable, oldVersions, curVersions),
		computeOperatorAvailableCondition(oldAvailableCondition, allIngressesAvailable),
	}

	return conditions
}

// checkAllIngressesAvailable checks if all the ingress controllers are available.
func checkAllIngressesAvailable(ingresses []operatorv1.IngressController) bool {
	for _, ing := range ingresses {
@@ -276,31 +257,36 @@ func checkAllIngressesAvailable(ingresses []operatorv1.IngressController) bool {
		}
	}

	return (len(ingresses) != 0)
	return len(ingresses) != 0
}

// computeOperatorDegradedCondition computes the operator's current Degraded status state.
func computeOperatorDegradedCondition(oldCondition *configv1.ClusterOperatorStatusCondition,
	ns *corev1.Namespace) configv1.ClusterOperatorStatusCondition {
	degradedCondition := configv1.ClusterOperatorStatusCondition{
		Type: configv1.OperatorDegraded,
	}
	if ns == nil {
		degradedCondition.Status = configv1.ConditionTrue
		degradedCondition.Reason = "NoNamespace"
		degradedCondition.Message = "operand namespace does not exist"
	} else {
		degradedCondition.Status = configv1.ConditionFalse
		degradedCondition.Message = "operand namespace exists"
func computeOperatorDegradedCondition(ingresses []operatorv1.IngressController) configv1.ClusterOperatorStatusCondition {
	var degradedIngresses []string
	for _, ingress := range ingresses {
		for _, cond := range ingress.Status.Conditions {
			if cond.Type == operatorv1.OperatorStatusTypeDegraded && cond.Status == operatorv1.ConditionTrue {
				degradedIngresses = append(degradedIngresses, ingress.Name)
			}
		}
	}
	if len(degradedIngresses) == 0 {
		return configv1.ClusterOperatorStatusCondition{
			Type:   configv1.OperatorDegraded,
			Status: configv1.ConditionFalse,
			Reason: "NoIngressControllersDegraded",
		}
	}
	return configv1.ClusterOperatorStatusCondition{
		Type:    configv1.OperatorDegraded,
		Status:  configv1.ConditionTrue,
		Reason:  "IngressControllersDegraded",
		Message: fmt.Sprintf("Some ingresscontrollers are degraded: %s", strings.Join(degradedIngresses, ",")),
	}

	setLastTransitionTime(&degradedCondition, oldCondition)
	return degradedCondition
}

// computeOperatorProgressingCondition computes the operator's current Progressing status state.
func (r *reconciler) computeOperatorProgressingCondition(oldCondition *configv1.ClusterOperatorStatusCondition,
	allIngressesAvailable bool, oldVersions, curVersions []configv1.OperandVersion) configv1.ClusterOperatorStatusCondition {
func computeOperatorProgressingCondition(allIngressesAvailable bool, oldVersions, curVersions []configv1.OperandVersion, operatorReleaseVersion, ingressControllerImage string) configv1.ClusterOperatorStatusCondition {
	// TODO: Update progressingCondition when an ingresscontroller
	// progressing condition is created. The Operator's condition
	// should be derived from the ingresscontroller's condition.
@@ -310,7 +296,7 @@ func (r *reconciler) computeOperatorProgressingCondition(oldCondition *configv1.

	progressing := false

	messages := []string{}
	var messages []string
	if !allIngressesAvailable {
		messages = append(messages, "Not all ingress controllers are available.")
		progressing = true
@@ -327,13 +313,13 @@ func (r *reconciler) computeOperatorProgressingCondition(oldCondition *configv1.
		}
		switch opv.Name {
		case OperatorVersionName:
			if opv.Version != r.OperatorReleaseVersion {
				messages = append(messages, fmt.Sprintf("Moving to release version %q.", r.OperatorReleaseVersion))
			if opv.Version != operatorReleaseVersion {
				messages = append(messages, fmt.Sprintf("Moving to release version %q.", operatorReleaseVersion))
				progressing = true
			}
		case IngressControllerVersionName:
			if opv.Version != r.IngressControllerImage {
				messages = append(messages, fmt.Sprintf("Moving to ingress-controller image version %q.", r.IngressControllerImage))
			if opv.Version != ingressControllerImage {
				messages = append(messages, fmt.Sprintf("Moving to ingress-controller image version %q.", ingressControllerImage))
				progressing = true
			}
		}
@@ -350,13 +336,11 @@ func (r *reconciler) computeOperatorProgressingCondition(oldCondition *configv1.
		progressingCondition.Message = strings.Join(messages, "\n")
	}

	setLastTransitionTime(&progressingCondition, oldCondition)
	return progressingCondition
}

// computeOperatorAvailableCondition computes the operator's current Available status state.
func computeOperatorAvailableCondition(oldCondition *configv1.ClusterOperatorStatusCondition,
	allIngressesAvailable bool) configv1.ClusterOperatorStatusCondition {
func computeOperatorAvailableCondition(allIngressesAvailable bool) configv1.ClusterOperatorStatusCondition {
	availableCondition := configv1.ClusterOperatorStatusCondition{
		Type: configv1.OperatorAvailable,
	}
@@ -370,19 +354,40 @@ func computeOperatorAvailableCondition(oldCondition *configv1.ClusterOperatorSta
		availableCondition.Message = "Not all ingress controllers are available."
	}

	setLastTransitionTime(&availableCondition, oldCondition)
	return availableCondition
}

// setLastTransitionTime sets LastTransitionTime for the given condition.
// If the condition has changed, it will assign a new timestamp otherwise keeps the old timestamp.
func setLastTransitionTime(condition, oldCondition *configv1.ClusterOperatorStatusCondition) {
	if oldCondition != nil && condition.Status == oldCondition.Status &&
		condition.Reason == oldCondition.Reason && condition.Message == oldCondition.Message {
		condition.LastTransitionTime = oldCondition.LastTransitionTime
	} else {
		condition.LastTransitionTime = metav1.Now()
// mergeConditions adds or updates matching conditions, and updates
// the transition time if details of a condition have changed. Returns
// the updated condition array.
func mergeConditions(conditions []configv1.ClusterOperatorStatusCondition, updates ...configv1.ClusterOperatorStatusCondition) []configv1.ClusterOperatorStatusCondition {
	now := metav1.NewTime(clock.Now())
	var additions []configv1.ClusterOperatorStatusCondition
	for i, update := range updates {
		add := true
		for j, cond := range conditions {
			if cond.Type == update.Type {
				add = false
				if conditionChanged(cond, update) {
					conditions[j].Status = update.Status
					conditions[j].Reason = update.Reason
					conditions[j].Message = update.Message
					conditions[j].LastTransitionTime = now
					break
				}
			}
		}
		if add {
			updates[i].LastTransitionTime = now
			additions = append(additions, updates[i])
		}
	}
	conditions = append(conditions, additions...)
	return conditions
}

func conditionChanged(a, b configv1.ClusterOperatorStatusCondition) bool {
	return a.Status != b.Status || a.Reason != b.Reason || a.Message != b.Message
}

// operatorStatusesEqual compares two ClusterOperatorStatus values. Returns
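
The package-level clock variable added above exists so tests can pin LastTransitionTime. A minimal sketch (not part of this commit) of how a test in the status package might use a fake clock with mergeConditions; the test name is an assumption:

package status

import (
	"testing"
	"time"

	configv1 "github.com/openshift/api/config/v1"

	utilclock "k8s.io/apimachinery/pkg/util/clock"
)

func TestMergeConditionsKeepsTransitionTime(t *testing.T) {
	// Pin the package-level clock so LastTransitionTime is deterministic.
	fake := utilclock.NewFakeClock(time.Date(2019, time.August, 7, 0, 0, 0, 0, time.UTC))
	clock = fake
	defer func() { clock = utilclock.RealClock{} }()

	degraded := configv1.ClusterOperatorStatusCondition{
		Type:   configv1.OperatorDegraded,
		Status: configv1.ConditionFalse,
		Reason: "NoIngressControllersDegraded",
	}

	conditions := mergeConditions(nil, degraded)
	first := conditions[0].LastTransitionTime

	// Re-merging an unchanged condition must not move the timestamp,
	// even after the clock advances.
	fake.Step(time.Minute)
	conditions = mergeConditions(conditions, degraded)
	if !conditions[0].LastTransitionTime.Equal(&first) {
		t.Fatalf("LastTransitionTime changed for an unchanged condition")
	}
}
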
