Skip to content

Commit

Permalink
Merge pull request #1818 from kyrtapz/bug_13922
Browse files Browse the repository at this point in the history
OCPBUGS-13922: Revert "Do not set the operator as available before updating the network config"
  • Loading branch information
openshift-merge-robot committed May 25, 2023
2 parents 7df32ec + c296a7c commit f68e72f
Show file tree
Hide file tree
Showing 5 changed files with 1 addition and 139 deletions.
2 changes: 0 additions & 2 deletions pkg/controller/operconfig/cluster.go
Expand Up @@ -86,8 +86,6 @@ func (r *ReconcileOperConfig) ClusterNetworkStatus(ctx context.Context, operConf
// Update the cluster config status
status := network.StatusFromOperatorConfig(&operConfig.Spec, &clusterConfig.Status)
if status == nil || reflect.DeepEqual(*status, clusterConfig.Status) {
// clusterConfig is already updated
r.status.SetConfigUpdated(true)
return nil, nil
}
clusterConfig.Status = *status
Expand Down
1 change: 0 additions & 1 deletion pkg/controller/operconfig/operconfig_controller.go
Expand Up @@ -470,7 +470,6 @@ func (r *ReconcileOperConfig) Reconcile(ctx context.Context, request reconcile.R
fmt.Sprintf("Could not update cluster configuration status: %v", err))
return reconcile.Result{}, err
}
r.status.SetConfigUpdated(true)
}

r.status.SetNotDegraded(statusmanager.OperatorConfig)
Expand Down
3 changes: 1 addition & 2 deletions pkg/controller/statusmanager/pod_status.go
Expand Up @@ -262,8 +262,7 @@ func (status *StatusManager) SetFromPods() {
status.unsetProgressing(PodDeployment)
}

// set the operator as available only after it rolled out all resources and the operator config was updated
if reachedAvailableLevel && status.configUpdated {
if reachedAvailableLevel {
status.set(reachedAvailableLevel, operv1.OperatorCondition{
Type: operv1.OperatorStatusTypeAvailable,
Status: operv1.ConditionTrue})
Expand Down
7 changes: 0 additions & 7 deletions pkg/controller/statusmanager/status_manager.go
Expand Up @@ -87,7 +87,6 @@ type StatusManager struct {

failing [maxStatusLevel]*operv1.OperatorCondition
installComplete bool
configUpdated bool

// All our informers and listers
dsInformers map[string]cache.SharedIndexInformer
Expand Down Expand Up @@ -534,12 +533,6 @@ func (status *StatusManager) SetNotDegraded(statusLevel StatusLevel) {
status.setNotDegraded(statusLevel)
}

// SetConfigUpdated records whether the cluster network configuration has
// been written out, taking the manager's lock to guard the shared field.
func (status *StatusManager) SetConfigUpdated(updated bool) {
	status.Lock()
	status.configUpdated = updated
	status.Unlock()
}

// syncProgressing syncs the current Progressing status
func (status *StatusManager) syncProgressing() {
for _, c := range status.failing {
Expand Down
127 changes: 0 additions & 127 deletions pkg/controller/statusmanager/status_manager_test.go
Expand Up @@ -339,7 +339,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
no := &operv1.Network{ObjectMeta: metav1.ObjectMeta{Name: names.OPERATOR_CONFIG}}
set(t, client, no)

status.SetConfigUpdated(true)
status.SetFromPods()
co, oc, err := getStatuses(client, "testing")
if err != nil {
Expand Down Expand Up @@ -377,7 +376,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
},
}
set(t, client, dsB)
status.SetConfigUpdated(true)
status.SetFromPods()

// Since the DaemonSet.Status reports no pods Available, the status should be Progressing
Expand Down Expand Up @@ -434,7 +432,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
for dsA.Status.NumberUnavailable > 0 || dsB.Status.NumberUnavailable > 0 {
set(t, client, dsA)
set(t, client, dsB)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -489,7 +486,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
set(t, client, dsA)
set(t, client, dsB)
time.Sleep(1 * time.Second) // minimum transition time fidelity
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -534,7 +530,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
// that we enter Progressing state but otherwise stay Available
dsA.Generation = 2
set(t, client, dsA)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -576,7 +571,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
ObservedGeneration: 2,
}
set(t, client, dsA)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -617,7 +611,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
ObservedGeneration: 2,
}
set(t, client, dsA)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -662,7 +655,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {

t0 := time.Now()
time.Sleep(time.Second / 10)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -724,7 +716,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
}
}
setLastPodState(t, client, "testing", ps)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -776,7 +767,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
UpdatedNumberScheduled: 1,
}
set(t, client, dsA)
status.SetConfigUpdated(true)
status.SetFromPods()

// see that the pod state is sensible
Expand Down Expand Up @@ -834,7 +824,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
},
}
set(t, client, dsNC)
status.SetConfigUpdated(true)
status.SetFromPods()

// We should now be Progressing, but not un-Available
Expand Down Expand Up @@ -876,7 +865,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
}
}
setLastPodState(t, client, "testing", ps)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -913,7 +901,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) {
dsNC.Status.DesiredNumberScheduled = 1
dsNC.Status.UpdatedNumberScheduled = 1
set(t, client, dsNC)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -952,7 +939,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) {
no := &operv1.Network{ObjectMeta: metav1.ObjectMeta{Name: names.OPERATOR_CONFIG}}
set(t, client, no)

status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err := getStatuses(client, "testing")
Expand All @@ -978,7 +964,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) {
depA := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alpha", Labels: sl}}
set(t, client, depA)

status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -1017,7 +1002,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) {
depA.Status.UnavailableReplicas = 0
depA.Status.AvailableReplicas = depA.Status.Replicas
set(t, client, depA)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -1074,7 +1058,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) {

t0 := time.Now()
time.Sleep(time.Second / 10)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -1141,7 +1124,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) {
depB.Status.AvailableReplicas = depB.Status.Replicas

set(t, client, depB)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -1183,7 +1165,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) {
}
}
setLastPodState(t, client, "testing", ps)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -1226,7 +1207,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) {
depB.Status.UnavailableReplicas = 0
depB.Status.AvailableReplicas = depB.Status.Replicas
set(t, client, depB)
status.SetConfigUpdated(true)
status.SetFromPods()

co, oc, err = getStatuses(client, "testing")
Expand Down Expand Up @@ -1422,7 +1402,6 @@ func TestStatusManagerCheckCrashLoopBackOffPods(t *testing.T) {
}
set(t, client, podnC)

status.SetConfigUpdated(true)
status.SetFromPods()

oc, err := getOC(client)
Expand Down Expand Up @@ -1490,7 +1469,6 @@ func TestStatusManagerCheckCrashLoopBackOffPods(t *testing.T) {
}
set(t, client, podC)

status.SetConfigUpdated(true)
status.SetFromPods()
oc, err = getOC(client)
if err != nil {
Expand Down Expand Up @@ -1568,7 +1546,6 @@ func TestStatusManagerHyperShift(t *testing.T) {
}}
set(t, mgmtClient, depHCP)

mgmtStatus.SetConfigUpdated(true)
mgmtStatus.SetFromPods()

// mgmt conditions should not reflect the failures in hosted clusters namespace
Expand Down Expand Up @@ -1597,107 +1574,3 @@ func TestStatusManagerHyperShift(t *testing.T) {
t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions)
}
}

// TestStatusManagerSetConfigUpdated verifies that the operator is only
// reported Available once BOTH conditions hold: all workloads have rolled
// out AND SetConfigUpdated(true) has been called. It walks three states:
// no workloads (Progressing), rollout done but config not updated
// (Available=False), and rollout done with config updated (Available=True).
func TestStatusManagerSetConfigUpdated(t *testing.T) {
	// Fake client plus a minimal operator Network CR so SetFromPods has
	// something to reconcile against. setFakeListers/set are project test
	// helpers — presumably they seed the informer caches and persist
	// objects into the fake client; verify against their definitions.
	client := fake.NewFakeClient()
	status := New(client, "testing", "")
	setFakeListers(status)
	no := &operv1.Network{ObjectMeta: metav1.ObjectMeta{Name: names.OPERATOR_CONFIG}}
	set(t, client, no)

	// With no workloads created yet, the status manager should report
	// Progressing ("Deploying") and must not publish any versions.
	status.SetFromPods()
	co, oc, err := getStatuses(client, "testing")
	if err != nil {
		t.Fatalf("error getting ClusterOperator: %v", err)
	}
	if !conditionsInclude(oc.Status.Conditions, []operv1.OperatorCondition{
		{
			Type:   operv1.OperatorStatusTypeProgressing,
			Status: operv1.ConditionTrue,
			Reason: "Deploying",
		},
	}) {
		t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions)
	}
	if len(co.Status.Versions) > 0 {
		t.Fatalf("Status.Versions unexpectedly already set: %#v", co.Status.Versions)
	}

	// Create minimal Deployment
	// Its Status reports all replicas updated and available, i.e. a
	// completed rollout. `sl` is a shared label set defined elsewhere in
	// the test file — TODO confirm it matches the watched selector.
	depA := &appsv1.Deployment{
		ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alpha", Labels: sl},
		Status: appsv1.DeploymentStatus{
			Replicas:          1,
			UpdatedReplicas:   1,
			AvailableReplicas: 1,
		},
		Spec: appsv1.DeploymentSpec{
			Selector: &metav1.LabelSelector{
				MatchLabels: map[string]string{"app": "gamma"},
			},
		},
	}
	set(t, client, depA)

	// Even though the deployment is done the operator should not be Available if the config is not updated
	status.SetConfigUpdated(false)
	status.SetFromPods()

	co, oc, err = getStatuses(client, "testing")
	if err != nil {
		t.Fatalf("error getting ClusterOperator: %v", err)
	}
	// Rollout complete, config not updated: not Degraded, not Progressing,
	// Upgradeable — but explicitly Available=False with reason "Startup".
	if !conditionsInclude(oc.Status.Conditions, []operv1.OperatorCondition{
		{
			Type:   operv1.OperatorStatusTypeDegraded,
			Status: operv1.ConditionFalse,
		},
		{
			Type:   operv1.OperatorStatusTypeProgressing,
			Status: operv1.ConditionFalse,
		},
		{
			Type:   operv1.OperatorStatusTypeUpgradeable,
			Status: operv1.ConditionTrue,
		},
		{
			Type:   operv1.OperatorStatusTypeAvailable,
			Status: operv1.ConditionFalse,
			Reason: "Startup",
		},
	}) {
		t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions)
	}
	// Versions are still withheld while the operator is unavailable.
	if len(co.Status.Versions) > 0 {
		t.Fatalf("Status.Versions unexpectedly already set: %#v", co.Status.Versions)
	}

	// Operator should be available when the deployment is done and the config is updated
	status.SetConfigUpdated(true)
	status.SetFromPods()

	oc, err = getOC(client)
	if err != nil {
		t.Fatalf("error getting ClusterOperator: %v", err)
	}
	// Final state: healthy across the board, Available=True.
	if !conditionsInclude(oc.Status.Conditions, []operv1.OperatorCondition{
		{
			Type:   operv1.OperatorStatusTypeDegraded,
			Status: operv1.ConditionFalse,
		},
		{
			Type:   operv1.OperatorStatusTypeProgressing,
			Status: operv1.ConditionFalse,
		},
		{
			Type:   operv1.OperatorStatusTypeUpgradeable,
			Status: operv1.ConditionTrue,
		},
		{
			Type:   operv1.OperatorStatusTypeAvailable,
			Status: operv1.ConditionTrue,
		},
	}) {
		t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions)
	}
}

0 comments on commit f68e72f

Please sign in to comment.