From c296a7cc82fb4a1507e1a32489e1d1c1cb270173 Mon Sep 17 00:00:00 2001 From: Patryk Diak Date: Wed, 24 May 2023 12:53:25 +0200 Subject: [PATCH] Revert "Do not set the operator as available before updating the network config" This reverts commit c63e98d2bbb4e26e5d79fc226307b04050841336. --- pkg/controller/operconfig/cluster.go | 2 - .../operconfig/operconfig_controller.go | 1 - pkg/controller/statusmanager/pod_status.go | 3 +- .../statusmanager/status_manager.go | 7 - .../statusmanager/status_manager_test.go | 127 ------------------ 5 files changed, 1 insertion(+), 139 deletions(-) diff --git a/pkg/controller/operconfig/cluster.go b/pkg/controller/operconfig/cluster.go index 540ddc6f53..d5d0a86f13 100644 --- a/pkg/controller/operconfig/cluster.go +++ b/pkg/controller/operconfig/cluster.go @@ -86,8 +86,6 @@ func (r *ReconcileOperConfig) ClusterNetworkStatus(ctx context.Context, operConf // Update the cluster config status status := network.StatusFromOperatorConfig(&operConfig.Spec, &clusterConfig.Status) if status == nil || reflect.DeepEqual(*status, clusterConfig.Status) { - // clusterConfig is already updated - r.status.SetConfigUpdated(true) return nil, nil } clusterConfig.Status = *status diff --git a/pkg/controller/operconfig/operconfig_controller.go b/pkg/controller/operconfig/operconfig_controller.go index b09487bb86..d840a58a36 100644 --- a/pkg/controller/operconfig/operconfig_controller.go +++ b/pkg/controller/operconfig/operconfig_controller.go @@ -470,7 +470,6 @@ func (r *ReconcileOperConfig) Reconcile(ctx context.Context, request reconcile.R fmt.Sprintf("Could not update cluster configuration status: %v", err)) return reconcile.Result{}, err } - r.status.SetConfigUpdated(true) } r.status.SetNotDegraded(statusmanager.OperatorConfig) diff --git a/pkg/controller/statusmanager/pod_status.go b/pkg/controller/statusmanager/pod_status.go index 13d18ec51b..c0717e52bb 100644 --- a/pkg/controller/statusmanager/pod_status.go +++ b/pkg/controller/statusmanager/pod_status.go @@ -262,8 +262,7 @@ func (status *StatusManager) SetFromPods() { status.unsetProgressing(PodDeployment) } - // set the operator as available only after it rolled out all resources and the operator config was updated - if reachedAvailableLevel && status.configUpdated { + if reachedAvailableLevel { status.set(reachedAvailableLevel, operv1.OperatorCondition{ Type: operv1.OperatorStatusTypeAvailable, Status: operv1.ConditionTrue}) diff --git a/pkg/controller/statusmanager/status_manager.go b/pkg/controller/statusmanager/status_manager.go index 489365e91b..03431ca5d4 100644 --- a/pkg/controller/statusmanager/status_manager.go +++ b/pkg/controller/statusmanager/status_manager.go @@ -87,7 +87,6 @@ type StatusManager struct { failing [maxStatusLevel]*operv1.OperatorCondition installComplete bool - configUpdated bool // All our informers and listers dsInformers map[string]cache.SharedIndexInformer @@ -534,12 +533,6 @@ func (status *StatusManager) SetNotDegraded(statusLevel StatusLevel) { status.setNotDegraded(statusLevel) } -func (status *StatusManager) SetConfigUpdated(updated bool) { - status.Lock() - defer status.Unlock() - status.configUpdated = updated -} - // syncProgressing syncs the current Progressing status func (status *StatusManager) syncProgressing() { for _, c := range status.failing { diff --git a/pkg/controller/statusmanager/status_manager_test.go b/pkg/controller/statusmanager/status_manager_test.go index bd2a79f65c..635d078836 100644 --- a/pkg/controller/statusmanager/status_manager_test.go +++ 
b/pkg/controller/statusmanager/status_manager_test.go @@ -339,7 +339,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { no := &operv1.Network{ObjectMeta: metav1.ObjectMeta{Name: names.OPERATOR_CONFIG}} set(t, client, no) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err := getStatuses(client, "testing") if err != nil { @@ -377,7 +376,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { }, } set(t, client, dsB) - status.SetConfigUpdated(true) status.SetFromPods() // Since the DaemonSet.Status reports no pods Available, the status should be Progressing @@ -434,7 +432,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { for dsA.Status.NumberUnavailable > 0 || dsB.Status.NumberUnavailable > 0 { set(t, client, dsA) set(t, client, dsB) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -489,7 +486,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { set(t, client, dsA) set(t, client, dsB) time.Sleep(1 * time.Second) // minimum transition time fidelity - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -534,7 +530,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { // that we enter Progressing state but otherwise stay Available dsA.Generation = 2 set(t, client, dsA) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -576,7 +571,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { ObservedGeneration: 2, } set(t, client, dsA) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -617,7 +611,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { ObservedGeneration: 2, } set(t, client, dsA) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -662,7 +655,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { t0 := time.Now() time.Sleep(time.Second / 10) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -724,7 +716,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { } } setLastPodState(t, client, "testing", ps) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -776,7 +767,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { UpdatedNumberScheduled: 1, } set(t, client, dsA) - status.SetConfigUpdated(true) status.SetFromPods() // see that the pod state is sensible @@ -834,7 +824,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { }, } set(t, client, dsNC) - status.SetConfigUpdated(true) status.SetFromPods() // We should now be Progressing, but not un-Available @@ -876,7 +865,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { } } setLastPodState(t, client, "testing", ps) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -913,7 +901,6 @@ func TestStatusManagerSetFromDaemonSets(t *testing.T) { dsNC.Status.DesiredNumberScheduled = 1 dsNC.Status.UpdatedNumberScheduled = 1 set(t, client, dsNC) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -952,7 +939,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) { no := &operv1.Network{ObjectMeta: metav1.ObjectMeta{Name: names.OPERATOR_CONFIG}} set(t, client, no) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err := getStatuses(client, "testing") @@ -978,7 +964,6 @@ func 
TestStatusManagerSetFromDeployments(t *testing.T) { depA := &appsv1.Deployment{ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alpha", Labels: sl}} set(t, client, depA) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -1017,7 +1002,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) { depA.Status.UnavailableReplicas = 0 depA.Status.AvailableReplicas = depA.Status.Replicas set(t, client, depA) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -1074,7 +1058,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) { t0 := time.Now() time.Sleep(time.Second / 10) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -1141,7 +1124,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) { depB.Status.AvailableReplicas = depB.Status.Replicas set(t, client, depB) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -1183,7 +1165,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) { } } setLastPodState(t, client, "testing", ps) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -1226,7 +1207,6 @@ func TestStatusManagerSetFromDeployments(t *testing.T) { depB.Status.UnavailableReplicas = 0 depB.Status.AvailableReplicas = depB.Status.Replicas set(t, client, depB) - status.SetConfigUpdated(true) status.SetFromPods() co, oc, err = getStatuses(client, "testing") @@ -1422,7 +1402,6 @@ func TestStatusManagerCheckCrashLoopBackOffPods(t *testing.T) { } set(t, client, podnC) - status.SetConfigUpdated(true) status.SetFromPods() oc, err := getOC(client) @@ -1490,7 +1469,6 @@ func TestStatusManagerCheckCrashLoopBackOffPods(t *testing.T) { } set(t, client, podC) - status.SetConfigUpdated(true) status.SetFromPods() oc, err = getOC(client) if err != nil { @@ -1568,7 +1546,6 @@ func TestStatusManagerHyperShift(t *testing.T) { }} set(t, mgmtClient, depHCP) - mgmtStatus.SetConfigUpdated(true) mgmtStatus.SetFromPods() // mgmt conditions should not reflect the failures in hosted clusters namespace @@ -1597,107 +1574,3 @@ func TestStatusManagerHyperShift(t *testing.T) { t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions) } } - -func TestStatusManagerSetConfigUpdated(t *testing.T) { - client := fake.NewFakeClient() - status := New(client, "testing", "") - setFakeListers(status) - no := &operv1.Network{ObjectMeta: metav1.ObjectMeta{Name: names.OPERATOR_CONFIG}} - set(t, client, no) - - status.SetFromPods() - co, oc, err := getStatuses(client, "testing") - if err != nil { - t.Fatalf("error getting ClusterOperator: %v", err) - } - if !conditionsInclude(oc.Status.Conditions, []operv1.OperatorCondition{ - { - Type: operv1.OperatorStatusTypeProgressing, - Status: operv1.ConditionTrue, - Reason: "Deploying", - }, - }) { - t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions) - } - if len(co.Status.Versions) > 0 { - t.Fatalf("Status.Versions unexpectedly already set: %#v", co.Status.Versions) - } - - // Create minimal Deployment - depA := &appsv1.Deployment{ - ObjectMeta: metav1.ObjectMeta{Namespace: "one", Name: "alpha", Labels: sl}, - Status: appsv1.DeploymentStatus{ - Replicas: 1, - UpdatedReplicas: 1, - AvailableReplicas: 1, - }, - Spec: appsv1.DeploymentSpec{ - Selector: &metav1.LabelSelector{ - MatchLabels: map[string]string{"app": "gamma"}, - }, - }, - } - set(t, client, depA) - - // Even though the deployment is done the 
operator should not be Available if the config is not updated - status.SetConfigUpdated(false) - status.SetFromPods() - - co, oc, err = getStatuses(client, "testing") - if err != nil { - t.Fatalf("error getting ClusterOperator: %v", err) - } - if !conditionsInclude(oc.Status.Conditions, []operv1.OperatorCondition{ - { - Type: operv1.OperatorStatusTypeDegraded, - Status: operv1.ConditionFalse, - }, - { - Type: operv1.OperatorStatusTypeProgressing, - Status: operv1.ConditionFalse, - }, - { - Type: operv1.OperatorStatusTypeUpgradeable, - Status: operv1.ConditionTrue, - }, - { - Type: operv1.OperatorStatusTypeAvailable, - Status: operv1.ConditionFalse, - Reason: "Startup", - }, - }) { - t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions) - } - if len(co.Status.Versions) > 0 { - t.Fatalf("Status.Versions unexpectedly already set: %#v", co.Status.Versions) - } - - // Operator should be available when the deployment is done and the config is updated - status.SetConfigUpdated(true) - status.SetFromPods() - - oc, err = getOC(client) - if err != nil { - t.Fatalf("error getting ClusterOperator: %v", err) - } - if !conditionsInclude(oc.Status.Conditions, []operv1.OperatorCondition{ - { - Type: operv1.OperatorStatusTypeDegraded, - Status: operv1.ConditionFalse, - }, - { - Type: operv1.OperatorStatusTypeProgressing, - Status: operv1.ConditionFalse, - }, - { - Type: operv1.OperatorStatusTypeUpgradeable, - Status: operv1.ConditionTrue, - }, - { - Type: operv1.OperatorStatusTypeAvailable, - Status: operv1.ConditionTrue, - }, - }) { - t.Fatalf("unexpected Status.Conditions: %#v", oc.Status.Conditions) - } -}
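For readers skimming this revert: the net effect is to restore the pre-c63e98d2 behavior in which SetFromPods marks the operator Available as soon as all DaemonSets and Deployments have rolled out, without additionally waiting for the cluster network config status to be written. The sketch below is a minimal, self-contained Go illustration of the two availability checks, not the operator's actual code; this StatusManager is a stripped-down stand-in that keeps only the configUpdated field and SetConfigUpdated setter removed by this patch.

package main

import (
	"fmt"
	"sync"
)

// StatusManager is a simplified stand-in for the operator's status manager,
// keeping only the state relevant to this revert.
type StatusManager struct {
	sync.Mutex
	configUpdated bool // field removed by this revert
}

// SetConfigUpdated is the setter removed by this revert; the reconcile loop
// called it once the cluster network config status had been written.
func (s *StatusManager) SetConfigUpdated(updated bool) {
	s.Lock()
	defer s.Unlock()
	s.configUpdated = updated
}

// availableBeforeRevert models the gated check being removed: Available was
// set only after all resources rolled out AND the config status was updated.
func (s *StatusManager) availableBeforeRevert(reachedAvailableLevel bool) bool {
	s.Lock()
	defer s.Unlock()
	return reachedAvailableLevel && s.configUpdated
}

// availableAfterRevert models the restored check: rollout alone is enough.
func availableAfterRevert(reachedAvailableLevel bool) bool {
	return reachedAvailableLevel
}

func main() {
	s := &StatusManager{}
	// All DaemonSets/Deployments have rolled out, but the network config
	// status has not yet been written back.
	fmt.Println("before revert:", s.availableBeforeRevert(true)) // false
	fmt.Println("after revert: ", availableAfterRevert(true))    // true
}

Under the gated logic being reverted, an operator whose pods had fully rolled out could still report Available=False with Reason "Startup" until SetConfigUpdated(true) was called, which is exactly the window the removed TestStatusManagerSetConfigUpdated exercised.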