Update ClusterLogForwarder status when ClusterLogging becomes valid
After a valid ClusterLogForwarder and an invalid ClusterLogging resource
are created, the ClusterLogForwarder's status will show that the CLF is
not available. When the ClusterLogging CR is then changed to a valid
configuration, the CLF's status should reflect that everything is now
OK.

Signed-off-by: Andreas Karis <ak.karis@gmail.com>
andreaskaris committed Feb 21, 2024
1 parent a2c2275 commit 1b70c34
Showing 2 changed files with 41 additions and 3 deletions.
10 changes: 9 additions & 1 deletion internal/controller/clusterlogging/clusterlogging_controller.go
@@ -108,10 +108,18 @@ func (r *ReconcileClusterLogging) Reconcile(ctx context.Context, request ctrl.Re
telemetry.Data.CLInfo.Set("managedStatus", constants.UnManagedStatus)
return ctrl.Result{}, nil
}
-clf, err, _ := loader.FetchClusterLogForwarder(r.Client, request.NamespacedName.Namespace, request.NamespacedName.Name, false, func() loggingv1.ClusterLogging { return instance })
+clf, err, cls := loader.FetchClusterLogForwarder(r.Client, request.NamespacedName.Namespace, request.NamespacedName.Name, false, func() loggingv1.ClusterLogging { return instance })
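+// The third return value (cls) is now captured so the ClusterLogging state can be mirrored into the CLF status below.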
if err != nil && !errors.IsNotFound(err) {
return ctrl.Result{}, err
}
+// Make sure that the ClusterLogForwarder status is updated as well, as we might migrate from an invalid to a valid
+// configuration or vice versa.
+clf.Status.Synchronize(cls)
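+// Deferring the write means it runs on every return path out of Reconcile, so the synchronized status is persisted even if a later step bails out early.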
+defer func() {
+	if err := r.Client.Status().Update(ctx, &clf); err != nil {
+		log.Error(err, "Error while updating status for ClusterLogForwarder", "namespace", clf.Namespace, "name", clf.Name)
+	}
+}()

clusterVersion, clusterID := r.GetClusterVersionID(ctx, request.Namespace)
resourceNames := factory.GenerateResourceNames(clf)
34 changes: 32 additions & 2 deletions test/e2e/logforwarding/miscellaneous/forward_miscellaneous_test.go
@@ -3,8 +3,11 @@ package miscellaneous
import (
"fmt"
"testing"
"time"

"github.com/openshift/cluster-logging-operator/test/helpers"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"

"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"
@@ -43,9 +46,11 @@ var spec = loggingv1.ClusterLogForwarderSpec{
},
}

-// TestLogForwardingWithEmptyCollection tests for issue https://github.com/openshift/cluster-logging-operator/issues/2312.
-// It creates a CL with cl.Spec.Collection set to nil. This would trigger a nil pointer exception without a
+// TestLogForwardingWithEmptyCollection tests for issues https://github.com/openshift/cluster-logging-operator/issues/2312
+// and https://github.com/openshift/cluster-logging-operator/issues/2314.
+// It first creates a CL with cl.Spec.Collection set to nil. This would trigger a nil pointer exception without a
// fix in place.
+// It then updates the CL to a valid configuration. Without a fix in place, the CLF's status would not update.
func TestLogForwardingWithEmptyCollection(t *testing.T) {
// First, make sure that the Operator can handle a nil cl.Spec.Collection.
// https://github.com/openshift/cluster-logging-operator/issues/2312
@@ -67,6 +72,31 @@ func TestLogForwardingWithEmptyCollection(t *testing.T) {
require.NoError(t, g.Wait())
require.NoError(t, c.WaitFor(clf, client.ClusterLogForwarderValidationFailure))
require.NoError(t, framework.WaitFor(helpers.ComponentTypeCollector))

+// Now, make sure that the CLF's status updates to Ready when we update the CL resource to a valid configuration.
+// https://github.com/openshift/cluster-logging-operator/issues/2314
+t.Log("TestLogForwardingWithEmptyCollection: Make sure CLF updates when CL transitions to good state")
+clSpec := &loggingv1.CollectionSpec{
+	Type:          loggingv1.LogCollectionTypeVector,
+	CollectorSpec: loggingv1.CollectorSpec{},
+}
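+// RetryOnConflict re-runs the read-modify-write closure below whenever the API server rejects the update with a 409 Conflict (stale resourceVersion).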
+retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
+	if err := c.Get(cl); err != nil {
+		return err
+	}
+	cl.Spec.Collection = clSpec
+	return c.Update(cl)
+})
+require.NoError(t, retryErr)
+// A single WaitFor would give up and return an error before the status converges. Instead, use the K8s retry
+// helpers and poll for up to 30 seconds.
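+// Steps: 10 attempts at a flat 3-second interval (Factor: 1.0 disables exponential backoff), roughly 30s in total; the always-true predicate retries on any error.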
+retryErr = retry.OnError(
+	wait.Backoff{Steps: 10, Duration: 3 * time.Second, Factor: 1.0},
+	func(error) bool { return true },
+	func() error { t.Log("Retrieving CLF status"); return c.WaitFor(clf, client.ClusterLogForwarderReady) },
+)
+require.NoError(t, retryErr)
+require.NoError(t, framework.WaitFor(helpers.ComponentTypeCollector))
}

// TestLogForwardingWithEmptyCollection tests for issue https://github.com/openshift/cluster-logging-operator/issues/2315.
