Skip to content

Commit

Permalink
Test: catch nil ptr exceptions, update CLF status when CL becomes valid
Browse files Browse the repository at this point in the history
Test openshift#2312
and openshift#2314

Signed-off-by: Andreas Karis <ak.karis@gmail.com>
  • Loading branch information
andreaskaris committed Feb 9, 2024
1 parent 5c3e8c5 commit 44408cd
Show file tree
Hide file tree
Showing 2 changed files with 126 additions and 0 deletions.
27 changes: 27 additions & 0 deletions test/client/clf.go
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,18 @@ package client

import (
"fmt"
"strings"

loggingv1 "github.com/openshift/cluster-logging-operator/apis/logging/v1"
"github.com/openshift/cluster-logging-operator/test"
v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/watch"
)

const (
	// validationFailureMsg is the substring expected in the "Validation"
	// condition message when a ClusterLogForwarder is rejected because its
	// ClusterLogging instance lacks a valid collector configuration.
	validationFailureMsg = "is dependent on a ClusterLogging instance with a valid Collector configuration"
)

func ClusterLogForwarderReady(e watch.Event) (bool, error) {
clf := e.Object.(*loggingv1.ClusterLogForwarder)
cond := clf.Status.Conditions
Expand All @@ -20,3 +26,24 @@ func ClusterLogForwarderReady(e watch.Event) (bool, error) {
return false, nil
}
}

// ClusterLogForwarderValidationFailure is a watch predicate that reports whether the
// ClusterLogForwarder in the event has entered the expected validation-failure state.
// Returns (false, nil) while the "Validation" condition is absent so c.WaitFor keeps
// waiting (or eventually times out). Once the condition is present, the CLF is
// considered correctly failed — (true, nil) — only when the condition message contains
// validationFailureMsg, its status is "True", and the "Ready" condition is "False".
// Any other combination is unexpected and yields (false, error).
func ClusterLogForwarderValidationFailure(e watch.Event) (bool, error) {
	clf := e.Object.(*loggingv1.ClusterLogForwarder)
	conditions := clf.Status.Conditions

	validation := conditions.GetCondition(loggingv1.ValidationCondition)
	if validation == nil {
		// Condition not set yet; tell the caller to keep polling.
		return false, nil
	}

	messageMatches := strings.Contains(validation.Message, validationFailureMsg)
	notReady := conditions.IsFalseFor(loggingv1.ConditionReady)
	if messageMatches && validation.Status == v1.ConditionTrue && notReady {
		return true, nil
	}
	return false, fmt.Errorf("ClusterLogForwarder unexpected condition: %v", test.YAMLString(clf.Status))
}
99 changes: 99 additions & 0 deletions test/e2e/logforwarding/miscellaneous/forward_miscellaneous_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,99 @@
package miscellaneous

import (
"testing"
"time"

"github.com/openshift/cluster-logging-operator/test/helpers"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/util/retry"

"github.com/stretchr/testify/require"
"golang.org/x/sync/errgroup"

loggingv1 "github.com/openshift/cluster-logging-operator/apis/logging/v1"
"github.com/openshift/cluster-logging-operator/test/client"
"github.com/openshift/cluster-logging-operator/test/framework/e2e"
"github.com/openshift/cluster-logging-operator/test/runtime"
)

// miscellaneousReceiverName is the output name shared by the single output and
// every pipeline's OutputRefs in the spec below.
const miscellaneousReceiverName = "miscellaneous-receiver"

// spec forwards application, audit, and infrastructure logs to one Loki output
// on localhost:3100. The application pipeline additionally attaches two static
// labels.
var spec = loggingv1.ClusterLogForwarderSpec{
	Outputs: []loggingv1.OutputSpec{{
		Name: miscellaneousReceiverName,
		Type: loggingv1.OutputTypeLoki,
		URL:  "http://127.0.0.1:3100",
	}},
	Pipelines: []loggingv1.PipelineSpec{
		{
			Name:       "test-app",
			InputRefs:  []string{loggingv1.InputNameApplication},
			OutputRefs: []string{miscellaneousReceiverName},
			Labels:     map[string]string{"key1": "value1", "key2": "value2"},
		},
		{
			Name:       "test-audit",
			InputRefs:  []string{loggingv1.InputNameAudit},
			OutputRefs: []string{miscellaneousReceiverName},
		},
		{
			Name:       "test-infra",
			InputRefs:  []string{loggingv1.InputNameInfrastructure},
			OutputRefs: []string{miscellaneousReceiverName},
		},
	},
}

// TestLogForwardingWithEmptyCollection tests for issues https://github.com/openshift/cluster-logging-operator/issues/2312
// and https://github.com/openshift/cluster-logging-operator/issues/2314.
// It first creates a CL with cl.Spec.Collection set to nil. This would trigger a nil pointer exception without a
// fix in place.
// It then updates the CL to a valid status. Without a fix in place, the CLF's status would not update.
func TestLogForwardingWithEmptyCollection(t *testing.T) {
	// First, make sure that the Operator can handle a nil cl.Spec.Collection.
	// https://github.com/openshift/cluster-logging-operator/issues/2312
	// (Log message fixed: it previously said "Spec.Condition", but the field
	// nilled out below is Spec.Collection.)
	t.Log("TestLogForwardingWithEmptyCollection: Test handling an empty ClusterLogging Spec.Collection")
	cl := runtime.NewClusterLogging()
	cl.Spec.Collection = nil
	clf := runtime.NewClusterLogForwarder()
	clf.Spec = spec

	c := client.ForTest(t)
	framework := e2e.NewE2ETestFramework()
	defer framework.Cleanup()
	// Register deletions so the CL/CLF are removed even if assertions fail.
	framework.AddCleanup(func() error { return c.Delete(cl) })
	framework.AddCleanup(func() error { return c.Delete(clf) })
	var g errgroup.Group
	e2e.RecreateClClfAsync(&g, c, cl, clf)

	// We now expect to see a validation error: the CLF depends on a CL with a
	// valid collector configuration, which we deliberately did not provide.
	require.NoError(t, g.Wait())
	require.NoError(t, c.WaitFor(clf, client.ClusterLogForwarderValidationFailure))
	require.NoError(t, framework.WaitFor(helpers.ComponentTypeCollector))

	// Now, make sure that the CLF's status updates to Ready when we update the CL resource to a valid status.
	// https://github.com/openshift/cluster-logging-operator/issues/2314
	t.Log("TestLogForwardingWithEmptyCollection: Make sure CLF updates when CL transitions to good state")
	clSpec := &loggingv1.CollectionSpec{
		Type:          loggingv1.LogCollectionTypeVector,
		CollectorSpec: loggingv1.CollectorSpec{},
	}
	// Re-fetch and update inside RetryOnConflict to survive concurrent writes
	// to the CL resource by the operator (optimistic-concurrency conflicts).
	retryErr := retry.RetryOnConflict(retry.DefaultRetry, func() error {
		if err := c.Get(cl); err != nil {
			return err
		}
		cl.Spec.Collection = clSpec
		return c.Update(cl)
	})
	require.NoError(t, retryErr)
	// WaitFor alone will return too early and return an error. Instead, make use of the K8s retry framework and retry
	// up to 30 seconds.
	retryErr = retry.OnError(
		wait.Backoff{Steps: 10, Duration: 3 * time.Second, Factor: 1.0},
		func(error) bool { return true }, // retry on every error until the backoff is exhausted
		func() error { t.Log("Retrieving CLF status"); return c.WaitFor(clf, client.ClusterLogForwarderReady) },
	)
	require.NoError(t, retryErr)
	require.NoError(t, framework.WaitFor(helpers.ComponentTypeCollector))
}

0 comments on commit 44408cd

Please sign in to comment.