From fb3c9452bb87be0b0d5a31b256651b935b401e24 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?=
Date: Thu, 3 Jul 2025 19:48:38 +0200
Subject: [PATCH 01/16] feat(multicluster): implement multicluster lifecycle management and reconcile logic

# Conflicts:
#	controller/lifecycle/lifecycle.go
#	controller/testSupport/multicluster.go
---
 .../lifecycle/controllerruntime/lifecycle.go |   20 +-
 controller/lifecycle/lifecycle.go            |   27 +-
 .../lifecycle/multicluster/lifecycle.go      |  130 ++
 .../lifecycle/multicluster/lifecycle_test.go | 1368 +++++++++++++++++
 controller/testSupport/multicluster.go       |   64 +
 go.mod                                       |   29 +-
 go.sum                                       |   46 +-
 7 files changed, 1629 insertions(+), 55 deletions(-)
 create mode 100644 controller/lifecycle/multicluster/lifecycle.go
 create mode 100644 controller/lifecycle/multicluster/lifecycle_test.go
 create mode 100644 controller/testSupport/multicluster.go

diff --git a/controller/lifecycle/controllerruntime/lifecycle.go b/controller/lifecycle/controllerruntime/lifecycle.go
index 48cd752..2c71c16 100644
--- a/controller/lifecycle/controllerruntime/lifecycle.go
+++ b/controller/lifecycle/controllerruntime/lifecycle.go
@@ -73,27 +73,11 @@ func (l *LifecycleManager) Spreader() api.SpreadManager {
 }
 
 func (l *LifecycleManager) Reconcile(ctx context.Context, req ctrl.Request, instance runtimeobject.RuntimeObject) (ctrl.Result, error) {
-	return lifecycle.Reconcile(ctx, req, instance, l.client, l)
-}
-
-func (l *LifecycleManager) validateInterfaces(instance runtimeobject.RuntimeObject, log *logger.Logger) error {
-	if l.Spreader() != nil {
-		_, err := l.Spreader().ToRuntimeObjectSpreadReconcileStatusInterface(instance, log)
-		if err != nil {
-			return err
-		}
-	}
-	if l.ConditionsManager() != nil {
-		_, err := l.ConditionsManager().ToRuntimeObjectConditionsInterface(instance, log)
-		if err != nil {
-			return err
-		}
-	}
-	return nil
+	return lifecycle.Reconcile(ctx, req.NamespacedName, instance, l.client, l)
 }
 
 func (l *LifecycleManager) SetupWithManagerBuilder(mgr ctrl.Manager, maxReconciles int, reconcilerName string, instance runtimeobject.RuntimeObject, debugLabelValue string, log *logger.Logger, eventPredicates ...predicate.Predicate) (*builder.Builder, error) {
-	if err := l.validateInterfaces(instance, log); err != nil {
+	if err := lifecycle.ValidateInterfaces(instance, log, l); err != nil {
 		return nil, err
 	}
 
diff --git a/controller/lifecycle/lifecycle.go b/controller/lifecycle/lifecycle.go
index 35667f7..9e70870 100644
--- a/controller/lifecycle/lifecycle.go
+++ b/controller/lifecycle/lifecycle.go
@@ -13,6 +13,7 @@ import (
 	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
 	ctrl "sigs.k8s.io/controller-runtime"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -26,21 +27,23 @@ import (
 	"github.com/platform-mesh/golang-commons/sentry"
 )
 
-func Reconcile(ctx context.Context, req ctrl.Request, instance runtimeobject.RuntimeObject, cl client.Client, l api.Lifecycle) (ctrl.Result, error) {
+func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtimeobject.RuntimeObject, cl client.Client, l Lifecycle) (ctrl.Result, error) {
 	ctx, span := otel.Tracer(l.Config().OperatorName).Start(ctx, fmt.Sprintf("%s.Reconcile", l.Config().ControllerName))
 	defer span.End()
 
 	result := ctrl.Result{}
 	reconcileId := uuid.New().String()
 
-	log := l.Log().MustChildLoggerWithAttributes("name", req.Name, "namespace", req.Namespace, "reconcile_id", reconcileId)
-	sentryTags := sentry.Tags{"namespace": req.Namespace, "name": req.Name}
+	log := l.Log().MustChildLoggerWithAttributes("name", nName.Name, "namespace", nName.Namespace, "reconcile_id", reconcileId)
+	sentryTags := sentry.Tags{"namespace": nName.Namespace, "name": nName.Name}
 	ctx = logger.SetLoggerInContext(ctx, log)
 	ctx = sentry.ContextWithSentryTags(ctx, sentryTags)
 
 	log.Info().Msg("start reconcile")
-	err := cl.Get(ctx, req.NamespacedName, instance)
+
+	nn := types.NamespacedName{Namespace: nName.Namespace, Name: nName.Name}
+	err := cl.Get(ctx, nn, instance)
 	if err != nil {
 		if kerrors.IsNotFound(err) {
 			log.Info().Msg("instance not found. It was likely deleted")
@@ -363,3 +366,19 @@ func HandleOperatorError(ctx context.Context, operatorError errors.OperatorError
 
 	return ctrl.Result{}, nil
 }
+
+func ValidateInterfaces(instance runtimeobject.RuntimeObject, log *logger.Logger, l Lifecycle) error {
+	if l.Spreader() != nil {
+		_, err := l.Spreader().ToRuntimeObjectSpreadReconcileStatusInterface(instance, log)
+		if err != nil {
+			return err
+		}
+	}
+	if l.ConditionsManager() != nil {
+		_, err := l.ConditionsManager().ToRuntimeObjectConditionsInterface(instance, log)
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
diff --git a/controller/lifecycle/multicluster/lifecycle.go b/controller/lifecycle/multicluster/lifecycle.go
new file mode 100644
index 0000000..4aadd3f
--- /dev/null
+++ b/controller/lifecycle/multicluster/lifecycle.go
@@ -0,0 +1,130 @@
+package multicluster
+
+import (
+	"context"
+	"fmt"
+
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/cluster"
+	"sigs.k8s.io/controller-runtime/pkg/controller"
+	"sigs.k8s.io/controller-runtime/pkg/predicate"
+	"sigs.k8s.io/controller-runtime/pkg/reconcile"
+	mcbuilder "sigs.k8s.io/multicluster-runtime/pkg/builder"
+	mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager"
+	mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile"
+
+	"github.com/platform-mesh/golang-commons/controller/filter"
+	"github.com/platform-mesh/golang-commons/controller/lifecycle"
+	"github.com/platform-mesh/golang-commons/controller/lifecycle/conditions"
+	"github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject"
+	"github.com/platform-mesh/golang-commons/controller/lifecycle/spread"
+	"github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine"
+	"github.com/platform-mesh/golang-commons/logger"
+)
+
+type ClusterGetter interface {
+	GetCluster(ctx context.Context, clusterName string) (cluster.Cluster, error)
+}
+
+type LifecycleManager struct {
+	log                *logger.Logger
+	mgr                ClusterGetter
+	config             lifecycle.Config
+	subroutines        []subroutine.Subroutine
+	spreader           *spread.Spreader
+	conditionsManager  *conditions.ConditionManager
+	prepareContextFunc lifecycle.PrepareContextFunc
+}
+
+func NewLifecycleManager(log *logger.Logger, operatorName string, controllerName string, mgr ClusterGetter, subroutines []subroutine.Subroutine) *LifecycleManager {
+	log = log.MustChildLoggerWithAttributes("operator", operatorName, "controller", controllerName)
+	return &LifecycleManager{
+		log:         log,
+		mgr:         mgr,
+		subroutines: subroutines,
+		config: lifecycle.Config{
+			OperatorName:   operatorName,
+			ControllerName: controllerName,
+		},
+	}
+}
+
+func (l *LifecycleManager) Config() lifecycle.Config {
+	return l.config
+}
+func (l *LifecycleManager) Log() *logger.Logger {
+	return l.log
+}
+func (l *LifecycleManager) Subroutines() []subroutine.Subroutine {
+	return l.subroutines
+}
+func (l *LifecycleManager) PrepareContextFunc() lifecycle.PrepareContextFunc {
+	return l.prepareContextFunc
+}
+func (l *LifecycleManager) ConditionsManager() *conditions.ConditionManager {
+	return l.conditionsManager
+}
+func (l *LifecycleManager) Spreader() *spread.Spreader {
+	return l.spreader
+}
+func (l *LifecycleManager) Reconcile(ctx context.Context, req mcreconcile.Request, instance runtimeobject.RuntimeObject) (ctrl.Result, error) {
+	cl, err := l.mgr.GetCluster(ctx, req.ClusterName)
+	if err != nil {
+		return reconcile.Result{}, fmt.Errorf("failed to get cluster: %w", err)
+	}
+	client := cl.GetClient()
+	return lifecycle.Reconcile(ctx, req.NamespacedName, instance, client, l)
+}
+func (l *LifecycleManager) SetupWithManagerBuilder(mgr mcmanager.Manager, maxReconciles int, reconcilerName string, instance runtimeobject.RuntimeObject, debugLabelValue string, log *logger.Logger, eventPredicates ...predicate.Predicate) (*mcbuilder.Builder, error) {
+	if err := lifecycle.ValidateInterfaces(instance, log, l); err != nil {
+		return nil, err
+	}
+
+	if (l.ConditionsManager() != nil || l.Spreader() != nil) && l.Config().ReadOnly {
+		return nil, fmt.Errorf("cannot use conditions or spread reconciles in read-only mode")
+	}
+
+	eventPredicates = append([]predicate.Predicate{filter.DebugResourcesBehaviourPredicate(debugLabelValue)}, eventPredicates...)
+	opts := controller.TypedOptions[mcreconcile.Request]{
+		MaxConcurrentReconciles: maxReconciles,
+	}
+	return mcbuilder.ControllerManagedBy(mgr).
+		Named(reconcilerName).
+		For(instance).
+		WithOptions(opts).
+		WithEventFilter(predicate.And(eventPredicates...)), nil
+}
+func (l *LifecycleManager) SetupWithManager(mgr mcmanager.Manager, maxReconciles int, reconcilerName string, instance runtimeobject.RuntimeObject, debugLabelValue string, r mcreconcile.Reconciler, log *logger.Logger, eventPredicates ...predicate.Predicate) error {
+	b, err := l.SetupWithManagerBuilder(mgr, maxReconciles, reconcilerName, instance, debugLabelValue, log, eventPredicates...)
+ if err != nil { + return err + } + + return b.Complete(r) +} + +// WithPrepareContextFunc allows to set a function that prepares the context before each reconciliation +// This can be used to add additional information to the context that is needed by the subroutines +// You need to return a new context and an OperatorError in case of an error +func (l *LifecycleManager) WithPrepareContextFunc(prepareFunction lifecycle.PrepareContextFunc) *LifecycleManager { + l.prepareContextFunc = prepareFunction + return l +} + +// WithReadOnly allows to set the controller to read-only mode +// In read-only mode, the controller will not update the status of the instance +func (l *LifecycleManager) WithReadOnly() *LifecycleManager { + l.config.ReadOnly = true + return l +} + +// WithSpreadingReconciles sets the LifecycleManager to spread out the reconciles +func (l *LifecycleManager) WithSpreadingReconciles() *LifecycleManager { + l.spreader = spread.NewSpreader() + return l +} + +func (l *LifecycleManager) WithConditionManagement() *LifecycleManager { + l.conditionsManager = conditions.NewConditionManager() + return l +} diff --git a/controller/lifecycle/multicluster/lifecycle_test.go b/controller/lifecycle/multicluster/lifecycle_test.go new file mode 100644 index 0000000..b9ece2b --- /dev/null +++ b/controller/lifecycle/multicluster/lifecycle_test.go @@ -0,0 +1,1368 @@ +package multicluster + +import ( + "context" + goerrors "errors" + "fmt" + "testing" + "time" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + v1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + controllerruntime "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile" + + "github.com/platform-mesh/golang-commons/controller/lifecycle" + "github.com/platform-mesh/golang-commons/controller/lifecycle/conditions" + "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" + "github.com/platform-mesh/golang-commons/controller/lifecycle/spread" + "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" + pmtesting "github.com/platform-mesh/golang-commons/controller/lifecycle/testing" + "github.com/platform-mesh/golang-commons/controller/testSupport" + operrors "github.com/platform-mesh/golang-commons/errors" + "github.com/platform-mesh/golang-commons/logger/testlogger" + "github.com/platform-mesh/golang-commons/sentry" +) + +func TestLifecycle(t *testing.T) { + namespace := "bar" + name := "foo" + request := mcreconcile.Request{ + Request: controllerruntime.Request{ + NamespacedName: types.NamespacedName{ + Namespace: namespace, + Name: name, + }, + }, + } + testApiObject := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + ctx := context.Background() + + t.Run("Lifecycle with a not found object", func(t *testing.T) { + // Arrange + fakeClient := testSupport.CreateFakeClient(t, &testSupport.TestApiObject{}) + + mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + + // Act + result, err := mgr.Reconcile(ctx, request, &testSupport.TestApiObject{}) + + // Assert + assert.NoError(t, err) + assert.NotNil(t, result) + logMessages, err := log.GetLogMessages() + assert.NoError(t, err) + assert.Equal(t, len(logMessages), 2) + assert.Equal(t, logMessages[0].Message, "start reconcile") + assert.Contains(t, logMessages[1].Message, "instance not found") + }) + 
t.Run("Lifecycle with a finalizer - add finalizer", func(t *testing.T) { + // Arrange + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + }) + t.Run("Lifecycle with a finalizer - finalization", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, 0, len(instance.Finalizers)) + }) + t.Run("Lifecycle with a finalizer - finalization(requeue)", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + RequeueAfter: 1 * time.Second, + }, + }, fakeClient) + + // Act + res, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + assert.Equal(t, time.Duration(1*time.Second), res.RequeueAfter) + }) + t.Run("Lifecycle with a finalizer - finalization(requeueAfter)", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + RequeueAfter: 2 * time.Second, + }, + }, fakeClient) + + // Act + res, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + assert.Equal(t, 2*time.Second, res.RequeueAfter) + }) + t.Run("Lifecycle with a finalizer - skip finalization if the finalizer is not in there", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{"other-finalizer"} + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, 1, 
len(instance.Finalizers)) + }) + t.Run("Lifecycle with a finalizer - failing finalization subroutine", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + Err: fmt.Errorf("some error"), + }, + }, fakeClient) + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.Error(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + }) + t.Run("Lifecycle without changing status", func(t *testing.T) { + // Arrange + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: testSupport.TestStatus{Some: "string"}, + } + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + + // Act + result, err := mgr.Reconcile(ctx, request, instance) + + // Assert + assert.NoError(t, err) + assert.NotNil(t, result) + logMessages, err := log.GetLogMessages() + assert.NoError(t, err) + assert.Equal(t, len(logMessages), 3) + assert.Equal(t, logMessages[0].Message, "start reconcile") + assert.Equal(t, logMessages[1].Message, "skipping status update, since they are equal") + assert.Equal(t, logMessages[2].Message, "end reconcile") + }) + t.Run("Lifecycle with changing status", func(t *testing.T) { + // Arrange + instance := &testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: testSupport.TestStatus{Some: "string"}, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, log := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + + // Act + result, err := mgr.Reconcile(ctx, request, instance) + + // Assert + assert.NoError(t, err) + assert.NotNil(t, result) + logMessages, err := log.GetLogMessages() + assert.NoError(t, err) + assert.Equal(t, len(logMessages), 7) + assert.Equal(t, logMessages[0].Message, "start reconcile") + assert.Equal(t, logMessages[1].Message, "start subroutine") + assert.Equal(t, logMessages[2].Message, "processing instance") + assert.Equal(t, logMessages[3].Message, "processed instance") + assert.Equal(t, logMessages[4].Message, "end subroutine") + + serverObject := &testSupport.TestApiObject{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) + assert.NoError(t, err) + assert.Equal(t, serverObject.Status.Some, "other string") + }) + t.Run("Lifecycle with spread reconciles", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, 
instance.Generation, instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread reconciles on deleted object", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 2, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{pmtesting.ChangeStatusSubroutineFinalizer}, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 2, + NextReconcileTime: metav1.Time{Time: time.Now().Add(2 * time.Hour)}, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + assert.NoError(t, err) + assert.Len(t, instance.Finalizers, 0) + + }) + t.Run("Lifecycle with spread reconciles skips if the generation is the same", func(t *testing.T) { + // Arrange + nextReconcileTime := metav1.NewTime(time.Now().Add(1 * time.Hour)) + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 1, + NextReconcileTime: nextReconcileTime, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + result, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + assert.GreaterOrEqual(t, 12*time.Hour, result.RequeueAfter) + }) + t.Run("Lifecycle with spread reconciles and processing fails (no-retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: false, RequeAfter: false}}, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread reconciles and processing fails (retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.Error(t, err) + assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread 
reconciles and processing needs requeue", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread reconciles and processing needs requeueAfter", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread not implementing the interface", func(t *testing.T) { + // Arrange + instance := &pmtesting.NotImplementingSpreadReconciles{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + mgr.WithSpreadingReconciles() + + // Act + assert.Panics(t, func() { + _, _ = mgr.Reconcile(ctx, request, instance) + }) + }) + + //t.Run("Should setup with manager", func(t *testing.T) { + // // Arrange + // instance := &testSupport.TestApiObject{} + // fakeClient := testSupport.CreateFakeClient(t, instance) + // log, err := logger.New(logger.DefaultConfig()) + // assert.NoError(t, err) + // provider, err := apiexport.New(&rest.Config{}, apiexport.Options{}) + // assert.NoError(t, err) + // m, err := mcmanager.New(&rest.Config{}, provider, mcmanager.Options{}) + // assert.NoError(t, err) + // + // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + // tr := &testReconciler{ + // lifecycleManager: lm, + // } + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) + // + // // Assert + // assert.NoError(t, err) + //}) + + //t.Run("Should setup with manager not implementing interface", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{} + // fakeClient := testSupport.CreateFakeClient(t, instance) + // log, err := logger.New(logger.DefaultConfig()) + // assert.NoError(t, err) + // m, err := manager.New(&rest.Config{}, manager.Options{ + // Scheme: fakeClient.Scheme(), + // }) + // assert.NoError(t, err) + // + // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, 
fakeClient) + // lm.WithSpreadingReconciles() + // tr := &testReconciler{ + // lifecycleManager: lm, + // } + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) + // + // // Assert + // assert.Error(t, err) + //}) + + t.Run("Lifecycle with spread reconciles and refresh label", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + Labels: map[string]string{spread.ReconcileRefreshLabel: "true"}, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 1, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + lm, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + lm.WithSpreadingReconciles() + + // Act + _, err := lm.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + + serverObject := &pmtesting.ImplementingSpreadReconciles{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) + assert.NoError(t, err) + assert.Equal(t, serverObject.Status.Some, "other string") + _, ok := serverObject.Labels[spread.ReconcileRefreshLabel] + assert.False(t, ok) + }) + + t.Run("Should handle a client error", func(t *testing.T) { + // Arrange + _, log := createLifecycleManager([]subroutine.Subroutine{}, nil) + testErr := fmt.Errorf("test error") + + // Act + result, err := lifecycle.HandleClientError("test", log.Logger, testErr, true, sentry.Tags{}) + + // Assert + assert.Error(t, err) + assert.Equal(t, testErr, err) + assert.Equal(t, controllerruntime.Result{}, result) + }) + + t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 1) + assert.Equal(t, instance.Status.Conditions[0].Type, conditions.ConditionReady) + assert.Equal(t, instance.Status.Conditions[0].Status, metav1.ConditionTrue) + assert.Equal(t, instance.Status.Conditions[0].Message, "The resource is ready") + }) + + t.Run("Lifecycle with manage conditions reconciles with subroutine", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + require.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, 
instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + }) + + t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + require.Len(t, instance.Status.Conditions, 3) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + assert.Equal(t, "test", instance.Status.Conditions[2].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + assert.Equal(t, "test", instance.Status.Conditions[2].Message) + + }) + + t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + require.Len(t, instance.Status.Conditions, 3) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + assert.Equal(t, "test", instance.Status.Conditions[2].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + assert.Equal(t, "test", instance.Status.Conditions[2].Message) + + }) + + t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions (update)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + 
Status: testSupport.TestStatus{ + Conditions: []metav1.Condition{ + { + Type: "test", + Status: metav1.ConditionFalse, + Reason: "test", + Message: "test", + }, + }, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + require.Len(t, instance.Status.Conditions, 3) + assert.Equal(t, "test", instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "test", instance.Status.Conditions[0].Message) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[1].Message) + assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[2].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[2].Message) + + }) + + t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Conditions: []metav1.Condition{ + { + Type: conditions.ConditionReady, + Status: metav1.ConditionTrue, + Message: "The resource is ready!!", + Reason: conditions.ConditionReady, + }, + }, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + require.Len(t, instance.Status.Conditions, 3) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + assert.Equal(t, "test", instance.Status.Conditions[2].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + assert.Equal(t, "test", instance.Status.Conditions[2].Message) + + }) + + t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + require.Len(t, 
instance.Status.Conditions, 1) + assert.Equal(t, "test", instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "test", instance.Status.Conditions[0].Message) + + }) + + t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + }) + + t.Run("Lifecycle with manage conditions finalizes with multiple subroutines partially succeeding", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer, pmtesting.ChangeStatusSubroutineFinalizer}, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{}, + pmtesting.ChangeStatusSubroutine{Client: fakeClient}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.Error(t, err) + require.Len(t, instance.Status.Conditions, 3) + assert.Equal(t, "changeStatus_Finalize", instance.Status.Conditions[0].Type, "") + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "The subroutine finalization is complete", instance.Status.Conditions[0].Message) + assert.Equal(t, "FailureScenarioSubroutine_Finalize", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine finalization has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[2].Type) + assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[2].Status) + assert.Equal(t, "The resource is not ready", instance.Status.Conditions[2].Message) + }) + + t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient 
:= testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionUnknown, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is processing", instance.Status.Conditions[1].Message) + }) + + t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + }) + + t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{}, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.Error(t, err) + assert.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + }) + + t.Run("Lifecycle with manage conditions not implementing the interface", func(t *testing.T) { + // Arrange + instance := &pmtesting.NotImplementingSpreadReconciles{ + ObjectMeta: 
metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }, fakeClient) + mgr.WithConditionManagement() + + // Act + // So the validation is already happening in SetupWithManager. So we can panic in the reconcile. + assert.Panics(t, func() { + _, _ = mgr.Reconcile(ctx, request, instance) + }) + }) + + t.Run("Lifecycle with manage conditions failing finalize", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer}, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{}}, fakeClient) + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.Error(t, err) + assert.Equal(t, "FailureScenarioSubroutine", err.Error()) + }) + + t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) + mgr.WithSpreadingReconciles() + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.Error(t, err) + assert.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) + assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) + assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + }) + + t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ + TestApiObject: testSupport.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: testSupport.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := testSupport.CreateFakeClient(t, instance) + + mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) + mgr.WithSpreadingReconciles() + mgr.WithConditionManagement() + + // Act + _, err := mgr.Reconcile(ctx, request, instance) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, 
instance.Status.Conditions[0].Type) + assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) + assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) + assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + }) + + //t.Run("Test Lifecycle setupWithManager /w conditions and expecting no error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{} + // fakeClient := testSupport.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithConditionManagement() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler1", instance, "test", tr, log.Logger) + // + // // Assert + // assert.NoError(t, err) + //}) + + //t.Run("Test Lifecycle setupWithManager /w conditions and expecting error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{} + // fakeClient := testSupport.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithConditionManagement() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler2", instance, "test", tr, log.Logger) + // + // // Assert + // assert.Error(t, err) + //}) + + //t.Run("Test Lifecycle setupWithManager /w spread and expecting no error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{} + // fakeClient := testSupport.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithSpreadingReconciles() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler3", instance, "test", tr, log.Logger) + // + // // Assert + // assert.NoError(t, err) + //}) + + //t.Run("Test Lifecycle setupWithManager /w spread and expecting a error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{} + // fakeClient := testSupport.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithSpreadingReconciles() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log.Logger) + // + // // Assert + // assert.Error(t, err) + //}) + + errorMessage := "oh nose" + t.Run("handleOperatorError", func(t *testing.T) { + t.Run("Should handle an operator error with retry and sentry", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{} + fakeClient := testSupport.CreateFakeClient(t, instance) + + _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + ctx = sentry.ContextWithSentryTags(ctx, 
map[string]string{}) + + // Act + result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), true, true), "handle op error", true, log.Logger) + + // Assert + assert.Error(t, err) + assert.NotNil(t, result) + assert.Equal(t, errorMessage, err.Error()) + + errorMessages, err := log.GetErrorMessages() + assert.NoError(t, err) + assert.Equal(t, errorMessage, *errorMessages[0].Error) + }) + + t.Run("Should handle an operator error without retry", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{} + fakeClient := testSupport.CreateFakeClient(t, instance) + + _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + + // Act + result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), false, false), "handle op error", true, log.Logger) + + // Assert + assert.Nil(t, err) + assert.NotNil(t, result) + + errorMessages, err := log.GetErrorMessages() + assert.NoError(t, err) + assert.Equal(t, errorMessage, *errorMessages[0].Error) + }) + }) + + t.Run("Prepare Context", func(t *testing.T) { + t.Run("Sets a context that can be used in the subroutine", func(t *testing.T) { + // Arrange + ctx := context.Background() + + fakeClient := testSupport.CreateFakeClient(t, testApiObject) + + lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) + lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { + return context.WithValue(ctx, pmtesting.ContextValueKey, "valueFromContext"), nil + }) + tr := &testReconciler{lifecycleManager: lm} + req := mcreconcile.Request{ + Request: controllerruntime.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}, + } + result, err := tr.Reconcile(ctx, req) + + // Then + assert.NotNil(t, ctx) + assert.NotNil(t, result) + assert.NoError(t, err) + + err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, testApiObject) + assert.NoError(t, err) + assert.Equal(t, "valueFromContext", testApiObject.Status.Some) + }) + + t.Run("Handles the errors correctly", func(t *testing.T) { + // Arrange + ctx := context.Background() + + fakeClient := testSupport.CreateFakeClient(t, testApiObject) + + lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) + lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { + return nil, operrors.NewOperatorError(goerrors.New(errorMessage), true, false) + }) + tr := &testReconciler{lifecycleManager: lm} + request := mcreconcile.Request{ + Request: controllerruntime.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}, + } + + result, err := tr.Reconcile(ctx, request) + + // Then + assert.NotNil(t, ctx) + assert.NotNil(t, result) + assert.Error(t, err) + }) + }) +} + +// Test LifecycleManager.WithConditionManagement +func TestLifecycleManager_WithConditionManagement(t *testing.T) { + // Given + fakeClient := testSupport.CreateFakeClient(t, &testSupport.TestApiObject{}) + clusterGetter := &pmtesting.FakeManager{Client: fakeClient} + _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + + // When + l := NewLifecycleManager(log.Logger, "test-operator", "test-controller", clusterGetter, []subroutine.Subroutine{}).WithConditionManagement() + + // Then + assert.True(t, true, 
l.ConditionsManager() != nil) +} + +type testReconciler struct { + lifecycleManager *LifecycleManager +} + +func (r *testReconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (controllerruntime.Result, error) { + return r.lifecycleManager.Reconcile(ctx, req, &testSupport.TestApiObject{}) +} + +func createLifecycleManager(subroutines []subroutine.Subroutine, client client.Client) (*LifecycleManager, *testlogger.TestLogger) { + log := testlogger.New() + clusterGetter := &pmtesting.FakeManager{Client: client} + m := NewLifecycleManager(log.Logger, "test-operator", "test-controller", clusterGetter, subroutines) + return m, log +} diff --git a/controller/testSupport/multicluster.go b/controller/testSupport/multicluster.go new file mode 100644 index 0000000..a9881ed --- /dev/null +++ b/controller/testSupport/multicluster.go @@ -0,0 +1,64 @@ +package testing + +import ( + "context" + "net/http" + + "k8s.io/apimachinery/pkg/api/meta" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/record" + "sigs.k8s.io/controller-runtime/pkg/cache" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" +) + +type FakeManager struct{ Client client.Client } + +func (f *FakeManager) GetCluster(ctx context.Context, clusterName string) (cluster.Cluster, error) { + return &FakeCluster{client: f.Client}, nil +} + +var _ cluster.Cluster = (*FakeCluster)(nil) + +type FakeCluster struct{ client client.Client } + +func (f FakeCluster) GetHTTPClient() *http.Client { + return nil +} + +func (f FakeCluster) GetConfig() *rest.Config { + return nil +} + +func (f FakeCluster) GetCache() cache.Cache { + return nil +} + +func (f FakeCluster) GetScheme() *runtime.Scheme { + return nil +} + +func (f FakeCluster) GetClient() client.Client { + return f.client +} + +func (f FakeCluster) GetFieldIndexer() client.FieldIndexer { + return nil +} + +func (f FakeCluster) GetEventRecorderFor(name string) record.EventRecorder { + return nil +} + +func (f FakeCluster) GetRESTMapper() meta.RESTMapper { + return nil +} + +func (f FakeCluster) GetAPIReader() client.Reader { + return nil +} + +func (f FakeCluster) Start(ctx context.Context) error { + return nil +} diff --git a/go.mod b/go.mod index 851accc..de27034 100644 --- a/go.mod +++ b/go.mod @@ -2,11 +2,19 @@ module github.com/platform-mesh/golang-commons go 1.24.3 +replace ( + k8s.io/api => k8s.io/api v0.32.3 + k8s.io/apimachinery => k8s.io/apimachinery v0.32.3 + k8s.io/client-go => k8s.io/client-go v0.32.3 + k8s.io/utils => k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 + sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.20.4 +) + require ( github.com/99designs/gqlgen v0.17.76 github.com/getsentry/sentry-go v0.34.0 github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a - github.com/go-jose/go-jose/v4 v4.1.1 + github.com/go-jose/go-jose/v4 v4.0.5 github.com/go-logr/logr v1.4.3 github.com/go-logr/zerologr v1.2.3 github.com/golang-jwt/jwt/v5 v5.2.2 @@ -24,20 +32,21 @@ require ( github.com/spf13/viper v1.20.1 github.com/stretchr/testify v1.10.0 github.com/vektah/gqlparser/v2 v2.5.30 - go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 + go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 go.opentelemetry.io/otel v1.37.0 - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 
go.opentelemetry.io/otel/sdk v1.37.0 go.opentelemetry.io/proto/otlp v1.7.0 - golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b + golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 golang.org/x/oauth2 v0.30.0 google.golang.org/grpc v1.73.0 - k8s.io/api v0.33.2 - k8s.io/apimachinery v0.33.2 - k8s.io/client-go v0.33.2 - k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 + k8s.io/api v0.33.0 + k8s.io/apimachinery v0.33.0 + k8s.io/client-go v0.33.0 + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 sigs.k8s.io/controller-runtime v0.21.0 + sigs.k8s.io/multicluster-runtime v0.20.4-alpha.7 ) require ( @@ -45,7 +54,7 @@ require ( github.com/Yiling-J/theine-go v0.6.1 // indirect github.com/antlr4-go/antlr/v4 v4.13.1 // indirect github.com/beorn7/perks v1.0.1 // indirect - github.com/cenkalti/backoff/v5 v5.0.2 // indirect + github.com/cenkalti/backoff/v4 v4.3.0 // indirect github.com/cespare/xxhash/v2 v2.3.0 // indirect github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/emicklei/go-restful/v3 v3.11.0 // indirect @@ -62,10 +71,12 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-viper/mapstructure/v2 v2.3.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect + github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.25.0 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect + github.com/google/gofuzz v1.2.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect diff --git a/go.sum b/go.sum index 8cfcd28..58d0566 100644 --- a/go.sum +++ b/go.sum @@ -19,12 +19,8 @@ github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmO github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= -github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= -github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= -github.com/cenkalti/backoff/v5 v5.0.2 h1:rIfFVxEf1QsI7E1ZHfp/B4DF/6QBAUhmgkxc0H7Zss8= -github.com/cenkalti/backoff/v5 v5.0.2/go.mod h1:rkhZdG3JZukswDf7f0cwqPNk4K0sa+F97BxZthm/crw= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs= github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs= @@ -79,8 +75,8 @@ github.com/go-errors/errors v1.4.2 h1:J6MZopCL4uSllY1OfXM374weqZFFItUbrImctkmUxI github.com/go-errors/errors v1.4.2/go.mod h1:sIVyrIiJhuEF+Pj9Ebtd6P/rEYROXFi3BopGUQ5a5Og= github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a h1:v6zMvHuY9yue4+QkG/HQ/W67wvtQmWJ4SDo9aK/GIno= github.com/go-http-utils/headers v0.0.0-20181008091004-fed159eddc2a/go.mod h1:I79BieaU4fxrw4LMXby6q5OS9XnoR9UIKLOzDFjUmuw= -github.com/go-jose/go-jose/v4 v4.1.1 h1:JYhSgy4mXXzAdF3nUx3ygx347LRXJRrpgyU3adRmkAI= -github.com/go-jose/go-jose/v4 v4.1.1/go.mod 
h1:BdsZGqgdO3b6tTc6LSE56wcDbMMLuPsw5d4ZD5f94kA= +github.com/go-jose/go-jose/v4 v4.0.5 h1:M6T8+mKZl/+fNNuFHvGIzDz7BTLQPIounk/b9dw3AaE= +github.com/go-jose/go-jose/v4 v4.0.5/go.mod h1:s3P1lRrkT8igV8D9OjyL4WRyHvjB6a4JSllnOrmmBOA= github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY= github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A= github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= @@ -317,22 +313,22 @@ github.com/zeebo/xxh3 v1.0.2 h1:xZmwmqxHZA8AI603jOQ0tMqmBr9lPeFwGg6d+xy9DC0= github.com/zeebo/xxh3 v1.0.2/go.mod h1:5NWz9Sef7zIDm2JHfFlcQvNekmcEl9ekUZQQKCYaDcA= go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA= go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0 h1:Hf9xI/XLML9ElpiHVDNwvqI0hIFlzV8dgIr35kV1kRU= -go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.62.0/go.mod h1:NfchwuyNoMcZ5MLHwPrODwUF1HWCXWrL31s8gSAdIKY= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 h1:sbiXRNDSWJOTobXh5HyQKjq6wUC5tNybqjIqDpAY4CU= +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0/go.mod h1:69uWxva0WgAA/4bu2Yy70SLDBwZXuQ6PbBpbsa5iZrQ= go.opentelemetry.io/otel v1.37.0 h1:9zhNfelUvx0KBfu/gb+ZgeAfAgtWrfHJZcAqFC228wQ= go.opentelemetry.io/otel v1.37.0/go.mod h1:ehE/umFRLnuLa/vSccNq9oS1ErUlkkK71gMcN34UG8I= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 h1:Ahq7pZmv87yiyn3jeFz/LekZmPLLdKejuO3NcK9MssM= go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0/go.mod h1:MJTqhM0im3mRLw1i8uGHnCvUEeS7VwRyxlLC78PA18M= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0 h1:EtFWSnwW9hGObjkIdmlnWSydO+Qs8OwzfzXLUPg4xOc= -go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.37.0/go.mod h1:QjUEoiGCPkvFZ/MjK6ZZfNOS6mfVEVKYE99dFhuN2LI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 h1:m639+BofXTvcY1q8CGs4ItwQarYtJPOWmVobfM1HpVI= +go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0/go.mod h1:LjReUci/F4BUyv+y4dwnq3h/26iNOeC3wAIqgvTIZVo= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 h1:SNhVp/9q4Go/XHBkQ1/d5u9P/U+L1yaGPoi0x+mStaI= go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0/go.mod h1:tx8OOlGH6R4kLV67YaYO44GFXloEjGPZuMjEkaaqIp4= go.opentelemetry.io/otel/metric v1.37.0 h1:mvwbQS5m0tbmqML4NqK+e3aDiO02vsf/WgbsdpcPoZE= go.opentelemetry.io/otel/metric v1.37.0/go.mod h1:04wGrZurHYKOc+RKeye86GwKiTb9FKm1WHtO+4EVr2E= go.opentelemetry.io/otel/sdk v1.37.0 h1:ItB0QUqnjesGRvNcmAcU0LyvkVyGJ2xftD29bWdDvKI= go.opentelemetry.io/otel/sdk v1.37.0/go.mod h1:VredYzxUvuo2q3WRcDnKDjbdvmO0sCzOvVAiY+yUkAg= -go.opentelemetry.io/otel/sdk/metric v1.37.0 h1:90lI228XrB9jCMuSdA0673aubgRobVZFhbjxHHspCPc= -go.opentelemetry.io/otel/sdk/metric v1.37.0/go.mod h1:cNen4ZWfiD37l5NhS+Keb5RXVWZWpRE+9WyVCpbo5ps= +go.opentelemetry.io/otel/sdk/metric v1.35.0 h1:1RriWBmCKgkeHEhM7a2uMjMUfP7MsOF5JpUCaEqEI9o= +go.opentelemetry.io/otel/sdk/metric v1.35.0/go.mod h1:is6XYCUMpcKi+ZsOvfluY5YstFnhW0BidkR+gL+qN+w= go.opentelemetry.io/otel/trace v1.37.0 h1:HLdcFNbRQBE2imdSEgm/kwqmQj1Or1l/7bW6mxVK7z4= go.opentelemetry.io/otel/trace v1.37.0/go.mod h1:TlgrlQ+PtQO5XFerSPUYG0JSgGyryXewPGyayAWSBS0= go.opentelemetry.io/proto/otlp v1.7.0 h1:jX1VolD6nHuFzOYso2E73H85i92Mv8JQYk0K9vz09os= @@ -355,8 +351,8 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod 
h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM= golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b h1:M2rDM6z3Fhozi9O7NWsxAkg/yqS/lQJ6PmkyIV3YP+o= -golang.org/x/exp v0.0.0-20250620022241-b7579e27df2b/go.mod h1:3//PLf8L/X+8b4vuAfHzxeRUl04Adcb341+IGKfnqS8= +golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 h1:y5zboxd6LQAqYIhHnB48p0ByQ/GnQx2BE33L8BOHQkI= +golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ= golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU= golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= @@ -457,20 +453,20 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.33.2 h1:YgwIS5jKfA+BZg//OQhkJNIfie/kmRsO0BmNaVSimvY= -k8s.io/api v0.33.2/go.mod h1:fhrbphQJSM2cXzCWgqU29xLDuks4mu7ti9vveEnpSXs= +k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= +k8s.io/api v0.32.3/go.mod h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= -k8s.io/apimachinery v0.33.2 h1:IHFVhqg59mb8PJWTLi8m1mAoepkUNYmptHsV+Z1m5jY= -k8s.io/apimachinery v0.33.2/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= -k8s.io/client-go v0.33.2 h1:z8CIcc0P581x/J1ZYf4CNzRKxRvQAwoAolYPbtQes+E= -k8s.io/client-go v0.33.2/go.mod h1:9mCgT4wROvL948w6f6ArJNb7yQd7QsvqavDeZHvNmHo= +k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= +k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= +k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= +k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397 h1:hwvWFiBzdWw1FhfY1FooPn3kzWuJ8tmbZBHi4zVsl1Y= -k8s.io/utils v0.0.0-20250604170112-4c0f3b243397/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= modernc.org/libc v1.65.10 h1:ZwEk8+jhW7qBjHIT+wd0d9VjitRyQef9BnzlzGwMODc= modernc.org/libc v1.65.10/go.mod h1:StFvYpx7i/mXtBAfVOjaU0PWZOvIRoZSgXhrwXzr8Po= modernc.org/mathutil v1.7.1 h1:GCZVGXdaN8gTqB1Mf/usp1Y/hSqgI2vAGGP4jZMCxOU= @@ -479,10 +475,12 @@ modernc.org/memory 
v1.11.0 h1:o4QC8aMQzmcwCK3t3Ux/ZHmwFPzE6hf2Y5LbkRs+hbI= modernc.org/memory v1.11.0/go.mod h1:/JP4VbVC+K5sU2wZi9bHoq2MAkCnrt2r98UGeSK7Mjw= modernc.org/sqlite v1.38.0 h1:+4OrfPQ8pxHKuWG4md1JpR/EYAh3Md7TdejuuzE7EUI= modernc.org/sqlite v1.38.0/go.mod h1:1Bj+yES4SVvBZ4cBOpVZ6QgesMCKpJZDq0nxYzOpmNE= -sigs.k8s.io/controller-runtime v0.21.0 h1:CYfjpEuicjUecRk+KAeyYh+ouUBn4llGyDYytIGcJS8= -sigs.k8s.io/controller-runtime v0.21.0/go.mod h1:OSg14+F65eWqIu4DceX7k/+QRAbTTvxeQSNSOQpukWM= +sigs.k8s.io/controller-runtime v0.20.4 h1:X3c+Odnxz+iPTRobG4tp092+CvBU9UK0t/bRf+n0DGU= +sigs.k8s.io/controller-runtime v0.20.4/go.mod h1:xg2XB0K5ShQzAgsoujxuKN4LNXR2LfwwHsPj7Iaw+XY= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/multicluster-runtime v0.20.4-alpha.7 h1:AFlM/TFQaESxtCRX6scodEKensLhcbfGwXfjJIvoaT8= +sigs.k8s.io/multicluster-runtime v0.20.4-alpha.7/go.mod h1:2N2/c3p08bYC9eDaRs0dllTxgAm5xiLDSkmGZpWKyw4= sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= From 8c656d1fc7d70add210cd56e7402a482fe167346 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Sat, 5 Jul 2025 11:07:53 +0200 Subject: [PATCH 02/16] refactor: adjust to main changes --- controller/lifecycle/lifecycle.go | 4 ++-- .../lifecycle/multicluster/lifecycle.go | 24 ++++++++++++------- controller/testSupport/multicluster.go | 2 +- 3 files changed, 19 insertions(+), 11 deletions(-) diff --git a/controller/lifecycle/lifecycle.go b/controller/lifecycle/lifecycle.go index 9e70870..5ff7304 100644 --- a/controller/lifecycle/lifecycle.go +++ b/controller/lifecycle/lifecycle.go @@ -27,7 +27,7 @@ import ( "github.com/platform-mesh/golang-commons/sentry" ) -func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtimeobject.RuntimeObject, cl client.Client, l Lifecycle) (ctrl.Result, error) { +func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtimeobject.RuntimeObject, cl client.Client, l api.Lifecycle) (ctrl.Result, error) { ctx, span := otel.Tracer(l.Config().OperatorName).Start(ctx, fmt.Sprintf("%s.Reconcile", l.Config().ControllerName)) defer span.End() @@ -367,7 +367,7 @@ func HandleOperatorError(ctx context.Context, operatorError errors.OperatorError return ctrl.Result{}, nil } -func ValidateInterfaces(instance runtimeobject.RuntimeObject, log *logger.Logger, l Lifecycle) error { +func ValidateInterfaces(instance runtimeobject.RuntimeObject, log *logger.Logger, l api.Lifecycle) error { if l.Spreader() != nil { _, err := l.Spreader().ToRuntimeObjectSpreadReconcileStatusInterface(instance, log) if err != nil { diff --git a/controller/lifecycle/multicluster/lifecycle.go b/controller/lifecycle/multicluster/lifecycle.go index 4aadd3f..bbf6638 100644 --- a/controller/lifecycle/multicluster/lifecycle.go +++ b/controller/lifecycle/multicluster/lifecycle.go @@ -15,6 +15,7 @@ import ( "github.com/platform-mesh/golang-commons/controller/filter" "github.com/platform-mesh/golang-commons/controller/lifecycle" + "github.com/platform-mesh/golang-commons/controller/lifecycle/api" "github.com/platform-mesh/golang-commons/controller/lifecycle/conditions" 
"github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" "github.com/platform-mesh/golang-commons/controller/lifecycle/spread" @@ -29,11 +30,11 @@ type ClusterGetter interface { type LifecycleManager struct { log *logger.Logger mgr ClusterGetter - config lifecycle.Config + config api.Config subroutines []subroutine.Subroutine spreader *spread.Spreader conditionsManager *conditions.ConditionManager - prepareContextFunc lifecycle.PrepareContextFunc + prepareContextFunc api.PrepareContextFunc } func NewLifecycleManager(log *logger.Logger, operatorName string, controllerName string, mgr ClusterGetter, subroutines []subroutine.Subroutine) *LifecycleManager { @@ -42,14 +43,14 @@ func NewLifecycleManager(log *logger.Logger, operatorName string, controllerName log: log, mgr: mgr, subroutines: subroutines, - config: lifecycle.Config{ + config: api.Config{ OperatorName: operatorName, ControllerName: controllerName, }, } } -func (l *LifecycleManager) Config() lifecycle.Config { +func (l *LifecycleManager) Config() api.Config { return l.config } func (l *LifecycleManager) Log() *logger.Logger { @@ -58,13 +59,20 @@ func (l *LifecycleManager) Log() *logger.Logger { func (l *LifecycleManager) Subroutines() []subroutine.Subroutine { return l.subroutines } -func (l *LifecycleManager) PrepareContextFunc() lifecycle.PrepareContextFunc { +func (l *LifecycleManager) PrepareContextFunc() api.PrepareContextFunc { return l.prepareContextFunc } -func (l *LifecycleManager) ConditionsManager() *conditions.ConditionManager { +func (l *LifecycleManager) ConditionsManager() api.ConditionManager { + // it is important to return nil unsted of a nil pointer to the interface to avoid misbehaving nil checks + if l.conditionsManager == nil { + return nil + } return l.conditionsManager } -func (l *LifecycleManager) Spreader() *spread.Spreader { +func (l *LifecycleManager) Spreader() api.SpreadManager { // it is important to return nil unsted of a nil pointer to the interface to avoid misbehaving nil checks + if l.spreader == nil { + return nil + } return l.spreader } func (l *LifecycleManager) Reconcile(ctx context.Context, req mcreconcile.Request, instance runtimeobject.RuntimeObject) (ctrl.Result, error) { @@ -106,7 +114,7 @@ func (l *LifecycleManager) SetupWithManager(mgr mcmanager.Manager, maxReconciles // WithPrepareContextFunc allows to set a function that prepares the context before each reconciliation // This can be used to add additional information to the context that is needed by the subroutines // You need to return a new context and an OperatorError in case of an error -func (l *LifecycleManager) WithPrepareContextFunc(prepareFunction lifecycle.PrepareContextFunc) *LifecycleManager { +func (l *LifecycleManager) WithPrepareContextFunc(prepareFunction api.PrepareContextFunc) *LifecycleManager { l.prepareContextFunc = prepareFunction return l } diff --git a/controller/testSupport/multicluster.go b/controller/testSupport/multicluster.go index a9881ed..1fc7fa0 100644 --- a/controller/testSupport/multicluster.go +++ b/controller/testSupport/multicluster.go @@ -1,4 +1,4 @@ -package testing +package testSupport import ( "context" From c0ce1ca3badea4daa6be6b92cb8351ecfb355f16 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Sat, 5 Jul 2025 11:08:23 +0200 Subject: [PATCH 03/16] refactor: adjust to main changes --- controller/testSupport/multicluster.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/controller/testSupport/multicluster.go 
b/controller/testSupport/multicluster.go index 1fc7fa0..3b5494b 100644 --- a/controller/testSupport/multicluster.go +++ b/controller/testSupport/multicluster.go @@ -15,7 +15,7 @@ import ( type FakeManager struct{ Client client.Client } -func (f *FakeManager) GetCluster(ctx context.Context, clusterName string) (cluster.Cluster, error) { +func (f *FakeManager) GetCluster(context.Context, string) (cluster.Cluster, error) { return &FakeCluster{client: f.Client}, nil } @@ -47,7 +47,7 @@ func (f FakeCluster) GetFieldIndexer() client.FieldIndexer { return nil } -func (f FakeCluster) GetEventRecorderFor(name string) record.EventRecorder { +func (f FakeCluster) GetEventRecorderFor(string) record.EventRecorder { return nil } @@ -59,6 +59,6 @@ func (f FakeCluster) GetAPIReader() client.Reader { return nil } -func (f FakeCluster) Start(ctx context.Context) error { +func (f FakeCluster) Start(context.Context) error { return nil } From 0108706fd22b7236ce7f30bb203c6e5032b75020 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Sat, 5 Jul 2025 11:15:51 +0200 Subject: [PATCH 04/16] refactor: lint warnings --- controller/lifecycle/lifecycle_test.go | 2 +- .../lifecycle/multicluster/lifecycle_test.go | 219 +++++++++--------- 2 files changed, 110 insertions(+), 111 deletions(-) diff --git a/controller/lifecycle/lifecycle_test.go b/controller/lifecycle/lifecycle_test.go index 31ae5bb..6e8a779 100644 --- a/controller/lifecycle/lifecycle_test.go +++ b/controller/lifecycle/lifecycle_test.go @@ -43,7 +43,7 @@ func TestLifecycle(t *testing.T) { mgr := pmtesting.TestLifecycleManager{Logger: log} // Act - result, err := Reconcile(ctx, request, &pmtesting.TestApiObject{}, fakeClient, mgr) + result, err := Reconcile(ctx, request.NamespacedName, &pmtesting.TestApiObject{}, fakeClient, mgr) // Assert assert.NoError(t, err) diff --git a/controller/lifecycle/multicluster/lifecycle_test.go b/controller/lifecycle/multicluster/lifecycle_test.go index b9ece2b..e713455 100644 --- a/controller/lifecycle/multicluster/lifecycle_test.go +++ b/controller/lifecycle/multicluster/lifecycle_test.go @@ -21,8 +21,7 @@ import ( "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" "github.com/platform-mesh/golang-commons/controller/lifecycle/spread" "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" - pmtesting "github.com/platform-mesh/golang-commons/controller/lifecycle/testing" - "github.com/platform-mesh/golang-commons/controller/testSupport" + pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport" operrors "github.com/platform-mesh/golang-commons/errors" "github.com/platform-mesh/golang-commons/logger/testlogger" "github.com/platform-mesh/golang-commons/sentry" @@ -39,7 +38,7 @@ func TestLifecycle(t *testing.T) { }, }, } - testApiObject := &testSupport.TestApiObject{ + testApiObject := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -49,12 +48,12 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with a not found object", func(t *testing.T) { // Arrange - fakeClient := testSupport.CreateFakeClient(t, &testSupport.TestApiObject{}) + fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) // Act - result, err := mgr.Reconcile(ctx, request, &testSupport.TestApiObject{}) + result, err := mgr.Reconcile(ctx, request, &pmtesting.TestApiObject{}) // Assert assert.NoError(t, err) @@ -67,14 +66,14 @@ func 
TestLifecycle(t *testing.T) { }) t.Run("Lifecycle with a finalizer - add finalizer", func(t *testing.T) { // Arrange - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ @@ -92,7 +91,7 @@ func TestLifecycle(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -101,7 +100,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ @@ -119,7 +118,7 @@ func TestLifecycle(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -128,7 +127,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ @@ -148,7 +147,7 @@ func TestLifecycle(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -157,7 +156,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ @@ -177,7 +176,7 @@ func TestLifecycle(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} finalizers := []string{"other-finalizer"} - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -186,7 +185,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ @@ -204,7 +203,7 @@ func TestLifecycle(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -213,7 +212,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ @@ -230,14 +229,14 @@ func TestLifecycle(t *testing.T) { }) t.Run("Lifecycle without changing status", func(t *testing.T) { // Arrange - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: 
name, Namespace: namespace, }, - Status: testSupport.TestStatus{Some: "string"}, + Status: pmtesting.TestStatus{Some: "string"}, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) @@ -256,15 +255,15 @@ func TestLifecycle(t *testing.T) { }) t.Run("Lifecycle with changing status", func(t *testing.T) { // Arrange - instance := &testSupport.TestApiObject{ + instance := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, - Status: testSupport.TestStatus{Some: "string"}, + Status: pmtesting.TestStatus{Some: "string"}, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, log := createLifecycleManager([]subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ @@ -287,7 +286,7 @@ func TestLifecycle(t *testing.T) { assert.Equal(t, logMessages[3].Message, "processed instance") assert.Equal(t, logMessages[4].Message, "end subroutine") - serverObject := &testSupport.TestApiObject{} + serverObject := &pmtesting.TestApiObject{} err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) assert.NoError(t, err) assert.Equal(t, serverObject.Status.Some, "other string") @@ -295,20 +294,20 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ @@ -326,7 +325,7 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles on deleted object", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -334,7 +333,7 @@ func TestLifecycle(t *testing.T) { DeletionTimestamp: &metav1.Time{Time: time.Now()}, Finalizers: []string{pmtesting.ChangeStatusSubroutineFinalizer}, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 2, NextReconcileTime: metav1.Time{Time: time.Now().Add(2 * time.Hour)}, @@ -342,7 +341,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ @@ -361,13 +360,13 @@ func TestLifecycle(t *testing.T) { // Arrange nextReconcileTime := metav1.NewTime(time.Now().Add(1 * time.Hour)) instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 1, NextReconcileTime: nextReconcileTime, @@ -375,7 +374,7 @@ func TestLifecycle(t *testing.T) { }, } - 
fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) mgr.WithSpreadingReconciles() @@ -390,20 +389,20 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles and processing fails (no-retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: false, RequeAfter: false}}, fakeClient) mgr.WithSpreadingReconciles() @@ -417,20 +416,20 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles and processing fails (retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) mgr.WithSpreadingReconciles() @@ -444,20 +443,20 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles and processing needs requeue", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) mgr.WithSpreadingReconciles() @@ -471,19 +470,19 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles and processing needs requeueAfter", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) mgr.WithSpreadingReconciles() @@ -502,13 +501,13 @@ func TestLifecycle(t *testing.T) { Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", 
ObservedGeneration: 0, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ @@ -525,8 +524,8 @@ func TestLifecycle(t *testing.T) { //t.Run("Should setup with manager", func(t *testing.T) { // // Arrange - // instance := &testSupport.TestApiObject{} - // fakeClient := testSupport.CreateFakeClient(t, instance) + // instance := &pmtesting.TestApiObject{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) // log, err := logger.New(logger.DefaultConfig()) // assert.NoError(t, err) // provider, err := apiexport.New(&rest.Config{}, apiexport.Options{}) @@ -549,7 +548,7 @@ func TestLifecycle(t *testing.T) { //t.Run("Should setup with manager not implementing interface", func(t *testing.T) { // // Arrange // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := testSupport.CreateFakeClient(t, instance) + // fakeClient := pmtesting.CreateFakeClient(t, instance) // log, err := logger.New(logger.DefaultConfig()) // assert.NoError(t, err) // m, err := manager.New(&rest.Config{}, manager.Options{ @@ -573,21 +572,21 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles and refresh label", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, Labels: map[string]string{spread.ReconcileRefreshLabel: "true"}, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 1, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) lm, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ @@ -627,17 +626,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) mgr.WithConditionManagement() @@ -655,17 +654,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with subroutine", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ChangeStatusSubroutine{ Client: fakeClient, @@ -688,17 +687,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + 
TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) mgr.WithConditionManagement() @@ -723,17 +722,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) mgr.WithConditionManagement() @@ -758,13 +757,13 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions (update)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Conditions: []metav1.Condition{ { Type: "test", @@ -777,7 +776,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) mgr.WithConditionManagement() @@ -802,13 +801,13 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Conditions: []metav1.Condition{ { Type: conditions.ConditionReady, @@ -821,7 +820,7 @@ func TestLifecycle(t *testing.T) { }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) mgr.WithConditionManagement() @@ -846,17 +845,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := 
pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) @@ -874,17 +873,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ @@ -908,7 +907,7 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions finalizes with multiple subroutines partially succeeding", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -916,11 +915,11 @@ func TestLifecycle(t *testing.T) { DeletionTimestamp: &metav1.Time{Time: time.Now()}, Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer, pmtesting.ChangeStatusSubroutineFinalizer}, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FailureScenarioSubroutine{}, @@ -946,17 +945,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) @@ -978,17 +977,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) @@ -1010,17 +1009,17 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: 
testSupport.TestStatus{}, + Status: pmtesting.TestStatus{}, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) @@ -1047,13 +1046,13 @@ func TestLifecycle(t *testing.T) { Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ @@ -1072,7 +1071,7 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with manage conditions failing finalize", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, @@ -1080,14 +1079,14 @@ func TestLifecycle(t *testing.T) { Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer}, DeletionTimestamp: &metav1.Time{Time: time.Now()}, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{}}, fakeClient) mgr.WithConditionManagement() @@ -1102,20 +1101,20 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) mgr.WithSpreadingReconciles() @@ -1136,20 +1135,20 @@ func TestLifecycle(t *testing.T) { t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - TestApiObject: testSupport.TestApiObject{ + TestApiObject: pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, Generation: 1, }, - Status: testSupport.TestStatus{ + Status: pmtesting.TestStatus{ Some: "string", ObservedGeneration: 0, }, }, } - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) mgr.WithSpreadingReconciles() @@ -1170,7 +1169,7 @@ func TestLifecycle(t *testing.T) { //t.Run("Test Lifecycle setupWithManager /w conditions and expecting no error", func(t *testing.T) { // // Arrange // instance := &pmtesting.ImplementConditions{} - // fakeClient := testSupport.CreateFakeClient(t, instance) + // fakeClient := 
pmtesting.CreateFakeClient(t, instance) // // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) // assert.NoError(t, err) @@ -1189,7 +1188,7 @@ func TestLifecycle(t *testing.T) { //t.Run("Test Lifecycle setupWithManager /w conditions and expecting error", func(t *testing.T) { // // Arrange // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := testSupport.CreateFakeClient(t, instance) + // fakeClient := pmtesting.CreateFakeClient(t, instance) // // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) // assert.NoError(t, err) @@ -1208,7 +1207,7 @@ func TestLifecycle(t *testing.T) { //t.Run("Test Lifecycle setupWithManager /w spread and expecting no error", func(t *testing.T) { // // Arrange // instance := &pmtesting.ImplementingSpreadReconciles{} - // fakeClient := testSupport.CreateFakeClient(t, instance) + // fakeClient := pmtesting.CreateFakeClient(t, instance) // // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) // assert.NoError(t, err) @@ -1227,7 +1226,7 @@ func TestLifecycle(t *testing.T) { //t.Run("Test Lifecycle setupWithManager /w spread and expecting a error", func(t *testing.T) { // // Arrange // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := testSupport.CreateFakeClient(t, instance) + // fakeClient := pmtesting.CreateFakeClient(t, instance) // // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) // assert.NoError(t, err) @@ -1248,7 +1247,7 @@ func TestLifecycle(t *testing.T) { t.Run("Should handle an operator error with retry and sentry", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{} - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) ctx = sentry.ContextWithSentryTags(ctx, map[string]string{}) @@ -1269,7 +1268,7 @@ func TestLifecycle(t *testing.T) { t.Run("Should handle an operator error without retry", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{} - fakeClient := testSupport.CreateFakeClient(t, instance) + fakeClient := pmtesting.CreateFakeClient(t, instance) _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) @@ -1291,7 +1290,7 @@ func TestLifecycle(t *testing.T) { // Arrange ctx := context.Background() - fakeClient := testSupport.CreateFakeClient(t, testApiObject) + fakeClient := pmtesting.CreateFakeClient(t, testApiObject) lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { @@ -1317,7 +1316,7 @@ func TestLifecycle(t *testing.T) { // Arrange ctx := context.Background() - fakeClient := testSupport.CreateFakeClient(t, testApiObject) + fakeClient := pmtesting.CreateFakeClient(t, testApiObject) lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { @@ -1341,7 +1340,7 @@ func TestLifecycle(t *testing.T) { // Test LifecycleManager.WithConditionManagement func TestLifecycleManager_WithConditionManagement(t *testing.T) { // Given - fakeClient := testSupport.CreateFakeClient(t, &testSupport.TestApiObject{}) + 
fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) clusterGetter := &pmtesting.FakeManager{Client: fakeClient} _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) @@ -1357,7 +1356,7 @@ type testReconciler struct { } func (r *testReconciler) Reconcile(ctx context.Context, req mcreconcile.Request) (controllerruntime.Result, error) { - return r.lifecycleManager.Reconcile(ctx, req, &testSupport.TestApiObject{}) + return r.lifecycleManager.Reconcile(ctx, req, &pmtesting.TestApiObject{}) } func createLifecycleManager(subroutines []subroutine.Subroutine, client client.Client) (*LifecycleManager, *testlogger.TestLogger) { From 8cb35773f38f505b5993092fb0907c72c964bd80 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 06:59:49 +0200 Subject: [PATCH 05/16] test: refactoring for unittests --- controller/lifecycle/api/api.go | 5 +- .../lifecycle/controllerruntime/lifecycle.go | 4 - .../controllerruntime/lifecycle_test.go | 19 - controller/lifecycle/lifecycle.go | 16 +- controller/lifecycle/lifecycle_test.go | 1280 ++++++++++++++++- .../lifecycle/multicluster/lifecycle.go | 4 +- .../lifecycle/multicluster/lifecycle_test.go | 1258 +--------------- controller/lifecycle/spread/spread.go | 30 +- controller/lifecycle/spread/spread_test.go | 98 +- controller/lifecycle/util/convert.go | 30 + controller/lifecycle/util/convert_test.go | 49 + controller/testSupport/lifecycle.go | 93 +- controller/testSupport/provider.go | 27 + 13 files changed, 1598 insertions(+), 1315 deletions(-) create mode 100644 controller/lifecycle/util/convert.go create mode 100644 controller/lifecycle/util/convert_test.go create mode 100644 controller/testSupport/provider.go diff --git a/controller/lifecycle/api/api.go b/controller/lifecycle/api/api.go index 32c6b52..e630f5a 100644 --- a/controller/lifecycle/api/api.go +++ b/controller/lifecycle/api/api.go @@ -44,9 +44,8 @@ type RuntimeObjectConditions interface { } type SpreadManager interface { - ToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (RuntimeObjectSpreadReconcileStatus, error) - MustToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) RuntimeObjectSpreadReconcileStatus - OnNextReconcile(instanceStatusObj RuntimeObjectSpreadReconcileStatus, log *logger.Logger) (ctrl.Result, error) + ReconcileRequired(instance runtimeobject.RuntimeObject, log *logger.Logger) bool + OnNextReconcile(instance runtimeobject.RuntimeObject, log *logger.Logger) (ctrl.Result, error) RemoveRefreshLabelIfExists(instance runtimeobject.RuntimeObject) bool SetNextReconcileTime(instanceStatusObj RuntimeObjectSpreadReconcileStatus, log *logger.Logger) UpdateObservedGeneration(instanceStatusObj RuntimeObjectSpreadReconcileStatus, log *logger.Logger) diff --git a/controller/lifecycle/controllerruntime/lifecycle.go b/controller/lifecycle/controllerruntime/lifecycle.go index 2c71c16..83d1928 100644 --- a/controller/lifecycle/controllerruntime/lifecycle.go +++ b/controller/lifecycle/controllerruntime/lifecycle.go @@ -63,7 +63,6 @@ func (l *LifecycleManager) ConditionsManager() api.ConditionManager { } return l.conditionsManager } - func (l *LifecycleManager) Spreader() api.SpreadManager { // it is important to return nil unsted of a nil pointer to the interface to avoid misbehaving nil checks if l.spreader == nil { @@ -71,11 +70,9 @@ func (l *LifecycleManager) Spreader() api.SpreadManager { } return l.spreader } - func (l 
*LifecycleManager) Reconcile(ctx context.Context, req ctrl.Request, instance runtimeobject.RuntimeObject) (ctrl.Result, error) { return lifecycle.Reconcile(ctx, req.NamespacedName, instance, l.client, l) } - func (l *LifecycleManager) SetupWithManagerBuilder(mgr ctrl.Manager, maxReconciles int, reconcilerName string, instance runtimeobject.RuntimeObject, debugLabelValue string, log *logger.Logger, eventPredicates ...predicate.Predicate) (*builder.Builder, error) { if err := lifecycle.ValidateInterfaces(instance, log, l); err != nil { return nil, err @@ -92,7 +89,6 @@ func (l *LifecycleManager) SetupWithManagerBuilder(mgr ctrl.Manager, maxReconcil WithOptions(controller.Options{MaxConcurrentReconciles: maxReconciles}). WithEventFilter(predicate.And(eventPredicates...)), nil } - func (l *LifecycleManager) SetupWithManager(mgr ctrl.Manager, maxReconciles int, reconcilerName string, instance runtimeobject.RuntimeObject, debugLabelValue string, r reconcile.Reconciler, log *logger.Logger, eventPredicates ...predicate.Predicate) error { b, err := l.SetupWithManagerBuilder(mgr, maxReconciles, reconcilerName, instance, debugLabelValue, log, eventPredicates...) if err != nil { diff --git a/controller/lifecycle/controllerruntime/lifecycle_test.go b/controller/lifecycle/controllerruntime/lifecycle_test.go index 587d3ed..7ceaa8d 100644 --- a/controller/lifecycle/controllerruntime/lifecycle_test.go +++ b/controller/lifecycle/controllerruntime/lifecycle_test.go @@ -47,25 +47,6 @@ func TestLifecycle(t *testing.T) { } ctx := context.Background() - t.Run("Lifecycle with a not found object", func(t *testing.T) { - // Arrange - fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) - - mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - - // Act - result, err := mgr.Reconcile(ctx, request, &pmtesting.TestApiObject{}) - - // Assert - assert.NoError(t, err) - assert.NotNil(t, result) - logMessages, err := log.GetLogMessages() - assert.NoError(t, err) - assert.Equal(t, len(logMessages), 2) - assert.Equal(t, logMessages[0].Message, "start reconcile") - assert.Contains(t, logMessages[1].Message, "instance not found") - }) - t.Run("Lifecycle with a finalizer - add finalizer", func(t *testing.T) { // Arrange instance := &pmtesting.TestApiObject{ diff --git a/controller/lifecycle/lifecycle.go b/controller/lifecycle/lifecycle.go index 5ff7304..a509942 100644 --- a/controller/lifecycle/lifecycle.go +++ b/controller/lifecycle/lifecycle.go @@ -7,7 +7,6 @@ import ( "github.com/google/uuid" "go.opentelemetry.io/otel" - "golang.org/x/exp/maps" "k8s.io/apimachinery/pkg/api/equality" kerrors "k8s.io/apimachinery/pkg/api/errors" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" @@ -20,8 +19,8 @@ import ( "github.com/platform-mesh/golang-commons/controller/lifecycle/api" "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" - "github.com/platform-mesh/golang-commons/controller/lifecycle/spread" "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" + "github.com/platform-mesh/golang-commons/controller/lifecycle/util" "github.com/platform-mesh/golang-commons/errors" "github.com/platform-mesh/golang-commons/logger" "github.com/platform-mesh/golang-commons/sentry" @@ -57,15 +56,10 @@ func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtime generationChanged := true if l.Spreader() != nil && instance.GetDeletionTimestamp().IsZero() { - instanceStatusObj := l.Spreader().MustToRuntimeObjectSpreadReconcileStatusInterface(instance, log) - 
generationChanged = instance.GetGeneration() != instanceStatusObj.GetObservedGeneration() - isAfterNextReconcileTime := v1.Now().UTC().After(instanceStatusObj.GetNextReconcileTime().UTC()) - refreshRequested := slices.Contains(maps.Keys(instance.GetLabels()), spread.ReconcileRefreshLabel) - - reconcileRequired := generationChanged || isAfterNextReconcileTime || refreshRequested + reconcileRequired := l.Spreader().ReconcileRequired(instance, log) if !reconcileRequired { log.Info().Msg("skipping reconciliation, spread reconcile is active. No processing needed") - return l.Spreader().OnNextReconcile(instanceStatusObj, log) + return l.Spreader().OnNextReconcile(instance, log) } } @@ -305,7 +299,7 @@ func HandleClientError(msg string, log *logger.Logger, err error, generationChan func MarkResourceAsFinal(instance runtimeobject.RuntimeObject, log *logger.Logger, conditions []v1.Condition, status v1.ConditionStatus, l api.Lifecycle) { if l.Spreader() != nil && instance.GetDeletionTimestamp().IsZero() { - instanceStatusObj := l.Spreader().MustToRuntimeObjectSpreadReconcileStatusInterface(instance, log) + instanceStatusObj := util.MustToInterface[api.RuntimeObjectSpreadReconcileStatus](instance, log) l.Spreader().SetNextReconcileTime(instanceStatusObj, log) l.Spreader().UpdateObservedGeneration(instanceStatusObj, log) } @@ -369,7 +363,7 @@ func HandleOperatorError(ctx context.Context, operatorError errors.OperatorError func ValidateInterfaces(instance runtimeobject.RuntimeObject, log *logger.Logger, l api.Lifecycle) error { if l.Spreader() != nil { - _, err := l.Spreader().ToRuntimeObjectSpreadReconcileStatusInterface(instance, log) + _, err := util.ToInterface[api.RuntimeObjectSpreadReconcileStatus](instance, log) if err != nil { return err } diff --git a/controller/lifecycle/lifecycle_test.go b/controller/lifecycle/lifecycle_test.go index 6e8a779..40e7032 100644 --- a/controller/lifecycle/lifecycle_test.go +++ b/controller/lifecycle/lifecycle_test.go @@ -2,15 +2,20 @@ package lifecycle import ( "context" + "fmt" "testing" + "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" "k8s.io/apimachinery/pkg/api/errors" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" "github.com/platform-mesh/golang-commons/controller/lifecycle/mocks" + "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport" "github.com/platform-mesh/golang-commons/logger" ) @@ -24,17 +29,11 @@ func TestLifecycle(t *testing.T) { Name: name, }, } + ctx := context.Background() logcfg := logger.DefaultConfig() logcfg.NoJSON = true log, err := logger.New(logcfg) assert.NoError(t, err) - //testApiObject := &pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // }, - //} - ctx := context.Background() t.Run("Lifecycle with a not found object", func(t *testing.T) { // Arrange @@ -51,6 +50,1273 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) }) + t.Run("Lifecycle with a finalizer - add finalizer", func(t *testing.T) { + // Arrange + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: 
fakeClient, + }}, + } + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + }) + + t.Run("Lifecycle with a finalizer - finalization", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + }, + }} + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, 0, len(instance.Finalizers)) + }) + + t.Run("Lifecycle with a finalizer - finalization(requeue)", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + RequeueAfter: 1 * time.Second, + }, + }} + + // Act + res, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + assert.Equal(t, time.Duration(1*time.Second), res.RequeueAfter) + }) + + t.Run("Lifecycle with a finalizer - finalization(requeueAfter)", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + RequeueAfter: 2 * time.Second, + }, + }} + + // Act + res, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + assert.NoError(t, err) + + assert.Equal(t, 1, len(instance.Finalizers)) + + assert.Equal(t, 2*time.Second, res.RequeueAfter) + }) + + t.Run("Lifecycle with a finalizer - skip finalization if the finalizer is not in there", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + finalizers := []string{"other-finalizer"} + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + }, + }} + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + }) + + t.Run("Lifecycle with a finalizer - failing finalization subroutine", func(t *testing.T) { + // Arrange + now := &metav1.Time{Time: time.Now()} + 
finalizers := []string{pmtesting.SubroutineFinalizer} + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + DeletionTimestamp: now, + Finalizers: finalizers, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FinalizerSubroutine{ + Client: fakeClient, + Err: fmt.Errorf("some error"), + }, + }} + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.Error(t, err) + assert.Equal(t, 1, len(instance.Finalizers)) + }) + + t.Run("Lifecycle without changing status", func(t *testing.T) { + // Arrange + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: pmtesting.TestStatus{Some: "string"}, + } + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{}} + + // Act + result, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + // Assert + assert.NoError(t, err) + assert.NotNil(t, result) + }) + + t.Run("Lifecycle with changing status", func(t *testing.T) { + // Arrange + instance := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + Status: pmtesting.TestStatus{Some: "string"}, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }} + + // Act + result, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + // Assert + assert.NoError(t, err) + assert.NotNil(t, result) + assert.NoError(t, err) + + serverObject := &pmtesting.TestApiObject{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) + assert.NoError(t, err) + assert.Equal(t, serverObject.Status.Some, "other string") + }) + + t.Run("Lifecycle with spread reconciles", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }} + mgr.WithSpreadingReconciles() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, instance.Generation, instance.Status.ObservedGeneration) + }) + // + //t.Run("Lifecycle with spread reconciles on deleted object", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 2, + // DeletionTimestamp: &metav1.Time{Time: time.Now()}, + // Finalizers: []string{pmtesting.ChangeStatusSubroutineFinalizer}, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 2, + // NextReconcileTime: metav1.Time{Time: time.Now().Add(2 * time.Hour)}, + // }, + // }, + // } + 
// + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.ChangeStatusSubroutine{ + // Client: fakeClient, + // }, + // }, fakeClient) + // mgr.WithSpreadingReconciles() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // assert.NoError(t, err) + // assert.Len(t, instance.Finalizers, 0) + // + //}) + // + //t.Run("Lifecycle with spread reconciles skips if the generation is the same", func(t *testing.T) { + // // Arrange + // nextReconcileTime := metav1.NewTime(time.Now().Add(1 * time.Hour)) + // instance := &pmtesting.ImplementingSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 1, + // NextReconcileTime: nextReconcileTime, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) + // mgr.WithSpreadingReconciles() + // + // // Act + // result, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + // assert.GreaterOrEqual(t, 12*time.Hour, result.RequeueAfter) + //}) + // + //t.Run("Lifecycle with spread reconciles and processing fails (no-retry)", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: false, RequeAfter: false}}, fakeClient) + // mgr.WithSpreadingReconciles() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + //}) + // + //t.Run("Lifecycle with spread reconciles and processing fails (retry)", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) + // mgr.WithSpreadingReconciles() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.Error(t, err) + // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + //}) + // + //t.Run("Lifecycle with spread reconciles and processing needs requeue", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", 
+ // ObservedGeneration: 0, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + // mgr.WithSpreadingReconciles() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + //}) + // + //t.Run("Lifecycle with spread reconciles and processing needs requeueAfter", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // }, + // } + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + // mgr.WithSpreadingReconciles() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + //}) + // + //t.Run("Lifecycle with spread not implementing the interface", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.ChangeStatusSubroutine{ + // Client: fakeClient, + // }, + // }, fakeClient) + // mgr.WithSpreadingReconciles() + // + // // Act + // assert.Panics(t, func() { + // _, _ = mgr.Reconcile(ctx, request, instance) + // }) + //}) + // + //t.Run("Should setup with manager", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.TestApiObject{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // log, err := logger.New(logger.DefaultConfig()) + // assert.NoError(t, err) + // m, err := manager.New(&rest.Config{}, manager.Options{ + // Scheme: fakeClient.Scheme(), + // }) + // assert.NoError(t, err) + // + // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + // tr := &testReconciler{ + // lifecycleManager: lm, + // } + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) + // + // // Assert + // assert.NoError(t, err) + //}) + // + //t.Run("Should setup with manager not implementing interface", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // log, err := logger.New(logger.DefaultConfig()) + // assert.NoError(t, err) + // m, err := manager.New(&rest.Config{}, manager.Options{ + // Scheme: fakeClient.Scheme(), + // }) + // assert.NoError(t, err) + // + // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + // lm.WithSpreadingReconciles() + // tr := &testReconciler{ + // lifecycleManager: lm, + // } + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler", instance, 
"test", tr, log) + // + // // Assert + // assert.Error(t, err) + //}) + // + //t.Run("Lifecycle with spread reconciles and refresh label", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // Labels: map[string]string{spread.ReconcileRefreshLabel: "true"}, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 1, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // lm, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.ChangeStatusSubroutine{ + // Client: fakeClient, + // }, + // }, fakeClient) + // lm.WithSpreadingReconciles() + // + // // Act + // _, err := lm.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + // + // serverObject := &pmtesting.ImplementingSpreadReconciles{} + // err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) + // assert.NoError(t, err) + // assert.Equal(t, serverObject.Status.Some, "other string") + // _, ok := serverObject.Labels[spread.ReconcileRefreshLabel] + // assert.False(t, ok) + //}) + // + //t.Run("Should handle a client error", func(t *testing.T) { + // // Arrange + // _, log := createLifecycleManager([]subroutine.Subroutine{}, nil) + // testErr := fmt.Errorf("test error") + // + // // Act + // result, err := lifecycle.HandleClientError("test", log.Logger, testErr, true, sentry.Tags{}) + // + // // Assert + // assert.Error(t, err) + // assert.Equal(t, testErr, err) + // assert.Equal(t, controllerruntime.Result{}, result) + //}) + // + //t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Len(t, instance.Status.Conditions, 1) + // assert.Equal(t, instance.Status.Conditions[0].Type, conditions.ConditionReady) + // assert.Equal(t, instance.Status.Conditions[0].Status, metav1.ConditionTrue) + // assert.Equal(t, instance.Status.Conditions[0].Message, "The resource is ready") + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with subroutine", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ChangeStatusSubroutine{ + // Client: fakeClient, + // }}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // require.Len(t, instance.Status.Conditions, 2) + // assert.Equal(t, 
conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // require.Len(t, instance.Status.Conditions, 3) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + // assert.Equal(t, "test", instance.Status.Conditions[2].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + // assert.Equal(t, "test", instance.Status.Conditions[2].Message) + // + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // require.Len(t, instance.Status.Conditions, 3) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + // assert.Equal(t, "test", instance.Status.Conditions[2].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + // assert.Equal(t, "test", instance.Status.Conditions[2].Message) + // + //}) + // + //t.Run("Lifecycle with manage 
conditions reconciles with subroutine that adds a condition with preexisting conditions (update)", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Conditions: []metav1.Condition{ + // { + // Type: "test", + // Status: metav1.ConditionFalse, + // Reason: "test", + // Message: "test", + // }, + // }, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // require.Len(t, instance.Status.Conditions, 3) + // assert.Equal(t, "test", instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "test", instance.Status.Conditions[0].Message) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[1].Message) + // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[2].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[2].Message) + // + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Conditions: []metav1.Condition{ + // { + // Type: conditions.ConditionReady, + // Status: metav1.ConditionTrue, + // Message: "The resource is ready!!", + // Reason: conditions.ConditionReady, + // }, + // }, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // require.Len(t, instance.Status.Conditions, 3) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + // assert.Equal(t, "test", instance.Status.Conditions[2].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + // assert.Equal(t, "test", instance.Status.Conditions[2].Message) + // + //}) + // + //t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) 
{ + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // require.Len(t, instance.Status.Conditions, 1) + // assert.Equal(t, "test", instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "test", instance.Status.Conditions[0].Message) + // + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.ChangeStatusSubroutine{ + // Client: fakeClient, + // }}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Len(t, instance.Status.Conditions, 2) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + //}) + // + //t.Run("Lifecycle with manage conditions finalizes with multiple subroutines partially succeeding", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // DeletionTimestamp: &metav1.Time{Time: time.Now()}, + // Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer, pmtesting.ChangeStatusSubroutineFinalizer}, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.FailureScenarioSubroutine{}, + // pmtesting.ChangeStatusSubroutine{Client: fakeClient}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.Error(t, err) + // require.Len(t, instance.Status.Conditions, 3) + // assert.Equal(t, "changeStatus_Finalize", instance.Status.Conditions[0].Type, "") + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The subroutine finalization is complete", instance.Status.Conditions[0].Message) + // assert.Equal(t, "FailureScenarioSubroutine_Finalize", instance.Status.Conditions[1].Type) + // assert.Equal(t, 
metav1.ConditionFalse, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine finalization has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[2].Type) + // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[2].Status) + // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[2].Message) + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Len(t, instance.Status.Conditions, 2) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionUnknown, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is processing", instance.Status.Conditions[1].Message) + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Len(t, instance.Status.Conditions, 2) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + //}) + // + //t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{}, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, 
_ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.Error(t, err) + // assert.Len(t, instance.Status.Conditions, 2) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + //}) + // + //t.Run("Lifecycle with manage conditions not implementing the interface", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ + // pmtesting.ChangeStatusSubroutine{ + // Client: fakeClient, + // }, + // }, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // // So the validation is already happening in SetupWithManager. So we can panic in the reconcile. + // assert.Panics(t, func() { + // _, _ = mgr.Reconcile(ctx, request, instance) + // }) + //}) + // + //t.Run("Lifecycle with manage conditions failing finalize", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer}, + // DeletionTimestamp: &metav1.Time{Time: time.Now()}, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{}}, fakeClient) + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.Error(t, err) + // assert.Equal(t, "FailureScenarioSubroutine", err.Error()) + //}) + // + //t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (retry)", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) + // mgr.WithSpreadingReconciles() + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.Error(t, err) + // assert.Len(t, 
instance.Status.Conditions, 2) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) + // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) + // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + //}) + // + //t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ + // TestApiObject: pmtesting.TestApiObject{ + // ObjectMeta: metav1.ObjectMeta{ + // Name: name, + // Namespace: namespace, + // Generation: 1, + // }, + // Status: pmtesting.TestStatus{ + // Some: "string", + // ObservedGeneration: 0, + // }, + // }, + // } + // + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) + // mgr.WithSpreadingReconciles() + // mgr.WithConditionManagement() + // + // // Act + // _, err := mgr.Reconcile(ctx, request, instance) + // + // assert.NoError(t, err) + // assert.Len(t, instance.Status.Conditions, 2) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) + // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) + // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) + // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + //}) + // + //t.Run("Test Lifecycle setupWithManager /w conditions and expecting no error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithConditionManagement() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler1", instance, "test", tr, log.Logger) + // + // // Assert + // assert.NoError(t, err) + //}) + // + //t.Run("Test Lifecycle setupWithManager /w conditions and expecting error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithConditionManagement() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler2", instance, "test", tr, log.Logger) + // + // // Assert + // assert.Error(t, err) + //}) + // + //t.Run("Test Lifecycle setupWithManager /w spread and expecting no error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementingSpreadReconciles{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // 
lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithSpreadingReconciles() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler3", instance, "test", tr, log.Logger) + // + // // Assert + // assert.NoError(t, err) + //}) + // + //t.Run("Test Lifecycle setupWithManager /w spread and expecting a error", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.NotImplementingSpreadReconciles{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) + // assert.NoError(t, err) + // + // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // lm = lm.WithSpreadingReconciles() + // tr := &testReconciler{lifecycleManager: lm} + // + // // Act + // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log.Logger) + // + // // Assert + // assert.Error(t, err) + //}) + // + //errorMessage := "oh nose" + //t.Run("handleOperatorError", func(t *testing.T) { + // t.Run("Should handle an operator error with retry and sentry", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // ctx = sentry.ContextWithSentryTags(ctx, map[string]string{}) + // + // // Act + // result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), true, true), "handle op error", true, log.Logger) + // + // // Assert + // assert.Error(t, err) + // assert.NotNil(t, result) + // assert.Equal(t, errorMessage, err.Error()) + // + // errorMessages, err := log.GetErrorMessages() + // assert.NoError(t, err) + // assert.Equal(t, errorMessage, *errorMessages[0].Error) + // }) + // + // t.Run("Should handle an operator error without retry", func(t *testing.T) { + // // Arrange + // instance := &pmtesting.ImplementConditions{} + // fakeClient := pmtesting.CreateFakeClient(t, instance) + // + // _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // + // // Act + // result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), false, false), "handle op error", true, log.Logger) + // + // // Assert + // assert.Nil(t, err) + // assert.NotNil(t, result) + // + // errorMessages, err := log.GetErrorMessages() + // assert.NoError(t, err) + // assert.Equal(t, errorMessage, *errorMessages[0].Error) + // }) + //}) + // + //t.Run("Prepare Context", func(t *testing.T) { + // t.Run("Sets a context that can be used in the subroutine", func(t *testing.T) { + // // Arrange + // ctx := context.Background() + // + // fakeClient := pmtesting.CreateFakeClient(t, testApiObject) + // + // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) + // lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { + // return context.WithValue(ctx, pmtesting.ContextValueKey, "valueFromContext"), nil + // }) + // tr := &testReconciler{lifecycleManager: lm} + // result, err := tr.Reconcile(ctx, controllerruntime.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}) + // + // // Then + // assert.NotNil(t, ctx) + // assert.NotNil(t, result) + // assert.NoError(t, err) + // + // err = 
fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, testApiObject) + // assert.NoError(t, err) + // assert.Equal(t, "valueFromContext", testApiObject.Status.Some) + // }) + // + // t.Run("Handles the errors correctly", func(t *testing.T) { + // // Arrange + // ctx := context.Background() + // + // fakeClient := pmtesting.CreateFakeClient(t, testApiObject) + // + // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) + // lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { + // return nil, operrors.NewOperatorError(goerrors.New(errorMessage), true, false) + // }) + // tr := &testReconciler{lifecycleManager: lm} + // result, err := tr.Reconcile(ctx, controllerruntime.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}) + // + // // Then + // assert.NotNil(t, ctx) + // assert.NotNil(t, result) + // assert.Error(t, err) + // }) + //}) } func TestUpdateStatus(t *testing.T) { diff --git a/controller/lifecycle/multicluster/lifecycle.go b/controller/lifecycle/multicluster/lifecycle.go index bbf6638..76ff2aa 100644 --- a/controller/lifecycle/multicluster/lifecycle.go +++ b/controller/lifecycle/multicluster/lifecycle.go @@ -127,12 +127,12 @@ func (l *LifecycleManager) WithReadOnly() *LifecycleManager { } // WithSpreadingReconciles sets the LifecycleManager to spread out the reconciles -func (l *LifecycleManager) WithSpreadingReconciles() *LifecycleManager { +func (l *LifecycleManager) WithSpreadingReconciles() api.Lifecycle { l.spreader = spread.NewSpreader() return l } -func (l *LifecycleManager) WithConditionManagement() *LifecycleManager { +func (l *LifecycleManager) WithConditionManagement() api.Lifecycle { l.conditionsManager = conditions.NewConditionManager() return l } diff --git a/controller/lifecycle/multicluster/lifecycle_test.go b/controller/lifecycle/multicluster/lifecycle_test.go index e713455..3e0307e 100644 --- a/controller/lifecycle/multicluster/lifecycle_test.go +++ b/controller/lifecycle/multicluster/lifecycle_test.go @@ -3,1288 +3,128 @@ package multicluster import ( "context" goerrors "errors" - "fmt" "testing" - "time" + + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + clientgoscheme "k8s.io/client-go/kubernetes/scheme" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" + "k8s.io/client-go/rest" controllerruntime "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" mcreconcile "sigs.k8s.io/multicluster-runtime/pkg/reconcile" - "github.com/platform-mesh/golang-commons/controller/lifecycle" - "github.com/platform-mesh/golang-commons/controller/lifecycle/conditions" "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" - "github.com/platform-mesh/golang-commons/controller/lifecycle/spread" "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport" operrors "github.com/platform-mesh/golang-commons/errors" "github.com/platform-mesh/golang-commons/logger/testlogger" - "github.com/platform-mesh/golang-commons/sentry" ) func TestLifecycle(t *testing.T) { namespace := "bar" name := "foo" - request := mcreconcile.Request{ - Request: 
controllerruntime.Request{ - NamespacedName: types.NamespacedName{ - Namespace: namespace, - Name: name, - }, - }, - } testApiObject := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, } - ctx := context.Background() - - t.Run("Lifecycle with a not found object", func(t *testing.T) { - // Arrange - fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) - - mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - - // Act - result, err := mgr.Reconcile(ctx, request, &pmtesting.TestApiObject{}) - - // Assert - assert.NoError(t, err) - assert.NotNil(t, result) - logMessages, err := log.GetLogMessages() - assert.NoError(t, err) - assert.Equal(t, len(logMessages), 2) - assert.Equal(t, logMessages[0].Message, "start reconcile") - assert.Contains(t, logMessages[1].Message, "instance not found") - }) - t.Run("Lifecycle with a finalizer - add finalizer", func(t *testing.T) { - // Arrange - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - }) - t.Run("Lifecycle with a finalizer - finalization", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 0, len(instance.Finalizers)) - }) - t.Run("Lifecycle with a finalizer - finalization(requeue)", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - RequeueAfter: 1 * time.Second, - }, - }, fakeClient) - - // Act - res, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - assert.Equal(t, time.Duration(1*time.Second), res.RequeueAfter) - }) - t.Run("Lifecycle with a finalizer - finalization(requeueAfter)", func(t *testing.T) { + t.Run("Should setup with manager ok", func(t *testing.T) { // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - RequeueAfter: 2 * 
time.Second, - }, - }, fakeClient) - - // Act - res, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - assert.Equal(t, 2*time.Second, res.RequeueAfter) - }) - t.Run("Lifecycle with a finalizer - skip finalization if the finalizer is not in there", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{"other-finalizer"} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - }) - t.Run("Lifecycle with a finalizer - failing finalization subroutine", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - Err: fmt.Errorf("some error"), - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - }) - t.Run("Lifecycle without changing status", func(t *testing.T) { - // Arrange - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Status: pmtesting.TestStatus{Some: "string"}, - } + instance := &v1.Namespace{} fakeClient := pmtesting.CreateFakeClient(t, instance) mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // Act - result, err := mgr.Reconcile(ctx, request, instance) - - // Assert - assert.NoError(t, err) - assert.NotNil(t, result) - logMessages, err := log.GetLogMessages() - assert.NoError(t, err) - assert.Equal(t, len(logMessages), 3) - assert.Equal(t, logMessages[0].Message, "start reconcile") - assert.Equal(t, logMessages[1].Message, "skipping status update, since they are equal") - assert.Equal(t, logMessages[2].Message, "end reconcile") - }) - t.Run("Lifecycle with changing status", func(t *testing.T) { - // Arrange - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Status: pmtesting.TestStatus{Some: "string"}, + tr := &testReconciler{ + lifecycleManager: mgr, } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, log := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - // Act - result, err := mgr.Reconcile(ctx, request, instance) - - // Assert - assert.NoError(t, err) - assert.NotNil(t, result) - logMessages, err := log.GetLogMessages() + cfg := &rest.Config{} + provider := pmtesting.NewFakeProvider(cfg) + scheme := runtime.NewScheme() + utilruntime.Must(clientgoscheme.AddToScheme(scheme)) + mmanager, err := mcmanager.New(cfg, provider, mcmanager.Options{Scheme: scheme}) assert.NoError(t, err) - assert.Equal(t, len(logMessages), 7) - assert.Equal(t, logMessages[0].Message, 
"start reconcile") - assert.Equal(t, logMessages[1].Message, "start subroutine") - assert.Equal(t, logMessages[2].Message, "processing instance") - assert.Equal(t, logMessages[3].Message, "processed instance") - assert.Equal(t, logMessages[4].Message, "end subroutine") - - serverObject := &pmtesting.TestApiObject{} - err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) - assert.NoError(t, err) - assert.Equal(t, serverObject.Status.Some, "other string") - }) - t.Run("Lifecycle with spread reconciles", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, instance.Generation, instance.Status.ObservedGeneration) - }) - t.Run("Lifecycle with spread reconciles on deleted object", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 2, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{pmtesting.ChangeStatusSubroutineFinalizer}, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 2, - NextReconcileTime: metav1.Time{Time: time.Now().Add(2 * time.Hour)}, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - assert.NoError(t, err) - assert.Len(t, instance.Finalizers, 0) - - }) - t.Run("Lifecycle with spread reconciles skips if the generation is the same", func(t *testing.T) { - // Arrange - nextReconcileTime := metav1.NewTime(time.Now().Add(1 * time.Hour)) - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 1, - NextReconcileTime: nextReconcileTime, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - result, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - assert.GreaterOrEqual(t, 12*time.Hour, result.RequeueAfter) - }) - t.Run("Lifecycle with spread reconciles and processing fails (no-retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := 
pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: false, RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - }) - t.Run("Lifecycle with spread reconciles and processing fails (retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - }) - t.Run("Lifecycle with spread reconciles and processing needs requeue", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - }) - t.Run("Lifecycle with spread reconciles and processing needs requeueAfter", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) + err = mgr.SetupWithManager(mmanager, 0, "testReconciler", instance, "test", tr, log.Logger) + // Assert assert.NoError(t, err) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) }) - t.Run("Lifecycle with spread not implementing the interface", func(t *testing.T) { + t.Run("Should setup with manager not implementing interface", func(t *testing.T) { // Arrange - instance := &pmtesting.NotImplementingSpreadReconciles{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - } - + instance := &pmtesting.NotImplementingSpreadReconciles{} fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) + mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) mgr.WithSpreadingReconciles() - - 
// Act - assert.Panics(t, func() { - _, _ = mgr.Reconcile(ctx, request, instance) - }) - }) - - //t.Run("Should setup with manager", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.TestApiObject{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // log, err := logger.New(logger.DefaultConfig()) - // assert.NoError(t, err) - // provider, err := apiexport.New(&rest.Config{}, apiexport.Options{}) - // assert.NoError(t, err) - // m, err := mcmanager.New(&rest.Config{}, provider, mcmanager.Options{}) - // assert.NoError(t, err) - // - // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - // tr := &testReconciler{ - // lifecycleManager: lm, - // } - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) - // - // // Assert - // assert.NoError(t, err) - //}) - - //t.Run("Should setup with manager not implementing interface", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // log, err := logger.New(logger.DefaultConfig()) - // assert.NoError(t, err) - // m, err := manager.New(&rest.Config{}, manager.Options{ - // Scheme: fakeClient.Scheme(), - // }) - // assert.NoError(t, err) - // - // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - // lm.WithSpreadingReconciles() - // tr := &testReconciler{ - // lifecycleManager: lm, - // } - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) - // - // // Assert - // assert.Error(t, err) - //}) - - t.Run("Lifecycle with spread reconciles and refresh label", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - Labels: map[string]string{spread.ReconcileRefreshLabel: "true"}, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 1, - }, - }, + tr := &testReconciler{ + lifecycleManager: mgr, } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - lm, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - lm.WithSpreadingReconciles() - // Act - _, err := lm.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - - serverObject := &pmtesting.ImplementingSpreadReconciles{} - err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) + cfg := &rest.Config{} + provider := pmtesting.NewFakeProvider(cfg) + mmanager, err := mcmanager.New(cfg, provider, mcmanager.Options{}) assert.NoError(t, err) - assert.Equal(t, serverObject.Status.Some, "other string") - _, ok := serverObject.Labels[spread.ReconcileRefreshLabel] - assert.False(t, ok) - }) - - t.Run("Should handle a client error", func(t *testing.T) { - // Arrange - _, log := createLifecycleManager([]subroutine.Subroutine{}, nil) - testErr := fmt.Errorf("test error") - - // Act - result, err := lifecycle.HandleClientError("test", log.Logger, testErr, true, sentry.Tags{}) + err = mgr.SetupWithManager(mmanager, 0, "testReconciler", instance, "test", tr, log.Logger) // Assert assert.Error(t, err) - assert.Equal(t, testErr, err) - assert.Equal(t, controllerruntime.Result{}, 
result) }) - - t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { + t.Run("Should setup with manager read only", func(t *testing.T) { // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - + instance := &pmtesting.NotImplementingSpreadReconciles{} fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr, _ := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 1) - assert.Equal(t, instance.Status.Conditions[0].Type, conditions.ConditionReady) - assert.Equal(t, instance.Status.Conditions[0].Status, metav1.ConditionTrue) - assert.Equal(t, instance.Status.Conditions[0].Message, "The resource is ready") - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - assert.Equal(t, "test", instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "test", 
instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - assert.Equal(t, "test", instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "test", instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions (update)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Conditions: []metav1.Condition{ - { - Type: "test", - Status: metav1.ConditionFalse, - Reason: "test", - Message: "test", - }, - }, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, "test", instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "test", instance.Status.Conditions[0].Message) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[1].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Conditions: []metav1.Condition{ - { - Type: conditions.ConditionReady, - Status: metav1.ConditionTrue, - Message: "The resource is ready!!", - 
Reason: conditions.ConditionReady, - }, - }, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - assert.Equal(t, "test", instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "test", instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 1) - assert.Equal(t, "test", instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "test", instance.Status.Conditions[0].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, + mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + mgr.WithReadOnly() + tr := &testReconciler{ + lifecycleManager: mgr, } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }}, fakeClient) - mgr.WithConditionManagement() - // Act - _, err := mgr.Reconcile(ctx, request, instance) - + cfg := &rest.Config{} + provider := pmtesting.NewFakeProvider(cfg) + mmanager, err := mcmanager.New(cfg, provider, mcmanager.Options{}) assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions finalizes with 
multiple subroutines partially succeeding", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer, pmtesting.ChangeStatusSubroutineFinalizer}, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{}, - pmtesting.ChangeStatusSubroutine{Client: fakeClient}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) + err = mgr.SetupWithManager(mmanager, 0, "testReconciler", instance, "test", tr, log.Logger) + // Assert assert.Error(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, "changeStatus_Finalize", instance.Status.Conditions[0].Type, "") - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The subroutine finalization is complete", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Finalize", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine finalization has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[2].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[2].Message) }) - - t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { + t.Run("Should fail setup with invalid config", func(t *testing.T) { // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - + instance := &pmtesting.ImplementingSpreadReconciles{} fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionUnknown, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is processing", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, + mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + mgr.WithReadOnly() + 
mgr.WithSpreadingReconciles() + tr := &testReconciler{ + lifecycleManager: mgr, } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - mgr.WithConditionManagement() - // Act - _, err := mgr.Reconcile(ctx, request, instance) - + cfg := &rest.Config{} + provider := pmtesting.NewFakeProvider(cfg) + mmanager, err := mcmanager.New(cfg, provider, mcmanager.Options{}) assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions not implementing the interface", func(t *testing.T) { - // Arrange - instance := &pmtesting.NotImplementingSpreadReconciles{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - mgr.WithConditionManagement() - - // Act - // So the validation is already happening in SetupWithManager. So we can panic in the reconcile. 
- assert.Panics(t, func() { - _, _ = mgr.Reconcile(ctx, request, instance) - }) - }) - - t.Run("Lifecycle with manage conditions failing finalize", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer}, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Equal(t, "FailureScenarioSubroutine", err.Error()) - }) - - t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) + err = mgr.SetupWithManager(mmanager, 0, "testReconciler", instance, "test", tr, log.Logger) + // Assert assert.Error(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - }) - - t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) }) - //t.Run("Test Lifecycle setupWithManager /w 
conditions and expecting no error", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithConditionManagement() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler1", instance, "test", tr, log.Logger) - // - // // Assert - // assert.NoError(t, err) - //}) - - //t.Run("Test Lifecycle setupWithManager /w conditions and expecting error", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithConditionManagement() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler2", instance, "test", tr, log.Logger) - // - // // Assert - // assert.Error(t, err) - //}) - - //t.Run("Test Lifecycle setupWithManager /w spread and expecting no error", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithSpreadingReconciles() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler3", instance, "test", tr, log.Logger) - // - // // Assert - // assert.NoError(t, err) - //}) - - //t.Run("Test Lifecycle setupWithManager /w spread and expecting a error", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithSpreadingReconciles() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log.Logger) - // - // // Assert - // assert.Error(t, err) - //}) - errorMessage := "oh nose" - t.Run("handleOperatorError", func(t *testing.T) { - t.Run("Should handle an operator error with retry and sentry", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{} - fakeClient := pmtesting.CreateFakeClient(t, instance) - - _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - ctx = sentry.ContextWithSentryTags(ctx, map[string]string{}) - - // Act - result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), true, true), "handle op error", true, log.Logger) - - // Assert - assert.Error(t, err) - assert.NotNil(t, result) - assert.Equal(t, errorMessage, err.Error()) - - errorMessages, err := log.GetErrorMessages() - assert.NoError(t, err) - assert.Equal(t, errorMessage, *errorMessages[0].Error) - }) - - 
t.Run("Should handle an operator error without retry", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{} - fakeClient := pmtesting.CreateFakeClient(t, instance) - - _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - - // Act - result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), false, false), "handle op error", true, log.Logger) - - // Assert - assert.Nil(t, err) - assert.NotNil(t, result) - - errorMessages, err := log.GetErrorMessages() - assert.NoError(t, err) - assert.Equal(t, errorMessage, *errorMessages[0].Error) - }) - }) - t.Run("Prepare Context", func(t *testing.T) { t.Run("Sets a context that can be used in the subroutine", func(t *testing.T) { // Arrange diff --git a/controller/lifecycle/spread/spread.go b/controller/lifecycle/spread/spread.go index fdd5edb..1e0686e 100644 --- a/controller/lifecycle/spread/spread.go +++ b/controller/lifecycle/spread/spread.go @@ -1,17 +1,18 @@ package spread import ( - "fmt" "math/rand/v2" + "slices" "time" + "golang.org/x/exp/maps" v1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" "github.com/platform-mesh/golang-commons/controller/lifecycle/api" "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" + "github.com/platform-mesh/golang-commons/controller/lifecycle/util" "github.com/platform-mesh/golang-commons/logger" - "github.com/platform-mesh/golang-commons/sentry" ) const ReconcileRefreshLabel = "platform-mesh.io/refresh-reconcile" @@ -39,7 +40,8 @@ func getNextReconcileTime(maxReconcileTime time.Duration) time.Duration { return time.Duration(jitter+int64(minTime)) * time.Minute } -func (s *Spreader) OnNextReconcile(instanceStatusObj api.RuntimeObjectSpreadReconcileStatus, log *logger.Logger) (ctrl.Result, error) { +func (s *Spreader) OnNextReconcile(instance runtimeobject.RuntimeObject, log *logger.Logger) (ctrl.Result, error) { + instanceStatusObj := util.MustToInterface[api.RuntimeObjectSpreadReconcileStatus](instance, log) requeueAfter := time.Until(instanceStatusObj.GetNextReconcileTime().UTC()) log.Debug().Int64("minutes-till-next-execution", int64(requeueAfter.Minutes())).Msg("Completed reconciliation, no processing needed") return ctrl.Result{RequeueAfter: requeueAfter}, nil @@ -70,21 +72,11 @@ func (s *Spreader) RemoveRefreshLabelIfExists(instance runtimeobject.RuntimeObje return keyCount != len(instance.GetLabels()) } -func (s *Spreader) ToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (api.RuntimeObjectSpreadReconcileStatus, error) { - if obj, ok := instance.(api.RuntimeObjectSpreadReconcileStatus); ok { - return obj, nil - } - err := fmt.Errorf("SpreadReconciles is enabled, but instance does not implement RuntimeObjectSpreadReconcileStatus interface. 
This is a programming error") - log.Error().Err(err).Msg("Failed to cast instance to RuntimeObjectSpreadReconcileStatus") - sentry.CaptureError(err, nil) - return nil, err -} +func (s *Spreader) ReconcileRequired(instance runtimeobject.RuntimeObject, log *logger.Logger) bool { + instanceStatusObj := util.MustToInterface[api.RuntimeObjectSpreadReconcileStatus](instance, log) + generationChanged := instance.GetGeneration() != instanceStatusObj.GetObservedGeneration() + isAfterNextReconcileTime := v1.Now().UTC().After(instanceStatusObj.GetNextReconcileTime().UTC()) + refreshRequested := slices.Contains(maps.Keys(instance.GetLabels()), ReconcileRefreshLabel) -func (s *Spreader) MustToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) api.RuntimeObjectSpreadReconcileStatus { - obj, err := s.ToRuntimeObjectSpreadReconcileStatusInterface(instance, log) - if err == nil { - return obj - } - log.Panic().Err(err).Msg("Failed to cast instance to RuntimeObjectSpreadReconcileStatus") - return nil + return generationChanged || isAfterNextReconcileTime || refreshRequested } diff --git a/controller/lifecycle/spread/spread_test.go b/controller/lifecycle/spread/spread_test.go index de24776..9b7add9 100644 --- a/controller/lifecycle/spread/spread_test.go +++ b/controller/lifecycle/spread/spread_test.go @@ -136,44 +136,68 @@ func TestRemoveRefreshLabelNoLabels(t *testing.T) { assert.False(t, ok) } -func TestToRuntimeObjectSpreadReconcileStatusInterface_Success(t *testing.T) { +func TestReconcileRequired(t *testing.T) { s := NewSpreader() tl := testlogger.New() - apiObject := &pmtesting.ImplementingSpreadReconciles{} - obj, err := s.ToRuntimeObjectSpreadReconcileStatusInterface(apiObject, tl.Logger) - assert.NoError(t, err) - assert.NotNil(t, obj) -} - -func TestToRuntimeObjectSpreadReconcileStatusInterface_Failure(t *testing.T) { - s := NewSpreader() - tl := testlogger.New() - // DummyRuntimeObject does NOT implement RuntimeObjectSpreadReconcileStatus - apiObject := &pmtesting.DummyRuntimeObject{} - obj, err := s.ToRuntimeObjectSpreadReconcileStatusInterface(apiObject, tl.Logger) - assert.Error(t, err) - assert.Nil(t, obj) - messages, logErr := tl.GetLogMessages() - assert.NoError(t, logErr) - assert.Contains(t, messages[0].Message, "Failed to cast instance to RuntimeObjectSpreadReconcileStatus") -} - -func TestMustToRuntimeObjectSpreadReconcileStatusInterface_Success(t *testing.T) { - s := NewSpreader() - tl := testlogger.New() - apiObject := &pmtesting.ImplementingSpreadReconciles{} - obj := s.MustToRuntimeObjectSpreadReconcileStatusInterface(apiObject, tl.Logger) - assert.NotNil(t, obj) -} -func TestMustToRuntimeObjectSpreadReconcileStatusInterface_Panic(t *testing.T) { - s := NewSpreader() - tl := testlogger.New() - apiObject := &pmtesting.DummyRuntimeObject{} - defer func() { - if r := recover(); r == nil { - t.Errorf("Expected panic but did not panic") - } - }() - _ = s.MustToRuntimeObjectSpreadReconcileStatusInterface(apiObject, tl.Logger) + now := time.Now() + past := now.Add(-1 * time.Hour) + future := now.Add(1 * time.Hour) + + // Case 1: Generation changed + apiObject1 := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: v1.ObjectMeta{ + Generation: 2, + }, + Status: pmtesting.TestStatus{ + ObservedGeneration: 1, + NextReconcileTime: v1.NewTime(future), + }, + }, + } + assert.True(t, s.ReconcileRequired(apiObject1, tl.Logger), "Should require reconcile when generation changed") + + // Case 2: After next 
+    apiObject2 := &pmtesting.ImplementingSpreadReconciles{
+        TestApiObject: pmtesting.TestApiObject{
+            ObjectMeta: v1.ObjectMeta{
+                Generation: 1,
+            },
+            Status: pmtesting.TestStatus{
+                ObservedGeneration: 1,
+                NextReconcileTime: v1.NewTime(past),
+            },
+        },
+    }
+    assert.True(t, s.ReconcileRequired(apiObject2, tl.Logger), "Should require reconcile when after next reconcile time")
+
+    // Case 3: Refresh label present
+    apiObject3 := &pmtesting.ImplementingSpreadReconciles{
+        TestApiObject: pmtesting.TestApiObject{
+            ObjectMeta: v1.ObjectMeta{
+                Generation: 1,
+                Labels: map[string]string{ReconcileRefreshLabel: ""},
+            },
+            Status: pmtesting.TestStatus{
+                ObservedGeneration: 1,
+                NextReconcileTime: v1.NewTime(future),
+            },
+        },
+    }
+    assert.True(t, s.ReconcileRequired(apiObject3, tl.Logger), "Should require reconcile when refresh label present")
+
+    // Case 4: No condition met
+    apiObject4 := &pmtesting.ImplementingSpreadReconciles{
+        TestApiObject: pmtesting.TestApiObject{
+            ObjectMeta: v1.ObjectMeta{
+                Generation: 1,
+            },
+            Status: pmtesting.TestStatus{
+                ObservedGeneration: 1,
+                NextReconcileTime: v1.NewTime(future),
+            },
+        },
+    }
+    assert.False(t, s.ReconcileRequired(apiObject4, tl.Logger), "Should not require reconcile when no condition met")
 }
diff --git a/controller/lifecycle/util/convert.go b/controller/lifecycle/util/convert.go
new file mode 100644
index 0000000..3894eed
--- /dev/null
+++ b/controller/lifecycle/util/convert.go
@@ -0,0 +1,30 @@
+package util
+
+import (
+    "fmt"
+    "reflect"
+
+    "github.com/platform-mesh/golang-commons/logger"
+    "github.com/platform-mesh/golang-commons/sentry"
+)
+
+func ToInterface[T any](instance any, log *logger.Logger) (T, error) {
+    var zero T
+    obj, ok := instance.(T)
+    if ok {
+        return obj, nil
+    }
+    err := fmt.Errorf("failed to cast instance of type %T to %v", instance, reflect.TypeOf(zero))
+    log.Error().Err(err).Msg("Failed to cast instance to target interface")
+    sentry.CaptureError(err, nil)
+    return zero, err
+}
+
+func MustToInterface[T any](instance any, log *logger.Logger) T {
+    obj, err := ToInterface[T](instance, log)
+    if err == nil {
+        return obj
+    }
+    log.Panic().Err(err).Msg("Failed to cast instance to target interface")
+    panic(err)
+}
diff --git a/controller/lifecycle/util/convert_test.go b/controller/lifecycle/util/convert_test.go
new file mode 100644
index 0000000..7d4338e
--- /dev/null
+++ b/controller/lifecycle/util/convert_test.go
@@ -0,0 +1,49 @@
+package util
+
+import (
+    "testing"
+
+    "github.com/stretchr/testify/assert"
+
+    "github.com/platform-mesh/golang-commons/controller/lifecycle/api"
+    pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport"
+    "github.com/platform-mesh/golang-commons/logger/testlogger"
+)
+
+func TestToRuntimeObjectSpreadReconcileStatusInterface_Success(t *testing.T) {
+    tl := testlogger.New()
+    apiObject := &pmtesting.ImplementingSpreadReconciles{}
+    obj, err := ToInterface[api.RuntimeObjectSpreadReconcileStatus](apiObject, tl.Logger)
+    assert.NoError(t, err)
+    assert.NotNil(t, obj)
+}
+
+func TestToRuntimeObjectSpreadReconcileStatusInterface_Failure(t *testing.T) {
+    tl := testlogger.New()
+    // DummyRuntimeObject does NOT implement RuntimeObjectSpreadReconcileStatus
+    apiObject := &pmtesting.DummyRuntimeObject{}
+    _, err := ToInterface[api.RuntimeObjectSpreadReconcileStatus](apiObject, tl.Logger)
+    assert.Error(t, err)
+
+    messages, logErr := tl.GetLogMessages()
+    assert.NoError(t, logErr)
+    assert.Contains(t, messages[0].Message, "Failed to cast instance to target interface")
instance to target interface") +} + +func TestMustToRuntimeObjectSpreadReconcileStatusInterface_Success(t *testing.T) { + tl := testlogger.New() + apiObject := &pmtesting.ImplementingSpreadReconciles{} + obj := MustToInterface[api.RuntimeObjectSpreadReconcileStatus](apiObject, tl.Logger) + assert.NotNil(t, obj) +} + +func TestMustToRuntimeObjectSpreadReconcileStatusInterface_Panic(t *testing.T) { + tl := testlogger.New() + apiObject := &pmtesting.DummyRuntimeObject{} + defer func() { + if r := recover(); r == nil { + t.Errorf("Expected panic but did not panic") + } + }() + MustToInterface[api.RuntimeObjectSpreadReconcileStatus](apiObject, tl.Logger) +} diff --git a/controller/testSupport/lifecycle.go b/controller/testSupport/lifecycle.go index 486ce06..5b7e1ad 100644 --- a/controller/testSupport/lifecycle.go +++ b/controller/testSupport/lifecycle.go @@ -1,13 +1,23 @@ package testSupport import ( + "time" + + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + ctrl "sigs.k8s.io/controller-runtime" + "github.com/platform-mesh/golang-commons/controller/lifecycle/api" + "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" "github.com/platform-mesh/golang-commons/logger" ) type TestLifecycleManager struct { - Logger *logger.Logger + Logger *logger.Logger + SubroutinesArr []subroutine.Subroutine + spreader api.SpreadManager + conditionsManager api.ConditionManager + ShouldReconcile bool } func (t TestLifecycleManager) Config() api.Config { @@ -18,7 +28,82 @@ func (t TestLifecycleManager) Config() api.Config { } } func (t TestLifecycleManager) Log() *logger.Logger { return t.Logger } -func (t TestLifecycleManager) Spreader() api.SpreadManager { return nil } -func (t TestLifecycleManager) ConditionsManager() api.ConditionManager { return nil } +func (t TestLifecycleManager) Spreader() api.SpreadManager { return t.spreader } +func (t TestLifecycleManager) ConditionsManager() api.ConditionManager { return t.conditionsManager } func (t TestLifecycleManager) PrepareContextFunc() api.PrepareContextFunc { return nil } -func (t TestLifecycleManager) Subroutines() []subroutine.Subroutine { return []subroutine.Subroutine{} } +func (t TestLifecycleManager) Subroutines() []subroutine.Subroutine { return t.SubroutinesArr } +func (l *TestLifecycleManager) WithSpreadingReconciles() *TestLifecycleManager { + l.spreader = &TestSpreader{ShouldReconcile: l.ShouldReconcile} + return l +} +func (l *TestLifecycleManager) WithConditionManagement() *TestLifecycleManager { + l.conditionsManager = &TestConditionManager{} + return l +} + +type TestSpreader struct { + ShouldReconcile bool +} + +func (t TestSpreader) ReconcileRequired(instance runtimeobject.RuntimeObject, log *logger.Logger) bool { + return t.ShouldReconcile +} + +func (t TestSpreader) ToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (api.RuntimeObjectSpreadReconcileStatus, error) { + //TODO implement me + panic("implement me") +} + +func (t TestSpreader) MustToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) api.RuntimeObjectSpreadReconcileStatus { + + //TODO implement me + panic("implement me") +} + +func (t TestSpreader) OnNextReconcile(instance runtimeobject.RuntimeObject, log *logger.Logger) (ctrl.Result, error) { + return ctrl.Result{RequeueAfter: 10 * time.Minute}, nil +} + +func (t TestSpreader) RemoveRefreshLabelIfExists(instance 
runtimeobject.RuntimeObject) bool { + return false +} + +func (t TestSpreader) SetNextReconcileTime(instanceStatusObj api.RuntimeObjectSpreadReconcileStatus, log *logger.Logger) { + instanceStatusObj.SetNextReconcileTime(metav1.NewTime(time.Now().Add(10 * time.Hour))) +} + +func (t TestSpreader) UpdateObservedGeneration(instanceStatusObj api.RuntimeObjectSpreadReconcileStatus, log *logger.Logger) { + instanceStatusObj.SetObservedGeneration(instanceStatusObj.GetGeneration()) +} + +type TestConditionManager struct{} + +func (t TestConditionManager) MustToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) api.RuntimeObjectConditions { + //TODO implement me + panic("implement me") +} + +func (t TestConditionManager) SetInstanceConditionUnknownIfNotSet(conditions *[]metav1.Condition) bool { + //TODO implement me + panic("implement me") +} + +func (t TestConditionManager) SetSubroutineConditionToUnknownIfNotSet(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, isFinalize bool, log *logger.Logger) bool { + //TODO implement me + panic("implement me") +} + +func (t TestConditionManager) SetSubroutineCondition(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, subroutineResult ctrl.Result, subroutineErr error, isFinalize bool, log *logger.Logger) bool { + //TODO implement me + panic("implement me") +} + +func (t TestConditionManager) SetInstanceConditionReady(conditions *[]metav1.Condition, status metav1.ConditionStatus) bool { + //TODO implement me + panic("implement me") +} + +func (t TestConditionManager) ToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (api.RuntimeObjectConditions, error) { + //TODO implement me + panic("implement me") +} diff --git a/controller/testSupport/provider.go b/controller/testSupport/provider.go new file mode 100644 index 0000000..b1a04d8 --- /dev/null +++ b/controller/testSupport/provider.go @@ -0,0 +1,27 @@ +package testSupport + +import ( + "context" + + "k8s.io/client-go/rest" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/cluster" +) + +type FakeProvider struct { + cfg *rest.Config +} + +func NewFakeProvider(cfg *rest.Config) *FakeProvider { + return &FakeProvider{cfg: cfg} +} + +func (f FakeProvider) Get(context.Context, string) (cluster.Cluster, error) { + return cluster.New(f.cfg, nil) +} + +// IndexField indexes the given object by the given field on all engaged +// clusters, current and future. 
+func (f FakeProvider) IndexField(context.Context, client.Object, string, client.IndexerFunc) error { + return nil +} From 1a23f6a192a0f1c8944b9eece3689fa8d4f11e3e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 07:13:38 +0200 Subject: [PATCH 06/16] test: refactoring for unittests --- .../controllerruntime/lifecycle_test.go | 1248 +---------------- 1 file changed, 30 insertions(+), 1218 deletions(-) diff --git a/controller/lifecycle/controllerruntime/lifecycle_test.go b/controller/lifecycle/controllerruntime/lifecycle_test.go index 7ceaa8d..501c958 100644 --- a/controller/lifecycle/controllerruntime/lifecycle_test.go +++ b/controller/lifecycle/controllerruntime/lifecycle_test.go @@ -3,13 +3,9 @@ package controllerruntime import ( "context" goerrors "errors" - "fmt" "testing" - "time" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - v1 "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" "k8s.io/client-go/rest" @@ -17,1172 +13,41 @@ import ( "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/manager" - "github.com/platform-mesh/golang-commons/controller/lifecycle" - "github.com/platform-mesh/golang-commons/controller/lifecycle/conditions" "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" - "github.com/platform-mesh/golang-commons/controller/lifecycle/spread" "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" - operrors "github.com/platform-mesh/golang-commons/errors" - pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport" - "github.com/platform-mesh/golang-commons/logger" + operrors "github.com/platform-mesh/golang-commons/errors" "github.com/platform-mesh/golang-commons/logger/testlogger" - "github.com/platform-mesh/golang-commons/sentry" ) func TestLifecycle(t *testing.T) { namespace := "bar" name := "foo" - request := controllerruntime.Request{ - NamespacedName: types.NamespacedName{ - Namespace: namespace, - Name: name, - }, - } testApiObject := &pmtesting.TestApiObject{ ObjectMeta: metav1.ObjectMeta{ Name: name, Namespace: namespace, }, } - ctx := context.Background() - - t.Run("Lifecycle with a finalizer - add finalizer", func(t *testing.T) { - // Arrange - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - }) - - t.Run("Lifecycle with a finalizer - finalization", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 0, len(instance.Finalizers)) - }) - - t.Run("Lifecycle with a finalizer - finalization(requeue)", func(t *testing.T) { 
- // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - RequeueAfter: 1 * time.Second, - }, - }, fakeClient) - - // Act - res, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - assert.Equal(t, time.Duration(1*time.Second), res.RequeueAfter) - }) - - t.Run("Lifecycle with a finalizer - finalization(requeueAfter)", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - RequeueAfter: 2 * time.Second, - }, - }, fakeClient) - - // Act - res, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - assert.Equal(t, 2*time.Second, res.RequeueAfter) - }) - - t.Run("Lifecycle with a finalizer - skip finalization if the finalizer is not in there", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{"other-finalizer"} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - }) - t.Run("Lifecycle with a finalizer - failing finalization subroutine", func(t *testing.T) { - // Arrange - now := &metav1.Time{Time: time.Now()} - finalizers := []string{pmtesting.SubroutineFinalizer} - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - DeletionTimestamp: now, - Finalizers: finalizers, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FinalizerSubroutine{ - Client: fakeClient, - Err: fmt.Errorf("some error"), - }, - }, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Equal(t, 1, len(instance.Finalizers)) - }) - - t.Run("Lifecycle without changing status", func(t *testing.T) { - // Arrange - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Status: pmtesting.TestStatus{Some: "string"}, - } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - - // Act - result, err := mgr.Reconcile(ctx, request, instance) - - // Assert - assert.NoError(t, err) - assert.NotNil(t, result) - logMessages, err := log.GetLogMessages() - assert.NoError(t, 
err) - assert.Equal(t, len(logMessages), 3) - assert.Equal(t, logMessages[0].Message, "start reconcile") - assert.Equal(t, logMessages[1].Message, "skipping status update, since they are equal") - assert.Equal(t, logMessages[2].Message, "end reconcile") - }) - - t.Run("Lifecycle with changing status", func(t *testing.T) { - // Arrange - instance := &pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - }, - Status: pmtesting.TestStatus{Some: "string"}, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, log := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - - // Act - result, err := mgr.Reconcile(ctx, request, instance) - - // Assert - assert.NoError(t, err) - assert.NotNil(t, result) - logMessages, err := log.GetLogMessages() - assert.NoError(t, err) - assert.Equal(t, len(logMessages), 7) - assert.Equal(t, logMessages[0].Message, "start reconcile") - assert.Equal(t, logMessages[1].Message, "start subroutine") - assert.Equal(t, logMessages[2].Message, "processing instance") - assert.Equal(t, logMessages[3].Message, "processed instance") - assert.Equal(t, logMessages[4].Message, "end subroutine") - - serverObject := &pmtesting.TestApiObject{} - err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) - assert.NoError(t, err) - assert.Equal(t, serverObject.Status.Some, "other string") - }) - - t.Run("Lifecycle with spread reconciles", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, instance.Generation, instance.Status.ObservedGeneration) - }) - - t.Run("Lifecycle with spread reconciles on deleted object", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 2, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{pmtesting.ChangeStatusSubroutineFinalizer}, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 2, - NextReconcileTime: metav1.Time{Time: time.Now().Add(2 * time.Hour)}, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - assert.NoError(t, err) - assert.Len(t, instance.Finalizers, 0) - - }) - - t.Run("Lifecycle with spread reconciles skips if the generation is the same", func(t *testing.T) { - // Arrange - nextReconcileTime := metav1.NewTime(time.Now().Add(1 * time.Hour)) - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - 
}, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 1, - NextReconcileTime: nextReconcileTime, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - result, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - assert.GreaterOrEqual(t, 12*time.Hour, result.RequeueAfter) - }) - - t.Run("Lifecycle with spread reconciles and processing fails (no-retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: false, RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - }) - - t.Run("Lifecycle with spread reconciles and processing fails (retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - }) - - t.Run("Lifecycle with spread reconciles and processing needs requeue", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - }) - - t.Run("Lifecycle with spread reconciles and processing needs requeueAfter", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - _, err := mgr.Reconcile(ctx, 
request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - }) - - t.Run("Lifecycle with spread not implementing the interface", func(t *testing.T) { - // Arrange - instance := &pmtesting.NotImplementingSpreadReconciles{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - mgr.WithSpreadingReconciles() - - // Act - assert.Panics(t, func() { - _, _ = mgr.Reconcile(ctx, request, instance) - }) - }) - - t.Run("Should setup with manager", func(t *testing.T) { - // Arrange - instance := &pmtesting.TestApiObject{} - fakeClient := pmtesting.CreateFakeClient(t, instance) - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - m, err := manager.New(&rest.Config{}, manager.Options{ - Scheme: fakeClient.Scheme(), - }) - assert.NoError(t, err) - - lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - tr := &testReconciler{ - lifecycleManager: lm, - } - - // Act - err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) - - // Assert - assert.NoError(t, err) - }) - - t.Run("Should setup with manager not implementing interface", func(t *testing.T) { - // Arrange - instance := &pmtesting.NotImplementingSpreadReconciles{} - fakeClient := pmtesting.CreateFakeClient(t, instance) - log, err := logger.New(logger.DefaultConfig()) - assert.NoError(t, err) - m, err := manager.New(&rest.Config{}, manager.Options{ - Scheme: fakeClient.Scheme(), - }) - assert.NoError(t, err) - - lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - lm.WithSpreadingReconciles() - tr := &testReconciler{ - lifecycleManager: lm, - } - - // Act - err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) - - // Assert - assert.Error(t, err) - }) - - t.Run("Lifecycle with spread reconciles and refresh label", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementingSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - Labels: map[string]string{spread.ReconcileRefreshLabel: "true"}, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 1, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - lm, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - lm.WithSpreadingReconciles() - - // Act - _, err := lm.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - - serverObject := &pmtesting.ImplementingSpreadReconciles{} - err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) - assert.NoError(t, err) - assert.Equal(t, serverObject.Status.Some, "other string") - _, ok := serverObject.Labels[spread.ReconcileRefreshLabel] - assert.False(t, ok) - }) - - t.Run("Should handle a client error", func(t *testing.T) { - // Arrange - _, log := createLifecycleManager([]subroutine.Subroutine{}, nil) - testErr := fmt.Errorf("test error") - - // Act - 
result, err := lifecycle.HandleClientError("test", log.Logger, testErr, true, sentry.Tags{}) - - // Assert - assert.Error(t, err) - assert.Equal(t, testErr, err) - assert.Equal(t, controllerruntime.Result{}, result) - }) - - t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 1) - assert.Equal(t, instance.Status.Conditions[0].Type, conditions.ConditionReady) - assert.Equal(t, instance.Status.Conditions[0].Status, metav1.ConditionTrue) - assert.Equal(t, instance.Status.Conditions[0].Message, "The resource is ready") - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - assert.Equal(t, "test", instance.Status.Conditions[2].Type) - 
assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "test", instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - assert.Equal(t, "test", instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "test", instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions (update)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Conditions: []metav1.Condition{ - { - Type: "test", - Status: metav1.ConditionFalse, - Reason: "test", - Message: "test", - }, - }, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, "test", instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "test", instance.Status.Conditions[0].Message) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[1].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Conditions: []metav1.Condition{ - { - 
Type: conditions.ConditionReady, - Status: metav1.ConditionTrue, - Message: "The resource is ready!!", - Reason: conditions.ConditionReady, - }, - }, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - assert.Equal(t, "test", instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - assert.Equal(t, "test", instance.Status.Conditions[2].Message) - - }) - - t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - require.Len(t, instance.Status.Conditions, 1) - assert.Equal(t, "test", instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "test", instance.Status.Conditions[0].Message) - - }) - - t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions finalizes with multiple subroutines partially succeeding", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - 
ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer, pmtesting.ChangeStatusSubroutineFinalizer}, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{}, - pmtesting.ChangeStatusSubroutine{Client: fakeClient}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - require.Len(t, instance.Status.Conditions, 3) - assert.Equal(t, "changeStatus_Finalize", instance.Status.Conditions[0].Type, "") - assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - assert.Equal(t, "The subroutine finalization is complete", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Finalize", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine finalization has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[2].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[2].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[2].Message) - }) - - t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionUnknown, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine is processing", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionFalse, 
instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{}, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) - }) - - t.Run("Lifecycle with manage conditions not implementing the interface", func(t *testing.T) { - // Arrange - instance := &pmtesting.NotImplementingSpreadReconciles{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - pmtesting.ChangeStatusSubroutine{ - Client: fakeClient, - }, - }, fakeClient) - mgr.WithConditionManagement() - - // Act - // So the validation is already happening in SetupWithManager. So we can panic in the reconcile. 
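For illustration only (not part of either patch): a minimal sketch of the setup-time guard the preceding comment refers to, using the ValidateInterfaces helper and Lifecycle interface introduced in this series; the wiring function name setupExample and its exact arguments are assumptions, not code from the repository.

// Hypothetical wiring example; only ValidateInterfaces, Lifecycle, RuntimeObject
// and the logger come from the packages shown in this series.
package example

import (
	"github.com/platform-mesh/golang-commons/controller/lifecycle"
	"github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject"
	"github.com/platform-mesh/golang-commons/logger"
)

func setupExample(l lifecycle.Lifecycle, instance runtimeobject.RuntimeObject, log *logger.Logger) error {
	// Check interface conformance once, while the controller is being registered.
	if err := lifecycle.ValidateInterfaces(instance, log, l); err != nil {
		return err
	}
	// Because setup fails fast above, Reconcile may rely on the Must* conversions
	// and treat a non-conforming instance type as a programming error (panic).
	return nil
}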
- assert.Panics(t, func() { - _, _ = mgr.Reconcile(ctx, request, instance) - }) - }) - - t.Run("Lifecycle with manage conditions failing finalize", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer}, - DeletionTimestamp: &metav1.Time{Time: time.Now()}, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{}}, fakeClient) - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Equal(t, "FailureScenarioSubroutine", err.Error()) - }) - - t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.Error(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) - assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - }) - - t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - TestApiObject: pmtesting.TestApiObject{ - ObjectMeta: metav1.ObjectMeta{ - Name: name, - Namespace: namespace, - Generation: 1, - }, - Status: pmtesting.TestStatus{ - Some: "string", - ObservedGeneration: 0, - }, - }, - } - - fakeClient := pmtesting.CreateFakeClient(t, instance) - - mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - mgr.WithSpreadingReconciles() - mgr.WithConditionManagement() - - // Act - _, err := mgr.Reconcile(ctx, request, instance) - - assert.NoError(t, err) - assert.Len(t, instance.Status.Conditions, 2) - assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) - assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) - assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - }) - - t.Run("Test Lifecycle setupWithManager /w conditions and expecting no error", func(t *testing.T) { + t.Run("Test Lifecycle setupWithManager /w spread 
and expecting no error", func(t *testing.T) { // Arrange - instance := &pmtesting.ImplementConditions{} + instance := &pmtesting.ImplementingSpreadReconciles{} fakeClient := pmtesting.CreateFakeClient(t, instance) m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) assert.NoError(t, err) lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - lm = lm.WithConditionManagement() + lm = lm.WithSpreadingReconciles() tr := &testReconciler{lifecycleManager: lm} // Act - err = lm.SetupWithManager(m, 0, "testReconciler1", instance, "test", tr, log.Logger) + err = lm.SetupWithManager(m, 0, "testReconciler3", instance, "test", tr, log.Logger) // Assert assert.NoError(t, err) }) - - t.Run("Test Lifecycle setupWithManager /w conditions and expecting error", func(t *testing.T) { + t.Run("Test Lifecycle setupWithManager /w spread and expecting a error", func(t *testing.T) { // Arrange instance := &pmtesting.NotImplementingSpreadReconciles{} fakeClient := pmtesting.CreateFakeClient(t, instance) @@ -1191,17 +56,16 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - lm = lm.WithConditionManagement() + lm = lm.WithSpreadingReconciles() tr := &testReconciler{lifecycleManager: lm} // Act - err = lm.SetupWithManager(m, 0, "testReconciler2", instance, "test", tr, log.Logger) + err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log.Logger) // Assert assert.Error(t, err) }) - - t.Run("Test Lifecycle setupWithManager /w spread and expecting no error", func(t *testing.T) { + t.Run("Test Lifecycle setupWithManager /w spread and expecting a error (invalid config)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{} fakeClient := pmtesting.CreateFakeClient(t, instance) @@ -1210,26 +74,7 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - lm = lm.WithSpreadingReconciles() - tr := &testReconciler{lifecycleManager: lm} - - // Act - err = lm.SetupWithManager(m, 0, "testReconciler3", instance, "test", tr, log.Logger) - - // Assert - assert.NoError(t, err) - }) - - t.Run("Test Lifecycle setupWithManager /w spread and expecting a error", func(t *testing.T) { - // Arrange - instance := &pmtesting.NotImplementingSpreadReconciles{} - fakeClient := pmtesting.CreateFakeClient(t, instance) - - m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - assert.NoError(t, err) - - lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - lm = lm.WithSpreadingReconciles() + lm.WithSpreadingReconciles().WithReadOnly() tr := &testReconciler{lifecycleManager: lm} // Act @@ -1238,50 +83,7 @@ func TestLifecycle(t *testing.T) { // Assert assert.Error(t, err) }) - errorMessage := "oh nose" - t.Run("handleOperatorError", func(t *testing.T) { - t.Run("Should handle an operator error with retry and sentry", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{} - fakeClient := pmtesting.CreateFakeClient(t, instance) - - _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - ctx = sentry.ContextWithSentryTags(ctx, map[string]string{}) - - // Act - result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), true, true), "handle op error", true, log.Logger) - - // Assert - assert.Error(t, err) - assert.NotNil(t, result) - assert.Equal(t, 
errorMessage, err.Error()) - - errorMessages, err := log.GetErrorMessages() - assert.NoError(t, err) - assert.Equal(t, errorMessage, *errorMessages[0].Error) - }) - - t.Run("Should handle an operator error without retry", func(t *testing.T) { - // Arrange - instance := &pmtesting.ImplementConditions{} - fakeClient := pmtesting.CreateFakeClient(t, instance) - - _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - - // Act - result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), false, false), "handle op error", true, log.Logger) - - // Assert - assert.Nil(t, err) - assert.NotNil(t, result) - - errorMessages, err := log.GetErrorMessages() - assert.NoError(t, err) - assert.Equal(t, errorMessage, *errorMessages[0].Error) - }) - }) - t.Run("Prepare Context", func(t *testing.T) { t.Run("Sets a context that can be used in the subroutine", func(t *testing.T) { // Arrange @@ -1325,19 +127,29 @@ func TestLifecycle(t *testing.T) { assert.Error(t, err) }) }) -} + t.Run("WithConditionManagement", func(t *testing.T) { + // Given + fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) + _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + + // When + l := NewLifecycleManager(log.Logger, "test-operator", "test-controller", fakeClient, []subroutine.Subroutine{}).WithConditionManagement() -// Test LifecycleManager.WithConditionManagement -func TestLifecycleManager_WithConditionManagement(t *testing.T) { - // Given - fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) - _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + // Then + assert.NotNil(t, l.ConditionsManager()) + }) + t.Run("WithReadOnly", func(t *testing.T) { + // Given + fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) + _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) + + // When + l := NewLifecycleManager(log.Logger, "test-operator", "test-controller", fakeClient, []subroutine.Subroutine{}).WithReadOnly() - // When - l := NewLifecycleManager(log.Logger, "test-operator", "test-controller", fakeClient, []subroutine.Subroutine{}).WithConditionManagement() + // Then + assert.True(t, l.Config().ReadOnly) + }) - // Then - assert.True(t, true, l.ConditionsManager() != nil) } type testReconciler struct { From f5a4f3a8dad5402010bb86980ac5667f5bc8c70a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 11:36:12 +0200 Subject: [PATCH 07/16] test: refactoring for unittests --- controller/lifecycle/api/api.go | 2 - controller/lifecycle/conditions/conditions.go | 22 - .../lifecycle/conditions/conditions_test.go | 41 - controller/lifecycle/lifecycle.go | 14 +- controller/lifecycle/lifecycle_test.go | 1269 ++++++++--------- controller/testSupport/lifecycle.go | 110 +- 6 files changed, 677 insertions(+), 781 deletions(-) diff --git a/controller/lifecycle/api/api.go b/controller/lifecycle/api/api.go index e630f5a..7b51ed4 100644 --- a/controller/lifecycle/api/api.go +++ b/controller/lifecycle/api/api.go @@ -30,12 +30,10 @@ type Config struct { } type ConditionManager interface { - MustToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) RuntimeObjectConditions SetInstanceConditionUnknownIfNotSet(conditions *[]metav1.Condition) bool SetSubroutineConditionToUnknownIfNotSet(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, isFinalize bool, log 
*logger.Logger) bool SetSubroutineCondition(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, subroutineResult ctrl.Result, subroutineErr error, isFinalize bool, log *logger.Logger) bool SetInstanceConditionReady(conditions *[]metav1.Condition, status metav1.ConditionStatus) bool - ToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (RuntimeObjectConditions, error) } type RuntimeObjectConditions interface { diff --git a/controller/lifecycle/conditions/conditions.go b/controller/lifecycle/conditions/conditions.go index 3f55ae3..3524d63 100644 --- a/controller/lifecycle/conditions/conditions.go +++ b/controller/lifecycle/conditions/conditions.go @@ -7,11 +7,8 @@ import ( metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" - "github.com/platform-mesh/golang-commons/controller/lifecycle/api" - "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" "github.com/platform-mesh/golang-commons/logger" - "github.com/platform-mesh/golang-commons/sentry" ) const ( @@ -118,22 +115,3 @@ func (c *ConditionManager) SetSubroutineCondition(conditions *[]metav1.Condition } return changed } - -func (c *ConditionManager) ToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (api.RuntimeObjectConditions, error) { - if obj, ok := instance.(api.RuntimeObjectConditions); ok { - return obj, nil - } - err := fmt.Errorf("ManageConditions is enabled, but instance does not implement RuntimeObjectConditions interface. This is a programming error") - log.Error().Err(err).Msg("instance does not implement RuntimeObjectConditions interface") - sentry.CaptureError(err, nil) - return nil, err -} - -func (c *ConditionManager) MustToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) api.RuntimeObjectConditions { - obj, err := c.ToRuntimeObjectConditionsInterface(instance, log) - if err == nil { - return obj - } - log.Panic().Err(err).Msg("instance does not implement RuntimeObjectConditions interface") - return nil -} diff --git a/controller/lifecycle/conditions/conditions_test.go b/controller/lifecycle/conditions/conditions_test.go index 40c2850..d7ff4e1 100644 --- a/controller/lifecycle/conditions/conditions_test.go +++ b/controller/lifecycle/conditions/conditions_test.go @@ -240,44 +240,3 @@ func TestSubroutineCondition(t *testing.T) { assert.Equal(t, metav1.ConditionFalse, condition[0].Status) }) } - -// Dummy types for testing interface conversion - -func TestToRuntimeObjectConditionsInterface(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - cm := NewConditionManager() - - t.Run("Implements interface", func(t *testing.T) { - obj := pmtesting.DummyRuntimeObjectWithConditions{} - res, err := cm.ToRuntimeObjectConditionsInterface(obj, log) - assert.NoError(t, err) - assert.NotNil(t, res) - }) - - t.Run("Does not implement interface", func(t *testing.T) { - obj := pmtesting.DummyRuntimeObject{} - res, err := cm.ToRuntimeObjectConditionsInterface(obj, log) - assert.Error(t, err) - assert.Nil(t, res) - }) -} - -func TestMustToRuntimeObjectConditionsInterface(t *testing.T) { - log, err := logger.New(logger.DefaultConfig()) - require.NoError(t, err) - cm := NewConditionManager() - - t.Run("Implements interface", func(t *testing.T) { - obj := pmtesting.DummyRuntimeObjectWithConditions{} - res := 
cm.MustToRuntimeObjectConditionsInterface(obj, log) - assert.NotNil(t, res) - }) - - t.Run("Does not implement interface panics", func(t *testing.T) { - obj := pmtesting.DummyRuntimeObject{} - assert.Panics(t, func() { - cm.MustToRuntimeObjectConditionsInterface(obj, log) - }) - }) -} diff --git a/controller/lifecycle/lifecycle.go b/controller/lifecycle/lifecycle.go index a509942..9c071f7 100644 --- a/controller/lifecycle/lifecycle.go +++ b/controller/lifecycle/lifecycle.go @@ -71,7 +71,8 @@ func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtime var condArr []v1.Condition if l.ConditionsManager() != nil { - condArr = l.ConditionsManager().MustToRuntimeObjectConditionsInterface(instance, log).GetConditions() + roc := util.MustToInterface[api.RuntimeObjectConditions](instance, log) + condArr = roc.GetConditions() l.ConditionsManager().SetInstanceConditionUnknownIfNotSet(&condArr) } @@ -98,18 +99,18 @@ func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtime // Set current condArr before reconciling the s if l.ConditionsManager() != nil { - l.ConditionsManager().MustToRuntimeObjectConditionsInterface(instance, log).SetConditions(condArr) + util.MustToInterface[api.RuntimeObjectConditions](instance, log).SetConditions(condArr) } subResult, retry, err := reconcileSubroutine(ctx, instance, s, cl, l, log, generationChanged, sentryTags) // Update condArr with any changes the s did if l.ConditionsManager() != nil { - condArr = l.ConditionsManager().MustToRuntimeObjectConditionsInterface(instance, log).GetConditions() + condArr = util.MustToInterface[api.RuntimeObjectConditions](instance, log).GetConditions() } if err != nil { if l.ConditionsManager() != nil { l.ConditionsManager().SetSubroutineCondition(&condArr, s, result, err, inDeletion, log) l.ConditionsManager().SetInstanceConditionReady(&condArr, v1.ConditionFalse) - l.ConditionsManager().MustToRuntimeObjectConditionsInterface(instance, log).SetConditions(condArr) + util.MustToInterface[api.RuntimeObjectConditions](instance, log).SetConditions(condArr) } if !retry { MarkResourceAsFinal(instance, log, condArr, v1.ConditionFalse, l) @@ -144,7 +145,7 @@ func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtime } if l.ConditionsManager() != nil { - l.ConditionsManager().MustToRuntimeObjectConditionsInterface(instance, log).SetConditions(condArr) + util.MustToInterface[api.RuntimeObjectConditions](instance, log).SetConditions(condArr) } if !l.Config().ReadOnly { @@ -369,7 +370,8 @@ func ValidateInterfaces(instance runtimeobject.RuntimeObject, log *logger.Logger } } if l.ConditionsManager() != nil { - _, err := l.ConditionsManager().ToRuntimeObjectConditionsInterface(instance, log) + util.ToInterface[api.RuntimeObjectConditions](instance, log) + _, err := util.ToInterface[api.RuntimeObjectConditions](instance, log) if err != nil { return err } diff --git a/controller/lifecycle/lifecycle_test.go b/controller/lifecycle/lifecycle_test.go index 40e7032..c175c52 100644 --- a/controller/lifecycle/lifecycle_test.go +++ b/controller/lifecycle/lifecycle_test.go @@ -2,22 +2,29 @@ package lifecycle import ( "context" + goerrors "errors" "fmt" "testing" "time" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" "k8s.io/apimachinery/pkg/api/errors" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/types" ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" + 
"github.com/platform-mesh/golang-commons/controller/lifecycle/conditions" "github.com/platform-mesh/golang-commons/controller/lifecycle/mocks" + "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport" + operrors "github.com/platform-mesh/golang-commons/errors" "github.com/platform-mesh/golang-commons/logger" + "github.com/platform-mesh/golang-commons/logger/testlogger" + "github.com/platform-mesh/golang-commons/sentry" ) func TestLifecycle(t *testing.T) { @@ -29,6 +36,12 @@ func TestLifecycle(t *testing.T) { Name: name, }, } + testApiObject := &pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + }, + } ctx := context.Background() logcfg := logger.DefaultConfig() logcfg.NoJSON = true @@ -39,7 +52,7 @@ func TestLifecycle(t *testing.T) { // Arrange fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) - mgr := pmtesting.TestLifecycleManager{Logger: log} + mgr := &pmtesting.TestLifecycleManager{Logger: log} // Act result, err := Reconcile(ctx, request.NamespacedName, &pmtesting.TestApiObject{}, fakeClient, mgr) @@ -49,7 +62,6 @@ func TestLifecycle(t *testing.T) { assert.NotNil(t, result) assert.NoError(t, err) }) - t.Run("Lifecycle with a finalizer - add finalizer", func(t *testing.T) { // Arrange instance := &pmtesting.TestApiObject{ @@ -61,7 +73,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ Client: fakeClient, }}, @@ -73,7 +85,6 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(instance.Finalizers)) }) - t.Run("Lifecycle with a finalizer - finalization", func(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} @@ -89,7 +100,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ Client: fakeClient, }, @@ -101,7 +112,6 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 0, len(instance.Finalizers)) }) - t.Run("Lifecycle with a finalizer - finalization(requeue)", func(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} @@ -117,7 +127,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ Client: fakeClient, RequeueAfter: 1 * time.Second, @@ -129,9 +139,8 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(instance.Finalizers)) - assert.Equal(t, time.Duration(1*time.Second), res.RequeueAfter) + assert.Equal(t, 1*time.Second, res.RequeueAfter) }) - t.Run("Lifecycle with a finalizer - finalization(requeueAfter)", func(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} @@ -147,7 +156,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, 
instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ Client: fakeClient, RequeueAfter: 2 * time.Second, @@ -162,7 +171,6 @@ func TestLifecycle(t *testing.T) { assert.Equal(t, 2*time.Second, res.RequeueAfter) }) - t.Run("Lifecycle with a finalizer - skip finalization if the finalizer is not in there", func(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} @@ -178,7 +186,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ Client: fakeClient, }, @@ -190,7 +198,6 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Equal(t, 1, len(instance.Finalizers)) }) - t.Run("Lifecycle with a finalizer - failing finalization subroutine", func(t *testing.T) { // Arrange now := &metav1.Time{Time: time.Now()} @@ -206,7 +213,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ pmtesting.FinalizerSubroutine{ Client: fakeClient, Err: fmt.Errorf("some error"), @@ -219,7 +226,6 @@ func TestLifecycle(t *testing.T) { assert.Error(t, err) assert.Equal(t, 1, len(instance.Finalizers)) }) - t.Run("Lifecycle without changing status", func(t *testing.T) { // Arrange instance := &pmtesting.TestApiObject{ @@ -231,7 +237,7 @@ func TestLifecycle(t *testing.T) { } fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{}} + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{}} // Act result, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) @@ -240,7 +246,6 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.NotNil(t, result) }) - t.Run("Lifecycle with changing status", func(t *testing.T) { // Arrange instance := &pmtesting.TestApiObject{ @@ -253,7 +258,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, SubroutinesArr: []subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ Client: fakeClient, }, @@ -272,7 +277,6 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Equal(t, serverObject.Status.Some, "other string") }) - t.Run("Lifecycle with spread reconciles", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementingSpreadReconciles{ @@ -291,7 +295,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, instance) - mgr := pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ pmtesting.ChangeStatusSubroutine{ Client: fakeClient, }, @@ -304,375 +308,328 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Equal(t, instance.Generation, 
instance.Status.ObservedGeneration) }) - // - //t.Run("Lifecycle with spread reconciles on deleted object", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 2, - // DeletionTimestamp: &metav1.Time{Time: time.Now()}, - // Finalizers: []string{pmtesting.ChangeStatusSubroutineFinalizer}, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 2, - // NextReconcileTime: metav1.Time{Time: time.Now().Add(2 * time.Hour)}, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.ChangeStatusSubroutine{ - // Client: fakeClient, - // }, - // }, fakeClient) - // mgr.WithSpreadingReconciles() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // assert.NoError(t, err) - // assert.Len(t, instance.Finalizers, 0) - // - //}) - // - //t.Run("Lifecycle with spread reconciles skips if the generation is the same", func(t *testing.T) { - // // Arrange - // nextReconcileTime := metav1.NewTime(time.Now().Add(1 * time.Hour)) - // instance := &pmtesting.ImplementingSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 1, - // NextReconcileTime: nextReconcileTime, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - // mgr.WithSpreadingReconciles() - // - // // Act - // result, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - // assert.GreaterOrEqual(t, 12*time.Hour, result.RequeueAfter) - //}) - // - //t.Run("Lifecycle with spread reconciles and processing fails (no-retry)", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: false, RequeAfter: false}}, fakeClient) - // mgr.WithSpreadingReconciles() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - //}) - // - //t.Run("Lifecycle with spread reconciles and processing fails (retry)", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := 
createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - // mgr.WithSpreadingReconciles() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.Error(t, err) - // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - //}) - // - //t.Run("Lifecycle with spread reconciles and processing needs requeue", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - // mgr.WithSpreadingReconciles() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - //}) - // - //t.Run("Lifecycle with spread reconciles and processing needs requeueAfter", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // }, - // } - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - // mgr.WithSpreadingReconciles() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - //}) - // - //t.Run("Lifecycle with spread not implementing the interface", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.NotImplementingSpreadReconciles{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.ChangeStatusSubroutine{ - // Client: fakeClient, - // }, - // }, fakeClient) - // mgr.WithSpreadingReconciles() - // - // // Act - // assert.Panics(t, func() { - // _, _ = mgr.Reconcile(ctx, request, instance) - // }) - //}) - // - //t.Run("Should setup with manager", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.TestApiObject{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // log, err := logger.New(logger.DefaultConfig()) - // assert.NoError(t, err) - // m, err := manager.New(&rest.Config{}, manager.Options{ - // Scheme: fakeClient.Scheme(), - // }) - // assert.NoError(t, err) - // - // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - // tr := &testReconciler{ - // lifecycleManager: lm, - // } - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) - // - // // Assert - // assert.NoError(t, err) - //}) - // - //t.Run("Should setup 
with manager not implementing interface", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // log, err := logger.New(logger.DefaultConfig()) - // assert.NoError(t, err) - // m, err := manager.New(&rest.Config{}, manager.Options{ - // Scheme: fakeClient.Scheme(), - // }) - // assert.NoError(t, err) - // - // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - // lm.WithSpreadingReconciles() - // tr := &testReconciler{ - // lifecycleManager: lm, - // } - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log) - // - // // Assert - // assert.Error(t, err) - //}) - // - //t.Run("Lifecycle with spread reconciles and refresh label", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // Labels: map[string]string{spread.ReconcileRefreshLabel: "true"}, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 1, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // lm, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.ChangeStatusSubroutine{ - // Client: fakeClient, - // }, - // }, fakeClient) - // lm.WithSpreadingReconciles() - // - // // Act - // _, err := lm.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - // - // serverObject := &pmtesting.ImplementingSpreadReconciles{} - // err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) - // assert.NoError(t, err) - // assert.Equal(t, serverObject.Status.Some, "other string") - // _, ok := serverObject.Labels[spread.ReconcileRefreshLabel] - // assert.False(t, ok) - //}) - // - //t.Run("Should handle a client error", func(t *testing.T) { - // // Arrange - // _, log := createLifecycleManager([]subroutine.Subroutine{}, nil) - // testErr := fmt.Errorf("test error") - // - // // Act - // result, err := lifecycle.HandleClientError("test", log.Logger, testErr, true, sentry.Tags{}) - // - // // Assert - // assert.Error(t, err) - // assert.Equal(t, testErr, err) - // assert.Equal(t, controllerruntime.Result{}, result) - //}) - // - //t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Len(t, instance.Status.Conditions, 1) - // assert.Equal(t, instance.Status.Conditions[0].Type, conditions.ConditionReady) - // assert.Equal(t, instance.Status.Conditions[0].Status, metav1.ConditionTrue) - // assert.Equal(t, instance.Status.Conditions[0].Message, "The resource is ready") - //}) - // - //t.Run("Lifecycle with manage conditions reconciles with 
subroutine", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ChangeStatusSubroutine{ - // Client: fakeClient, - // }}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 2) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - //}) - // + t.Run("Lifecycle with spread reconciles on deleted object", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 2, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{pmtesting.ChangeStatusSubroutineFinalizer}, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 2, + NextReconcileTime: metav1.Time{Time: time.Now().Add(2 * time.Hour)}, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }} + mgr.WithSpreadingReconciles() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + assert.NoError(t, err) + assert.Len(t, instance.Finalizers, 0) + }) + t.Run("Lifecycle with spread reconciles skips if the generation is the same", func(t *testing.T) { + // Arrange + nextReconcileTime := metav1.NewTime(time.Now().Add(1 * time.Hour)) + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 1, + NextReconcileTime: nextReconcileTime, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{RequeAfter: false}, + }} + mgr.WithSpreadingReconciles() + + // Act + result, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + assert.GreaterOrEqual(t, 12*time.Hour, result.RequeueAfter) + }) + t.Run("Lifecycle with spread reconciles and processing fails (no-retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + 
Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{Retry: false, RequeAfter: false}, + }} + mgr.WithSpreadingReconciles() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread reconciles and processing fails (retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}, + }} + mgr.WithSpreadingReconciles() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.Error(t, err) + assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread reconciles and processing needs requeue", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{RequeAfter: true}, + }} + mgr.WithSpreadingReconciles() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread reconciles and processing needs requeueAfter", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{RequeAfter: true}, + }} + mgr.WithSpreadingReconciles() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, int64(0), instance.Status.ObservedGeneration) + }) + t.Run("Lifecycle with spread not implementing the interface", func(t *testing.T) { + // Arrange + instance := &pmtesting.NotImplementingSpreadReconciles{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, 
ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }} + mgr.WithSpreadingReconciles() + + // Act + assert.Panics(t, func() { + _, _ = Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + }) + }) + t.Run("Lifecycle with spread reconciles and refresh label", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementingSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + Labels: map[string]string{"platform-mesh.io/refresh-reconcile": "true"}, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 1, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }} + mgr.WithSpreadingReconciles() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Equal(t, int64(1), instance.Status.ObservedGeneration) + + serverObject := &pmtesting.ImplementingSpreadReconciles{} + err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, serverObject) + assert.NoError(t, err) + assert.Equal(t, serverObject.Status.Some, "other string") + _, ok := serverObject.Labels["platform-mesh.io/refresh-reconcile"] + assert.False(t, ok) + }) + t.Run("Should handle a client error", func(t *testing.T) { + // Arrange + testErr := fmt.Errorf("test error") + + // Act + result, err := HandleClientError("test", log, testErr, true, sentry.Tags{}) + + // Assert + assert.Error(t, err) + assert.Equal(t, testErr, err) + assert.Equal(t, ctrl.Result{}, result) + }) + + t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{}, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{}} + mgr.WithConditionManagement() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 1) + assert.Equal(t, instance.Status.Conditions[0].Type, conditions.ConditionReady) + assert.Equal(t, instance.Status.Conditions[0].Status, metav1.ConditionTrue) + assert.Equal(t, instance.Status.Conditions[0].Message, "The resource is ready") + }) + + t.Run("Lifecycle with manage conditions reconciles with subroutine", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{}, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }} + mgr.WithConditionManagement() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + require.Len(t, 
instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + }) + //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { // // Arrange // instance := &pmtesting.ImplementConditions{ @@ -767,171 +724,27 @@ func TestLifecycle(t *testing.T) { // // fakeClient := pmtesting.CreateFakeClient(t, instance) // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 3) - // assert.Equal(t, "test", instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "test", instance.Status.Conditions[0].Message) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[1].Message) - // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[2].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[2].Message) - // - //}) - // - //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Conditions: []metav1.Condition{ - // { - // Type: conditions.ConditionReady, - // Status: metav1.ConditionTrue, - // Message: "The resource is ready!!", - // Reason: conditions.ConditionReady, - // }, - // }, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 3) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - // assert.Equal(t, "test", instance.Status.Conditions[2].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - // 
assert.Equal(t, "test", instance.Status.Conditions[2].Message) - // - //}) - // - //t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 1) - // assert.Equal(t, "test", instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "test", instance.Status.Conditions[0].Message) - // - //}) - // - //t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.ChangeStatusSubroutine{ - // Client: fakeClient, - // }}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Len(t, instance.Status.Conditions, 2) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - //}) - // - //t.Run("Lifecycle with manage conditions finalizes with multiple subroutines partially succeeding", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // DeletionTimestamp: &metav1.Time{Time: time.Now()}, - // Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer, pmtesting.ChangeStatusSubroutineFinalizer}, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.FailureScenarioSubroutine{}, - // pmtesting.ChangeStatusSubroutine{Client: fakeClient}}, fakeClient) + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) // mgr.WithConditionManagement() // // // Act // _, err := mgr.Reconcile(ctx, request, instance) // - // assert.Error(t, err) + // assert.NoError(t, err) // require.Len(t, instance.Status.Conditions, 3) - // assert.Equal(t, 
"changeStatus_Finalize", instance.Status.Conditions[0].Type, "") + // assert.Equal(t, "test", instance.Status.Conditions[0].Type) // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The subroutine finalization is complete", instance.Status.Conditions[0].Message) - // assert.Equal(t, "FailureScenarioSubroutine_Finalize", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine finalization has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[2].Type) - // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[2].Status) - // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[2].Message) + // assert.Equal(t, "test", instance.Status.Conditions[0].Message) + // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[1].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[1].Message) + // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[2].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[2].Message) + // //}) // - //t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { + //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { // // Arrange // instance := &pmtesting.ImplementConditions{ // TestApiObject: pmtesting.TestApiObject{ @@ -940,30 +753,42 @@ func TestLifecycle(t *testing.T) { // Namespace: namespace, // Generation: 1, // }, - // Status: pmtesting.TestStatus{}, + // Status: pmtesting.TestStatus{ + // Conditions: []metav1.Condition{ + // { + // Type: conditions.ConditionReady, + // Status: metav1.ConditionTrue, + // Message: "The resource is ready!!", + // Reason: conditions.ConditionReady, + // }, + // }, + // }, // }, // } // // fakeClient := pmtesting.CreateFakeClient(t, instance) // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) // mgr.WithConditionManagement() // // // Act // _, err := mgr.Reconcile(ctx, request, instance) // // assert.NoError(t, err) - // assert.Len(t, instance.Status.Conditions, 2) + // require.Len(t, instance.Status.Conditions, 3) // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionUnknown, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is processing", instance.Status.Conditions[1].Message) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) + 
// assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + // assert.Equal(t, "test", instance.Status.Conditions[2].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) + // assert.Equal(t, "test", instance.Status.Conditions[2].Message) + // //}) // - //t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { + //t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { // // Arrange // instance := &pmtesting.ImplementConditions{ // TestApiObject: pmtesting.TestApiObject{ @@ -978,24 +803,84 @@ func TestLifecycle(t *testing.T) { // // fakeClient := pmtesting.CreateFakeClient(t, instance) // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - // mgr.WithConditionManagement() + // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) // // // Act // _, err := mgr.Reconcile(ctx, request, instance) // // assert.NoError(t, err) - // assert.Len(t, instance.Status.Conditions, 2) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + // require.Len(t, instance.Status.Conditions, 1) + // assert.Equal(t, "test", instance.Status.Conditions[0].Type) + // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + // assert.Equal(t, "test", instance.Status.Conditions[0].Message) + // //}) // - //t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { + t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{}, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ChangeStatusSubroutine{ + Client: fakeClient, + }, + }} + mgr.WithConditionManagement() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 2) + assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) + assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) + assert.Equal(t, "changeStatus_Ready", instance.Status.Conditions[1].Type) + assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) + assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) + }) + + t.Run("Lifecycle with manage conditions 
finalizes with multiple subroutines partially succeeding", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + DeletionTimestamp: &metav1.Time{Time: time.Now()}, + Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer, pmtesting.ChangeStatusSubroutineFinalizer}, + }, + Status: pmtesting.TestStatus{}, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{}, + pmtesting.ChangeStatusSubroutine{Client: fakeClient}}} + mgr.WithConditionManagement() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.Error(t, err) + require.Len(t, instance.Status.Conditions, 3) + }) + + //t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { // // Arrange // instance := &pmtesting.ImplementConditions{ // TestApiObject: pmtesting.TestApiObject{ @@ -1011,22 +896,76 @@ func TestLifecycle(t *testing.T) { // fakeClient := pmtesting.CreateFakeClient(t, instance) // // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) + // pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) // mgr.WithConditionManagement() // // // Act // _, err := mgr.Reconcile(ctx, request, instance) // - // assert.Error(t, err) + // assert.NoError(t, err) // assert.Len(t, instance.Status.Conditions, 2) // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine has an error: FailureScenarioSubroutine", instance.Status.Conditions[1].Message) + // assert.Equal(t, metav1.ConditionUnknown, instance.Status.Conditions[1].Status) + // assert.Equal(t, "The subroutine is processing", instance.Status.Conditions[1].Message) //}) // + t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{}, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{RequeAfter: false}, + }} + mgr.WithConditionManagement() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 2) + }) + + t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditions{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: 
pmtesting.TestStatus{}, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}, + }} + mgr.WithConditionManagement() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.Error(t, err) + assert.Len(t, instance.Status.Conditions, 2) + }) + // //t.Run("Lifecycle with manage conditions not implementing the interface", func(t *testing.T) { // // Arrange // instance := &pmtesting.NotImplementingSpreadReconciles{ @@ -1121,39 +1060,36 @@ func TestLifecycle(t *testing.T) { // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) //}) // - //t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{RequeAfter: false}}, fakeClient) - // mgr.WithSpreadingReconciles() - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Len(t, instance.Status.Conditions, 2) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) - // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) - // assert.Equal(t, int64(1), instance.Status.ObservedGeneration) - //}) + t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { + // Arrange + instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ + TestApiObject: pmtesting.TestApiObject{ + ObjectMeta: metav1.ObjectMeta{ + Name: name, + Namespace: namespace, + Generation: 1, + }, + Status: pmtesting.TestStatus{ + Some: "string", + ObservedGeneration: 0, + }, + }, + } + + fakeClient := pmtesting.CreateFakeClient(t, instance) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.FailureScenarioSubroutine{RequeAfter: false}, + }} + mgr.WithSpreadingReconciles() + mgr.WithConditionManagement() + + // Act + _, err := Reconcile(ctx, request.NamespacedName, instance, fakeClient, mgr) + + assert.NoError(t, err) + assert.Len(t, instance.Status.Conditions, 2) + }) // //t.Run("Test Lifecycle setupWithManager /w conditions and expecting no error", func(t *testing.T) { // // Arrange @@ -1231,7 +1167,7 @@ func TestLifecycle(t *testing.T) { // assert.Error(t, err) //}) // - //errorMessage := "oh nose" + errorMessage := "oh nose" //t.Run("handleOperatorError", func(t *testing.T) { // t.Run("Should handle an operator error with retry and sentry", func(t *testing.T) { // // Arrange @@ -1254,69 +1190,70 @@ func TestLifecycle(t *testing.T) { // assert.Equal(t, errorMessage, *errorMessages[0].Error) // }) // 
- // t.Run("Should handle an operator error without retry", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // - // // Act - // result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), false, false), "handle op error", true, log.Logger) - // - // // Assert - // assert.Nil(t, err) - // assert.NotNil(t, result) - // - // errorMessages, err := log.GetErrorMessages() - // assert.NoError(t, err) - // assert.Equal(t, errorMessage, *errorMessages[0].Error) - // }) - //}) - // - //t.Run("Prepare Context", func(t *testing.T) { - // t.Run("Sets a context that can be used in the subroutine", func(t *testing.T) { - // // Arrange - // ctx := context.Background() - // - // fakeClient := pmtesting.CreateFakeClient(t, testApiObject) - // - // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) - // lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { - // return context.WithValue(ctx, pmtesting.ContextValueKey, "valueFromContext"), nil - // }) - // tr := &testReconciler{lifecycleManager: lm} - // result, err := tr.Reconcile(ctx, controllerruntime.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}) - // - // // Then - // assert.NotNil(t, ctx) - // assert.NotNil(t, result) - // assert.NoError(t, err) - // - // err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, testApiObject) - // assert.NoError(t, err) - // assert.Equal(t, "valueFromContext", testApiObject.Status.Some) - // }) - // - // t.Run("Handles the errors correctly", func(t *testing.T) { - // // Arrange - // ctx := context.Background() - // - // fakeClient := pmtesting.CreateFakeClient(t, testApiObject) - // - // lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) - // lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { - // return nil, operrors.NewOperatorError(goerrors.New(errorMessage), true, false) - // }) - // tr := &testReconciler{lifecycleManager: lm} - // result, err := tr.Reconcile(ctx, controllerruntime.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}) + t.Run("Should handle an operator error without retry", func(t *testing.T) { + // Arrange + testLog := testlogger.New() + + // Act + result, err := HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), false, false), "handle op error", true, testLog.Logger) + + // Assert + assert.Nil(t, err) + assert.NotNil(t, result) + + errorMessages, err := testLog.GetErrorMessages() + assert.NoError(t, err) + assert.Equal(t, errorMessage, *errorMessages[0].Error) + }) + // - // // Then - // assert.NotNil(t, ctx) - // assert.NotNil(t, result) - // assert.Error(t, err) - // }) - //}) + t.Run("Prepare Context", func(t *testing.T) { + t.Run("Sets a context that can be used in the subroutine", func(t *testing.T) { + // Arrange + ctx := context.Background() + + fakeClient := pmtesting.CreateFakeClient(t, testApiObject) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ContextValueSubroutine{}, + }} + + mgr = 
mgr.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { + return context.WithValue(ctx, pmtesting.ContextValueKey, "valueFromContext"), nil + }) + result, err := Reconcile(ctx, types.NamespacedName{Name: name, Namespace: namespace}, testApiObject, fakeClient, mgr) + + // Then + assert.NotNil(t, ctx) + assert.NotNil(t, result) + assert.NoError(t, err) + + err = fakeClient.Get(ctx, client.ObjectKey{Name: name, Namespace: namespace}, testApiObject) + assert.NoError(t, err) + assert.Equal(t, "valueFromContext", testApiObject.Status.Some) + }) + + t.Run("Handles the errors correctly", func(t *testing.T) { + // Arrange + ctx := context.Background() + + fakeClient := pmtesting.CreateFakeClient(t, testApiObject) + + mgr := &pmtesting.TestLifecycleManager{Logger: log, ShouldReconcile: true, SubroutinesArr: []subroutine.Subroutine{ + pmtesting.ContextValueSubroutine{}, + }} + + mgr = mgr.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { + return nil, operrors.NewOperatorError(goerrors.New(errorMessage), true, false) + }) + result, err := Reconcile(ctx, types.NamespacedName{Name: name, Namespace: namespace}, testApiObject, fakeClient, mgr) + + // Then + assert.NotNil(t, ctx) + assert.NotNil(t, result) + assert.Error(t, err) + }) + }) } func TestUpdateStatus(t *testing.T) { diff --git a/controller/testSupport/lifecycle.go b/controller/testSupport/lifecycle.go index 5b7e1ad..68b33b5 100644 --- a/controller/testSupport/lifecycle.go +++ b/controller/testSupport/lifecycle.go @@ -1,8 +1,10 @@ package testSupport import ( + "fmt" "time" + "k8s.io/apimachinery/pkg/api/meta" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" ctrl "sigs.k8s.io/controller-runtime" @@ -13,25 +15,28 @@ import ( ) type TestLifecycleManager struct { - Logger *logger.Logger - SubroutinesArr []subroutine.Subroutine - spreader api.SpreadManager - conditionsManager api.ConditionManager - ShouldReconcile bool + Logger *logger.Logger + SubroutinesArr []subroutine.Subroutine + spreader api.SpreadManager + conditionsManager api.ConditionManager + ShouldReconcile bool + prepareContextFunc api.PrepareContextFunc } -func (t TestLifecycleManager) Config() api.Config { +func (l *TestLifecycleManager) Config() api.Config { return api.Config{ ControllerName: "test-controller", OperatorName: "test-operator", ReadOnly: false, } } -func (t TestLifecycleManager) Log() *logger.Logger { return t.Logger } -func (t TestLifecycleManager) Spreader() api.SpreadManager { return t.spreader } -func (t TestLifecycleManager) ConditionsManager() api.ConditionManager { return t.conditionsManager } -func (t TestLifecycleManager) PrepareContextFunc() api.PrepareContextFunc { return nil } -func (t TestLifecycleManager) Subroutines() []subroutine.Subroutine { return t.SubroutinesArr } +func (l *TestLifecycleManager) Log() *logger.Logger { return l.Logger } +func (l *TestLifecycleManager) Spreader() api.SpreadManager { return l.spreader } +func (l *TestLifecycleManager) ConditionsManager() api.ConditionManager { return l.conditionsManager } +func (l *TestLifecycleManager) PrepareContextFunc() api.PrepareContextFunc { + return l.prepareContextFunc +} +func (l *TestLifecycleManager) Subroutines() []subroutine.Subroutine { return l.SubroutinesArr } func (l *TestLifecycleManager) WithSpreadingReconciles() *TestLifecycleManager { l.spreader = &TestSpreader{ShouldReconcile: l.ShouldReconcile} return l @@ -41,69 +46,86 @@ 
func (l *TestLifecycleManager) WithConditionManagement() *TestLifecycleManager { return l } +func (l *TestLifecycleManager) WithPrepareContextFunc(prepareFunction api.PrepareContextFunc) *TestLifecycleManager { + l.prepareContextFunc = prepareFunction + return l +} + type TestSpreader struct { ShouldReconcile bool } -func (t TestSpreader) ReconcileRequired(instance runtimeobject.RuntimeObject, log *logger.Logger) bool { +func (t TestSpreader) ReconcileRequired(runtimeobject.RuntimeObject, *logger.Logger) bool { return t.ShouldReconcile } -func (t TestSpreader) ToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (api.RuntimeObjectSpreadReconcileStatus, error) { +func (t TestSpreader) ToRuntimeObjectSpreadReconcileStatusInterface() (api.RuntimeObjectSpreadReconcileStatus, error) { //TODO implement me panic("implement me") } -func (t TestSpreader) MustToRuntimeObjectSpreadReconcileStatusInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) api.RuntimeObjectSpreadReconcileStatus { +func (t TestSpreader) MustToRuntimeObjectSpreadReconcileStatusInterface() api.RuntimeObjectSpreadReconcileStatus { //TODO implement me panic("implement me") } -func (t TestSpreader) OnNextReconcile(instance runtimeobject.RuntimeObject, log *logger.Logger) (ctrl.Result, error) { +func (t TestSpreader) OnNextReconcile(runtimeobject.RuntimeObject, *logger.Logger) (ctrl.Result, error) { return ctrl.Result{RequeueAfter: 10 * time.Minute}, nil } func (t TestSpreader) RemoveRefreshLabelIfExists(instance runtimeobject.RuntimeObject) bool { + lbs := instance.GetLabels() + if _, ok := lbs["platform-mesh.io/refresh-reconcile"]; ok { + delete(lbs, "platform-mesh.io/refresh-reconcile") + instance.SetLabels(lbs) + return true + } return false } -func (t TestSpreader) SetNextReconcileTime(instanceStatusObj api.RuntimeObjectSpreadReconcileStatus, log *logger.Logger) { +func (t TestSpreader) SetNextReconcileTime(instanceStatusObj api.RuntimeObjectSpreadReconcileStatus, _ *logger.Logger) { instanceStatusObj.SetNextReconcileTime(metav1.NewTime(time.Now().Add(10 * time.Hour))) } -func (t TestSpreader) UpdateObservedGeneration(instanceStatusObj api.RuntimeObjectSpreadReconcileStatus, log *logger.Logger) { +func (t TestSpreader) UpdateObservedGeneration(instanceStatusObj api.RuntimeObjectSpreadReconcileStatus, _ *logger.Logger) { instanceStatusObj.SetObservedGeneration(instanceStatusObj.GetGeneration()) } type TestConditionManager struct{} -func (t TestConditionManager) MustToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) api.RuntimeObjectConditions { - //TODO implement me - panic("implement me") -} - func (t TestConditionManager) SetInstanceConditionUnknownIfNotSet(conditions *[]metav1.Condition) bool { - //TODO implement me - panic("implement me") -} - -func (t TestConditionManager) SetSubroutineConditionToUnknownIfNotSet(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, isFinalize bool, log *logger.Logger) bool { - //TODO implement me - panic("implement me") -} - -func (t TestConditionManager) SetSubroutineCondition(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, subroutineResult ctrl.Result, subroutineErr error, isFinalize bool, log *logger.Logger) bool { - //TODO implement me - panic("implement me") -} - -func (t TestConditionManager) SetInstanceConditionReady(conditions *[]metav1.Condition, status metav1.ConditionStatus) bool { - //TODO implement me - panic("implement me") -} - -func (t 
TestConditionManager) ToRuntimeObjectConditionsInterface(instance runtimeobject.RuntimeObject, log *logger.Logger) (api.RuntimeObjectConditions, error) { - //TODO implement me - panic("implement me") + return meta.SetStatusCondition(conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionUnknown, + Message: "The resource is in an unknown state", + Reason: "Unknown", + }) +} + +func (t TestConditionManager) SetSubroutineConditionToUnknownIfNotSet(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, _ bool, _ *logger.Logger) bool { + return meta.SetStatusCondition(conditions, metav1.Condition{ + Type: fmt.Sprintf("%s_Ready", subroutine.GetName()), + Status: metav1.ConditionUnknown, + Message: "The resource is in an unknown state", + Reason: "Unknown", + }) +} + +func (t TestConditionManager) SetSubroutineCondition(conditions *[]metav1.Condition, subroutine subroutine.Subroutine, _ ctrl.Result, _ error, _ bool, _ *logger.Logger) bool { + return meta.SetStatusCondition(conditions, metav1.Condition{ + Type: fmt.Sprintf("%s_Ready", subroutine.GetName()), + Status: metav1.ConditionTrue, + Message: "The subroutine is complete", + Reason: "ok", + }) +} + +func (t TestConditionManager) SetInstanceConditionReady(conditions *[]metav1.Condition, _ metav1.ConditionStatus) bool { + return meta.SetStatusCondition(conditions, metav1.Condition{ + Type: "Ready", + Status: metav1.ConditionTrue, + Message: "The resource is ready", + Reason: "ok", + }) } From f1273393472e7d74e8c8f6987b90dfd27e1e707c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 12:43:20 +0200 Subject: [PATCH 08/16] test: refactoring for unittests --- .testcoverage.yml | 1 + config/config.go | 11 ++ config/config_test.go | 31 ++++ controller/lifecycle/lifecycle_test.go | 104 ++++++++++++++ traces/otel.go | 16 ++- traces/otel_test.go | 188 +++++++++++++++++++++++++ 6 files changed, 347 insertions(+), 4 deletions(-) diff --git a/.testcoverage.yml b/.testcoverage.yml index cc2feee..38714f2 100644 --- a/.testcoverage.yml +++ b/.testcoverage.yml @@ -3,4 +3,5 @@ exclude: - ^controller/testSupport # exclude test support files - mocks # exclude generated mock files - ^test/openfga + - logger/testlogger diff --git a/config/config.go b/config/config.go index 78e9fda..9a2efc1 100644 --- a/config/config.go +++ b/config/config.go @@ -212,6 +212,17 @@ func BindConfigToFlags(v *viper.Viper, cmd *cobra.Command, config any) error { return nil } +// GenerateFlagSet is exported for testing. +func GenerateFlagSet(config any) (*pflag.FlagSet, error) { + return generateFlagSet(config) +} + +// UnmarshalIntoStruct is exported for testing. +func UnmarshalIntoStruct(v *viper.Viper, cfg any) func() { + return unmarshalIntoStruct(v, cfg) +} + +// unmarshalIntoStruct returns a function that unmarshals viper config into cfg and panics on error. 
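// A rough usage sketch for the exported test hooks (GenerateFlagSet, UnmarshalIntoStruct)
// declared above; the config struct, key, and values here are hypothetical and only
// illustrate the intended call pattern:
//
//	v := viper.New()
//	v.Set("some-key", "some-value")
//	cfg := struct {
//		SomeKey string `mapstructure:"some-key"`
//	}{}
//	config.UnmarshalIntoStruct(v, &cfg)()  // the returned closure panics if Unmarshal fails
//	_, err := config.GenerateFlagSet(&cfg) // errors for inputs it cannot map to flags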
func unmarshalIntoStruct(v *viper.Viper, cfg any) func() { return func() { if err := v.Unmarshal(cfg); err != nil { diff --git a/config/config_test.go b/config/config_test.go index 2586946..e5288a1 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -168,3 +168,34 @@ func TestNewDefaultConfig(t *testing.T) { err = v.Unmarshal(&config.CommonServiceConfig{}) assert.NoError(t, err) } + +func TestGenerateFlagSetUnsupportedType(t *testing.T) { + type test struct { + UnsupportedField []string `mapstructure:"unsupported-field"` + } + testStruct := test{} + err := config.BindConfigToFlags(viper.New(), &cobra.Command{}, &testStruct) + assert.Error(t, err) +} + +func TestGenerateFlagSetInvalidStruct(t *testing.T) { + notStruct := 123 + _, err := config.GenerateFlagSet(notStruct) + assert.Error(t, err) +} + +func TestUnmarshalIntoStructPanic(t *testing.T) { + v := viper.New() + cfg := struct { + InvalidField int `mapstructure:"invalid-field"` + }{} + // Set a string value to cause unmarshal error for int field + v.Set("invalid-field", "not-an-int") + defer func() { + if r := recover(); r == nil { + t.Errorf("expected panic but did not panic") + } + }() + unmarshal := config.UnmarshalIntoStruct(v, &cfg) + unmarshal() +} diff --git a/controller/lifecycle/lifecycle_test.go b/controller/lifecycle/lifecycle_test.go index c175c52..8208d4e 100644 --- a/controller/lifecycle/lifecycle_test.go +++ b/controller/lifecycle/lifecycle_test.go @@ -1327,3 +1327,107 @@ func TestUpdateStatus(t *testing.T) { assert.Equal(t, "status field not found in current object", err.Error()) }) } + +func TestAddFinalizersIfNeeded(t *testing.T) { + instance := &pmtesting.TestApiObject{ObjectMeta: metav1.ObjectMeta{Name: "instance1"}} + fakeClient := pmtesting.CreateFakeClient(t, instance) + sub := pmtesting.FinalizerSubroutine{Client: fakeClient} + // Should add finalizer + err := AddFinalizersIfNeeded(context.Background(), fakeClient, instance, []subroutine.Subroutine{sub}, false) + assert.NoError(t, err) + assert.Contains(t, instance.Finalizers, pmtesting.SubroutineFinalizer) + + // Should not add if readonly + instance2 := &pmtesting.TestApiObject{} + err = AddFinalizersIfNeeded(context.Background(), fakeClient, instance2, []subroutine.Subroutine{sub}, true) + assert.NoError(t, err) + assert.NotContains(t, instance2.Finalizers, pmtesting.SubroutineFinalizer) + + // Should not add if deletion timestamp is set + now := metav1.Now() + instance3 := &pmtesting.TestApiObject{ObjectMeta: metav1.ObjectMeta{DeletionTimestamp: &now}} + err = AddFinalizersIfNeeded(context.Background(), fakeClient, instance3, []subroutine.Subroutine{sub}, false) + assert.NoError(t, err) +} + +func TestAddFinalizerIfNeeded(t *testing.T) { + instance := &pmtesting.TestApiObject{} + sub := pmtesting.FinalizerSubroutine{} + // Should add and return true + added := AddFinalizerIfNeeded(instance, sub) + assert.True(t, added) + // Should not add again + added = AddFinalizerIfNeeded(instance, sub) + assert.False(t, added) +} + +func TestRemoveFinalizerIfNeeded(t *testing.T) { + instance := &pmtesting.TestApiObject{ObjectMeta: metav1.ObjectMeta{Name: "instance1"}} + sub := pmtesting.FinalizerSubroutine{} + AddFinalizerIfNeeded(instance, sub) + fakeClient := pmtesting.CreateFakeClient(t, instance) + // Should remove finalizer if not readonly and RequeueAfter == 0 + res := ctrl.Result{} + err := removeFinalizerIfNeeded(context.Background(), instance, sub, res, false, fakeClient) + assert.Nil(t, err) + assert.NotContains(t, instance.Finalizers, 
pmtesting.SubroutineFinalizer) + + // Should not remove if readonly + AddFinalizerIfNeeded(instance, sub) + err = removeFinalizerIfNeeded(context.Background(), instance, sub, res, true, fakeClient) + assert.Nil(t, err) + assert.Contains(t, instance.Finalizers, pmtesting.SubroutineFinalizer) + + // Should not remove if RequeueAfter > 0 + res = ctrl.Result{RequeueAfter: 1} + err = removeFinalizerIfNeeded(context.Background(), instance, sub, res, false, fakeClient) + assert.Nil(t, err) +} + +func TestContainsFinalizer(t *testing.T) { + instance := &pmtesting.TestApiObject{} + sub := pmtesting.FinalizerSubroutine{} + assert.False(t, containsFinalizer(instance, sub.Finalizers())) + AddFinalizerIfNeeded(instance, sub) + assert.True(t, containsFinalizer(instance, sub.Finalizers())) +} + +func TestMarkResourceAsFinal(t *testing.T) { + instance := &pmtesting.ImplementingSpreadReconciles{} + logcfg := logger.DefaultConfig() + logcfg.NoJSON = true + log, _ := logger.New(logcfg) + conds := []metav1.Condition{} + mgr := &pmtesting.TestLifecycleManager{Logger: log} + MarkResourceAsFinal(instance, log, conds, metav1.ConditionTrue, mgr) + assert.Equal(t, instance.Status.ObservedGeneration, instance.Generation) +} + +func TestHandleClientError(t *testing.T) { + log := testlogger.New().Logger + result, err := HandleClientError("msg", log, fmt.Errorf("err"), true, sentry.Tags{}) + assert.Error(t, err) + assert.Equal(t, ctrl.Result{}, result) +} + +func TestHandleOperatorError(t *testing.T) { + log := testlogger.New().Logger + opErr := operrors.NewOperatorError(fmt.Errorf("err"), false, false) + result, err := HandleOperatorError(context.Background(), opErr, "msg", true, log) + assert.Nil(t, err) + assert.Equal(t, ctrl.Result{}, result) + + ctx := sentry.ContextWithSentryTags(context.Background(), sentry.Tags{"test": "tag"}) + opErr = operrors.NewOperatorError(fmt.Errorf("err"), true, true) + result, err = HandleOperatorError(ctx, opErr, "msg", true, log) + assert.Error(t, err) + assert.Equal(t, ctrl.Result{}, result) +} + +func TestValidateInterfaces(t *testing.T) { + log := testlogger.New().Logger + instance := &pmtesting.ImplementingSpreadReconciles{} + mgr := &pmtesting.TestLifecycleManager{Logger: log} + err := ValidateInterfaces(instance, log, mgr) + assert.NoError(t, err) +} diff --git a/traces/otel.go b/traces/otel.go index 6af1afe..c70b214 100644 --- a/traces/otel.go +++ b/traces/otel.go @@ -30,19 +30,27 @@ type Config struct { CollectorEndpoint string `mapstructure:"tracing-config-collector-endpoint" description:"Set the tracing collector endpoint used to send traces to the collector"` } +// --- Wrappers for patching (default to real functions) --- +var ( + resourceNewFunc = resource.New + grpcNewClientFunc = grpc.NewClient + otlptracegrpcNewFunc = otlptracegrpc.New // type: func(ctx context.Context, opts ...otlptracegrpc.Option) (*otlptrace.Exporter, error) + stdouttraceNewFunc = stdouttrace.New // type: func(opts ...stdouttrace.Option) (*stdouttrace.Exporter, error) +) + // InitProvider creates an OpenTelemetry provider for the concrete service. // If the collector in the destination endpoint isn't reachable, then the init function will return an error. 
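// A minimal call sketch; the endpoint and service metadata below are illustrative values,
// not taken from any real deployment:
//
//	shutdown, err := traces.InitProvider(ctx, traces.Config{
//		ServiceName:       "my-service",
//		ServiceVersion:    "0.1.0",
//		CollectorEndpoint: "localhost:4317",
//	})
//	if err != nil {
//		// the gRPC client or the OTLP trace exporter could not be created
//	}
//	defer func() { _ = shutdown(ctx) }()
//
// Tests can temporarily swap the function variables above (for example grpcNewClientFunc
// or otlptracegrpcNewFunc) to force these error paths, restoring the originals afterwards.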
func InitProvider(ctx context.Context, config Config) (func(ctx context.Context) error, error) { connCtx, cancel := context.WithTimeout(ctx, time.Second) defer cancel() - client, err := grpc.NewClient(config.CollectorEndpoint, + client, err := grpcNewClientFunc(config.CollectorEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err != nil { return nil, fmt.Errorf("failed to create gRPC connection to collector: %w", err) } - traceExporter, err := otlptracegrpc.New(connCtx, otlptracegrpc.WithGRPCConn(client)) + traceExporter, err := otlptracegrpcNewFunc(connCtx, otlptracegrpc.WithGRPCConn(client)) if err != nil { return nil, fmt.Errorf("failed to create trace exporter: %w", err) } @@ -58,7 +66,7 @@ func InitLocalProvider(ctx context.Context, config Config, exportToConsole bool) fileTarget = os.Stdout } - traceExporter, err := stdouttrace.New( + traceExporter, err := stdouttraceNewFunc( stdouttrace.WithWriter(fileTarget), stdouttrace.WithPrettyPrint(), ) @@ -70,7 +78,7 @@ func InitLocalProvider(ctx context.Context, config Config, exportToConsole bool) } func (c Config) initProvider(ctx context.Context, exporter sdkTrace.SpanExporter) (func(ctx context.Context) error, error) { - res, err := resource.New(ctx, + res, err := resourceNewFunc(ctx, resource.WithAttributes( semconv.ServiceName(c.ServiceName), semconv.ServiceVersion(c.ServiceVersion), diff --git a/traces/otel_test.go b/traces/otel_test.go index 19266b4..ef625ce 100644 --- a/traces/otel_test.go +++ b/traces/otel_test.go @@ -2,11 +2,17 @@ package traces import ( "context" + "errors" "net" "testing" "time" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc" + "go.opentelemetry.io/otel/exporters/stdout/stdouttrace" + "go.opentelemetry.io/otel/sdk/resource" sdkTrace "go.opentelemetry.io/otel/sdk/trace" + semconv "go.opentelemetry.io/otel/semconv/v1.34.0" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" "google.golang.org/grpc" ) @@ -15,6 +21,27 @@ type mockExporter struct { sdkTrace.SpanExporter } +// --- Patchable versions of tested functions --- +func initProviderWithWrappers(ctx context.Context, c Config, exporter sdkTrace.SpanExporter) (func(ctx context.Context) error, error) { + res, err := resourceNewFunc(ctx, + resource.WithAttributes( + // ...existing code... 
+ semconv.ServiceName(c.ServiceName), + semconv.ServiceVersion(c.ServiceVersion), + ), + ) + if err != nil { + return nil, err + } + bsp := sdkTrace.NewBatchSpanProcessor(exporter) + tracerProvider := sdkTrace.NewTracerProvider( + sdkTrace.WithSampler(sdkTrace.AlwaysSample()), + sdkTrace.WithResource(res), + sdkTrace.WithSpanProcessor(bsp), + ) + return tracerProvider.Shutdown, nil +} + func TestConfig_initProvider_Success(t *testing.T) { ctx := context.Background() cfg := Config{ @@ -31,6 +58,26 @@ func TestConfig_initProvider_Success(t *testing.T) { } } +func TestConfig_initProvider_ResourceError(t *testing.T) { + cfg := Config{ + ServiceName: "test-service", + ServiceVersion: "1.0.0", + } + exporter := &mockExporter{} + ctx := context.Background() + + orig := resourceNewFunc + resourceNewFunc = func(ctx context.Context, opts ...resource.Option) (*resource.Resource, error) { + return nil, errors.New("resource error") + } + defer func() { resourceNewFunc = orig }() + + shutdown, err := initProviderWithWrappers(ctx, cfg, exporter) + if err == nil || shutdown != nil { + t.Error("expected error and nil shutdown when resource.New fails") + } +} + func TestInitProvider_HappyPath(t *testing.T) { // Start a dummy gRPC server to simulate the OTLP collector lis, err := net.Listen("tcp", "127.0.0.1:0") @@ -68,6 +115,49 @@ func TestInitProvider_HappyPath(t *testing.T) { } } +func TestInitProvider_GRPCClientError(t *testing.T) { + cfg := Config{ + ServiceName: "fail-service", + ServiceVersion: "1.0.0", + CollectorEndpoint: "invalid:endpoint", + } + + orig := grpcNewClientFunc + grpcNewClientFunc = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + return nil, errors.New("grpc client error") + } + defer func() { grpcNewClientFunc = orig }() + + // Patch InitProvider to use grpcNewClientFunc + client, err := grpcNewClientFunc(cfg.CollectorEndpoint, grpc.WithInsecure()) + if err == nil || client != nil { + t.Error("expected error and nil client when grpc.NewClient fails") + } +} + +func TestInitProvider_TraceExporterError(t *testing.T) { + ctx := context.Background() + + origClient := grpcNewClientFunc + grpcNewClientFunc = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + return &grpc.ClientConn{}, nil + } + defer func() { grpcNewClientFunc = origClient }() + + origOtlp := otlptracegrpcNewFunc + otlptracegrpcNewFunc = func(ctx context.Context, opts ...otlptracegrpc.Option) (*otlptrace.Exporter, error) { + return nil, errors.New("trace exporter error") + } + defer func() { otlptracegrpcNewFunc = origOtlp }() + + connCtx, cancel := context.WithTimeout(ctx, time.Second) + defer cancel() + traceExporter, err := otlptracegrpcNewFunc(connCtx, otlptracegrpc.WithGRPCConn(&grpc.ClientConn{})) + if err == nil || traceExporter != nil { + t.Error("expected error and nil exporter when otlptracegrpc.New fails") + } +} + func TestInitLocalProvider(t *testing.T) { ctx := context.Background() cfg := Config{ @@ -94,6 +184,104 @@ func TestInitLocalProvider(t *testing.T) { } } +func TestInitLocalProvider_ExporterError(t *testing.T) { + orig := stdouttraceNewFunc + stdouttraceNewFunc = func(opts ...stdouttrace.Option) (*stdouttrace.Exporter, error) { + return nil, errors.New("stdout exporter error") + } + defer func() { stdouttraceNewFunc = orig }() + + traceExporter, err := stdouttraceNewFunc() + if err == nil || traceExporter != nil { + t.Error("expected error and nil exporter when stdouttrace.New fails") + } +} + +func TestInitProvider_Error_GRPCClient(t *testing.T) { + 
ctx := context.Background() + cfg := Config{ + ServiceName: "err-service", + ServiceVersion: "1.0.0", + CollectorEndpoint: "bad:endpoint", + } + + orig := grpcNewClientFunc + grpcNewClientFunc = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + return nil, errors.New("grpc client error") + } + defer func() { grpcNewClientFunc = orig }() + + shutdown, err := InitProvider(ctx, cfg) + if err == nil || shutdown != nil { + t.Error("expected error and nil shutdown when grpc client fails") + } +} + +func TestInitProvider_Error_TraceExporter(t *testing.T) { + ctx := context.Background() + cfg := Config{ + ServiceName: "err-service", + ServiceVersion: "1.0.0", + CollectorEndpoint: "localhost:4317", + } + + origClient := grpcNewClientFunc + grpcNewClientFunc = func(target string, opts ...grpc.DialOption) (*grpc.ClientConn, error) { + return &grpc.ClientConn{}, nil + } + defer func() { grpcNewClientFunc = origClient }() + + origOtlp := otlptracegrpcNewFunc + otlptracegrpcNewFunc = func(ctx context.Context, opts ...otlptracegrpc.Option) (*otlptrace.Exporter, error) { + return nil, errors.New("trace exporter error") + } + defer func() { otlptracegrpcNewFunc = origOtlp }() + + shutdown, err := InitProvider(ctx, cfg) + if err == nil || shutdown != nil { + t.Error("expected error and nil shutdown when trace exporter fails") + } +} + +func TestInitLocalProvider_Error_Exporter(t *testing.T) { + ctx := context.Background() + cfg := Config{ + ServiceName: "err-local", + ServiceVersion: "0.0.1", + } + + orig := stdouttraceNewFunc + stdouttraceNewFunc = func(opts ...stdouttrace.Option) (*stdouttrace.Exporter, error) { + return nil, errors.New("stdout exporter error") + } + defer func() { stdouttraceNewFunc = orig }() + + shutdown, err := InitLocalProvider(ctx, cfg, true) + if err == nil || shutdown != nil { + t.Error("expected error and nil shutdown when stdouttrace.New fails") + } +} + +func TestConfig_initProvider_Error_Resource(t *testing.T) { + ctx := context.Background() + cfg := Config{ + ServiceName: "err-resource", + ServiceVersion: "0.0.2", + } + exporter := &mockExporter{} + + orig := resourceNewFunc + resourceNewFunc = func(ctx context.Context, opts ...resource.Option) (*resource.Resource, error) { + return nil, errors.New("resource error") + } + defer func() { resourceNewFunc = orig }() + + shutdown, err := cfg.initProvider(ctx, exporter) + if err == nil || shutdown != nil { + t.Error("expected error and nil shutdown when resource.New fails") + } +} + // dummyTraceServer implements the OTLP TraceServiceServer interface with no-op methods. 
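// A wiring sketch for such a stub collector; lis stands for the local listener created in
// the happy-path test above, and the exact setup in the elided lines of that test may differ:
//
//	srv := grpc.NewServer()
//	collectortrace.RegisterTraceServiceServer(srv, &dummyTraceServer{})
//	go func() { _ = srv.Serve(lis) }()
//	defer srv.Stop()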
type dummyTraceServer struct { collectortrace.UnimplementedTraceServiceServer From 918b63859661d8d80bd1b482983d85a7a10d6b09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 13:00:52 +0200 Subject: [PATCH 09/16] refactor: lint warnings --- .../lifecycle/controllerruntime/lifecycle.go | 2 +- controller/lifecycle/lifecycle.go | 1 - controller/lifecycle/lifecycle_test.go | 420 ------------------ traces/otel_test.go | 3 +- 4 files changed, 3 insertions(+), 423 deletions(-) diff --git a/controller/lifecycle/controllerruntime/lifecycle.go b/controller/lifecycle/controllerruntime/lifecycle.go index 83d1928..f06952d 100644 --- a/controller/lifecycle/controllerruntime/lifecycle.go +++ b/controller/lifecycle/controllerruntime/lifecycle.go @@ -64,7 +64,7 @@ func (l *LifecycleManager) ConditionsManager() api.ConditionManager { return l.conditionsManager } func (l *LifecycleManager) Spreader() api.SpreadManager { - // it is important to return nil unsted of a nil pointer to the interface to avoid misbehaving nil checks + // it is important to return nil instead of a nil pointer to the interface to avoid misbehaving nil checks if l.spreader == nil { return nil } diff --git a/controller/lifecycle/lifecycle.go b/controller/lifecycle/lifecycle.go index 9c071f7..4e852c9 100644 --- a/controller/lifecycle/lifecycle.go +++ b/controller/lifecycle/lifecycle.go @@ -370,7 +370,6 @@ func ValidateInterfaces(instance runtimeobject.RuntimeObject, log *logger.Logger } } if l.ConditionsManager() != nil { - util.ToInterface[api.RuntimeObjectConditions](instance, log) _, err := util.ToInterface[api.RuntimeObjectConditions](instance, log) if err != nil { return err diff --git a/controller/lifecycle/lifecycle_test.go b/controller/lifecycle/lifecycle_test.go index 8208d4e..6069541 100644 --- a/controller/lifecycle/lifecycle_test.go +++ b/controller/lifecycle/lifecycle_test.go @@ -566,7 +566,6 @@ func TestLifecycle(t *testing.T) { assert.Equal(t, testErr, err) assert.Equal(t, ctrl.Result{}, result) }) - t.Run("Lifecycle with manage conditions reconciles w/o subroutines", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ @@ -594,7 +593,6 @@ func TestLifecycle(t *testing.T) { assert.Equal(t, instance.Status.Conditions[0].Status, metav1.ConditionTrue) assert.Equal(t, instance.Status.Conditions[0].Message, "The resource is ready") }) - t.Run("Lifecycle with manage conditions reconciles with subroutine", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ @@ -629,193 +627,6 @@ func TestLifecycle(t *testing.T) { assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) }) - - //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 
3) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - // assert.Equal(t, "test", instance.Status.Conditions[2].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - // assert.Equal(t, "test", instance.Status.Conditions[2].Message) - // - //}) - // - //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 3) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - // assert.Equal(t, "test", instance.Status.Conditions[2].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - // assert.Equal(t, "test", instance.Status.Conditions[2].Message) - // - //}) - // - //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions (update)", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Conditions: []metav1.Condition{ - // { - // Type: "test", - // Status: metav1.ConditionFalse, - // Reason: "test", - // Message: "test", - // }, - // }, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 3) - // assert.Equal(t, "test", instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "test", instance.Status.Conditions[0].Message) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[1].Type) - // assert.Equal(t, 
metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[1].Message) - // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[2].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[2].Message) - // - //}) - // - //t.Run("Lifecycle with manage conditions reconciles with subroutine that adds a condition with preexisting conditions", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Conditions: []metav1.Condition{ - // { - // Type: conditions.ConditionReady, - // Status: metav1.ConditionTrue, - // Message: "The resource is ready!!", - // Reason: conditions.ConditionReady, - // }, - // }, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 3) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "addCondition_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) - // assert.Equal(t, "test", instance.Status.Conditions[2].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[2].Status) - // assert.Equal(t, "test", instance.Status.Conditions[2].Message) - // - //}) - // - //t.Run("Lifecycle w/o manage conditions reconciles with subroutine that adds a condition", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.AddConditionSubroutine{Ready: metav1.ConditionTrue}}, fakeClient) - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // require.Len(t, instance.Status.Conditions, 1) - // assert.Equal(t, "test", instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[0].Status) - // assert.Equal(t, "test", instance.Status.Conditions[0].Message) - // - //}) - // t.Run("Lifecycle with manage conditions reconciles with subroutine failing Status update", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ @@ -850,7 +661,6 @@ func TestLifecycle(t *testing.T) { assert.Equal(t, metav1.ConditionTrue, instance.Status.Conditions[1].Status) assert.Equal(t, "The subroutine is complete", instance.Status.Conditions[1].Message) }) - t.Run("Lifecycle 
with manage conditions finalizes with multiple subroutines partially succeeding", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ @@ -879,39 +689,6 @@ func TestLifecycle(t *testing.T) { assert.Error(t, err) require.Len(t, instance.Status.Conditions, 3) }) - - //t.Run("Lifecycle with manage conditions reconciles with ReqeueAfter subroutine", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{}, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.FailureScenarioSubroutine{RequeAfter: true}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.NoError(t, err) - // assert.Len(t, instance.Status.Conditions, 2) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, metav1.ConditionFalse, instance.Status.Conditions[0].Status) - // assert.Equal(t, "The resource is not ready", instance.Status.Conditions[0].Message) - // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, metav1.ConditionUnknown, instance.Status.Conditions[1].Status) - // assert.Equal(t, "The subroutine is processing", instance.Status.Conditions[1].Message) - //}) - // t.Run("Lifecycle with manage conditions reconciles with Error subroutine (no-retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ @@ -938,7 +715,6 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Len(t, instance.Status.Conditions, 2) }) - t.Run("Lifecycle with manage conditions reconciles with Error subroutine (retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditions{ @@ -965,101 +741,6 @@ func TestLifecycle(t *testing.T) { assert.Error(t, err) assert.Len(t, instance.Status.Conditions, 2) }) - // - //t.Run("Lifecycle with manage conditions not implementing the interface", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.NotImplementingSpreadReconciles{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{ - // pmtesting.ChangeStatusSubroutine{ - // Client: fakeClient, - // }, - // }, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // // So the validation is already happening in SetupWithManager. So we can panic in the reconcile. 
- // assert.Panics(t, func() { - // _, _ = mgr.Reconcile(ctx, request, instance) - // }) - //}) - // - //t.Run("Lifecycle with manage conditions failing finalize", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // Finalizers: []string{pmtesting.FailureScenarioSubroutineFinalizer}, - // DeletionTimestamp: &metav1.Time{Time: time.Now()}, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{}}, fakeClient) - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.Error(t, err) - // assert.Equal(t, "FailureScenarioSubroutine", err.Error()) - //}) - // - //t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (retry)", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ - // TestApiObject: pmtesting.TestApiObject{ - // ObjectMeta: metav1.ObjectMeta{ - // Name: name, - // Namespace: namespace, - // Generation: 1, - // }, - // Status: pmtesting.TestStatus{ - // Some: "string", - // ObservedGeneration: 0, - // }, - // }, - // } - // - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // mgr, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.FailureScenarioSubroutine{Retry: true, RequeAfter: false}}, fakeClient) - // mgr.WithSpreadingReconciles() - // mgr.WithConditionManagement() - // - // // Act - // _, err := mgr.Reconcile(ctx, request, instance) - // - // assert.Error(t, err) - // assert.Len(t, instance.Status.Conditions, 2) - // assert.Equal(t, conditions.ConditionReady, instance.Status.Conditions[0].Type) - // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[0].Status)) - // assert.Equal(t, "FailureScenarioSubroutine_Ready", instance.Status.Conditions[1].Type) - // assert.Equal(t, string(v1.ConditionFalse), string(instance.Status.Conditions[1].Status)) - // assert.Equal(t, int64(0), instance.Status.ObservedGeneration) - //}) - // t.Run("Lifecycle with spread reconciles and manage conditions and processing fails (no-retry)", func(t *testing.T) { // Arrange instance := &pmtesting.ImplementConditionsAndSpreadReconciles{ @@ -1090,106 +771,7 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Len(t, instance.Status.Conditions, 2) }) - // - //t.Run("Test Lifecycle setupWithManager /w conditions and expecting no error", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithConditionManagement() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler1", instance, "test", tr, log.Logger) - // - // // Assert - // assert.NoError(t, err) - //}) - // - //t.Run("Test Lifecycle setupWithManager /w conditions and expecting error", func(t *testing.T) { - // // Arrange - // instance := 
&pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithConditionManagement() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler2", instance, "test", tr, log.Logger) - // - // // Assert - // assert.Error(t, err) - //}) - // - //t.Run("Test Lifecycle setupWithManager /w spread and expecting no error", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithSpreadingReconciles() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler3", instance, "test", tr, log.Logger) - // - // // Assert - // assert.NoError(t, err) - //}) - // - //t.Run("Test Lifecycle setupWithManager /w spread and expecting a error", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.NotImplementingSpreadReconciles{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) - // assert.NoError(t, err) - // - // lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // lm = lm.WithSpreadingReconciles() - // tr := &testReconciler{lifecycleManager: lm} - // - // // Act - // err = lm.SetupWithManager(m, 0, "testReconciler", instance, "test", tr, log.Logger) - // - // // Assert - // assert.Error(t, err) - //}) - // errorMessage := "oh nose" - //t.Run("handleOperatorError", func(t *testing.T) { - // t.Run("Should handle an operator error with retry and sentry", func(t *testing.T) { - // // Arrange - // instance := &pmtesting.ImplementConditions{} - // fakeClient := pmtesting.CreateFakeClient(t, instance) - // - // _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - // ctx = sentry.ContextWithSentryTags(ctx, map[string]string{}) - // - // // Act - // result, err := lifecycle.HandleOperatorError(ctx, operrors.NewOperatorError(goerrors.New(errorMessage), true, true), "handle op error", true, log.Logger) - // - // // Assert - // assert.Error(t, err) - // assert.NotNil(t, result) - // assert.Equal(t, errorMessage, err.Error()) - // - // errorMessages, err := log.GetErrorMessages() - // assert.NoError(t, err) - // assert.Equal(t, errorMessage, *errorMessages[0].Error) - // }) - // t.Run("Should handle an operator error without retry", func(t *testing.T) { // Arrange testLog := testlogger.New() @@ -1205,8 +787,6 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) assert.Equal(t, errorMessage, *errorMessages[0].Error) }) - - // t.Run("Prepare Context", func(t *testing.T) { t.Run("Sets a context that can be used in the subroutine", func(t *testing.T) { // Arrange diff --git a/traces/otel_test.go b/traces/otel_test.go index ef625ce..f073ea6 100644 --- a/traces/otel_test.go +++ b/traces/otel_test.go @@ -15,6 +15,7 @@ import ( semconv "go.opentelemetry.io/otel/semconv/v1.34.0" collectortrace "go.opentelemetry.io/proto/otlp/collector/trace/v1" "google.golang.org/grpc" + 
"google.golang.org/grpc/credentials/insecure" ) type mockExporter struct { @@ -129,7 +130,7 @@ func TestInitProvider_GRPCClientError(t *testing.T) { defer func() { grpcNewClientFunc = orig }() // Patch InitProvider to use grpcNewClientFunc - client, err := grpcNewClientFunc(cfg.CollectorEndpoint, grpc.WithInsecure()) + client, err := grpcNewClientFunc(cfg.CollectorEndpoint, grpc.WithTransportCredentials(insecure.NewCredentials())) if err == nil || client != nil { t.Error("expected error and nil client when grpc.NewClient fails") } From 726af593d66752a0f4963db96e15258b125c0c47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 13:03:08 +0200 Subject: [PATCH 10/16] refactor: remove unused exported functions from config --- config/config.go | 12 +----------- config/config_test.go | 22 ---------------------- 2 files changed, 1 insertion(+), 33 deletions(-) diff --git a/config/config.go b/config/config.go index 9a2efc1..5260f4b 100644 --- a/config/config.go +++ b/config/config.go @@ -212,17 +212,7 @@ func BindConfigToFlags(v *viper.Viper, cmd *cobra.Command, config any) error { return nil } -// GenerateFlagSet is exported for testing. -func GenerateFlagSet(config any) (*pflag.FlagSet, error) { - return generateFlagSet(config) -} - -// UnmarshalIntoStruct is exported for testing. -func UnmarshalIntoStruct(v *viper.Viper, cfg any) func() { - return unmarshalIntoStruct(v, cfg) -} - -// unmarshalIntoStruct returns a function that unmarshals viper config into cfg and panics on error. +// unmarshalIntoStruct returns a function that unmarshal viper config into cfg and panics on error. func unmarshalIntoStruct(v *viper.Viper, cfg any) func() { return func() { if err := v.Unmarshal(cfg); err != nil { diff --git a/config/config_test.go b/config/config_test.go index e5288a1..86812c3 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -177,25 +177,3 @@ func TestGenerateFlagSetUnsupportedType(t *testing.T) { err := config.BindConfigToFlags(viper.New(), &cobra.Command{}, &testStruct) assert.Error(t, err) } - -func TestGenerateFlagSetInvalidStruct(t *testing.T) { - notStruct := 123 - _, err := config.GenerateFlagSet(notStruct) - assert.Error(t, err) -} - -func TestUnmarshalIntoStructPanic(t *testing.T) { - v := viper.New() - cfg := struct { - InvalidField int `mapstructure:"invalid-field"` - }{} - // Set a string value to cause unmarshal error for int field - v.Set("invalid-field", "not-an-int") - defer func() { - if r := recover(); r == nil { - t.Errorf("expected panic but did not panic") - } - }() - unmarshal := config.UnmarshalIntoStruct(v, &cfg) - unmarshal() -} From 106c1e611cf52442421e63e1fabc03c456ec3b47 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 13:12:22 +0200 Subject: [PATCH 11/16] refactor: improve error logging and clean up imports --- .../lifecycle/controllerruntime/lifecycle_test.go | 8 ++++---- controller/lifecycle/lifecycle.go | 6 ++---- controller/lifecycle/util/convert.go | 2 +- go.mod | 12 ++---------- go.sum | 12 ++++++------ 5 files changed, 15 insertions(+), 25 deletions(-) diff --git a/controller/lifecycle/controllerruntime/lifecycle_test.go b/controller/lifecycle/controllerruntime/lifecycle_test.go index 501c958..e950c15 100644 --- a/controller/lifecycle/controllerruntime/lifecycle_test.go +++ b/controller/lifecycle/controllerruntime/lifecycle_test.go @@ -16,7 +16,7 @@ import ( "github.com/platform-mesh/golang-commons/controller/lifecycle/runtimeobject" 
"github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport" - operrors "github.com/platform-mesh/golang-commons/errors" + "github.com/platform-mesh/golang-commons/errors" "github.com/platform-mesh/golang-commons/logger/testlogger" ) @@ -92,7 +92,7 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, testApiObject) lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) - lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { + lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, errors.OperatorError) { return context.WithValue(ctx, pmtesting.ContextValueKey, "valueFromContext"), nil }) tr := &testReconciler{lifecycleManager: lm} @@ -115,8 +115,8 @@ func TestLifecycle(t *testing.T) { fakeClient := pmtesting.CreateFakeClient(t, testApiObject) lm, _ := createLifecycleManager([]subroutine.Subroutine{pmtesting.ContextValueSubroutine{}}, fakeClient) - lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, operrors.OperatorError) { - return nil, operrors.NewOperatorError(goerrors.New(errorMessage), true, false) + lm = lm.WithPrepareContextFunc(func(ctx context.Context, instance runtimeobject.RuntimeObject) (context.Context, errors.OperatorError) { + return nil, errors.NewOperatorError(goerrors.New(errorMessage), true, false) }) tr := &testReconciler{lifecycleManager: lm} result, err := tr.Reconcile(ctx, controllerruntime.Request{NamespacedName: types.NamespacedName{Name: name, Namespace: namespace}}) diff --git a/controller/lifecycle/lifecycle.go b/controller/lifecycle/lifecycle.go index 4e852c9..aa857bd 100644 --- a/controller/lifecycle/lifecycle.go +++ b/controller/lifecycle/lifecycle.go @@ -41,8 +41,7 @@ func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtime log.Info().Msg("start reconcile") - nn := types.NamespacedName{Namespace: nName.Namespace, Name: nName.Name} - err := cl.Get(ctx, nn, instance) + err := cl.Get(ctx, nName, instance) if err != nil { if kerrors.IsNotFound(err) { log.Info().Msg("instance not found. 
It was likely deleted") @@ -71,8 +70,7 @@ func Reconcile(ctx context.Context, nName types.NamespacedName, instance runtime var condArr []v1.Condition if l.ConditionsManager() != nil { - roc := util.MustToInterface[api.RuntimeObjectConditions](instance, log) - condArr = roc.GetConditions() + condArr = util.MustToInterface[api.RuntimeObjectConditions](instance, log).GetConditions() l.ConditionsManager().SetInstanceConditionUnknownIfNotSet(&condArr) } diff --git a/controller/lifecycle/util/convert.go b/controller/lifecycle/util/convert.go index 3894eed..1ac03d4 100644 --- a/controller/lifecycle/util/convert.go +++ b/controller/lifecycle/util/convert.go @@ -25,6 +25,6 @@ func MustToInterface[T any](instance any, log *logger.Logger) T { if err == nil { return obj } - log.Panic().Err(err).Msg("Failed to cast instance to RuntimeObjectSpreadReconcileStatus") + log.Panic().Err(err).Msg("Failed to cast instance to target interface") panic(err) } diff --git a/go.mod b/go.mod index de27034..3aadd78 100644 --- a/go.mod +++ b/go.mod @@ -2,13 +2,7 @@ module github.com/platform-mesh/golang-commons go 1.24.3 -replace ( - k8s.io/api => k8s.io/api v0.32.3 - k8s.io/apimachinery => k8s.io/apimachinery v0.32.3 - k8s.io/client-go => k8s.io/client-go v0.32.3 - k8s.io/utils => k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 - sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.20.4 -) +replace sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.20.4 require ( github.com/99designs/gqlgen v0.17.76 @@ -34,6 +28,7 @@ require ( github.com/vektah/gqlparser/v2 v2.5.30 go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.60.0 go.opentelemetry.io/otel v1.37.0 + go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.35.0 go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.37.0 go.opentelemetry.io/otel/sdk v1.37.0 @@ -71,12 +66,10 @@ require ( github.com/go-openapi/swag v0.23.0 // indirect github.com/go-viper/mapstructure/v2 v2.3.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.4 // indirect github.com/google/btree v1.1.3 // indirect github.com/google/cel-go v0.25.0 // indirect github.com/google/gnostic-models v0.6.9 // indirect github.com/google/go-cmp v0.7.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware v1.4.0 // indirect github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.3.2 // indirect github.com/grpc-ecosystem/grpc-gateway/v2 v2.27.1 // indirect @@ -112,7 +105,6 @@ require ( github.com/x448/float16 v0.8.4 // indirect github.com/zeebo/xxh3 v1.0.2 // indirect go.opentelemetry.io/auto/sdk v1.1.0 // indirect - go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.37.0 // indirect go.opentelemetry.io/otel/metric v1.37.0 // indirect go.opentelemetry.io/otel/trace v1.37.0 // indirect go.uber.org/mock v0.5.2 // indirect diff --git a/go.sum b/go.sum index 58d0566..8a51adb 100644 --- a/go.sum +++ b/go.sum @@ -453,14 +453,14 @@ gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= -k8s.io/api v0.32.3 h1:Hw7KqxRusq+6QSplE3NYG4MBxZw1BZnq4aP4cJVINls= -k8s.io/api v0.32.3/go.mod 
h1:2wEDTXADtm/HA7CCMD8D8bK4yuBUptzaRhYcYEEYA3k= +k8s.io/api v0.33.0 h1:yTgZVn1XEe6opVpP1FylmNrIFWuDqe2H0V8CT5gxfIU= +k8s.io/api v0.33.0/go.mod h1:CTO61ECK/KU7haa3qq8sarQ0biLq2ju405IZAd9zsiM= k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9IimOs= k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= -k8s.io/apimachinery v0.32.3 h1:JmDuDarhDmA/Li7j3aPrwhpNBA94Nvk5zLeOge9HH1U= -k8s.io/apimachinery v0.32.3/go.mod h1:GpHVgxoKlTxClKcteaeuF1Ul/lDVb74KpZcxcmLDElE= -k8s.io/client-go v0.32.3 h1:RKPVltzopkSgHS7aS98QdscAgtgah/+zmpAogooIqVU= -k8s.io/client-go v0.32.3/go.mod h1:3v0+3k4IcT9bXTc4V2rt+d2ZPPG700Xy6Oi0Gdl2PaY= +k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= +k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= +k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= From cb59786ab025fa8a61b6767995d6d3a5bf411a95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 13:13:47 +0200 Subject: [PATCH 12/16] docs: adding comment for replace statement in go.mod --- go.mod | 1 + 1 file changed, 1 insertion(+) diff --git a/go.mod b/go.mod index 3aadd78..3d9145e 100644 --- a/go.mod +++ b/go.mod @@ -2,6 +2,7 @@ module github.com/platform-mesh/golang-commons go 1.24.3 +// This is currently necessary due to the version used in https://github.com/kcp-dev/multicluster-provider/blob/main/go.mod replace sigs.k8s.io/controller-runtime => sigs.k8s.io/controller-runtime v0.20.4 require ( From bde8a75bef0bb0480020097d5dd90c1a58f1b93f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Mon, 7 Jul 2025 17:12:29 +0200 Subject: [PATCH 13/16] feat: adding lifecycle builder --- controller/lifecycle/builder/builder.go | 74 ++++++++++++ controller/lifecycle/builder/builder_test.go | 110 ++++++++++++++++++ .../lifecycle/controllerruntime/lifecycle.go | 2 +- .../controllerruntime/lifecycle_test.go | 16 +-- .../lifecycle/multicluster/lifecycle.go | 2 +- .../lifecycle/multicluster/lifecycle_test.go | 4 +- controller/testSupport/lifecycle.go | 5 +- 7 files changed, 199 insertions(+), 14 deletions(-) create mode 100644 controller/lifecycle/builder/builder.go create mode 100644 controller/lifecycle/builder/builder_test.go diff --git a/controller/lifecycle/builder/builder.go b/controller/lifecycle/builder/builder.go new file mode 100644 index 0000000..53e574e --- /dev/null +++ b/controller/lifecycle/builder/builder.go @@ -0,0 +1,74 @@ +package builder + +import ( + "sigs.k8s.io/controller-runtime/pkg/client" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + + "github.com/platform-mesh/golang-commons/controller/lifecycle/controllerruntime" + "github.com/platform-mesh/golang-commons/controller/lifecycle/multicluster" + "github.com/platform-mesh/golang-commons/controller/lifecycle/subroutine" + "github.com/platform-mesh/golang-commons/logger" +) + +type Builder struct { + operatorName string + controllerName string + withConditionManagement bool + withSpreadingReconciles bool + withReadOnly bool + subroutines []subroutine.Subroutine + log *logger.Logger +} + +func 
NewBuilder(operatorName, controllerName string, subroutines []subroutine.Subroutine, log *logger.Logger) *Builder { + return &Builder{ + operatorName: operatorName, + controllerName: controllerName, + log: log, + withConditionManagement: false, + subroutines: subroutines, + } +} + +func (b *Builder) WithConditionManagement() *Builder { + b.withConditionManagement = true + return b +} + +func (b *Builder) WithSpreadingReconciles() *Builder { + b.withSpreadingReconciles = true + return b +} + +func (b *Builder) WithReadOnly() *Builder { + b.withReadOnly = true + return b +} + +func (b *Builder) BuildControllerRuntime(cl client.Client) *controllerruntime.LifecycleManager { + lm := controllerruntime.NewLifecycleManager(b.subroutines, b.operatorName, b.controllerName, cl, b.log) + if b.withConditionManagement { + lm.WithConditionManagement() + } + if b.withSpreadingReconciles { + lm.WithSpreadingReconciles() + } + if b.withReadOnly { + lm.WithReadOnly() + } + return lm +} + +func (b *Builder) BuildMultiCluster(mgr mcmanager.Manager) *multicluster.LifecycleManager { + lm := multicluster.NewLifecycleManager(b.subroutines, b.operatorName, b.controllerName, mgr, b.log) + if b.withConditionManagement { + lm.WithConditionManagement() + } + if b.withSpreadingReconciles { + lm.WithSpreadingReconciles() + } + if b.withReadOnly { + lm.WithReadOnly() + } + return lm +} diff --git a/controller/lifecycle/builder/builder_test.go b/controller/lifecycle/builder/builder_test.go new file mode 100644 index 0000000..8de061e --- /dev/null +++ b/controller/lifecycle/builder/builder_test.go @@ -0,0 +1,110 @@ +package builder + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "k8s.io/client-go/rest" + mcmanager "sigs.k8s.io/multicluster-runtime/pkg/manager" + + pmtesting "github.com/platform-mesh/golang-commons/controller/testSupport" + "github.com/platform-mesh/golang-commons/logger" +) + +func TestNewBuilder_Defaults(t *testing.T) { + log := &logger.Logger{} + b := NewBuilder("op", "ctrl", nil, log) + if b.operatorName != "op" { + t.Errorf("expected operatorName 'op', got %s", b.operatorName) + } + if b.controllerName != "ctrl" { + t.Errorf("expected controllerName 'ctrl', got %s", b.controllerName) + } + if b.withConditionManagement { + t.Error("expected withConditionManagement to be false") + } + if b.withSpreadingReconciles { + t.Error("expected withSpreadingReconciles to be false") + } + if b.withReadOnly { + t.Error("expected withReadOnly to be false") + } + if b.log != log { + t.Error("expected log to be set") + } +} + +func TestBuilder_WithConditionManagement(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}) + b.WithConditionManagement() + if !b.withConditionManagement { + t.Error("WithConditionManagement should set withConditionManagement to true") + } +} + +func TestBuilder_WithSpreadingReconciles(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}) + b.WithSpreadingReconciles() + if !b.withSpreadingReconciles { + t.Error("WithSpreadingReconciles should set withSpreadingReconciles to true") + } +} + +func TestBuilder_WithReadOnly(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}) + b.WithReadOnly() + if !b.withReadOnly { + t.Error("WithReadOnly should set withReadOnly to true") + } +} + +func TestControllerRuntimeBuilder(t *testing.T) { + t.Run("Minimal setup", func(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}) + fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) + lm := 
b.BuildControllerRuntime(fakeClient) + assert.NotNil(t, lm) + }) + t.Run("All Options", func(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}).WithConditionManagement().WithSpreadingReconciles() + fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) + lm := b.BuildControllerRuntime(fakeClient) + assert.NotNil(t, lm) + }) + t.Run("ReadOnly", func(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}).WithReadOnly() + fakeClient := pmtesting.CreateFakeClient(t, &pmtesting.TestApiObject{}) + lm := b.BuildControllerRuntime(fakeClient) + assert.NotNil(t, lm) + }) +} + +func TestMulticontrollerRuntimeBuilder(t *testing.T) { + t.Run("Minimal setup", func(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}) + cfg := &rest.Config{} + provider := pmtesting.NewFakeProvider(cfg) + mgr, err := mcmanager.New(cfg, provider, mcmanager.Options{}) + assert.NoError(t, err) + lm := b.BuildMultiCluster(mgr) + assert.NotNil(t, lm) + }) + t.Run("All Options", func(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}).WithConditionManagement().WithSpreadingReconciles() + cfg := &rest.Config{} + provider := pmtesting.NewFakeProvider(cfg) + mgr, err := mcmanager.New(cfg, provider, mcmanager.Options{}) + assert.NoError(t, err) + lm := b.BuildMultiCluster(mgr) + assert.NotNil(t, lm) + }) + t.Run("ReadOnly", func(t *testing.T) { + b := NewBuilder("op", "ctrl", nil, &logger.Logger{}).WithReadOnly() + cfg := &rest.Config{} + provider := pmtesting.NewFakeProvider(cfg) + mgr, err := mcmanager.New(cfg, provider, mcmanager.Options{}) + assert.NoError(t, err) + lm := b.BuildMultiCluster(mgr) + assert.NotNil(t, lm) + }) +} diff --git a/controller/lifecycle/controllerruntime/lifecycle.go b/controller/lifecycle/controllerruntime/lifecycle.go index f06952d..22261a8 100644 --- a/controller/lifecycle/controllerruntime/lifecycle.go +++ b/controller/lifecycle/controllerruntime/lifecycle.go @@ -31,7 +31,7 @@ type LifecycleManager struct { prepareContextFunc api.PrepareContextFunc } -func NewLifecycleManager(log *logger.Logger, operatorName string, controllerName string, client client.Client, subroutines []subroutine.Subroutine) *LifecycleManager { +func NewLifecycleManager(subroutines []subroutine.Subroutine, operatorName string, controllerName string, client client.Client, log *logger.Logger) *LifecycleManager { log = log.MustChildLoggerWithAttributes("operator", operatorName, "controller", controllerName) return &LifecycleManager{ log: log, diff --git a/controller/lifecycle/controllerruntime/lifecycle_test.go b/controller/lifecycle/controllerruntime/lifecycle_test.go index e950c15..a974b5b 100644 --- a/controller/lifecycle/controllerruntime/lifecycle_test.go +++ b/controller/lifecycle/controllerruntime/lifecycle_test.go @@ -37,8 +37,9 @@ func TestLifecycle(t *testing.T) { m, err := manager.New(&rest.Config{}, manager.Options{Scheme: fakeClient.Scheme()}) assert.NoError(t, err) - lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - lm = lm.WithSpreadingReconciles() + log := testlogger.New() + lm := NewLifecycleManager([]subroutine.Subroutine{}, "test-operator", "test-controller", fakeClient, log.Logger) + lm.WithSpreadingReconciles() tr := &testReconciler{lifecycleManager: lm} // Act @@ -56,7 +57,7 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - lm = lm.WithSpreadingReconciles() + lm.WithSpreadingReconciles() tr := 
&testReconciler{lifecycleManager: lm} // Act @@ -74,7 +75,8 @@ func TestLifecycle(t *testing.T) { assert.NoError(t, err) lm, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) - lm.WithSpreadingReconciles().WithReadOnly() + lm.WithSpreadingReconciles() + lm.WithReadOnly() tr := &testReconciler{lifecycleManager: lm} // Act @@ -133,7 +135,7 @@ func TestLifecycle(t *testing.T) { _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) // When - l := NewLifecycleManager(log.Logger, "test-operator", "test-controller", fakeClient, []subroutine.Subroutine{}).WithConditionManagement() + l := NewLifecycleManager([]subroutine.Subroutine{}, "test-operator", "test-controller", fakeClient, log.Logger).WithConditionManagement() // Then assert.True(t, true, l.ConditionsManager() != nil) @@ -144,7 +146,7 @@ func TestLifecycle(t *testing.T) { _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) // When - l := NewLifecycleManager(log.Logger, "test-operator", "test-controller", fakeClient, []subroutine.Subroutine{}).WithReadOnly() + l := NewLifecycleManager([]subroutine.Subroutine{}, "test-operator", "test-controller", fakeClient, log.Logger).WithReadOnly() // Then assert.True(t, true, l.ConditionsManager() != nil) @@ -162,6 +164,6 @@ func (r *testReconciler) Reconcile(ctx context.Context, req controllerruntime.Re func createLifecycleManager(subroutines []subroutine.Subroutine, c client.Client) (*LifecycleManager, *testlogger.TestLogger) { log := testlogger.New() - mgr := NewLifecycleManager(log.Logger, "test-operator", "test-controller", c, subroutines) + mgr := NewLifecycleManager(subroutines, "test-operator", "test-controller", c, log.Logger) return mgr, log } diff --git a/controller/lifecycle/multicluster/lifecycle.go b/controller/lifecycle/multicluster/lifecycle.go index 76ff2aa..2b31445 100644 --- a/controller/lifecycle/multicluster/lifecycle.go +++ b/controller/lifecycle/multicluster/lifecycle.go @@ -37,7 +37,7 @@ type LifecycleManager struct { prepareContextFunc api.PrepareContextFunc } -func NewLifecycleManager(log *logger.Logger, operatorName string, controllerName string, mgr ClusterGetter, subroutines []subroutine.Subroutine) *LifecycleManager { +func NewLifecycleManager(subroutines []subroutine.Subroutine, operatorName string, controllerName string, mgr ClusterGetter, log *logger.Logger) *LifecycleManager { log = log.MustChildLoggerWithAttributes("operator", operatorName, "controller", controllerName) return &LifecycleManager{ log: log, diff --git a/controller/lifecycle/multicluster/lifecycle_test.go b/controller/lifecycle/multicluster/lifecycle_test.go index 3e0307e..9b0ce6f 100644 --- a/controller/lifecycle/multicluster/lifecycle_test.go +++ b/controller/lifecycle/multicluster/lifecycle_test.go @@ -185,7 +185,7 @@ func TestLifecycleManager_WithConditionManagement(t *testing.T) { _, log := createLifecycleManager([]subroutine.Subroutine{}, fakeClient) // When - l := NewLifecycleManager(log.Logger, "test-operator", "test-controller", clusterGetter, []subroutine.Subroutine{}).WithConditionManagement() + l := NewLifecycleManager([]subroutine.Subroutine{}, "test-operator", "test-controller", clusterGetter, log.Logger).WithConditionManagement() // Then assert.True(t, true, l.ConditionsManager() != nil) @@ -202,6 +202,6 @@ func (r *testReconciler) Reconcile(ctx context.Context, req mcreconcile.Request) func createLifecycleManager(subroutines []subroutine.Subroutine, client client.Client) (*LifecycleManager, *testlogger.TestLogger) { log := 
testlogger.New() clusterGetter := &pmtesting.FakeManager{Client: client} - m := NewLifecycleManager(log.Logger, "test-operator", "test-controller", clusterGetter, subroutines) + m := NewLifecycleManager(subroutines, "test-operator", "test-controller", clusterGetter, log.Logger) return m, log } diff --git a/controller/testSupport/lifecycle.go b/controller/testSupport/lifecycle.go index 68b33b5..9a3ec43 100644 --- a/controller/testSupport/lifecycle.go +++ b/controller/testSupport/lifecycle.go @@ -37,15 +37,14 @@ func (l *TestLifecycleManager) PrepareContextFunc() api.PrepareContextFunc { return l.prepareContextFunc } func (l *TestLifecycleManager) Subroutines() []subroutine.Subroutine { return l.SubroutinesArr } -func (l *TestLifecycleManager) WithSpreadingReconciles() *TestLifecycleManager { +func (l *TestLifecycleManager) WithSpreadingReconciles() api.Lifecycle { l.spreader = &TestSpreader{ShouldReconcile: l.ShouldReconcile} return l } -func (l *TestLifecycleManager) WithConditionManagement() *TestLifecycleManager { +func (l *TestLifecycleManager) WithConditionManagement() api.Lifecycle { l.conditionsManager = &TestConditionManager{} return l } - func (l *TestLifecycleManager) WithPrepareContextFunc(prepareFunction api.PrepareContextFunc) *TestLifecycleManager { l.prepareContextFunc = prepareFunction return l From 0ed6f970c742d371f195551f0ec9fdf72f6ee075 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Tue, 8 Jul 2025 07:35:38 +0200 Subject: [PATCH 14/16] test: adding kcp env testing --- .testcoverage.yml | 1 + go.mod | 9 +- go.sum | 14 + test/kcp/kcpserver.go | 128 +++++++++ test/kcp/process/arguments.go | 340 ++++++++++++++++++++++ test/kcp/process/bin_path_finder.go | 70 +++++ test/kcp/process/procattr_other.go | 28 ++ test/kcp/process/procattr_unix.go | 33 +++ test/kcp/process/process.go | 365 ++++++++++++++++++++++++ test/kcp/server.go | 419 ++++++++++++++++++++++++++++ test/{ => openfga}/openfga.go | 2 +- 11 files changed, 1406 insertions(+), 3 deletions(-) create mode 100644 test/kcp/kcpserver.go create mode 100644 test/kcp/process/arguments.go create mode 100644 test/kcp/process/bin_path_finder.go create mode 100644 test/kcp/process/procattr_other.go create mode 100644 test/kcp/process/procattr_unix.go create mode 100644 test/kcp/process/process.go create mode 100644 test/kcp/server.go rename test/{ => openfga}/openfga.go (99%) diff --git a/.testcoverage.yml b/.testcoverage.yml index 38714f2..0d99ccd 100644 --- a/.testcoverage.yml +++ b/.testcoverage.yml @@ -4,4 +4,5 @@ exclude: - mocks # exclude generated mock files - ^test/openfga - logger/testlogger + - testing/kcpenvtest diff --git a/go.mod b/go.mod index 3d9145e..8c5bfe5 100644 --- a/go.mod +++ b/go.mod @@ -16,10 +16,12 @@ require ( github.com/google/uuid v1.6.0 github.com/hashicorp/golang-lru/v2 v2.0.7 github.com/jellydator/ttlcache/v3 v3.4.0 + github.com/kcp-dev/kcp/sdk v0.27.1 github.com/machinebox/graphql v0.2.2 github.com/openfga/api/proto v0.0.0-20250528161632-e53c69cc5531 github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20250428093642-7aeebe78bbfe github.com/openfga/openfga v1.9.0 + github.com/otiai10/copy v1.14.1 github.com/pkg/errors v0.9.1 github.com/rs/zerolog v1.34.0 github.com/spf13/cobra v1.9.1 @@ -36,7 +38,9 @@ require ( go.opentelemetry.io/proto/otlp v1.7.0 golang.org/x/exp v0.0.0-20250506013437-ce4c2cf36ca6 golang.org/x/oauth2 v0.30.0 + golang.org/x/sys v0.33.0 google.golang.org/grpc v1.73.0 + gopkg.in/yaml.v3 v3.0.1 k8s.io/api v0.33.0 k8s.io/apimachinery v0.33.0 k8s.io/client-go 
v0.33.0 @@ -79,6 +83,7 @@ require ( github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect + github.com/kcp-dev/logicalcluster/v3 v3.0.5 // indirect github.com/klauspost/cpuid/v2 v2.0.9 // indirect github.com/mailru/easyjson v0.7.7 // indirect github.com/matryer/is v1.4.1 // indirect @@ -89,6 +94,8 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/natefinch/wrap v0.2.0 // indirect github.com/oklog/ulid/v2 v2.1.1 // indirect + github.com/onsi/gomega v1.36.1 // indirect + github.com/otiai10/mint v1.6.3 // indirect github.com/pelletier/go-toml/v2 v2.2.3 // indirect github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect github.com/prometheus/client_golang v1.22.0 // indirect @@ -114,7 +121,6 @@ require ( golang.org/x/crypto v0.39.0 // indirect golang.org/x/net v0.41.0 // indirect golang.org/x/sync v0.15.0 // indirect - golang.org/x/sys v0.33.0 // indirect golang.org/x/term v0.32.0 // indirect golang.org/x/text v0.26.0 // indirect golang.org/x/time v0.9.0 // indirect @@ -125,7 +131,6 @@ require ( google.golang.org/protobuf v1.36.6 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v3 v3.0.1 // indirect k8s.io/apiextensions-apiserver v0.33.0 // indirect k8s.io/klog/v2 v2.130.1 // indirect k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect diff --git a/go.sum b/go.sum index 8a51adb..e2c2130 100644 --- a/go.sum +++ b/go.sum @@ -19,6 +19,8 @@ github.com/antlr4-go/antlr/v4 v4.13.1/go.mod h1:GKmUxMtwp6ZgGwZSva4eWPC5mS6vUAmO github.com/benbjohnson/clock v1.1.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA= github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM= github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw= +github.com/blang/semver/v4 v4.0.0 h1:1PFHFE6yCCTv8C1TeyNNarDzntLi7wMI5i/pzqYIsAM= +github.com/blang/semver/v4 v4.0.0/go.mod h1:IbckMUScFkM3pff0VJDNKRiT6TG/YpiHIM2yvyW5YoQ= github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8= github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE= github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU= @@ -165,6 +167,10 @@ github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8Hm github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= +github.com/kcp-dev/kcp/sdk v0.27.1 h1:jBVdrZoJd5hy2RqaBnmCCzldimwOqDkf8FXtNq5HaWA= +github.com/kcp-dev/kcp/sdk v0.27.1/go.mod h1:3eRgW42d81Ng60DbG1xbne0FSS2znpcN/GUx4rqJgUo= +github.com/kcp-dev/logicalcluster/v3 v3.0.5 h1:JbYakokb+5Uinz09oTXomSUJVQsqfxEvU4RyHUYxHOU= +github.com/kcp-dev/logicalcluster/v3 v3.0.5/go.mod h1:EWBUBxdr49fUB1cLMO4nOdBWmYifLbP1LfoL20KkXYY= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo= @@ -231,6 +237,10 @@ github.com/openfga/language/pkg/go v0.2.0-beta.2.0.20250428093642-7aeebe78bbfe/g github.com/openfga/openfga v1.9.0 
h1:Bs5h7fZWZNCubnUhAxH5choNbtkCq1HNaoSrRRgyyXU= github.com/openfga/openfga v1.9.0/go.mod h1:NwuzRFEwrOBV6AiDCq37KxPp/v9wXdDYHRCXaePf2iU= github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o= +github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8= +github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I= +github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs= +github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM= github.com/pborman/getopt v0.0.0-20170112200414-7148bc3a4c30/go.mod h1:85jBQOZwpVEaDAr341tbn15RS4fCAsIst0qp7i8ex1o= github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M= github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc= @@ -459,8 +469,12 @@ k8s.io/apiextensions-apiserver v0.33.0 h1:d2qpYL7Mngbsc1taA4IjJPRJ9ilnsXIrndH+r9 k8s.io/apiextensions-apiserver v0.33.0/go.mod h1:VeJ8u9dEEN+tbETo+lFkwaaZPg6uFKLGj5vyNEwwSzc= k8s.io/apimachinery v0.33.0 h1:1a6kHrJxb2hs4t8EE5wuR/WxKDwGN1FKH3JvDtA0CIQ= k8s.io/apimachinery v0.33.0/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/apiserver v0.33.0 h1:QqcM6c+qEEjkOODHppFXRiw/cE2zP85704YrQ9YaBbc= +k8s.io/apiserver v0.33.0/go.mod h1:EixYOit0YTxt8zrO2kBU7ixAtxFce9gKGq367nFmqI8= k8s.io/client-go v0.33.0 h1:UASR0sAYVUzs2kYuKn/ZakZlcs2bEHaizrrHUZg0G98= k8s.io/client-go v0.33.0/go.mod h1:kGkd+l/gNGg8GYWAPr0xF1rRKvVWvzh9vmZAMXtaKOg= +k8s.io/component-base v0.33.0 h1:Ot4PyJI+0JAD9covDhwLp9UNkUja209OzsJ4FzScBNk= +k8s.io/component-base v0.33.0/go.mod h1:aXYZLbw3kihdkOPMDhWbjGCO6sg+luw554KP51t8qCU= k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= diff --git a/test/kcp/kcpserver.go b/test/kcp/kcpserver.go new file mode 100644 index 0000000..9c0d393 --- /dev/null +++ b/test/kcp/kcpserver.go @@ -0,0 +1,128 @@ +package kcp + +import ( + "io" + "net/url" + "os" + "path/filepath" + "time" + + "github.com/platform-mesh/golang-commons/logger" + "github.com/platform-mesh/golang-commons/test/kcp/process" +) + +type KCPServer struct { + processState *process.State + Out io.Writer + Err io.Writer + StartTimeout time.Duration + StopTimeout time.Duration + Dir string + Binary string + Args []string + PathToRoot string + + log *logger.Logger + args *process.Arguments +} + +func NewKCPServer(baseDir string, binary string, pathToRoot string, log *logger.Logger) *KCPServer { + return &KCPServer{ + Dir: baseDir, + Binary: binary, + Args: []string{"start", "-v=1"}, + PathToRoot: pathToRoot, + log: log, + } +} + +func (s *KCPServer) Start() error { + if err := s.prepare(); err != nil { + return err + } + return s.processState.Start(s.Out, s.Err, s.log) +} + +func (s *KCPServer) prepare() error { + if s.Out == nil || s.Err == nil { + //create file writer for the logs + fileOut := filepath.Join(s.PathToRoot, "kcp.log") + out, err := os.Create(fileOut) + if err != nil { + return err + } + writer := io.Writer(out) + + if s.Out == nil { + s.Out = writer + } + if s.Err == nil { + s.Err = writer + } + } + + if err := s.setProcessState(); err != nil { + return err + } + return nil +} + +func (s *KCPServer) setProcessState() error { + var err error + + healthUrl, err := 
url.Parse("https://localhost:6443/clusters/root/apis/tenancy.kcp.io/v1alpha1/workspaces") + if err != nil { + return err + } + s.processState = &process.State{ + Dir: s.Dir, + Path: s.Binary, + StartTimeout: s.StartTimeout, + StopTimeout: s.StopTimeout, + HealthCheck: process.HealthCheck{ + URL: *healthUrl, + PollInterval: 2 * time.Second, + KcpAssetPath: filepath.Join(s.PathToRoot, ".kcp"), + }, + } + if err := s.processState.Init("kcp"); err != nil { + return err + } + + s.Binary = s.processState.Path + s.Dir = s.processState.Dir + s.StartTimeout = s.processState.StartTimeout + s.StopTimeout = s.processState.StopTimeout + + s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck + Data: s, + Defaults: s.defaultArgs(), + MinimalDefaults: map[string][]string{}, + }) + if err != nil { + return err + } + + return nil +} + +func (s *KCPServer) defaultArgs() map[string][]string { + args := map[string][]string{} + return args +} + +func (s *KCPServer) Configure() *process.Arguments { + if s.args == nil { + s.args = process.EmptyArguments() + } + return s.args +} + +func (s *KCPServer) Stop() error { + if s.processState != nil { + if err := s.processState.Stop(); err != nil { + return err + } + } + return nil +} diff --git a/test/kcp/process/arguments.go b/test/kcp/process/arguments.go new file mode 100644 index 0000000..391eec1 --- /dev/null +++ b/test/kcp/process/arguments.go @@ -0,0 +1,340 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "bytes" + "html/template" + "sort" + "strings" +) + +// RenderTemplates returns an []string to render the templates +// +// Deprecated: will be removed in favor of Arguments. +func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { + var t *template.Template + + for _, arg := range argTemplates { + t, err = template.New(arg).Parse(arg) + if err != nil { + args = nil + return + } + + buf := &bytes.Buffer{} + err = t.Execute(buf, data) + if err != nil { + args = nil + return + } + args = append(args, buf.String()) + } + + return +} + +// SliceToArguments converts a slice of arguments to structured arguments, +// appending each argument that starts with `--` and contains an `=` to the +// argument set (ignoring defaults), returning the rest. +// +// Deprecated: will be removed when RenderTemplates is removed. +func SliceToArguments(sliceArgs []string, args *Arguments) []string { + var rest []string + for i, arg := range sliceArgs { + if arg == "--" { + rest = append(rest, sliceArgs[i:]...) 
+ return rest + } + // skip non-flag arguments, skip arguments w/o equals because we + // can't tell if the next argument should take a value + if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") { + rest = append(rest, arg) + continue + } + + parts := strings.SplitN(arg[2:], "=", 2) + name := parts[0] + val := parts[1] + + args.AppendNoDefaults(name, val) + } + + return rest +} + +// TemplateDefaults specifies defaults to be used for joining structured arguments with templates. +// +// Deprecated: will be removed when RenderTemplates is removed. +type TemplateDefaults struct { + // Data will be used to render the template. + Data interface{} + // Defaults will be used to default structured arguments if no template is passed. + Defaults map[string][]string + // MinimalDefaults will be used to default structured arguments if a template is passed. + // Use this for flags which *must* be present. + MinimalDefaults map[string][]string // for api server service-cluster-ip-range +} + +// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing +// behavior. Namely: +// +// 1. if templ has len > 0, it will be rendered against data +// 2. the rendered template values that look like `--foo=bar` will be split +// and appended to args, the rest will be kept around +// 3. the given args will be rendered as string form. If a template is given, +// no defaults will be used, otherwise defaults will be used +// 4. a result of [args..., rest...] will be returned +// +// It returns the resulting rendered arguments, plus the arguments that were +// not transferred to `args` during rendering. +// +// Deprecated: will be removed when RenderTemplates is removed. +func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) { + if len(templ) == 0 { // 3 & 4 (no template case) + return args.AsStrings(data.Defaults), nil, nil + } + + // 1: render the template + rendered, err := RenderTemplates(templ, data.Data) + if err != nil { + return nil, nil, err + } + + // 2: filter out structured args and add them to args + rest := SliceToArguments(rendered, args) + + // 3 (template case): render structured args, no defaults (matching the + // legacy case where if Args was specified, no defaults were used) + res := args.AsStrings(data.MinimalDefaults) + + // 4: return the rendered structured args + all non-structured args + return append(res, rest...), rest, nil +} + +// EmptyArguments constructs an empty set of flags with no defaults. +func EmptyArguments() *Arguments { + return &Arguments{ + values: make(map[string]Arg), + } +} + +// Arguments are structured, overridable arguments. +// Each Arguments object contains some set of default arguments, which may +// be appended to, or overridden. +// +// When ready, you can serialize them to pass to exec.Command and friends using +// AsStrings. +// +// All flag-setting methods return the *same* instance of Arguments so that you +// can chain calls. +type Arguments struct { + // values contains the user-set values for the arguments. + // `values[key] = dontPass` means "don't pass this flag" + // `values[key] = passAsName` means "pass this flag without args like --key` + // `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c` + // any values not explicitly set here will be copied from defaults on final rendering. + values map[string]Arg +} + +// Arg is an argument that has one or more values, +// and optionally falls back to default values. 
+type Arg interface { + // Append adds new values to this argument, returning + // a new instance contain the new value. The intermediate + // argument should generally be assumed to be consumed. + Append(vals ...string) Arg + // Get returns the full set of values, optionally including + // the passed in defaults. If it returns nil, this will be + // skipped. If it returns a non-nil empty slice, it'll be + // assumed that the argument should be passed as name-only. + Get(defaults []string) []string +} + +type userArg []string + +func (a userArg) Append(vals ...string) Arg { + return userArg(append(a, vals...)) //nolint:unconvert +} +func (a userArg) Get(_ []string) []string { + return []string(a) +} + +type defaultedArg []string + +func (a defaultedArg) Append(vals ...string) Arg { + return defaultedArg(append(a, vals...)) //nolint:unconvert +} +func (a defaultedArg) Get(defaults []string) []string { + res := append([]string(nil), defaults...) + return append(res, a...) +} + +type dontPassArg struct{} + +func (a dontPassArg) Append(vals ...string) Arg { + return userArg(vals) +} +func (dontPassArg) Get(_ []string) []string { + return nil +} + +type passAsNameArg struct{} + +func (a passAsNameArg) Append(_ ...string) Arg { + return passAsNameArg{} +} +func (passAsNameArg) Get(_ []string) []string { + return []string{} +} + +var ( + // DontPass indicates that the given argument will not actually be + // rendered. + DontPass Arg = dontPassArg{} + // PassAsName indicates that the given flag will be passed as `--key` + // without any value. + PassAsName Arg = passAsNameArg{} +) + +// AsStrings serializes this set of arguments to a slice of strings appropriate +// for passing to exec.Command and friends, making use of the given defaults +// as indicated for each particular argument. +// +// - Any flag in defaults that's not in Arguments will be present in the output +// - Any flag that's present in Arguments will be passed the corresponding +// defaults to do with as it will (ignore, append-to, suppress, etc). +func (a *Arguments) AsStrings(defaults map[string][]string) []string { + // sort for deterministic ordering + keysInOrder := make([]string, 0, len(defaults)+len(a.values)) + for key := range defaults { + if _, userSet := a.values[key]; userSet { + continue + } + keysInOrder = append(keysInOrder, key) + } + for key := range a.values { + keysInOrder = append(keysInOrder, key) + } + sort.Strings(keysInOrder) + + var res []string + for _, key := range keysInOrder { + vals := a.Get(key).Get(defaults[key]) + switch { + case vals == nil: // don't pass + continue + case len(vals) == 0: // pass as name + res = append(res, "--"+key) + default: + for _, val := range vals { + res = append(res, "--"+key+"="+val) + } + } + } + + return res +} + +// Get returns the value of the given flag. If nil, +// it will not be passed in AsString, otherwise: +// +// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`. +func (a *Arguments) Get(key string) Arg { + if vals, ok := a.values[key]; ok { + return vals + } + return defaultedArg(nil) +} + +// Enable configures the given key to be passed as a "name-only" flag, +// like, `--key`. +func (a *Arguments) Enable(key string) *Arguments { + a.values[key] = PassAsName + return a +} + +// Disable prevents this flag from be passed. +func (a *Arguments) Disable(key string) *Arguments { + a.values[key] = DontPass + return a +} + +// Append adds additional values to this flag. If this flag has +// yet to be set, initial values will include defaults. 
If you want +// to intentionally ignore defaults/start from scratch, call AppendNoDefaults. +// +// Multiple values will look like `--key=value1 --key=value2 ...`. +func (a *Arguments) Append(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = defaultedArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// AppendNoDefaults adds additional values to this flag. However, +// unlike Append, it will *not* copy values from defaults. +func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments { + vals, present := a.values[key] + if !present { + vals = userArg{} + } + a.values[key] = vals.Append(values...) + return a +} + +// Set resets the given flag to the specified values, ignoring any existing +// values or defaults. +func (a *Arguments) Set(key string, values ...string) *Arguments { + a.values[key] = userArg(values) + return a +} + +// SetRaw sets the given flag to the given Arg value directly. Use this if +// you need to do some complicated deferred logic or something. +// +// Otherwise behaves like Set. +func (a *Arguments) SetRaw(key string, val Arg) *Arguments { + a.values[key] = val + return a +} + +// FuncArg is a basic implementation of Arg that can be used for custom argument logic, +// like pulling values out of APIServer, or dynamically calculating values just before +// launch. +// +// The given function will be mapped directly to Arg#Get, and will generally be +// used in conjunction with SetRaw. For example, to set `--some-flag` to the +// API server's CertDir, you could do: +// +// server.Configure().SetRaw("--some-flag", FuncArg(func(defaults []string) []string { +// return []string{server.CertDir} +// })) +// +// FuncArg ignores Appends; if you need to support appending values too, consider implementing +// Arg directly. +type FuncArg func([]string) []string + +// Append is a no-op for FuncArg, and just returns itself. +func (a FuncArg) Append(vals ...string) Arg { return a } + +// Get delegates functionality to the FuncArg function itself. +func (a FuncArg) Get(defaults []string) []string { + return a(defaults) +} diff --git a/test/kcp/process/bin_path_finder.go b/test/kcp/process/bin_path_finder.go new file mode 100644 index 0000000..e1428aa --- /dev/null +++ b/test/kcp/process/bin_path_finder.go @@ -0,0 +1,70 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "os" + "path/filepath" + "regexp" + "strings" +) + +const ( + // EnvAssetsPath is the environment variable that stores the global test + // binary location override. + EnvAssetsPath = "KUBEBUILDER_ASSETS" + // EnvAssetOverridePrefix is the environment variable prefix for per-binary + // location overrides. + EnvAssetOverridePrefix = "TEST_ASSET_" + // AssetsDefaultPath is the default location to look for test binaries in, + // if no override was provided. 
+ AssetsDefaultPath = "/usr/local/kubebuilder/bin" +) + +// BinPathFinder finds the path to the given named binary, using the following locations +// in order of precedence (highest first). Notice that the various env vars only need +// to be set -- the asset is not checked for existence on the filesystem. +// +// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix) +// 1. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath) +// 3. assetDirectory (if set; per-config asset directory) +// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath). +func BinPathFinder(symbolicName, assetDirectory string) (binPath string) { + punctuationPattern := regexp.MustCompile("[^A-Z0-9]+") + sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_") + leadingNumberPattern := regexp.MustCompile("^[0-9]+") + sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "") + envVar := EnvAssetOverridePrefix + sanitizedName + + // TEST_ASSET_XYZ + if val, ok := os.LookupEnv(envVar); ok { + return val + } + + // KUBEBUILDER_ASSETS + if val, ok := os.LookupEnv(EnvAssetsPath); ok { + return filepath.Join(val, symbolicName) + } + + // assetDirectory + if assetDirectory != "" { + return filepath.Join(assetDirectory, symbolicName) + } + + // default path + return filepath.Join(AssetsDefaultPath, symbolicName) +} diff --git a/test/kcp/process/procattr_other.go b/test/kcp/process/procattr_other.go new file mode 100644 index 0000000..df13b34 --- /dev/null +++ b/test/kcp/process/procattr_other.go @@ -0,0 +1,28 @@ +//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos +// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos + +/* +Copyright 2016 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import "syscall" + +// GetSysProcAttr returns the SysProcAttr to use for the process, +// for non-unix systems this returns nil. +func GetSysProcAttr() *syscall.SysProcAttr { + return nil +} diff --git a/test/kcp/process/procattr_unix.go b/test/kcp/process/procattr_unix.go new file mode 100644 index 0000000..83ad509 --- /dev/null +++ b/test/kcp/process/procattr_unix.go @@ -0,0 +1,33 @@ +//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos +// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos + +/* +Copyright 2023 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. 
+*/ + +package process + +import ( + "golang.org/x/sys/unix" +) + +// GetSysProcAttr returns the SysProcAttr to use for the process, +// for unix systems this returns a SysProcAttr with Setpgid set to true, +// which inherits the parent's process group id. +func GetSysProcAttr() *unix.SysProcAttr { + return &unix.SysProcAttr{ + Setpgid: true, + } +} diff --git a/test/kcp/process/process.go b/test/kcp/process/process.go new file mode 100644 index 0000000..2849b71 --- /dev/null +++ b/test/kcp/process/process.go @@ -0,0 +1,365 @@ +/* +Copyright 2021 The Kubernetes Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package process + +import ( + "crypto/tls" + "crypto/x509" + "fmt" + "io" + "net" + "net/http" + "net/url" + "os" + "os/exec" + "path" + "path/filepath" + "regexp" + "sync" + "syscall" + "time" + + "github.com/platform-mesh/golang-commons/logger" + "gopkg.in/yaml.v3" +) + +// ListenAddr represents some listening address and port. +type ListenAddr struct { + Address string + Port string +} + +// URL returns a URL for this address with the given scheme and subpath. +func (l *ListenAddr) URL(scheme string, path string) *url.URL { + return &url.URL{ + Scheme: scheme, + Host: l.HostPort(), + Path: path, + } +} + +// HostPort returns the joined host-port pair for this address. +func (l *ListenAddr) HostPort() string { + return net.JoinHostPort(l.Address, l.Port) +} + +// HealthCheck describes the information needed to health-check a process via +// some health-check URL. +type HealthCheck struct { + url.URL + KcpAssetPath string + + // HealthCheckPollInterval is the interval which will be used for polling the + // endpoint described by Host, Port, and Path. + // + // If left empty it will default to 100 Milliseconds. + PollInterval time.Duration +} + +// State define the state of the process. +type State struct { + Cmd *exec.Cmd + + // HealthCheck describes how to check if this process is up. If we get an http.StatusOK, + // we assume the process is ready to operate. + // + // For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd. + HealthCheck HealthCheck + + Args []string + + StopTimeout time.Duration + StartTimeout time.Duration + + Dir string + DirNeedsCleaning bool + Path string + + // ready holds whether the process is currently in ready state (hit the ready condition) or not. + // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` + ready bool + + // waitDone is closed when our call to wait finishes up, and indicates that + // our process has terminated. + waitDone chan struct{} + errMu sync.Mutex + exitErr error + exited bool +} + +// Init sets up this process, configuring binary paths if missing, initializing +// temporary directories, etc. +// +// This defaults all defaultable fields. 
+func (ps *State) Init(name string) error { + if ps.Path == "" { + if name == "" { + return fmt.Errorf("must have at least one of name or path") + } + ps.Path = BinPathFinder(name, "") + } + + if ps.Dir == "" { + newDir, err := os.MkdirTemp("", "k8s_test_framework_") + if err != nil { + return err + } + ps.Dir = newDir + ps.DirNeedsCleaning = true + } + + if ps.StartTimeout == 0 { + ps.StartTimeout = 20 * time.Second + } + + if ps.StopTimeout == 0 { + ps.StopTimeout = 20 * time.Second + } + return nil +} + +type stopChannel chan struct{} + +// CheckFlag checks the help output of this command for the presence of the given flag, specified +// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`), +// returning true if the flag is present. +func (ps *State) CheckFlag(flag string) (bool, error) { + cmd := exec.Command(ps.Path, "--help") + outContents, err := cmd.CombinedOutput() + if err != nil { + return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err) + } + pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line) + matched, err := regexp.Match(pat, outContents) + if err != nil { + return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err) + } + return matched, nil +} + +// Start starts the apiserver, waits for it to come up, and returns an error, +// if occurred. +func (ps *State) Start(stdout, stderr io.Writer, log *logger.Logger) (err error) { + if ps.ready { + return nil + } + + ps.Cmd = exec.Command(ps.Path, ps.Args...) + ps.Cmd.Dir = ps.Dir + ps.Cmd.Stdout = stdout + ps.Cmd.Stderr = stderr + ps.Cmd.SysProcAttr = GetSysProcAttr() + + ready := make(chan bool) + timedOut := time.After(ps.StartTimeout) + pollerStopCh := make(stopChannel) + go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ps.HealthCheck.KcpAssetPath, ready, pollerStopCh, log) + + ps.waitDone = make(chan struct{}) + + if err := ps.Cmd.Start(); err != nil { + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exited = true + return err + } + go func() { + defer close(ps.waitDone) + err := ps.Cmd.Wait() + + ps.errMu.Lock() + defer ps.errMu.Unlock() + ps.exitErr = err + ps.exited = true + }() + + select { + case <-ready: + ps.ready = true + return nil + case <-ps.waitDone: + close(pollerStopCh) + return fmt.Errorf("timeout waiting for process %s to start successfully "+ + "(it may have failed to start, or stopped unexpectedly before becoming ready)", + path.Base(ps.Path)) + case <-timedOut: + close(pollerStopCh) + if ps.Cmd != nil { + // intentionally ignore this -- we might've crashed, failed to start, etc + ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck + } + return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path)) + } +} + +// Exited returns true if the process exited, and may also +// return an error (as per Cmd.Wait) if the process did not +// exit with error code 0. +func (ps *State) Exited() (bool, error) { + ps.errMu.Lock() + defer ps.errMu.Unlock() + return ps.exited, ps.exitErr +} + +func pollURLUntilOK(url url.URL, interval time.Duration, kcpAssetPath string, ready chan bool, stopCh stopChannel, log *logger.Logger) { + + if interval <= 0 { + interval = 5000 * time.Millisecond + } + for { + token, ca, err := readTokenAndCA(kcpAssetPath) + if err != nil { + log.Info().Msg("health check failed. 
Credentials not ready") + time.Sleep(interval) + } + caCertPool := x509.NewCertPool() + caCertPool.AppendCertsFromPEM(ca) + client := &http.Client{ + Transport: &http.Transport{ + TLSClientConfig: &tls.Config{ + RootCAs: caCertPool, + }, + }, + } + req, err := http.NewRequest(http.MethodGet, url.String(), nil) + if err != nil { + log.Fatal().Err(err).Msg("error creating request") + } + if token != "" { + req.Header.Add("Authorization", "Bearer "+token) + } + res, err := client.Do(req) + if err == nil { + if err != nil { + fmt.Println("Error reading response body:", err) + return + } + err := res.Body.Close() + if err != nil { + fmt.Println("Error closing response body:", err) + return + } + if res.StatusCode == http.StatusOK { + log.Info().Int("status", res.StatusCode).Msg("KCP Ready (health check succeeded)") + ready <- true + return + } + log.Info().Int("status", res.StatusCode).Msg("Waiting for KCP to get ready (health check failed)") + } + + select { + case <-stopCh: + return + default: + time.Sleep(interval) + } + } +} + +type kubeconfig struct { + Users []struct { + Name string `yaml:"name"` + User struct { + Token string `yaml:"token"` + } `yaml:"user"` + } +} + +func readTokenAndCA(path string) (string, []byte, error) { + adminKubeconfigPath := filepath.Join(path, "admin.kubeconfig") + // check if file exists + if _, err := os.Stat(adminKubeconfigPath); os.IsNotExist(err) { + return "", nil, fmt.Errorf("file %s does not exist", adminKubeconfigPath) + } + file, err := os.Open(adminKubeconfigPath) + if err != nil { + return "", nil, fmt.Errorf("error opening file %s: %w", path, err) + } + defer file.Close() //nolint:errcheck + + data, err := io.ReadAll(file) + if err != nil { + return "", nil, fmt.Errorf("error reading file %s: %w", path, err) + } + + var config kubeconfig + err = yaml.Unmarshal(data, &config) + if err != nil { + return "", nil, fmt.Errorf("error unmarshalling yaml from file %s: %w", path, err) + } + + var userToken string + for _, user := range config.Users { + if user.Name == "kcp-admin" { + userToken = user.User.Token + } + } + if userToken == "" { + return "", nil, fmt.Errorf("token not found in kubeconfig file %s", path) + } + + certPath := filepath.Join(path, "apiserver.crt") + if _, err := os.Stat(certPath); os.IsNotExist(err) { + return "", nil, fmt.Errorf("file %s does not exist", certPath) + } + file, err = os.Open(certPath) + if err != nil { + return "", nil, fmt.Errorf("error opening file %s: %w", path, err) + } + defer file.Close() //nolint:errcheck + + data, err = io.ReadAll(file) + if err != nil { + return "", nil, fmt.Errorf("error reading file %s: %w", path, err) + } + + return userToken, data, nil +} + +// Stop stops this process gracefully, waits for its termination, and cleans up +// the CertDir if necessary. +func (ps *State) Stop() error { + // Always clear the directory if we need to. 
+ defer func() { + if ps.DirNeedsCleaning { + _ = os.RemoveAll(ps.Dir) + } + }() + if ps.Cmd == nil { + return nil + } + if done, _ := ps.Exited(); done { + return nil + } + if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil { + return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err) + } + + timedOut := time.After(ps.StopTimeout) + + select { + case <-ps.waitDone: + break + case <-timedOut: + if err := ps.Cmd.Process.Signal(syscall.SIGKILL); err != nil { + return fmt.Errorf("unable to kill process %s: %w", ps.Path, err) + } + return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) + } + ps.ready = false + return nil +} diff --git a/test/kcp/server.go b/test/kcp/server.go new file mode 100644 index 0000000..2f953b3 --- /dev/null +++ b/test/kcp/server.go @@ -0,0 +1,419 @@ +package kcp + +import ( + "context" + "encoding/json" + "fmt" + "os" + "os/exec" + "path/filepath" + "regexp" + "strings" + "time" + + "github.com/otiai10/copy" + "github.com/rs/zerolog/log" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + utilruntime "k8s.io/apimachinery/pkg/util/runtime" + "k8s.io/apimachinery/pkg/util/wait" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + "k8s.io/client-go/tools/clientcmd" + "sigs.k8s.io/controller-runtime/pkg/client" + + "github.com/platform-mesh/golang-commons/logger" + + kcpapiv1alpha "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" + kcptenancyv1alpha "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" +) + +const ( + kcpEnvStartTimeout = "KCP_SERVER_START_TIMEOUT" + kcpEnvStopTimeout = "KCP_SERVER_STOP_TIMEOUT" + defaultKCPServerTimeout = 1 * time.Minute + kcpAdminKubeconfigPath = ".kcp/admin.kubeconfig" + kcpRootNamespaceServerUrl = "https://localhost:6443/clusters/root" + dirOrderPattern = `^[0-9]*-(.*)$` +) + +type Environment struct { + kcpServer *KCPServer + + Scheme *runtime.Scheme + + ControlPlaneStartTimeout time.Duration + + ControlPlaneStopTimeout time.Duration + + Config *rest.Config + + log *logger.Logger + + RelativeSetupDirectory string + + PathToRoot string + RelativeAssetDirectory string + + ProviderWorkspace string + APIExportEndpointSliceName string + + useExistingCluster bool +} + +func NewEnvironment(apiExportEndpointSliceName string, providerWorkspaceName string, pathToRoot string, relativeAssetDirectory string, relativeSetupDirectory string, useExistingCluster bool, log *logger.Logger) *Environment { + kcpBinary := filepath.Join(relativeAssetDirectory, "kcp") + kcpServ := NewKCPServer(pathToRoot, kcpBinary, pathToRoot, log) + + //kcpServ.Out = os.Stdout + //kcpServ.Err = os.Stderr + return &Environment{ + log: log, + kcpServer: kcpServ, + APIExportEndpointSliceName: apiExportEndpointSliceName, + ProviderWorkspace: providerWorkspaceName, + RelativeSetupDirectory: relativeSetupDirectory, + RelativeAssetDirectory: relativeAssetDirectory, + PathToRoot: pathToRoot, + useExistingCluster: useExistingCluster, + } +} + +func (te *Environment) Start() (*rest.Config, string, error) { + + if !te.useExistingCluster { + // ensure clean .kcp directory + err := te.cleanDir() + if err != nil { + return nil, "", err + } + + if err := te.defaultTimeouts(); err != nil { + return nil, "", fmt.Errorf("failed to default controlplane timeouts: %w", err) + } + te.kcpServer.StartTimeout = te.ControlPlaneStartTimeout + te.kcpServer.StopTimeout = te.ControlPlaneStopTimeout + + te.log.Info().Msg("starting control plane") + if err := te.kcpServer.Start(); err != nil 
{ + return nil, "", fmt.Errorf("unable to start control plane itself: %w", err) + } + } + + if te.Scheme == nil { + te.Scheme = scheme.Scheme + utilruntime.Must(kcpapiv1alpha.AddToScheme(te.Scheme)) + utilruntime.Must(kcptenancyv1alpha.AddToScheme(te.Scheme)) + } + //// wait for default namespace to actually be created and seen as available to the apiserver + if err := te.waitForDefaultNamespace(); err != nil { + return nil, "", fmt.Errorf("default namespace didn't register within deadline: %w", err) + } + + kubectlPath := filepath.Join(te.PathToRoot, ".kcp", "admin.kubeconfig") + var err error + te.Config, err = clientcmd.BuildConfigFromFlags("", kubectlPath) + if err != nil { + return nil, "", err + } + + if te.RelativeSetupDirectory != "" { + // Apply all yaml files in the setup directory + setupDirectory := filepath.Join(te.PathToRoot, te.RelativeSetupDirectory) + kubeconfigPath := filepath.Join(te.PathToRoot, kcpAdminKubeconfigPath) + err := te.ApplySetup(kubeconfigPath, te.Config, setupDirectory, kcpRootNamespaceServerUrl) + if err != nil { + return nil, "", err + } + } + + // Select api export + providerServerUrl := fmt.Sprintf("%s:%s", te.Config.Host, te.ProviderWorkspace) + te.Config.Host = providerServerUrl + cs, err := client.New(te.Config, client.Options{}) + if err != nil { + return nil, "", fmt.Errorf("unable to create client: %w", err) + } + + apiExportEndpointSlice := kcpapiv1alpha.APIExportEndpointSlice{} + err = cs.Get(context.Background(), types.NamespacedName{Name: te.APIExportEndpointSliceName}, &apiExportEndpointSlice) + if err != nil { + return nil, "", err + } + + if len(apiExportEndpointSlice.Status.APIExportEndpoints) == 0 { + return nil, "", fmt.Errorf("no virtual workspaces found") + } + + te.Config.Host = kcpRootNamespaceServerUrl + te.Config.QPS = 1000.0 + te.Config.Burst = 2000.0 + + return te.Config, apiExportEndpointSlice.Status.APIExportEndpoints[0].URL, nil +} + +func (te *Environment) Stop(useExistingCluster bool) error { + if !useExistingCluster { + defer te.cleanDir() //nolint:errcheck + return te.kcpServer.Stop() + } + return nil +} + +func (te *Environment) cleanDir() error { + kcpPath := filepath.Join(te.PathToRoot, ".kcp") + return os.RemoveAll(kcpPath) +} + +func (te *Environment) waitForDefaultNamespace() error { + kubectlPath := filepath.Join(te.PathToRoot, ".kcp", "admin.kubeconfig") + config, err := clientcmd.BuildConfigFromFlags("", kubectlPath) + if err != nil { + return err + } + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + // It shouldn't take longer than 5s for the default namespace to be brought up in etcd + return wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*50, time.Second*10, true, func(ctx context.Context) (bool, error) { + te.log.Info().Msg("waiting for default namespace") + if err = cs.Get(ctx, types.NamespacedName{Name: "default"}, &corev1.Namespace{}); err != nil { + te.log.Info().Msg("namespace not found") + return false, nil //nolint:nilerr + } + return true, nil + }) +} + +func (te *Environment) waitForWorkspace(client client.Client, name string, log *logger.Logger) error { + // It shouldn't take longer than 5s for the default namespace to be brought up in etcd + err := wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*500, time.Second*15, true, func(ctx context.Context) (bool, error) { + ws := &kcptenancyv1alpha.Workspace{} + if err := client.Get(ctx, types.NamespacedName{Name: name}, ws); err != nil { + return 
false, nil //nolint:nilerr + } + ready := ws.Status.Phase == "Ready" + log.Info().Str("workspace", name).Bool("ready", ready).Msg("waiting for workspace to be ready") + return ready, nil + }) + + if err != nil { + return fmt.Errorf("workspace %s did not become ready: %w", name, err) + } + return err +} + +func (te *Environment) defaultTimeouts() error { + var err error + if te.ControlPlaneStartTimeout == 0 { + if envVal := os.Getenv(kcpEnvStartTimeout); envVal != "" { + te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStartTimeout = defaultKCPServerTimeout + } + } + + if te.ControlPlaneStopTimeout == 0 { + if envVal := os.Getenv(kcpEnvStopTimeout); envVal != "" { + te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal) + if err != nil { + return err + } + } else { + te.ControlPlaneStopTimeout = defaultKCPServerTimeout + } + } + return nil +} + +type TemplateParameters struct { + ApiExportRootTenancyKcpIoIdentityHash string `json:"apiExportRootTenancyKcpIoIdentityHash"` + ApiExportRootTopologyKcpIoIdentityHash string `json:"apiExportRootTopologyKcpIoIdentityHash"` + ApiExportRootShardsKcpIoIdentityHash string `json:"apiExportRootShardsKcpIoIdentityHash"` +} + +func (te *Environment) ApplySetup(pathToRootConfig string, config *rest.Config, setupDirectoryPath string, serverUrl string) error { + + dataFile := filepath.Join(te.PathToRoot, ".kcp/data.json") + + err := generateTemplateDataFile(config, dataFile) + if err != nil { + return err + } + + // Copy setup dir + tmpSetupDir := filepath.Join(te.PathToRoot, ".kcp/setup") + err = os.Mkdir(tmpSetupDir, 0755) + if err != nil { + return err + } + err = copy.Copy(setupDirectoryPath, tmpSetupDir) + if err != nil { + return err + } + defer os.RemoveAll(tmpSetupDir) //nolint:errcheck + + // Apply Gomplate recursively + err = applyTemplate(te.PathToRoot, tmpSetupDir, dataFile) + if err != nil { + return err + } + + return te.ApplyYAML(pathToRootConfig, config, tmpSetupDir, serverUrl) + +} + +func applyTemplate(pathToRoot string, dir string, dataFile string) error { + gomplateBinary := filepath.Join(pathToRoot, "bin", "gomplate") + files, err := os.ReadDir(dir) + if err != nil { + return err + } + + for _, file := range files { + if file.IsDir() { + err := applyTemplate(pathToRoot, filepath.Join(dir, file.Name()), dataFile) + if err != nil { + return err + } + } else { + if strings.HasSuffix(file.Name(), ".yaml") { + filePath := filepath.Join(dir, file.Name()) + gomplateCmd := exec.Command(gomplateBinary, "-f", filePath, "-c", "data="+dataFile, "-o", filePath) + gomplateCmd.Stdout = os.Stdout + gomplateCmd.Stderr = os.Stderr + if err := gomplateCmd.Run(); err != nil { + return err + } + + } + } + } + return nil + +} + +func generateTemplateDataFile(config *rest.Config, dataFile string) error { + // Collect Variables + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + + parameters := TemplateParameters{} + apiExport := kcpapiv1alpha.APIExport{} + err = cs.Get(context.Background(), types.NamespacedName{Name: "tenancy.kcp.io"}, &apiExport) + if err != nil { + return err + } + parameters.ApiExportRootTenancyKcpIoIdentityHash = apiExport.Status.IdentityHash + + err = cs.Get(context.Background(), types.NamespacedName{Name: "shards.core.kcp.io"}, &apiExport) + if err != nil { + return err + } + parameters.ApiExportRootShardsKcpIoIdentityHash = apiExport.Status.IdentityHash + + err = 
cs.Get(context.Background(), types.NamespacedName{Name: "topology.kcp.io"}, &apiExport) + if err != nil { + return err + } + parameters.ApiExportRootTopologyKcpIoIdentityHash = apiExport.Status.IdentityHash + + bytes, err := json.Marshal(parameters) + if err != nil { + return err + } + + err = os.WriteFile(dataFile, bytes, 0644) + if err != nil { + return err + } + return nil +} + +func (te *Environment) ApplyYAML(pathToRootConfig string, config *rest.Config, pathToSetupDir string, serverUrl string) error { + cs, err := client.New(config, client.Options{}) + if err != nil { + return fmt.Errorf("unable to create client: %w", err) + } + + // list directory + hasManifestFiles, err := hasManifestFiles(pathToSetupDir) + if err != nil { + return err + } + if hasManifestFiles { + err = te.runTemplatedKubectlCommand(pathToRootConfig, serverUrl, fmt.Sprintf("apply -f %s", pathToSetupDir), true) + if err != nil { + return err + } + } + files, err := os.ReadDir(pathToSetupDir) + if err != nil { + return err + } + + for _, file := range files { + if file.IsDir() { + fileName := file.Name() + // check if pathToSetupDir starts with `[0-9]*-` + re := regexp.MustCompile(dirOrderPattern) + + if re.Match([]byte(fileName)) { + match := re.FindStringSubmatch(fileName) + fileName = match[1] + } + err := te.waitForWorkspace(cs, fileName, te.log) + if err != nil { + return err + } + newServerUrl := fmt.Sprintf("%s:%s", serverUrl, fileName) + wsConfig := rest.CopyConfig(config) + wsConfig.Host = newServerUrl + subDir := filepath.Join(pathToSetupDir, file.Name()) + err = te.ApplyYAML(pathToRootConfig, wsConfig, subDir, newServerUrl) + if err != nil { + return err + } + } + } + log.Info().Msg("finished applying setup") + return nil +} + +func hasManifestFiles(path string) (bool, error) { + files, err := os.ReadDir(path) + if err != nil { + return false, err + } + for _, file := range files { + if strings.HasSuffix(file.Name(), ".yaml") || strings.HasSuffix(file.Name(), ".yml") || strings.HasSuffix(file.Name(), ".json") { + return true, nil + } + } + return false, nil +} + +func (te *Environment) runTemplatedKubectlCommand(kubeconfig string, server string, command string, retry bool) error { + splitCommand := strings.Split(command, " ") + args := []string{fmt.Sprintf("--kubeconfig=%s", kubeconfig), fmt.Sprintf("--server=%s", server)} + args = append(args, splitCommand...) + cmd := exec.Command("kubectl", args...) 
+ cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + err := cmd.Run() + if err != nil { + if retry { + time.Sleep(5 * time.Second) + return te.runTemplatedKubectlCommand(kubeconfig, server, command, false) + } + return err + } + return nil +} diff --git a/test/openfga.go b/test/openfga/openfga.go similarity index 99% rename from test/openfga.go rename to test/openfga/openfga.go index 7923148..dd90d12 100644 --- a/test/openfga.go +++ b/test/openfga/openfga.go @@ -1,4 +1,4 @@ -package test +package openfga import ( "context" From a1724c4ec7542f16195c5f8d5e11fab2636c72ba Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Tue, 8 Jul 2025 07:41:47 +0200 Subject: [PATCH 15/16] refactor: update test coverage exclusions for test utilities for kcp and logger --- .testcoverage.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.testcoverage.yml b/.testcoverage.yml index 0d99ccd..9ed1271 100644 --- a/.testcoverage.yml +++ b/.testcoverage.yml @@ -3,6 +3,6 @@ exclude: - ^controller/testSupport # exclude test support files - mocks # exclude generated mock files - ^test/openfga - - logger/testlogger - - testing/kcpenvtest - + - ^test/kcp + - ^logger/testlogger + From 3af903451ac640246fc489fcbdab1b0b9ad7acbd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Bastian=20Echterh=C3=B6lter?= Date: Wed, 9 Jul 2025 10:55:16 +0200 Subject: [PATCH 16/16] refactor: remove kcp testing again as we can use kcp for this --- .testcoverage.yml | 3 +- test/kcp/kcpserver.go | 128 --------- test/kcp/process/arguments.go | 340 ---------------------- test/kcp/process/bin_path_finder.go | 70 ----- test/kcp/process/procattr_other.go | 28 -- test/kcp/process/procattr_unix.go | 33 --- test/kcp/process/process.go | 365 ------------------------ test/kcp/server.go | 419 ---------------------------- test/{openfga => }/openfga.go | 2 +- 9 files changed, 2 insertions(+), 1386 deletions(-) delete mode 100644 test/kcp/kcpserver.go delete mode 100644 test/kcp/process/arguments.go delete mode 100644 test/kcp/process/bin_path_finder.go delete mode 100644 test/kcp/process/procattr_other.go delete mode 100644 test/kcp/process/procattr_unix.go delete mode 100644 test/kcp/process/process.go delete mode 100644 test/kcp/server.go rename test/{openfga => }/openfga.go (99%) diff --git a/.testcoverage.yml b/.testcoverage.yml index 9ed1271..372613f 100644 --- a/.testcoverage.yml +++ b/.testcoverage.yml @@ -2,7 +2,6 @@ exclude: paths: - ^controller/testSupport # exclude test support files - mocks # exclude generated mock files - - ^test/openfga - - ^test/kcp + - ^test/ - ^logger/testlogger diff --git a/test/kcp/kcpserver.go b/test/kcp/kcpserver.go deleted file mode 100644 index 9c0d393..0000000 --- a/test/kcp/kcpserver.go +++ /dev/null @@ -1,128 +0,0 @@ -package kcp - -import ( - "io" - "net/url" - "os" - "path/filepath" - "time" - - "github.com/platform-mesh/golang-commons/logger" - "github.com/platform-mesh/golang-commons/test/kcp/process" -) - -type KCPServer struct { - processState *process.State - Out io.Writer - Err io.Writer - StartTimeout time.Duration - StopTimeout time.Duration - Dir string - Binary string - Args []string - PathToRoot string - - log *logger.Logger - args *process.Arguments -} - -func NewKCPServer(baseDir string, binary string, pathToRoot string, log *logger.Logger) *KCPServer { - return &KCPServer{ - Dir: baseDir, - Binary: binary, - Args: []string{"start", "-v=1"}, - PathToRoot: pathToRoot, - log: log, - } -} - -func (s *KCPServer) Start() error { - if err := s.prepare(); err != nil 
{ - return err - } - return s.processState.Start(s.Out, s.Err, s.log) -} - -func (s *KCPServer) prepare() error { - if s.Out == nil || s.Err == nil { - //create file writer for the logs - fileOut := filepath.Join(s.PathToRoot, "kcp.log") - out, err := os.Create(fileOut) - if err != nil { - return err - } - writer := io.Writer(out) - - if s.Out == nil { - s.Out = writer - } - if s.Err == nil { - s.Err = writer - } - } - - if err := s.setProcessState(); err != nil { - return err - } - return nil -} - -func (s *KCPServer) setProcessState() error { - var err error - - healthUrl, err := url.Parse("https://localhost:6443/clusters/root/apis/tenancy.kcp.io/v1alpha1/workspaces") - if err != nil { - return err - } - s.processState = &process.State{ - Dir: s.Dir, - Path: s.Binary, - StartTimeout: s.StartTimeout, - StopTimeout: s.StopTimeout, - HealthCheck: process.HealthCheck{ - URL: *healthUrl, - PollInterval: 2 * time.Second, - KcpAssetPath: filepath.Join(s.PathToRoot, ".kcp"), - }, - } - if err := s.processState.Init("kcp"); err != nil { - return err - } - - s.Binary = s.processState.Path - s.Dir = s.processState.Dir - s.StartTimeout = s.processState.StartTimeout - s.StopTimeout = s.processState.StopTimeout - - s.processState.Args, s.Args, err = process.TemplateAndArguments(s.Args, s.Configure(), process.TemplateDefaults{ //nolint:staticcheck - Data: s, - Defaults: s.defaultArgs(), - MinimalDefaults: map[string][]string{}, - }) - if err != nil { - return err - } - - return nil -} - -func (s *KCPServer) defaultArgs() map[string][]string { - args := map[string][]string{} - return args -} - -func (s *KCPServer) Configure() *process.Arguments { - if s.args == nil { - s.args = process.EmptyArguments() - } - return s.args -} - -func (s *KCPServer) Stop() error { - if s.processState != nil { - if err := s.processState.Stop(); err != nil { - return err - } - } - return nil -} diff --git a/test/kcp/process/arguments.go b/test/kcp/process/arguments.go deleted file mode 100644 index 391eec1..0000000 --- a/test/kcp/process/arguments.go +++ /dev/null @@ -1,340 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package process - -import ( - "bytes" - "html/template" - "sort" - "strings" -) - -// RenderTemplates returns an []string to render the templates -// -// Deprecated: will be removed in favor of Arguments. -func RenderTemplates(argTemplates []string, data interface{}) (args []string, err error) { - var t *template.Template - - for _, arg := range argTemplates { - t, err = template.New(arg).Parse(arg) - if err != nil { - args = nil - return - } - - buf := &bytes.Buffer{} - err = t.Execute(buf, data) - if err != nil { - args = nil - return - } - args = append(args, buf.String()) - } - - return -} - -// SliceToArguments converts a slice of arguments to structured arguments, -// appending each argument that starts with `--` and contains an `=` to the -// argument set (ignoring defaults), returning the rest. -// -// Deprecated: will be removed when RenderTemplates is removed. 
-func SliceToArguments(sliceArgs []string, args *Arguments) []string { - var rest []string - for i, arg := range sliceArgs { - if arg == "--" { - rest = append(rest, sliceArgs[i:]...) - return rest - } - // skip non-flag arguments, skip arguments w/o equals because we - // can't tell if the next argument should take a value - if !strings.HasPrefix(arg, "--") || !strings.Contains(arg, "=") { - rest = append(rest, arg) - continue - } - - parts := strings.SplitN(arg[2:], "=", 2) - name := parts[0] - val := parts[1] - - args.AppendNoDefaults(name, val) - } - - return rest -} - -// TemplateDefaults specifies defaults to be used for joining structured arguments with templates. -// -// Deprecated: will be removed when RenderTemplates is removed. -type TemplateDefaults struct { - // Data will be used to render the template. - Data interface{} - // Defaults will be used to default structured arguments if no template is passed. - Defaults map[string][]string - // MinimalDefaults will be used to default structured arguments if a template is passed. - // Use this for flags which *must* be present. - MinimalDefaults map[string][]string // for api server service-cluster-ip-range -} - -// TemplateAndArguments joins structured arguments and non-structured arguments, preserving existing -// behavior. Namely: -// -// 1. if templ has len > 0, it will be rendered against data -// 2. the rendered template values that look like `--foo=bar` will be split -// and appended to args, the rest will be kept around -// 3. the given args will be rendered as string form. If a template is given, -// no defaults will be used, otherwise defaults will be used -// 4. a result of [args..., rest...] will be returned -// -// It returns the resulting rendered arguments, plus the arguments that were -// not transferred to `args` during rendering. -// -// Deprecated: will be removed when RenderTemplates is removed. -func TemplateAndArguments(templ []string, args *Arguments, data TemplateDefaults) (allArgs []string, nonFlagishArgs []string, err error) { - if len(templ) == 0 { // 3 & 4 (no template case) - return args.AsStrings(data.Defaults), nil, nil - } - - // 1: render the template - rendered, err := RenderTemplates(templ, data.Data) - if err != nil { - return nil, nil, err - } - - // 2: filter out structured args and add them to args - rest := SliceToArguments(rendered, args) - - // 3 (template case): render structured args, no defaults (matching the - // legacy case where if Args was specified, no defaults were used) - res := args.AsStrings(data.MinimalDefaults) - - // 4: return the rendered structured args + all non-structured args - return append(res, rest...), rest, nil -} - -// EmptyArguments constructs an empty set of flags with no defaults. -func EmptyArguments() *Arguments { - return &Arguments{ - values: make(map[string]Arg), - } -} - -// Arguments are structured, overridable arguments. -// Each Arguments object contains some set of default arguments, which may -// be appended to, or overridden. -// -// When ready, you can serialize them to pass to exec.Command and friends using -// AsStrings. -// -// All flag-setting methods return the *same* instance of Arguments so that you -// can chain calls. -type Arguments struct { - // values contains the user-set values for the arguments. 
- // `values[key] = dontPass` means "don't pass this flag" - // `values[key] = passAsName` means "pass this flag without args like --key` - // `values[key] = []string{a, b, c}` means "--key=a --key=b --key=c` - // any values not explicitly set here will be copied from defaults on final rendering. - values map[string]Arg -} - -// Arg is an argument that has one or more values, -// and optionally falls back to default values. -type Arg interface { - // Append adds new values to this argument, returning - // a new instance contain the new value. The intermediate - // argument should generally be assumed to be consumed. - Append(vals ...string) Arg - // Get returns the full set of values, optionally including - // the passed in defaults. If it returns nil, this will be - // skipped. If it returns a non-nil empty slice, it'll be - // assumed that the argument should be passed as name-only. - Get(defaults []string) []string -} - -type userArg []string - -func (a userArg) Append(vals ...string) Arg { - return userArg(append(a, vals...)) //nolint:unconvert -} -func (a userArg) Get(_ []string) []string { - return []string(a) -} - -type defaultedArg []string - -func (a defaultedArg) Append(vals ...string) Arg { - return defaultedArg(append(a, vals...)) //nolint:unconvert -} -func (a defaultedArg) Get(defaults []string) []string { - res := append([]string(nil), defaults...) - return append(res, a...) -} - -type dontPassArg struct{} - -func (a dontPassArg) Append(vals ...string) Arg { - return userArg(vals) -} -func (dontPassArg) Get(_ []string) []string { - return nil -} - -type passAsNameArg struct{} - -func (a passAsNameArg) Append(_ ...string) Arg { - return passAsNameArg{} -} -func (passAsNameArg) Get(_ []string) []string { - return []string{} -} - -var ( - // DontPass indicates that the given argument will not actually be - // rendered. - DontPass Arg = dontPassArg{} - // PassAsName indicates that the given flag will be passed as `--key` - // without any value. - PassAsName Arg = passAsNameArg{} -) - -// AsStrings serializes this set of arguments to a slice of strings appropriate -// for passing to exec.Command and friends, making use of the given defaults -// as indicated for each particular argument. -// -// - Any flag in defaults that's not in Arguments will be present in the output -// - Any flag that's present in Arguments will be passed the corresponding -// defaults to do with as it will (ignore, append-to, suppress, etc). -func (a *Arguments) AsStrings(defaults map[string][]string) []string { - // sort for deterministic ordering - keysInOrder := make([]string, 0, len(defaults)+len(a.values)) - for key := range defaults { - if _, userSet := a.values[key]; userSet { - continue - } - keysInOrder = append(keysInOrder, key) - } - for key := range a.values { - keysInOrder = append(keysInOrder, key) - } - sort.Strings(keysInOrder) - - var res []string - for _, key := range keysInOrder { - vals := a.Get(key).Get(defaults[key]) - switch { - case vals == nil: // don't pass - continue - case len(vals) == 0: // pass as name - res = append(res, "--"+key) - default: - for _, val := range vals { - res = append(res, "--"+key+"="+val) - } - } - } - - return res -} - -// Get returns the value of the given flag. If nil, -// it will not be passed in AsString, otherwise: -// -// len == 0 --> `--key`, len > 0 --> `--key=val1 --key=val2 ...`. 
-func (a *Arguments) Get(key string) Arg { - if vals, ok := a.values[key]; ok { - return vals - } - return defaultedArg(nil) -} - -// Enable configures the given key to be passed as a "name-only" flag, -// like, `--key`. -func (a *Arguments) Enable(key string) *Arguments { - a.values[key] = PassAsName - return a -} - -// Disable prevents this flag from be passed. -func (a *Arguments) Disable(key string) *Arguments { - a.values[key] = DontPass - return a -} - -// Append adds additional values to this flag. If this flag has -// yet to be set, initial values will include defaults. If you want -// to intentionally ignore defaults/start from scratch, call AppendNoDefaults. -// -// Multiple values will look like `--key=value1 --key=value2 ...`. -func (a *Arguments) Append(key string, values ...string) *Arguments { - vals, present := a.values[key] - if !present { - vals = defaultedArg{} - } - a.values[key] = vals.Append(values...) - return a -} - -// AppendNoDefaults adds additional values to this flag. However, -// unlike Append, it will *not* copy values from defaults. -func (a *Arguments) AppendNoDefaults(key string, values ...string) *Arguments { - vals, present := a.values[key] - if !present { - vals = userArg{} - } - a.values[key] = vals.Append(values...) - return a -} - -// Set resets the given flag to the specified values, ignoring any existing -// values or defaults. -func (a *Arguments) Set(key string, values ...string) *Arguments { - a.values[key] = userArg(values) - return a -} - -// SetRaw sets the given flag to the given Arg value directly. Use this if -// you need to do some complicated deferred logic or something. -// -// Otherwise behaves like Set. -func (a *Arguments) SetRaw(key string, val Arg) *Arguments { - a.values[key] = val - return a -} - -// FuncArg is a basic implementation of Arg that can be used for custom argument logic, -// like pulling values out of APIServer, or dynamically calculating values just before -// launch. -// -// The given function will be mapped directly to Arg#Get, and will generally be -// used in conjunction with SetRaw. For example, to set `--some-flag` to the -// API server's CertDir, you could do: -// -// server.Configure().SetRaw("--some-flag", FuncArg(func(defaults []string) []string { -// return []string{server.CertDir} -// })) -// -// FuncArg ignores Appends; if you need to support appending values too, consider implementing -// Arg directly. -type FuncArg func([]string) []string - -// Append is a no-op for FuncArg, and just returns itself. -func (a FuncArg) Append(vals ...string) Arg { return a } - -// Get delegates functionality to the FuncArg function itself. -func (a FuncArg) Get(defaults []string) []string { - return a(defaults) -} diff --git a/test/kcp/process/bin_path_finder.go b/test/kcp/process/bin_path_finder.go deleted file mode 100644 index e1428aa..0000000 --- a/test/kcp/process/bin_path_finder.go +++ /dev/null @@ -1,70 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. 
-*/ - -package process - -import ( - "os" - "path/filepath" - "regexp" - "strings" -) - -const ( - // EnvAssetsPath is the environment variable that stores the global test - // binary location override. - EnvAssetsPath = "KUBEBUILDER_ASSETS" - // EnvAssetOverridePrefix is the environment variable prefix for per-binary - // location overrides. - EnvAssetOverridePrefix = "TEST_ASSET_" - // AssetsDefaultPath is the default location to look for test binaries in, - // if no override was provided. - AssetsDefaultPath = "/usr/local/kubebuilder/bin" -) - -// BinPathFinder finds the path to the given named binary, using the following locations -// in order of precedence (highest first). Notice that the various env vars only need -// to be set -- the asset is not checked for existence on the filesystem. -// -// 1. TEST_ASSET_{tr/a-z-/A-Z_/} (if set; asset overrides -- EnvAssetOverridePrefix) -// 1. KUBEBUILDER_ASSETS (if set; global asset path -- EnvAssetsPath) -// 3. assetDirectory (if set; per-config asset directory) -// 4. /usr/local/kubebuilder/bin (AssetsDefaultPath). -func BinPathFinder(symbolicName, assetDirectory string) (binPath string) { - punctuationPattern := regexp.MustCompile("[^A-Z0-9]+") - sanitizedName := punctuationPattern.ReplaceAllString(strings.ToUpper(symbolicName), "_") - leadingNumberPattern := regexp.MustCompile("^[0-9]+") - sanitizedName = leadingNumberPattern.ReplaceAllString(sanitizedName, "") - envVar := EnvAssetOverridePrefix + sanitizedName - - // TEST_ASSET_XYZ - if val, ok := os.LookupEnv(envVar); ok { - return val - } - - // KUBEBUILDER_ASSETS - if val, ok := os.LookupEnv(EnvAssetsPath); ok { - return filepath.Join(val, symbolicName) - } - - // assetDirectory - if assetDirectory != "" { - return filepath.Join(assetDirectory, symbolicName) - } - - // default path - return filepath.Join(AssetsDefaultPath, symbolicName) -} diff --git a/test/kcp/process/procattr_other.go b/test/kcp/process/procattr_other.go deleted file mode 100644 index df13b34..0000000 --- a/test/kcp/process/procattr_other.go +++ /dev/null @@ -1,28 +0,0 @@ -//go:build !aix && !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd && !solaris && !zos -// +build !aix,!darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd,!solaris,!zos - -/* -Copyright 2016 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package process - -import "syscall" - -// GetSysProcAttr returns the SysProcAttr to use for the process, -// for non-unix systems this returns nil. -func GetSysProcAttr() *syscall.SysProcAttr { - return nil -} diff --git a/test/kcp/process/procattr_unix.go b/test/kcp/process/procattr_unix.go deleted file mode 100644 index 83ad509..0000000 --- a/test/kcp/process/procattr_unix.go +++ /dev/null @@ -1,33 +0,0 @@ -//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos -// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos - -/* -Copyright 2023 The Kubernetes Authors. 
- -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package process - -import ( - "golang.org/x/sys/unix" -) - -// GetSysProcAttr returns the SysProcAttr to use for the process, -// for unix systems this returns a SysProcAttr with Setpgid set to true, -// which inherits the parent's process group id. -func GetSysProcAttr() *unix.SysProcAttr { - return &unix.SysProcAttr{ - Setpgid: true, - } -} diff --git a/test/kcp/process/process.go b/test/kcp/process/process.go deleted file mode 100644 index 2849b71..0000000 --- a/test/kcp/process/process.go +++ /dev/null @@ -1,365 +0,0 @@ -/* -Copyright 2021 The Kubernetes Authors. - -Licensed under the Apache License, Version 2.0 (the "License"); -you may not use this file except in compliance with the License. -You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software -distributed under the License is distributed on an "AS IS" BASIS, -WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -See the License for the specific language governing permissions and -limitations under the License. -*/ - -package process - -import ( - "crypto/tls" - "crypto/x509" - "fmt" - "io" - "net" - "net/http" - "net/url" - "os" - "os/exec" - "path" - "path/filepath" - "regexp" - "sync" - "syscall" - "time" - - "github.com/platform-mesh/golang-commons/logger" - "gopkg.in/yaml.v3" -) - -// ListenAddr represents some listening address and port. -type ListenAddr struct { - Address string - Port string -} - -// URL returns a URL for this address with the given scheme and subpath. -func (l *ListenAddr) URL(scheme string, path string) *url.URL { - return &url.URL{ - Scheme: scheme, - Host: l.HostPort(), - Path: path, - } -} - -// HostPort returns the joined host-port pair for this address. -func (l *ListenAddr) HostPort() string { - return net.JoinHostPort(l.Address, l.Port) -} - -// HealthCheck describes the information needed to health-check a process via -// some health-check URL. -type HealthCheck struct { - url.URL - KcpAssetPath string - - // HealthCheckPollInterval is the interval which will be used for polling the - // endpoint described by Host, Port, and Path. - // - // If left empty it will default to 100 Milliseconds. - PollInterval time.Duration -} - -// State define the state of the process. -type State struct { - Cmd *exec.Cmd - - // HealthCheck describes how to check if this process is up. If we get an http.StatusOK, - // we assume the process is ready to operate. - // - // For example, the /healthz endpoint of the k8s API server, or the /health endpoint of etcd. - HealthCheck HealthCheck - - Args []string - - StopTimeout time.Duration - StartTimeout time.Duration - - Dir string - DirNeedsCleaning bool - Path string - - // ready holds whether the process is currently in ready state (hit the ready condition) or not. 
- // It will be set to true on a successful `Start()` and set to false on a successful `Stop()` - ready bool - - // waitDone is closed when our call to wait finishes up, and indicates that - // our process has terminated. - waitDone chan struct{} - errMu sync.Mutex - exitErr error - exited bool -} - -// Init sets up this process, configuring binary paths if missing, initializing -// temporary directories, etc. -// -// This defaults all defaultable fields. -func (ps *State) Init(name string) error { - if ps.Path == "" { - if name == "" { - return fmt.Errorf("must have at least one of name or path") - } - ps.Path = BinPathFinder(name, "") - } - - if ps.Dir == "" { - newDir, err := os.MkdirTemp("", "k8s_test_framework_") - if err != nil { - return err - } - ps.Dir = newDir - ps.DirNeedsCleaning = true - } - - if ps.StartTimeout == 0 { - ps.StartTimeout = 20 * time.Second - } - - if ps.StopTimeout == 0 { - ps.StopTimeout = 20 * time.Second - } - return nil -} - -type stopChannel chan struct{} - -// CheckFlag checks the help output of this command for the presence of the given flag, specified -// without the leading `--` (e.g. `CheckFlag("insecure-port")` checks for `--insecure-port`), -// returning true if the flag is present. -func (ps *State) CheckFlag(flag string) (bool, error) { - cmd := exec.Command(ps.Path, "--help") - outContents, err := cmd.CombinedOutput() - if err != nil { - return false, fmt.Errorf("unable to run command %q to check for flag %q: %w", ps.Path, flag, err) - } - pat := `(?m)^\s*--` + flag + `\b` // (m --> multi-line --> ^ matches start of line) - matched, err := regexp.Match(pat, outContents) - if err != nil { - return false, fmt.Errorf("unable to check command %q for flag %q in help output: %w", ps.Path, flag, err) - } - return matched, nil -} - -// Start starts the apiserver, waits for it to come up, and returns an error, -// if occurred. -func (ps *State) Start(stdout, stderr io.Writer, log *logger.Logger) (err error) { - if ps.ready { - return nil - } - - ps.Cmd = exec.Command(ps.Path, ps.Args...) - ps.Cmd.Dir = ps.Dir - ps.Cmd.Stdout = stdout - ps.Cmd.Stderr = stderr - ps.Cmd.SysProcAttr = GetSysProcAttr() - - ready := make(chan bool) - timedOut := time.After(ps.StartTimeout) - pollerStopCh := make(stopChannel) - go pollURLUntilOK(ps.HealthCheck.URL, ps.HealthCheck.PollInterval, ps.HealthCheck.KcpAssetPath, ready, pollerStopCh, log) - - ps.waitDone = make(chan struct{}) - - if err := ps.Cmd.Start(); err != nil { - ps.errMu.Lock() - defer ps.errMu.Unlock() - ps.exited = true - return err - } - go func() { - defer close(ps.waitDone) - err := ps.Cmd.Wait() - - ps.errMu.Lock() - defer ps.errMu.Unlock() - ps.exitErr = err - ps.exited = true - }() - - select { - case <-ready: - ps.ready = true - return nil - case <-ps.waitDone: - close(pollerStopCh) - return fmt.Errorf("timeout waiting for process %s to start successfully "+ - "(it may have failed to start, or stopped unexpectedly before becoming ready)", - path.Base(ps.Path)) - case <-timedOut: - close(pollerStopCh) - if ps.Cmd != nil { - // intentionally ignore this -- we might've crashed, failed to start, etc - ps.Cmd.Process.Signal(syscall.SIGTERM) //nolint:errcheck - } - return fmt.Errorf("timeout waiting for process %s to start", path.Base(ps.Path)) - } -} - -// Exited returns true if the process exited, and may also -// return an error (as per Cmd.Wait) if the process did not -// exit with error code 0. 
-func (ps *State) Exited() (bool, error) { - ps.errMu.Lock() - defer ps.errMu.Unlock() - return ps.exited, ps.exitErr -} - -func pollURLUntilOK(url url.URL, interval time.Duration, kcpAssetPath string, ready chan bool, stopCh stopChannel, log *logger.Logger) { - - if interval <= 0 { - interval = 5000 * time.Millisecond - } - for { - token, ca, err := readTokenAndCA(kcpAssetPath) - if err != nil { - log.Info().Msg("health check failed. Credentials not ready") - time.Sleep(interval) - } - caCertPool := x509.NewCertPool() - caCertPool.AppendCertsFromPEM(ca) - client := &http.Client{ - Transport: &http.Transport{ - TLSClientConfig: &tls.Config{ - RootCAs: caCertPool, - }, - }, - } - req, err := http.NewRequest(http.MethodGet, url.String(), nil) - if err != nil { - log.Fatal().Err(err).Msg("error creating request") - } - if token != "" { - req.Header.Add("Authorization", "Bearer "+token) - } - res, err := client.Do(req) - if err == nil { - if err != nil { - fmt.Println("Error reading response body:", err) - return - } - err := res.Body.Close() - if err != nil { - fmt.Println("Error closing response body:", err) - return - } - if res.StatusCode == http.StatusOK { - log.Info().Int("status", res.StatusCode).Msg("KCP Ready (health check succeeded)") - ready <- true - return - } - log.Info().Int("status", res.StatusCode).Msg("Waiting for KCP to get ready (health check failed)") - } - - select { - case <-stopCh: - return - default: - time.Sleep(interval) - } - } -} - -type kubeconfig struct { - Users []struct { - Name string `yaml:"name"` - User struct { - Token string `yaml:"token"` - } `yaml:"user"` - } -} - -func readTokenAndCA(path string) (string, []byte, error) { - adminKubeconfigPath := filepath.Join(path, "admin.kubeconfig") - // check if file exists - if _, err := os.Stat(adminKubeconfigPath); os.IsNotExist(err) { - return "", nil, fmt.Errorf("file %s does not exist", adminKubeconfigPath) - } - file, err := os.Open(adminKubeconfigPath) - if err != nil { - return "", nil, fmt.Errorf("error opening file %s: %w", path, err) - } - defer file.Close() //nolint:errcheck - - data, err := io.ReadAll(file) - if err != nil { - return "", nil, fmt.Errorf("error reading file %s: %w", path, err) - } - - var config kubeconfig - err = yaml.Unmarshal(data, &config) - if err != nil { - return "", nil, fmt.Errorf("error unmarshalling yaml from file %s: %w", path, err) - } - - var userToken string - for _, user := range config.Users { - if user.Name == "kcp-admin" { - userToken = user.User.Token - } - } - if userToken == "" { - return "", nil, fmt.Errorf("token not found in kubeconfig file %s", path) - } - - certPath := filepath.Join(path, "apiserver.crt") - if _, err := os.Stat(certPath); os.IsNotExist(err) { - return "", nil, fmt.Errorf("file %s does not exist", certPath) - } - file, err = os.Open(certPath) - if err != nil { - return "", nil, fmt.Errorf("error opening file %s: %w", path, err) - } - defer file.Close() //nolint:errcheck - - data, err = io.ReadAll(file) - if err != nil { - return "", nil, fmt.Errorf("error reading file %s: %w", path, err) - } - - return userToken, data, nil -} - -// Stop stops this process gracefully, waits for its termination, and cleans up -// the CertDir if necessary. -func (ps *State) Stop() error { - // Always clear the directory if we need to. 
- defer func() { - if ps.DirNeedsCleaning { - _ = os.RemoveAll(ps.Dir) - } - }() - if ps.Cmd == nil { - return nil - } - if done, _ := ps.Exited(); done { - return nil - } - if err := ps.Cmd.Process.Signal(syscall.SIGTERM); err != nil { - return fmt.Errorf("unable to signal for process %s to stop: %w", ps.Path, err) - } - - timedOut := time.After(ps.StopTimeout) - - select { - case <-ps.waitDone: - break - case <-timedOut: - if err := ps.Cmd.Process.Signal(syscall.SIGKILL); err != nil { - return fmt.Errorf("unable to kill process %s: %w", ps.Path, err) - } - return fmt.Errorf("timeout waiting for process %s to stop", path.Base(ps.Path)) - } - ps.ready = false - return nil -} diff --git a/test/kcp/server.go b/test/kcp/server.go deleted file mode 100644 index 2f953b3..0000000 --- a/test/kcp/server.go +++ /dev/null @@ -1,419 +0,0 @@ -package kcp - -import ( - "context" - "encoding/json" - "fmt" - "os" - "os/exec" - "path/filepath" - "regexp" - "strings" - "time" - - "github.com/otiai10/copy" - "github.com/rs/zerolog/log" - corev1 "k8s.io/api/core/v1" - "k8s.io/apimachinery/pkg/runtime" - "k8s.io/apimachinery/pkg/types" - utilruntime "k8s.io/apimachinery/pkg/util/runtime" - "k8s.io/apimachinery/pkg/util/wait" - "k8s.io/client-go/kubernetes/scheme" - "k8s.io/client-go/rest" - "k8s.io/client-go/tools/clientcmd" - "sigs.k8s.io/controller-runtime/pkg/client" - - "github.com/platform-mesh/golang-commons/logger" - - kcpapiv1alpha "github.com/kcp-dev/kcp/sdk/apis/apis/v1alpha1" - kcptenancyv1alpha "github.com/kcp-dev/kcp/sdk/apis/tenancy/v1alpha1" -) - -const ( - kcpEnvStartTimeout = "KCP_SERVER_START_TIMEOUT" - kcpEnvStopTimeout = "KCP_SERVER_STOP_TIMEOUT" - defaultKCPServerTimeout = 1 * time.Minute - kcpAdminKubeconfigPath = ".kcp/admin.kubeconfig" - kcpRootNamespaceServerUrl = "https://localhost:6443/clusters/root" - dirOrderPattern = `^[0-9]*-(.*)$` -) - -type Environment struct { - kcpServer *KCPServer - - Scheme *runtime.Scheme - - ControlPlaneStartTimeout time.Duration - - ControlPlaneStopTimeout time.Duration - - Config *rest.Config - - log *logger.Logger - - RelativeSetupDirectory string - - PathToRoot string - RelativeAssetDirectory string - - ProviderWorkspace string - APIExportEndpointSliceName string - - useExistingCluster bool -} - -func NewEnvironment(apiExportEndpointSliceName string, providerWorkspaceName string, pathToRoot string, relativeAssetDirectory string, relativeSetupDirectory string, useExistingCluster bool, log *logger.Logger) *Environment { - kcpBinary := filepath.Join(relativeAssetDirectory, "kcp") - kcpServ := NewKCPServer(pathToRoot, kcpBinary, pathToRoot, log) - - //kcpServ.Out = os.Stdout - //kcpServ.Err = os.Stderr - return &Environment{ - log: log, - kcpServer: kcpServ, - APIExportEndpointSliceName: apiExportEndpointSliceName, - ProviderWorkspace: providerWorkspaceName, - RelativeSetupDirectory: relativeSetupDirectory, - RelativeAssetDirectory: relativeAssetDirectory, - PathToRoot: pathToRoot, - useExistingCluster: useExistingCluster, - } -} - -func (te *Environment) Start() (*rest.Config, string, error) { - - if !te.useExistingCluster { - // ensure clean .kcp directory - err := te.cleanDir() - if err != nil { - return nil, "", err - } - - if err := te.defaultTimeouts(); err != nil { - return nil, "", fmt.Errorf("failed to default controlplane timeouts: %w", err) - } - te.kcpServer.StartTimeout = te.ControlPlaneStartTimeout - te.kcpServer.StopTimeout = te.ControlPlaneStopTimeout - - te.log.Info().Msg("starting control plane") - if err := te.kcpServer.Start(); err != 
nil { - return nil, "", fmt.Errorf("unable to start control plane itself: %w", err) - } - } - - if te.Scheme == nil { - te.Scheme = scheme.Scheme - utilruntime.Must(kcpapiv1alpha.AddToScheme(te.Scheme)) - utilruntime.Must(kcptenancyv1alpha.AddToScheme(te.Scheme)) - } - //// wait for default namespace to actually be created and seen as available to the apiserver - if err := te.waitForDefaultNamespace(); err != nil { - return nil, "", fmt.Errorf("default namespace didn't register within deadline: %w", err) - } - - kubectlPath := filepath.Join(te.PathToRoot, ".kcp", "admin.kubeconfig") - var err error - te.Config, err = clientcmd.BuildConfigFromFlags("", kubectlPath) - if err != nil { - return nil, "", err - } - - if te.RelativeSetupDirectory != "" { - // Apply all yaml files in the setup directory - setupDirectory := filepath.Join(te.PathToRoot, te.RelativeSetupDirectory) - kubeconfigPath := filepath.Join(te.PathToRoot, kcpAdminKubeconfigPath) - err := te.ApplySetup(kubeconfigPath, te.Config, setupDirectory, kcpRootNamespaceServerUrl) - if err != nil { - return nil, "", err - } - } - - // Select api export - providerServerUrl := fmt.Sprintf("%s:%s", te.Config.Host, te.ProviderWorkspace) - te.Config.Host = providerServerUrl - cs, err := client.New(te.Config, client.Options{}) - if err != nil { - return nil, "", fmt.Errorf("unable to create client: %w", err) - } - - apiExportEndpointSlice := kcpapiv1alpha.APIExportEndpointSlice{} - err = cs.Get(context.Background(), types.NamespacedName{Name: te.APIExportEndpointSliceName}, &apiExportEndpointSlice) - if err != nil { - return nil, "", err - } - - if len(apiExportEndpointSlice.Status.APIExportEndpoints) == 0 { - return nil, "", fmt.Errorf("no virtual workspaces found") - } - - te.Config.Host = kcpRootNamespaceServerUrl - te.Config.QPS = 1000.0 - te.Config.Burst = 2000.0 - - return te.Config, apiExportEndpointSlice.Status.APIExportEndpoints[0].URL, nil -} - -func (te *Environment) Stop(useExistingCluster bool) error { - if !useExistingCluster { - defer te.cleanDir() //nolint:errcheck - return te.kcpServer.Stop() - } - return nil -} - -func (te *Environment) cleanDir() error { - kcpPath := filepath.Join(te.PathToRoot, ".kcp") - return os.RemoveAll(kcpPath) -} - -func (te *Environment) waitForDefaultNamespace() error { - kubectlPath := filepath.Join(te.PathToRoot, ".kcp", "admin.kubeconfig") - config, err := clientcmd.BuildConfigFromFlags("", kubectlPath) - if err != nil { - return err - } - cs, err := client.New(config, client.Options{}) - if err != nil { - return fmt.Errorf("unable to create client: %w", err) - } - // It shouldn't take longer than 5s for the default namespace to be brought up in etcd - return wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*50, time.Second*10, true, func(ctx context.Context) (bool, error) { - te.log.Info().Msg("waiting for default namespace") - if err = cs.Get(ctx, types.NamespacedName{Name: "default"}, &corev1.Namespace{}); err != nil { - te.log.Info().Msg("namespace not found") - return false, nil //nolint:nilerr - } - return true, nil - }) -} - -func (te *Environment) waitForWorkspace(client client.Client, name string, log *logger.Logger) error { - // It shouldn't take longer than 5s for the default namespace to be brought up in etcd - err := wait.PollUntilContextTimeout(context.TODO(), time.Millisecond*500, time.Second*15, true, func(ctx context.Context) (bool, error) { - ws := &kcptenancyv1alpha.Workspace{} - if err := client.Get(ctx, types.NamespacedName{Name: name}, ws); err != nil { - return 
false, nil //nolint:nilerr - } - ready := ws.Status.Phase == "Ready" - log.Info().Str("workspace", name).Bool("ready", ready).Msg("waiting for workspace to be ready") - return ready, nil - }) - - if err != nil { - return fmt.Errorf("workspace %s did not become ready: %w", name, err) - } - return err -} - -func (te *Environment) defaultTimeouts() error { - var err error - if te.ControlPlaneStartTimeout == 0 { - if envVal := os.Getenv(kcpEnvStartTimeout); envVal != "" { - te.ControlPlaneStartTimeout, err = time.ParseDuration(envVal) - if err != nil { - return err - } - } else { - te.ControlPlaneStartTimeout = defaultKCPServerTimeout - } - } - - if te.ControlPlaneStopTimeout == 0 { - if envVal := os.Getenv(kcpEnvStopTimeout); envVal != "" { - te.ControlPlaneStopTimeout, err = time.ParseDuration(envVal) - if err != nil { - return err - } - } else { - te.ControlPlaneStopTimeout = defaultKCPServerTimeout - } - } - return nil -} - -type TemplateParameters struct { - ApiExportRootTenancyKcpIoIdentityHash string `json:"apiExportRootTenancyKcpIoIdentityHash"` - ApiExportRootTopologyKcpIoIdentityHash string `json:"apiExportRootTopologyKcpIoIdentityHash"` - ApiExportRootShardsKcpIoIdentityHash string `json:"apiExportRootShardsKcpIoIdentityHash"` -} - -func (te *Environment) ApplySetup(pathToRootConfig string, config *rest.Config, setupDirectoryPath string, serverUrl string) error { - - dataFile := filepath.Join(te.PathToRoot, ".kcp/data.json") - - err := generateTemplateDataFile(config, dataFile) - if err != nil { - return err - } - - // Copy setup dir - tmpSetupDir := filepath.Join(te.PathToRoot, ".kcp/setup") - err = os.Mkdir(tmpSetupDir, 0755) - if err != nil { - return err - } - err = copy.Copy(setupDirectoryPath, tmpSetupDir) - if err != nil { - return err - } - defer os.RemoveAll(tmpSetupDir) //nolint:errcheck - - // Apply Gomplate recursively - err = applyTemplate(te.PathToRoot, tmpSetupDir, dataFile) - if err != nil { - return err - } - - return te.ApplyYAML(pathToRootConfig, config, tmpSetupDir, serverUrl) - -} - -func applyTemplate(pathToRoot string, dir string, dataFile string) error { - gomplateBinary := filepath.Join(pathToRoot, "bin", "gomplate") - files, err := os.ReadDir(dir) - if err != nil { - return err - } - - for _, file := range files { - if file.IsDir() { - err := applyTemplate(pathToRoot, filepath.Join(dir, file.Name()), dataFile) - if err != nil { - return err - } - } else { - if strings.HasSuffix(file.Name(), ".yaml") { - filePath := filepath.Join(dir, file.Name()) - gomplateCmd := exec.Command(gomplateBinary, "-f", filePath, "-c", "data="+dataFile, "-o", filePath) - gomplateCmd.Stdout = os.Stdout - gomplateCmd.Stderr = os.Stderr - if err := gomplateCmd.Run(); err != nil { - return err - } - - } - } - } - return nil - -} - -func generateTemplateDataFile(config *rest.Config, dataFile string) error { - // Collect Variables - cs, err := client.New(config, client.Options{}) - if err != nil { - return fmt.Errorf("unable to create client: %w", err) - } - - parameters := TemplateParameters{} - apiExport := kcpapiv1alpha.APIExport{} - err = cs.Get(context.Background(), types.NamespacedName{Name: "tenancy.kcp.io"}, &apiExport) - if err != nil { - return err - } - parameters.ApiExportRootTenancyKcpIoIdentityHash = apiExport.Status.IdentityHash - - err = cs.Get(context.Background(), types.NamespacedName{Name: "shards.core.kcp.io"}, &apiExport) - if err != nil { - return err - } - parameters.ApiExportRootShardsKcpIoIdentityHash = apiExport.Status.IdentityHash - - err = 
cs.Get(context.Background(), types.NamespacedName{Name: "topology.kcp.io"}, &apiExport) - if err != nil { - return err - } - parameters.ApiExportRootTopologyKcpIoIdentityHash = apiExport.Status.IdentityHash - - bytes, err := json.Marshal(parameters) - if err != nil { - return err - } - - err = os.WriteFile(dataFile, bytes, 0644) - if err != nil { - return err - } - return nil -} - -func (te *Environment) ApplyYAML(pathToRootConfig string, config *rest.Config, pathToSetupDir string, serverUrl string) error { - cs, err := client.New(config, client.Options{}) - if err != nil { - return fmt.Errorf("unable to create client: %w", err) - } - - // list directory - hasManifestFiles, err := hasManifestFiles(pathToSetupDir) - if err != nil { - return err - } - if hasManifestFiles { - err = te.runTemplatedKubectlCommand(pathToRootConfig, serverUrl, fmt.Sprintf("apply -f %s", pathToSetupDir), true) - if err != nil { - return err - } - } - files, err := os.ReadDir(pathToSetupDir) - if err != nil { - return err - } - - for _, file := range files { - if file.IsDir() { - fileName := file.Name() - // check if pathToSetupDir starts with `[0-9]*-` - re := regexp.MustCompile(dirOrderPattern) - - if re.Match([]byte(fileName)) { - match := re.FindStringSubmatch(fileName) - fileName = match[1] - } - err := te.waitForWorkspace(cs, fileName, te.log) - if err != nil { - return err - } - newServerUrl := fmt.Sprintf("%s:%s", serverUrl, fileName) - wsConfig := rest.CopyConfig(config) - wsConfig.Host = newServerUrl - subDir := filepath.Join(pathToSetupDir, file.Name()) - err = te.ApplyYAML(pathToRootConfig, wsConfig, subDir, newServerUrl) - if err != nil { - return err - } - } - } - log.Info().Msg("finished applying setup") - return nil -} - -func hasManifestFiles(path string) (bool, error) { - files, err := os.ReadDir(path) - if err != nil { - return false, err - } - for _, file := range files { - if strings.HasSuffix(file.Name(), ".yaml") || strings.HasSuffix(file.Name(), ".yml") || strings.HasSuffix(file.Name(), ".json") { - return true, nil - } - } - return false, nil -} - -func (te *Environment) runTemplatedKubectlCommand(kubeconfig string, server string, command string, retry bool) error { - splitCommand := strings.Split(command, " ") - args := []string{fmt.Sprintf("--kubeconfig=%s", kubeconfig), fmt.Sprintf("--server=%s", server)} - args = append(args, splitCommand...) - cmd := exec.Command("kubectl", args...) - cmd.Stdout = os.Stdout - cmd.Stderr = os.Stderr - err := cmd.Run() - if err != nil { - if retry { - time.Sleep(5 * time.Second) - return te.runTemplatedKubectlCommand(kubeconfig, server, command, false) - } - return err - } - return nil -} diff --git a/test/openfga/openfga.go b/test/openfga.go similarity index 99% rename from test/openfga/openfga.go rename to test/openfga.go index dd90d12..7923148 100644 --- a/test/openfga/openfga.go +++ b/test/openfga.go @@ -1,4 +1,4 @@ -package openfga +package test import ( "context"
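
For reviewers of this removal: a minimal sketch, assuming the constructor and method signatures of the deleted test/kcp package shown above, of how the Environment was typically driven around a test run before this change. The endpoint-slice name, workspace name, and paths below are placeholders, and the logger is taken as a parameter rather than constructed, since its setup is outside this patch.

package kcp_test

import (
	"testing"

	"github.com/platform-mesh/golang-commons/logger"
	"github.com/platform-mesh/golang-commons/test/kcp"
)

// runKCPEnv boots the (now removed) kcp test environment, runs the test
// suite, and tears the server down again. All string arguments are
// placeholders for illustration only.
func runKCPEnv(m *testing.M, log *logger.Logger) (int, error) {
	env := kcp.NewEnvironment(
		"example-endpoint-slice", // APIExportEndpointSliceName (placeholder)
		"provider",               // provider workspace name (placeholder)
		"../..",                  // path to the repository root (placeholder)
		"bin",                    // relative asset directory containing the kcp binary (placeholder)
		"test/setup",             // relative setup directory with templated YAML (placeholder)
		false,                    // useExistingCluster
		log,
	)

	// Start launches kcp, waits for the default namespace, applies the
	// templated setup manifests, and returns a rest.Config for the root
	// workspace plus the URL of the first APIExportEndpoint.
	cfg, vwURL, err := env.Start()
	if err != nil {
		return 1, err
	}
	defer env.Stop(false) //nolint:errcheck

	_, _ = cfg, vwURL // hand these to the controllers under test
	return m.Run(), nil
}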