diff --git a/docs/libraries/clusteraccess.md b/docs/libraries/clusteraccess.md
index 77ec280..3952daf 100644
--- a/docs/libraries/clusteraccess.md
+++ b/docs/libraries/clusteraccess.md
@@ -88,6 +88,9 @@ func (c *MyController) Reconcile(ctx context.Context, req reconcile.Request) (re
 
 The ClusterAccess Reconciler's `SkipWorkloadCluster` method can be used during initialization to disable creation of a `ClusterRequest` for a workload cluster. If for some reason the `AccessRequest` resources are required, they can be retrieved via `MCPAccessRequest` and `WorkloadAccessRequest`.
 
+The ClusterAccess Reconciler remembers requests that are in deletion and won't create new resources for them. This means that `Reconcile` can safely be called at the beginning of a reconciliation that is going to delete resources and `ReconcileDelete` at the end of it, without the former recreating resources that the latter has already removed.
+A request is considered to be 'in deletion' once `ReconcileDelete` has been called for it, and it stops being 'in deletion' when `ReconcileDelete` returns with a `RequeueAfter` value of zero and no error.
+
 ### ClusterAccess Reconciler - Advanced
 
 Instantiate the ClusterAccess Reconciler during controller setup and store the instance in the controller's struct.
@@ -184,6 +187,9 @@ There are four getter methods that can be called after the cluster access has be
 
 Note that not all of these methods will always return something. For example, a registration created via `ExistingCluster(...)` references a `Cluster` directly and can therefore not return a `ClusterRequest`. `Access` and `AccessRequest` will only work if either token-based access or OIDC-based access has been configured during the registration, otherwise there won't be any `AccessRequest`. Any method which cannot return the expected value due to the resource not being configured will simply return `nil` instead, without an error. The error is only returned if something goes wrong during retrieval of the resource.
 
+The ClusterAccess Reconciler remembers requests that are in deletion and won't create new resources for them. This means that `Reconcile` can safely be called at the beginning of a reconciliation that is going to delete resources and `ReconcileDelete` at the end of it, without the former recreating resources that the latter has already removed.
+A request is considered to be 'in deletion' once `ReconcileDelete` has been called for it, and it stops being 'in deletion' when `ReconcileDelete` returns with a `RequeueAfter` value of zero and no error.
+
 #### Additional Data
 
 While probably not required for most cases, there might be some situations in which the generation of resources requires more information than just the `reconcile.Request`, for example if the controller fetches some kind of configuration that specifies the required access permissions. The ClusterAccess library enables this by allowing arbitrary arguments to be passed into some methods: `Reconcile`, `ReconcileDelete`, as well as the four getter methods `Access`, `AccessRequest`, `ClusterRequest`, and `Cluster` take any amount of optional arguments. Additional arguments that are passed into any of these methods will be passed to the generator functions (which have been passed into `WithTokenAccessGenerator`, `WithOIDCAccessGenerator`, and `WithNamespaceGenerator` during creation of the `ClusterRegistration`), which can use the additional information for generating the namespace or the spec for `AccessRequest` or `ClusterRequest`.
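The deletion-safe call pattern documented above can be wired into a controller roughly as in the following sketch. This is illustrative only and not part of this change: `MyController`, its `clusterAccess` field, the stand-in `ConfigMap` reconciled object, and the abbreviated import path of the `advanced` package are assumptions, not library names.

```go
// Package mycontroller sketches a controller that uses the ClusterAccess Reconciler
// with the deletion-safe call order described in the documentation above.
package mycontroller

import (
	"context"

	corev1 "k8s.io/api/core/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"

	// Assumption: replace with the repository's real module path for lib/clusteraccess/advanced.
	advanced "example.com/REPLACE/lib/clusteraccess/advanced"
)

// MyController is a hypothetical controller; clusterAccess is created and
// registered during controller setup (see the setup sections of this document).
type MyController struct {
	Client        client.Client
	clusterAccess advanced.ClusterAccessReconciler
}

func (c *MyController) Reconcile(ctx context.Context, req reconcile.Request) (reconcile.Result, error) {
	// The reconciled object; a ConfigMap is used here only as a stand-in type.
	obj := &corev1.ConfigMap{}
	if err := c.Client.Get(ctx, req.NamespacedName, obj); err != nil {
		return reconcile.Result{}, client.IgnoreNotFound(err)
	}
	inDeletion := !obj.DeletionTimestamp.IsZero()

	// Safe to call first even during deletion: while the request is 'in deletion',
	// resources already removed by a previous ReconcileDelete call are not recreated.
	res, err := c.clusterAccess.Reconcile(ctx, req)
	if err != nil {
		return reconcile.Result{}, err
	}
	if !inDeletion && res.RequeueAfter > 0 {
		// still waiting for ClusterRequests/AccessRequests to become ready
		return res, nil
	}

	if inDeletion {
		// Tear down the managed ClusterRequests and AccessRequests at the end of the
		// deletion flow. A zero RequeueAfter together with a nil error means cleanup
		// is done and the request is no longer considered 'in deletion'.
		res, err := c.clusterAccess.ReconcileDelete(ctx, req)
		if err != nil || res.RequeueAfter > 0 {
			return res, err
		}
		// ... remove the controller's own finalizer from obj here ...
		return reconcile.Result{}, nil
	}

	// ... regular reconciliation work using the granted cluster access ...
	return reconcile.Result{}, nil
}
```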
diff --git a/lib/clusteraccess/advanced/clusteraccess.go b/lib/clusteraccess/advanced/clusteraccess.go
index 256c0be..c329e9e 100644
--- a/lib/clusteraccess/advanced/clusteraccess.go
+++ b/lib/clusteraccess/advanced/clusteraccess.go
@@ -5,13 +5,15 @@ import (
 	"context"
 	"fmt"
 	"maps"
-	"slices"
 	"strings"
+	"sync"
 	"time"
 
 	corev1 "k8s.io/api/core/v1"
 	apierrors "k8s.io/apimachinery/pkg/api/errors"
 	"k8s.io/apimachinery/pkg/runtime"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/sets"
 	"k8s.io/client-go/tools/clientcmd"
 	"sigs.k8s.io/controller-runtime/pkg/client"
 	"sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
@@ -29,6 +31,7 @@ import (
 )
 
 const caControllerName = "ClusterAccess"
+const Finalizer = clustersv1alpha1.GroupName + "/clusteraccess"
 
 //////////////////
 /// INTERFACES ///
@@ -71,16 +74,20 @@ type ClusterAccessReconciler interface {
 	// Reconcile creates the ClusterRequests and/or AccessRequests for the registered clusters.
 	// This function should be called during all reconciliations of the reconciled object.
 	// ctx is the context for the reconciliation.
-	// request is the object that is being reconciled
+	// request is the object that is being reconciled.
 	// It returns a reconcile.Result and an error if the reconciliation failed.
 	// The reconcile.Result may contain a RequeueAfter value to indicate that the reconciliation should be retried after a certain duration.
 	// The duration is set by the WithRetryInterval method.
 	// Any additional arguments provided are passed into all methods of the ClusterRegistration objects that are called.
+	//
+	// Note that Reconcile will not create any new resources if the current request is in deletion.
+	// A request is considered to be in deletion if ReconcileDelete has been called for it at least once and not successfully finished (= with RequeueAfter == 0 and no error) since then.
+	// This means that Reconcile can safely be called at the beginning of a deletion reconciliation without having to worry about re-creating already deleted resources.
 	Reconcile(ctx context.Context, request reconcile.Request, additionalData ...any) (reconcile.Result, error)
 	// ReconcileDelete deletes the ClusterRequests and/or AccessRequests for the registered clusters.
 	// This function should be called during the deletion of the reconciled object.
 	// ctx is the context for the reconciliation.
-	// request is the object that is being reconciled
+	// request is the object that is being reconciled.
 	// It returns a reconcile.Result and an error if the reconciliation failed.
 	// The reconcile.Result may contain a RequeueAfter value to indicate that the reconciliation should be retried after a certain duration.
 	// The duration is set by the WithRetryInterval method.
@@ -440,6 +447,8 @@ func ExistingClusterRequest(id, suffix string, generateClusterRequestRef ObjectR
 type reconcilerImpl struct {
 	controllerName        string
 	platformClusterClient client.Client
+	requestsInDeletion    sets.Set[reconcile.Request]
+	ridLock               *sync.RWMutex
 	interval              time.Duration
 	registrations         map[string]ClusterRegistration
@@ -456,6 +465,8 @@ func NewClusterAccessReconciler(platformClusterClient client.Client, controllerN
 	return &reconcilerImpl{
 		controllerName:        controllerName,
 		platformClusterClient: platformClusterClient,
+		requestsInDeletion:    sets.New[reconcile.Request](),
+		ridLock:               &sync.RWMutex{},
 		interval:              5 * time.Second,
 		registrations:         map[string]ClusterRegistration{},
 		managedBy:             DefaultManagedLabelGenerator,
@@ -611,6 +622,10 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques
 	log := logging.FromContextOrDiscard(ctx).WithName(caControllerName)
 	ctx = logging.NewContext(ctx, log)
 	log.Info("Reconciling cluster access")
+	inDeletion := r.isInDeletion(request)
+	if inDeletion {
+		log.Info("Request is in deletion, missing resources will not be created")
+	}
 
 	// iterate over registrations and ensure that the corresponding resources are ready
 	for _, rawReg := range r.registrations {
@@ -639,6 +654,10 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques
 		if !apierrors.IsNotFound(err) {
 			return reconcile.Result{}, fmt.Errorf("unable to get platform cluster namespace '%s': %w", ns.Name, err)
 		}
+		if inDeletion {
+			rlog.Debug("Skipping platform cluster namespace creation because this request is in deletion")
+			continue
+		}
 		rlog.Info("Creating platform cluster namespace", "namespaceName", ns.Name)
 		if err := r.platformClusterClient.Create(ctx, ns); err != nil {
 			return reconcile.Result{}, fmt.Errorf("unable to create platform cluster namespace '%s': %w", ns.Name, err)
@@ -665,9 +684,14 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques
 			exists = false
 		}
 		if !exists {
+			if inDeletion {
+				rlog.Debug("Skipping ClusterRequest creation because this request is in deletion", "crName", cr.Name, "crNamespace", cr.Namespace)
+				continue
+			}
 			rlog.Info("Creating ClusterRequest", "crName", cr.Name, "crNamespace", cr.Namespace)
 			cr.Labels = map[string]string{}
 			maps.Copy(cr.Labels, expectedLabels)
+			controllerutil.AddFinalizer(cr, Finalizer)
 			cr.Spec = *crSpec
 			if err := r.platformClusterClient.Create(ctx, cr); err != nil {
 				return reconcile.Result{}, fmt.Errorf("unable to create ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err)
@@ -711,6 +735,10 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques
 			exists = false
 		}
 		if !exists {
+			if inDeletion {
+				rlog.Debug("Skipping AccessRequest creation because this request is in deletion", "arName", ar.Name, "arNamespace", ar.Namespace)
+				continue
+			}
 			rlog.Info("Creating AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace)
 			// cluster and cluster request references are immutable, so set them only when creating new AccessRequests
 			referenced := false
@@ -755,30 +783,35 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques
 			}
 			rlog.Debug("Updating AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace)
 		}
-		if _, err := controllerutil.CreateOrUpdate(ctx, r.platformClusterClient, ar, func() error {
-			ar.Labels = maputils.Merge(ar.Labels, expectedLabels)
-			var err error
-			ar.Spec.Token, err = reg.AccessRequestTokenConfig()
-			if err != nil {
-				return fmt.Errorf("unable to generate AccessRequest token config from registration: %w", err)
-			}
-			ar.Spec.OIDC, err = reg.AccessRequestOIDCConfig()
-			if err != nil {
-				return fmt.Errorf("unable to generate AccessRequest OIDC config from registration: %w", err)
+		if exists && !ar.DeletionTimestamp.IsZero() {
+			rlog.Info("Not updating AccessRequest because it is being deleted", "arName", ar.Name, "arNamespace", ar.Namespace)
+		} else {
+			if _, err := controllerutil.CreateOrUpdate(ctx, r.platformClusterClient, ar, func() error {
+				ar.Labels = maputils.Merge(ar.Labels, expectedLabels)
+				controllerutil.AddFinalizer(ar, Finalizer)
+				var err error
+				ar.Spec.Token, err = reg.AccessRequestTokenConfig()
+				if err != nil {
+					return fmt.Errorf("unable to generate AccessRequest token config from registration: %w", err)
+				}
+				ar.Spec.OIDC, err = reg.AccessRequestOIDCConfig()
+				if err != nil {
+					return fmt.Errorf("unable to generate AccessRequest OIDC config from registration: %w", err)
+				}
+				return nil
+			}); err != nil {
+				return reconcile.Result{}, fmt.Errorf("unable to create or update AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
 			}
-			return nil
-		}); err != nil {
-			return reconcile.Result{}, fmt.Errorf("unable to create or update AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
-		}
-		if !ar.Status.IsGranted() {
-			rlog.Info("Waiting for AccessRequest to be granted", "arName", ar.Name, "arNamespace", ar.Namespace)
-			if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestReadiness]; fc != nil {
-				rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestReadiness)
-				if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestReadiness, &request, cr, ar, nil, nil); err != nil {
-					return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestReadiness, err)
+			if !ar.Status.IsGranted() {
+				rlog.Info("Waiting for AccessRequest to be granted", "arName", ar.Name, "arNamespace", ar.Namespace)
+				if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestReadiness]; fc != nil {
+					rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestReadiness)
+					if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestReadiness, &request, cr, ar, nil, nil); err != nil {
+						return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestReadiness, err)
+					}
 				}
+				return reconcile.Result{RequeueAfter: r.interval}, nil
 			}
-			return reconcile.Result{RequeueAfter: r.interval}, nil
 		}
 	}
 }
@@ -788,12 +821,16 @@ func (r *reconcilerImpl) Reconcile(ctx context.Context, request reconcile.Reques
 }
 
 // ReconcileDelete implements Reconciler.
+//
+//nolint:gocyclo
 func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile.Request, additionalData ...any) (reconcile.Result, error) {
 	log := logging.FromContextOrDiscard(ctx).WithName(caControllerName)
 	ctx = logging.NewContext(ctx, log)
 	log.Info("Reconciling cluster access")
+	r.setInDeletion(request)
 
-	// iterate over registrations and ensure that the corresponding resources are ready
+	// iterate over registrations and ensure that the corresponding resources are being deleted
+	res := reconcile.Result{}
 	for _, rawReg := range r.registrations {
 		rlog := log.WithValues("id", rawReg.ID())
 		rlog.Debug("Processing registration")
@@ -842,15 +879,24 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile.
 					return reconcile.Result{}, fmt.Errorf("unable to delete AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
 				}
 			} else {
-				rlog.Info("Waiting for AccessRequest to be deleted", "arName", ar.Name, "arNamespace", ar.Namespace)
+				if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestDeletion]; fc != nil {
+					rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestDeletion)
+					if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestDeletion, &request, nil, ar, nil, nil); err != nil {
+						return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestDeletion, err)
+					}
+				}
 			}
-			if fc := r.fakingCallbacks[FakingCallback_WaitingForAccessRequestDeletion]; fc != nil {
-				rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForAccessRequestDeletion)
-				if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForAccessRequestDeletion, &request, nil, ar, nil, nil); err != nil {
-					return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForAccessRequestDeletion, err)
+			if len(ar.Finalizers) == 1 && ar.Finalizers[0] == Finalizer {
+				// remove finalizer to allow deletion
+				rlog.Info("Removing clusteraccess finalizer from AccessRequest to allow deletion", "arName", ar.Name, "arNamespace", ar.Namespace, "finalizer", Finalizer)
+				if err := r.platformClusterClient.Patch(ctx, ar, client.RawPatch(types.JSONPatchType, []byte(`[{"op": "remove", "path": "/metadata/finalizers"}]`))); err != nil {
+					return reconcile.Result{}, fmt.Errorf("error removing finalizer from AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
 				}
+			} else {
+				rlog.Info("Waiting for finalizers to be removed from AccessRequest", "arName", ar.Name, "arNamespace", ar.Namespace, "finalizers", ar.Finalizers)
+				res.RequeueAfter = r.interval
+				continue
 			}
-			return reconcile.Result{RequeueAfter: r.interval}, nil
 				}
 			}
 		}
@@ -884,24 +930,36 @@ func (r *reconcilerImpl) ReconcileDelete(ctx context.Context, request reconcile.
 					return reconcile.Result{}, fmt.Errorf("unable to delete ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err)
 				}
 			} else {
-				rlog.Info("Waiting for ClusterRequest to be deleted", "crName", cr.Name, "crNamespace", cr.Namespace)
+				if fc := r.fakingCallbacks[FakingCallback_WaitingForClusterRequestDeletion]; fc != nil {
+					rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForClusterRequestDeletion)
+					if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForClusterRequestDeletion, &request, cr, nil, nil, nil); err != nil {
+						return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForClusterRequestDeletion, err)
+					}
+				}
 			}
-			if fc := r.fakingCallbacks[FakingCallback_WaitingForClusterRequestDeletion]; fc != nil {
-				rlog.Info("Executing faking callback, this message should only appear in unit tests", "key", FakingCallback_WaitingForClusterRequestDeletion)
-				if err := fc(ctx, r.platformClusterClient, FakingCallback_WaitingForClusterRequestDeletion, &request, cr, nil, nil, nil); err != nil {
-					return reconcile.Result{}, fmt.Errorf("faking callback for key '%s' failed: %w", FakingCallback_WaitingForClusterRequestDeletion, err)
+			if len(cr.Finalizers) == 1 && cr.Finalizers[0] == Finalizer {
+				// remove finalizer to allow deletion
+				rlog.Info("Removing clusteraccess finalizer from ClusterRequest to allow deletion", "crName", cr.Name, "crNamespace", cr.Namespace, "finalizer", Finalizer)
+				if err := r.platformClusterClient.Patch(ctx, cr, client.RawPatch(types.JSONPatchType, []byte(`[{"op": "remove", "path": "/metadata/finalizers"}]`))); err != nil {
+					return reconcile.Result{}, fmt.Errorf("error removing finalizer from ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err)
 				}
+			} else {
+				rlog.Info("Waiting for finalizers to be removed from ClusterRequest", "crName", cr.Name, "crNamespace", cr.Namespace, "finalizers", cr.Finalizers)
+				res.RequeueAfter = r.interval
+				continue
 			}
-			return reconcile.Result{RequeueAfter: r.interval}, nil
 				}
 			}
 		}
 		// don't delete the namespace, because it might contain other resources
 	}
 
-	log.Info("Successfully reconciled cluster access")
+	log.Info("Successfully reconciled cluster access", "requeueAfter", res.RequeueAfter)
+	if res.RequeueAfter == 0 {
+		r.unsetInDeletion(request)
+	}
 
-	return reconcile.Result{}, nil
+	return res, nil
 }
 
 // Register implements Reconciler.
@@ -964,6 +1022,24 @@
 	FakingCallback_WaitingForClusterRequestDeletion = "WaitingForClusterRequestDeletion"
 )
 
+func (r *reconcilerImpl) isInDeletion(req reconcile.Request) bool {
+	r.ridLock.RLock()
+	defer r.ridLock.RUnlock()
+	return r.requestsInDeletion.Has(req)
+}
+
+func (r *reconcilerImpl) setInDeletion(req reconcile.Request) {
+	r.ridLock.Lock()
+	defer r.ridLock.Unlock()
+	r.requestsInDeletion.Insert(req)
+}
+
+func (r *reconcilerImpl) unsetInDeletion(req reconcile.Request) {
+	r.ridLock.Lock()
+	defer r.ridLock.Unlock()
+	r.requestsInDeletion.Delete(req)
+}
+
 ///////////////////////////
 /// AUXILIARY FUNCTIONS ///
 ///////////////////////////
@@ -1086,6 +1162,7 @@ func IdentityReferenceGenerator(req reconcile.Request, _ ...any) (*commonapi.Obj
 ////////////////////////////////
 
 // FakeClusterRequestReadiness returns a faking callback that sets the ClusterRequest to 'Granted'.
+// Adds a 'clusterprovider' finalizer to the ClusterRequest, if not already present.
 // If the given ClusterSpec is not nil, it creates a corresponding Cluster next to the ClusterRequest, if it doesn't exist yet.
 // If during the callback, the Cluster is non-nil, with a non-empty name and namespace, but doesn't exist yet, it will be created with the data from the Cluster, ignoring the given ClusterSpec.
 // Otherwise, only the ClusterRequest's status is modified.
@@ -1103,6 +1180,14 @@ func FakeClusterRequestReadiness(clusterSpec *clustersv1alpha1.ClusterSpec) Faki
 			return nil
 		}
 
+		// add finalizer, if not present
+		old := cr.DeepCopy()
+		if controllerutil.AddFinalizer(cr, "clusterprovider") {
+			if err := platformClusterClient.Patch(ctx, cr, client.MergeFrom(old)); err != nil {
+				return fmt.Errorf("unable to patch ClusterRequest '%s/%s': %w", cr.Namespace, cr.Name, err)
+			}
+		}
+
 		// create cluster, if desired
 		if c != nil && c.Name != "" && c.Namespace != "" {
 			// check if cluster exists
@@ -1129,7 +1214,7 @@ func FakeClusterRequestReadiness(clusterSpec *clustersv1alpha1.ClusterSpec) Faki
 		}
 
 		// mock ClusterRequest status
-		old := cr.DeepCopy()
+		old = cr.DeepCopy()
 		cr.Status.Cluster = &commonapi.ObjectReference{
 			Name:      c.Name,
 			Namespace: c.Namespace,
@@ -1147,6 +1232,7 @@
 // The content of the secret's 'kubeconfig' key will one of the following:
 // - 'fake:cluster:/' if the Cluster could be determined (should be the case most of the time)
 // - 'fake:request:/' if no Cluster could be determined
+// This function also adds a 'clusterprovider' finalizer to the AccessRequest, if not already present.
 // The callback is a no-op if the AccessRequest is already granted (Secret reference and existence are not checked in this case).
 // It returns an error if the AccessRequest is nil.
 //
@@ -1161,22 +1247,28 @@ func FakeAccessRequestReadiness() FakingCallback {
 			return nil
 		}
 
+		old := ar.DeepCopy()
+		applied := false
+		// add finalizer, if not present
+		changed := controllerutil.AddFinalizer(ar, "clusterprovider")
+
 		// if a ClusterRequest is referenced, but no Cluster, try to identify the Cluster
 		if ar.Spec.ClusterRef == nil {
-			old := ar.DeepCopy()
 			if c != nil {
 				ar.Spec.ClusterRef = &commonapi.ObjectReference{
 					Name:      c.Name,
 					Namespace: c.Namespace,
 				}
 				if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(old)); err != nil {
-					return fmt.Errorf("unable to update spec of AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
+					return fmt.Errorf("unable to patch AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
 				}
+				applied = true
 			} else if cr != nil && cr.Status.Cluster != nil {
 				ar.Spec.ClusterRef = cr.Status.Cluster.DeepCopy()
 				if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(old)); err != nil {
-					return fmt.Errorf("unable to update spec of AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
+					return fmt.Errorf("unable to patch AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
 				}
+				applied = true
 			} else if ar.Spec.RequestRef != nil {
 				cr2 := &clustersv1alpha1.ClusterRequest{}
 				cr2.Name = ar.Spec.RequestRef.Name
@@ -1186,13 +1278,20 @@ func FakeAccessRequestReadiness() FakingCallback {
 						old := ar.DeepCopy()
 						ar.Spec.ClusterRef = cr2.Status.Cluster.DeepCopy()
 						if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(old)); err != nil {
-							return fmt.Errorf("unable to update spec of AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
+							return fmt.Errorf("unable to patch AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
 						}
+						applied = true
 					}
 				}
 			}
 		}
 
+		if changed && !applied {
+			if err := platformClusterClient.Patch(ctx, ar, client.MergeFrom(old)); err != nil {
+				return fmt.Errorf("unable to patch AccessRequest '%s/%s': %w", ar.Namespace, ar.Name, err)
+			}
+		}
+
 		// create secret
 		s := &corev1.Secret{}
 		s.Name = ar.Name
@@ -1217,7 +1316,7 @@
 		}
 
 		// mock AccessRequest status
-		old := ar.DeepCopy()
+		old = ar.DeepCopy()
 		ar.Status.SecretRef = &commonapi.LocalObjectReference{
 			Name: s.Name,
 		}
@@ -1353,6 +1452,7 @@
 }
 
 // removeFinalizers takes an object and a list of finalizers to remove from that object.
+// The special finalizer defined in 'Finalizer' will never be removed.
 // If the list contains "*", all finalizers will be removed.
 // It updates the object in-place and returns an indicator whether any finalizers were removed.
 func removeFinalizers(obj client.Object, finalizersToRemove ...string) bool {
@@ -1361,15 +1461,19 @@
 	}
 
 	changed := false
-	if slices.Contains(finalizersToRemove, "*") {
-		obj.SetFinalizers(nil)
-		changed = true
-	} else {
-		for _, ftr := range finalizersToRemove {
-			if controllerutil.RemoveFinalizer(obj, ftr) {
-				changed = true
-			}
+	finsToRemove := sets.New(finalizersToRemove...)
+	fins := obj.GetFinalizers()
+	for i := 0; i < len(fins); i++ {
+		// keep the special finalizer
+		if fins[i] == Finalizer {
+			continue
+		}
+		if finsToRemove.Has("*") || finsToRemove.Has(fins[i]) {
+			fins = append(fins[:i], fins[i+1:]...)
+			changed = true
+			i--
 		}
 	}
+	obj.SetFinalizers(fins)
 	return changed
 }
diff --git a/lib/clusteraccess/advanced/clusteraccess_test.go b/lib/clusteraccess/advanced/clusteraccess_test.go
index 0421edd..010464c 100644
--- a/lib/clusteraccess/advanced/clusteraccess_test.go
+++ b/lib/clusteraccess/advanced/clusteraccess_test.go
@@ -403,6 +403,120 @@ var _ = Describe("Advanced Cluster Access", func() {
 		expectNoRequeue(env.Ctx, rec, req, true) // now everything should be deleted
 	})
 
+	It("should handle deletion in the correct order and not recreate resources that are in deletion", func() {
+		env := defaultTestSetup("testdata", "test-01")
+		rec := advanced.NewClusterAccessReconciler(env.Client(), "foo").
+			WithRetryInterval(100*time.Millisecond).
+			WithFakingCallback(advanced.FakingCallback_WaitingForClusterRequestReadiness, advanced.FakeClusterRequestReadiness(&clustersv1alpha1.ClusterSpec{
+				Profile:  "my-profile",
+				Purposes: []string{"test"},
+				Tenancy:  clustersv1alpha1.TENANCY_EXCLUSIVE,
+			})).
+			WithFakingCallback(advanced.FakingCallback_WaitingForAccessRequestReadiness, advanced.FakeAccessRequestReadiness()).
+			WithFakeClientGenerator(func(ctx context.Context, kcfgData []byte, scheme *runtime.Scheme, additionalData ...any) (client.Client, error) {
+				return fake.NewClientBuilder().WithScheme(scheme).Build(), nil
+			})
+		roleRef := commonapi.RoleRef{
+			Name: "cluster-admin",
+			Kind: "ClusterRole",
+		}
+		rec.Register(advanced.NewClusterRequest("foobar", "fb", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{
+			Purpose:                "test",
+			WaitForClusterDeletion: ptr.To(true),
+		})).WithTokenAccess(&clustersv1alpha1.TokenConfig{RoleRefs: []commonapi.RoleRef{roleRef}}).Build())
+		rec.Register(advanced.NewClusterRequest("foobaz", "fz", advanced.StaticClusterRequestSpecGenerator(&clustersv1alpha1.ClusterRequestSpec{
+			Purpose:                "test",
+			WaitForClusterDeletion: ptr.To(false),
+		})).WithTokenAccess(&clustersv1alpha1.TokenConfig{RoleRefs: []commonapi.RoleRef{roleRef}}).Build())
+		req := testutils.RequestFromStrings("bar", "baz")
+
+		Eventually(expectNoRequeue(env.Ctx, rec, req, false)).Should(Succeed())
+
+		// check AccessRequest 1
+		ar, err := rec.AccessRequest(env.Ctx, req, "foobar")
+		Expect(err).ToNot(HaveOccurred())
+
+		// check ClusterRequest 1
+		cr, err := rec.ClusterRequest(env.Ctx, req, "foobar")
+		Expect(err).ToNot(HaveOccurred())
+
+		// check AccessRequest 2
+		ar2, err := rec.AccessRequest(env.Ctx, req, "foobaz")
+		Expect(err).ToNot(HaveOccurred())
+
+		// check ClusterRequest 2
+		cr2, err := rec.ClusterRequest(env.Ctx, req, "foobaz")
+		Expect(err).ToNot(HaveOccurred())
+
+		// trigger deletion
+		res, err := rec.ReconcileDelete(env.Ctx, req)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(res.RequeueAfter).ToNot(BeZero())
+
+		// AccessRequests should have a deletion timestamp, but still exist with the special finalizer
+		// ClusterRequests should not have a deletion timestamp yet
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(Succeed())
+		Expect(ar.DeletionTimestamp.IsZero()).To(BeFalse())
+		Expect(ar.Finalizers).To(ContainElement(advanced.Finalizer))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
+		Expect(cr.DeletionTimestamp.IsZero()).To(BeTrue())
+		Expect(cr.Finalizers).To(ContainElement(advanced.Finalizer))
+
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar2), ar2)).To(Succeed())
+		Expect(ar2.DeletionTimestamp.IsZero()).To(BeFalse())
+		Expect(ar2.Finalizers).To(ContainElement(advanced.Finalizer))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr2), cr2)).To(Succeed())
+		Expect(cr2.DeletionTimestamp.IsZero()).To(BeTrue())
+		Expect(cr2.Finalizers).To(ContainElement(advanced.Finalizer))
+
+		// remove all foreign finalizers from AccessRequest 1
+		ar.Finalizers = []string{advanced.Finalizer}
+		Expect(env.Client().Update(env.Ctx, ar)).To(Succeed())
+
+		// deleting should requeue
+		res, err = rec.ReconcileDelete(env.Ctx, req)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(res.RequeueAfter).ToNot(BeZero())
+
+		// AccessRequest 1 should be gone, ClusterRequest 1 should have a deletion timestamp now
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(MatchError(apierrors.IsNotFound, "IsNotFound"))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(Succeed())
+		Expect(cr.DeletionTimestamp.IsZero()).To(BeFalse())
+		Expect(cr.Finalizers).To(ContainElement(advanced.Finalizer))
+
+		// AccessRequest 2 should still exist with deletion timestamp, ClusterRequest 2 should not have a deletion timestamp yet
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar2), ar2)).To(Succeed())
+		Expect(ar2.DeletionTimestamp.IsZero()).To(BeFalse())
+		Expect(ar2.Finalizers).To(ContainElement(advanced.Finalizer))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr2), cr2)).To(Succeed())
+		Expect(cr2.DeletionTimestamp.IsZero()).To(BeTrue())
+		Expect(cr2.Finalizers).To(ContainElement(advanced.Finalizer))
+
+		// reconciling should not recreate the already deleted AccessRequest 1
+		res, err = rec.Reconcile(env.Ctx, req)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(res.RequeueAfter).To(BeZero())
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar), ar)).To(MatchError(apierrors.IsNotFound, "IsNotFound"))
+
+		// remove all foreign finalizers from ClusterRequest 1 and AccessRequest 2
+		cr.Finalizers = []string{advanced.Finalizer}
+		Expect(env.Client().Update(env.Ctx, cr)).To(Succeed())
+		ar2.Finalizers = []string{advanced.Finalizer}
+		Expect(env.Client().Update(env.Ctx, ar2)).To(Succeed())
+
+		// deleting should requeue
+		res, err = rec.ReconcileDelete(env.Ctx, req)
+		Expect(err).ToNot(HaveOccurred())
+		Expect(res.RequeueAfter).ToNot(BeZero())
+
+		// ClusterRequest 1 should be gone, AccessRequest 2 should be gone, ClusterRequest 2 should have a deletion timestamp now
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr), cr)).To(MatchError(apierrors.IsNotFound, "IsNotFound"))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(ar2), ar2)).To(MatchError(apierrors.IsNotFound, "IsNotFound"))
+		Expect(env.Client().Get(env.Ctx, client.ObjectKeyFromObject(cr2), cr2)).To(Succeed())
+		Expect(cr2.DeletionTimestamp.IsZero()).To(BeFalse())
+		Expect(cr2.Finalizers).To(ContainElement(advanced.Finalizer))
+	})
+
 	Context("Conflict Detection", func() {
 
 		It("should correctly handle conflicts with existing ClusterRequests", func() {