From 6fcb592f137ec5be78db5108092b5c06390dbe4f Mon Sep 17 00:00:00 2001
From: Scot Wells
Date: Wed, 1 Apr 2026 11:55:28 -0500
Subject: [PATCH 1/2] fix: prevent project deletions from blocking new project creation
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

The project controller reconciled all projects with a single worker
goroutine. When a project was deleted, the synchronous purge operation
(up to 10 minutes) blocked all other project reconciliation — including
creation of new projects.

Split the monolithic Purge() into StartPurge() (issues delete commands)
and IsPurgeComplete() (checks if resources have drained). The controller
now uses a condition-driven state machine (ResourceCleanup) that returns
from each reconcile in seconds and requeues to poll for completion.

Also increase MaxConcurrentReconciles to 4 for additional throughput.

Co-Authored-By: Claude Opus 4.6 (1M context)
---
 internal/controllers/projectpurge/purge.go    | 103 ++++++++++-------
 .../resourcemanager/project_controller.go     | 104 ++++++++++++++++--
 .../resourcemanager/v1alpha1/project_types.go |  16 +++
 3 files changed, 173 insertions(+), 50 deletions(-)

diff --git a/internal/controllers/projectpurge/purge.go b/internal/controllers/projectpurge/purge.go
index 9ceee383..3941e1b6 100644
--- a/internal/controllers/projectpurge/purge.go
+++ b/internal/controllers/projectpurge/purge.go
@@ -2,8 +2,11 @@ package projectpurge

 import (
 	"context"
+	"errors"
 	"fmt"
+	"net"
 	"strings"
+	"syscall"
 	"time"

 	"golang.org/x/sync/errgroup"
@@ -12,7 +15,6 @@ import (
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 	"k8s.io/apimachinery/pkg/runtime/schema"
 	"k8s.io/apimachinery/pkg/util/sets"
-	"k8s.io/apimachinery/pkg/util/wait"
 	"k8s.io/client-go/discovery"
 	"k8s.io/client-go/dynamic"
 	"k8s.io/client-go/kubernetes"
@@ -50,9 +52,13 @@ var protected = map[string]struct{}{
 	// add "milo-system" if you make it per-project and protect it
 }

-func (p *Purger) Purge(ctx context.Context, cfg *rest.Config, project string, o Options) error {
+// StartPurge runs discovery followed by Phases A through C (DeleteCollection
+// on namespaced resources, delete namespaces, force-finalize namespaces).
+// These are fast fire-and-forget operations that issue delete commands
+// without waiting for completion. All phases are idempotent and safe to re-run.
+func (p *Purger) StartPurge(ctx context.Context, cfg *rest.Config, project string, o Options) error {
 	if o.Timeout == 0 {
-		o.Timeout = 5 * time.Minute
+		o.Timeout = 2 * time.Minute
 	}
 	if o.Parallel <= 0 {
 		o.Parallel = 8
@@ -101,8 +107,10 @@ func (p *Purger) Purge(ctx context.Context, cfg *rest.Config, project string, o
 		}
 	}

-	// Partition & exclude namespaces & CRDs for explicit phases
-	var namespaced, cluster []res
+	// Partition & exclude namespaces & CRDs for explicit phases.
+	// Cluster-scoped resource deletion is intentionally omitted —
+	// only namespaced resources and namespaces themselves are purged.
+	var namespaced []res
 	for _, r := range all {
 		if r.gvr.Group == "" && r.gvr.Resource == "namespaces" {
 			continue
 		}
@@ -112,8 +120,6 @@ func (p *Purger) Purge(ctx context.Context, cfg *rest.Config, project string, o
 		}
 		if r.namespaced {
 			namespaced = append(namespaced, r)
-		} else {
-			cluster = append(cluster, r)
 		}
 	}

@@ -149,23 +155,9 @@ func (p *Purger) Purge(ctx context.Context, cfg *rest.Config, project string, o
 		return err
 	}

-	// Phase B: cluster-scoped kinds
-	// if err := runParallel(deadline, o.Parallel, cluster, func(ctx context.Context, r res) error {
-	// 	if err := dyn.Resource(r.gvr).DeleteCollection(ctx, delOpts, listOpts); !ignorable(err) {
-	// 		if apierrors.IsForbidden(err) || apierrors.IsUnauthorized(err) {
-	// 			return fmt.Errorf("rbac forbids DeleteCollection for %s: %w", r.gvr, err)
-	// 		}
-	// 		return fmt.Errorf("DeleteCollection %s: %w", r.gvr, err)
-	// 	}
-	// 	return nil
-	// }); err != nil {
-	// 	return err
-	// }
-
-	// Phase C: delete namespaces themselves (sets DeletionTimestamp)
+	// Phase B: delete namespaces themselves (sets DeletionTimestamp)
 	if err := runParallel(deadline, o.Parallel, namespaces, func(ctx context.Context, ns string) error {
 		if _, ok := protected[ns]; ok {
-			// Keep the namespace object; we've already deleted its contents in Phase A.
 			return nil
 		}
 		if err := core.CoreV1().Namespaces().Delete(ctx, ns, delOpts); !ignorable(err) {
@@ -179,22 +171,20 @@ func (p *Purger) Purge(ctx context.Context, cfg *rest.Config, project string, o
 		return err
 	}

-	// Phase D: force-finalize namespaces so we don't rely on a namespace controller
+	// Phase C: force-finalize namespaces so we don't rely on a namespace controller
 	if err := runParallel(deadline, o.Parallel, namespaces, func(ctx context.Context, ns string) error {
 		nso, err := core.CoreV1().Namespaces().Get(ctx, ns, metav1.GetOptions{})
-		if ignorable(err) { // not found or not served
+		if ignorable(err) {
 			return nil
 		}
 		if err != nil {
 			return fmt.Errorf("get namespace %q: %w", ns, err)
 		}
-		// If delete hasn’t landed yet, try again (idempotent)
 		if nso.DeletionTimestamp.IsZero() {
 			_ = core.CoreV1().Namespaces().Delete(ctx, ns, delOpts)
 		}
-		// Clear finalizers to allow immediate removal without a namespace controller
 		nso.Spec.Finalizers = nil
 		if _, err := core.CoreV1().Namespaces().Finalize(ctx, nso, metav1.UpdateOptions{}); !ignorable(err) {
 			if apierrors.IsForbidden(err) || apierrors.IsUnauthorized(err) {
@@ -207,25 +197,56 @@ func (p *Purger) Purge(ctx context.Context, cfg *rest.Config, project string, o
 		return err
 	}

-	// Phase E: verify all namespaces are gone (so tearing down per-project controllers is safe)
-	if err := wait.PollUntilContextCancel(deadline, 500*time.Millisecond, true, func(ctx context.Context) (bool, error) {
-		nsList, err := core.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
-		if err != nil {
-			return false, fmt.Errorf("list namespaces: %w", err)
-		}
-		if len(nsList.Items) == 1 && nsList.Items[0].Name == "default" {
-			return true, nil // all gone
-		}
-		// If we have namespaces left, we can’t proceed
-		return false, nil
-
-	}); err != nil {
-		return fmt.Errorf("timeout waiting for namespaces to disappear: %w", err)
+	return nil
+}

+// IsPurgeComplete performs a single namespace list and returns true when only
+// the "default" namespace (or no namespaces) remains. Only errors that
+// definitively indicate the per-project API server is gone (e.g. connection
+// refused) are treated as complete.
All other errors (timeouts, 500s, 429s, +// RBAC issues, context cancellation) are returned so the controller can retry. +func (p *Purger) IsPurgeComplete(ctx context.Context, cfg *rest.Config, project string) (bool, error) { + core, err := kubernetes.NewForConfig(cfg) + if err != nil { + return false, fmt.Errorf("building client for project %s: %w", project, err) + } + + nsList, err := core.CoreV1().Namespaces().List(ctx, metav1.ListOptions{}) + if err != nil { + if isServerGone(err) { + return true, nil + } + return false, fmt.Errorf("listing namespaces for project %s: %w", project, err) } - // Phase F: we might need to clean up crds in the future + switch len(nsList.Items) { + case 0: + return true, nil + case 1: + return nsList.Items[0].Name == "default", nil + default: + return false, nil + } +} - return nil +// isServerGone returns true when the error indicates the remote API server is +// permanently unreachable — connection refused, or the API endpoint itself no +// longer exists. Transient failures (timeouts, 500s, throttling, RBAC) return +// false so the caller retries. +func isServerGone(err error) bool { + var opErr *net.OpError + if errors.As(err, &opErr) { + if errors.Is(opErr.Err, syscall.ECONNREFUSED) { + return true + } + } + if errors.Is(err, syscall.ECONNREFUSED) { + return true + } + if apierrors.IsNotFound(err) { + return true + } + return false } // helper (generic, named) diff --git a/internal/controllers/resourcemanager/project_controller.go b/internal/controllers/resourcemanager/project_controller.go index 9011dd35..6bcb6212 100644 --- a/internal/controllers/resourcemanager/project_controller.go +++ b/internal/controllers/resourcemanager/project_controller.go @@ -20,6 +20,7 @@ import ( ctrl "sigs.k8s.io/controller-runtime" "sigs.k8s.io/controller-runtime/pkg/client" "sigs.k8s.io/controller-runtime/pkg/cluster" + "sigs.k8s.io/controller-runtime/pkg/controller" "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil" "sigs.k8s.io/controller-runtime/pkg/handler" "sigs.k8s.io/controller-runtime/pkg/log" @@ -81,7 +82,7 @@ func (r *ProjectController) Reconcile(ctx context.Context, req ctrl.Request) (ct return ctrl.Result{}, fmt.Errorf("get project: %w", err) } - // Deletion path: run purge, then remove finalizer + // Deletion path: clean up project resources, then remove finalizer if !project.DeletionTimestamp.IsZero() { // Best-effort delete the ProjectControlPlane in infra if r.InfraClient != nil { @@ -95,18 +96,102 @@ func (r *ProjectController) Reconcile(ctx context.Context, req ctrl.Request) (ct } if controllerutil.ContainsFinalizer(&project, projectFinalizer) { projCfg := r.forProject(r.BaseConfig, project.Name) - if err := r.Purger.Purge(ctx, projCfg, project.Name, projectpurge.Options{ - Timeout: 10 * time.Minute, + + cleanupCond := apimeta.FindStatusCondition(project.Status.Conditions, resourcemanagerv1alpha.ProjectResourceCleanup) + + // If awaiting completion, check whether resources have drained. 
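+		// The cleanup flow is a small state machine on the ResourceCleanup
+		// condition: (no condition) -> CleanupStarted -> CleanupAwaitingCompletion,
+		// then CleanupComplete once resources drain, or back to CleanupStarted
+		// if they have not.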
+ if cleanupCond != nil && cleanupCond.Status == metav1.ConditionTrue && + cleanupCond.Reason == resourcemanagerv1alpha.ProjectCleanupAwaitingCompletionReason { + + done, err := r.Purger.IsPurgeComplete(ctx, projCfg, project.Name) + if err != nil { + logger.Error(err, "check cleanup completion", "project", project.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + if done { + // Update ResourceCleanup condition to reflect completion + cleanupDone := metav1.Condition{ + Type: resourcemanagerv1alpha.ProjectResourceCleanup, + Status: metav1.ConditionFalse, + Reason: resourcemanagerv1alpha.ProjectCleanupCompleteReason, + Message: "Project resources have been deleted", + ObservedGeneration: project.Generation, + } + if apimeta.SetStatusCondition(&project.Status.Conditions, cleanupDone) { + if err := r.ControlPlaneClient.Status().Update(ctx, &project); err != nil { + return ctrl.Result{}, fmt.Errorf("update cleanup status: %w", err) + } + } + + // Re-fetch to get current resourceVersion after status update + if err := r.ControlPlaneClient.Get(ctx, req.NamespacedName, &project); err != nil { + return ctrl.Result{}, fmt.Errorf("re-fetch project: %w", err) + } + + // Remove finalizer with fresh object + before := project.DeepCopy() + controllerutil.RemoveFinalizer(&project, projectFinalizer) + if err := r.ControlPlaneClient.Patch(ctx, &project, client.MergeFrom(before)); err != nil { + return ctrl.Result{}, fmt.Errorf("remove finalizer: %w", err) + } + return ctrl.Result{}, nil + } + + // Resources still exist — transition back to CleanupStarted + // so the next reconcile re-issues delete commands. + reissue := metav1.Condition{ + Type: resourcemanagerv1alpha.ProjectResourceCleanup, + Status: metav1.ConditionTrue, + Reason: resourcemanagerv1alpha.ProjectCleanupStartedReason, + Message: "Re-issuing delete commands for remaining project resources", + ObservedGeneration: project.Generation, + } + if apimeta.SetStatusCondition(&project.Status.Conditions, reissue) { + if err := r.ControlPlaneClient.Status().Update(ctx, &project); err != nil { + return ctrl.Result{}, fmt.Errorf("update cleanup status: %w", err) + } + } + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil + } + + // CleanupStarted or no condition yet — issue delete commands. 
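+		// StartPurge only issues deletes and is idempotent (see its doc
+		// comment), so re-entering this branch after a crash or after a
+		// CleanupStarted re-transition is safe.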
+ cleanupStarted := metav1.Condition{ + Type: resourcemanagerv1alpha.ProjectResourceCleanup, + Status: metav1.ConditionTrue, + Reason: resourcemanagerv1alpha.ProjectCleanupStartedReason, + Message: "Issuing delete commands for project resources", + ObservedGeneration: project.Generation, + } + if apimeta.SetStatusCondition(&project.Status.Conditions, cleanupStarted) { + if err := r.ControlPlaneClient.Status().Update(ctx, &project); err != nil { + return ctrl.Result{}, fmt.Errorf("update cleanup status: %w", err) + } + } + + if err := r.Purger.StartPurge(ctx, projCfg, project.Name, projectpurge.Options{ + Timeout: 2 * time.Minute, Parallel: 16, }); err != nil { - // requeue to retry purge - return ctrl.Result{RequeueAfter: 2 * time.Second}, fmt.Errorf("purge %q: %w", project.Name, err) + logger.Error(err, "start cleanup", "project", project.Name) + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } - before := project.DeepCopy() - controllerutil.RemoveFinalizer(&project, projectFinalizer) - if err := r.ControlPlaneClient.Patch(ctx, &project, client.MergeFrom(before)); err != nil { - return ctrl.Result{}, fmt.Errorf("remove finalizer: %w", err) + + // Transition to awaiting completion — subsequent reconciles + // will check IsPurgeComplete instead of re-issuing deletes. + cleanupAwaiting := metav1.Condition{ + Type: resourcemanagerv1alpha.ProjectResourceCleanup, + Status: metav1.ConditionTrue, + Reason: resourcemanagerv1alpha.ProjectCleanupAwaitingCompletionReason, + Message: "Waiting for project resources to be removed", + ObservedGeneration: project.Generation, + } + if apimeta.SetStatusCondition(&project.Status.Conditions, cleanupAwaiting) { + if err := r.ControlPlaneClient.Status().Update(ctx, &project); err != nil { + return ctrl.Result{}, fmt.Errorf("update awaiting status: %w", err) + } } + + return ctrl.Result{RequeueAfter: 5 * time.Second}, nil } return ctrl.Result{}, nil } @@ -403,6 +488,7 @@ func (r *ProjectController) SetupWithManager(mgr ctrl.Manager, infraCluster clus r.Purger = projectpurge.New() return ctrl.NewControllerManagedBy(mgr). + WithOptions(controller.Options{MaxConcurrentReconciles: 4}). For(&resourcemanagerv1alpha.Project{}). WatchesRawSource(source.TypedKind( infraCluster.GetCache(), diff --git a/pkg/apis/resourcemanager/v1alpha1/project_types.go b/pkg/apis/resourcemanager/v1alpha1/project_types.go index 38498071..a7f0bb0f 100644 --- a/pkg/apis/resourcemanager/v1alpha1/project_types.go +++ b/pkg/apis/resourcemanager/v1alpha1/project_types.go @@ -24,6 +24,10 @@ const ( // ProjectReady indicates that the project has been provisioned and is ready // for use. ProjectReady = "Ready" + + // ProjectResourceCleanup indicates that project resources are being deleted + // as part of project teardown. + ProjectResourceCleanup = "ResourceCleanup" ) const ( @@ -35,6 +39,18 @@ const ( // ProjectNameConflict indicates that the project name already exists ProjectNameConflictReason = "ProjectNameConflict" + + // ProjectCleanupStartedReason indicates that resource cleanup has been + // initiated and delete commands are being issued. + ProjectCleanupStartedReason = "CleanupStarted" + + // ProjectCleanupAwaitingCompletionReason indicates that delete commands + // have been issued and the controller is waiting for resources to be removed. + ProjectCleanupAwaitingCompletionReason = "CleanupAwaitingCompletion" + + // ProjectCleanupCompleteReason indicates that all project resources have + // been deleted. 
+ ProjectCleanupCompleteReason = "CleanupComplete" ) // +kubebuilder:object:root=true From 07a01937787f60907c75dfe18452460d69439e12 Mon Sep 17 00:00:00 2001 From: Scot Wells Date: Wed, 1 Apr 2026 11:58:16 -0500 Subject: [PATCH 2/2] test: add end-to-end test for project deletion and resource cleanup Adds a Chainsaw e2e test that verifies: - A project can be deleted after reaching Ready status - The project is fully removed from both org and main cluster contexts Co-Authored-By: Claude Opus 4.6 (1M context) --- .../01-test-organization.yaml | 11 +++ .../project-deletion/02-test-user.yaml | 10 ++ .../03-organization-membership.yaml | 12 +++ .../project-deletion/04-test-project.yaml | 10 ++ .../assertions/assert-cleanup-complete.yaml | 9 ++ .../assertions/assert-project-ready.yaml | 21 +++++ .../project-deletion/chainsaw-test.yaml | 91 +++++++++++++++++++ .../project-deletion/kubeconfig-main | 18 ++++ .../project-deletion/kubeconfig-org-template | 18 ++++ 9 files changed, 200 insertions(+) create mode 100644 test/resource-management/project-deletion/01-test-organization.yaml create mode 100644 test/resource-management/project-deletion/02-test-user.yaml create mode 100644 test/resource-management/project-deletion/03-organization-membership.yaml create mode 100644 test/resource-management/project-deletion/04-test-project.yaml create mode 100644 test/resource-management/project-deletion/assertions/assert-cleanup-complete.yaml create mode 100644 test/resource-management/project-deletion/assertions/assert-project-ready.yaml create mode 100644 test/resource-management/project-deletion/chainsaw-test.yaml create mode 100644 test/resource-management/project-deletion/kubeconfig-main create mode 100644 test/resource-management/project-deletion/kubeconfig-org-template diff --git a/test/resource-management/project-deletion/01-test-organization.yaml b/test/resource-management/project-deletion/01-test-organization.yaml new file mode 100644 index 00000000..60217865 --- /dev/null +++ b/test/resource-management/project-deletion/01-test-organization.yaml @@ -0,0 +1,11 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Organization +metadata: + name: test-project-deletion-org + labels: + test.miloapis.com/scenario: "project-deletion" + annotations: + kubernetes.io/description: "Organization for testing project deletion" + kubernetes.io/display-name: "Test Project Deletion Organization" +spec: + type: Standard diff --git a/test/resource-management/project-deletion/02-test-user.yaml b/test/resource-management/project-deletion/02-test-user.yaml new file mode 100644 index 00000000..4fc57c3e --- /dev/null +++ b/test/resource-management/project-deletion/02-test-user.yaml @@ -0,0 +1,10 @@ +apiVersion: iam.miloapis.com/v1alpha1 +kind: User +metadata: + name: user-admin + labels: + test.miloapis.com/scenario: "project-deletion" +spec: + email: admin@test.local + givenName: ProjectDeleteTest + familyName: Admin diff --git a/test/resource-management/project-deletion/03-organization-membership.yaml b/test/resource-management/project-deletion/03-organization-membership.yaml new file mode 100644 index 00000000..b7e5fbb6 --- /dev/null +++ b/test/resource-management/project-deletion/03-organization-membership.yaml @@ -0,0 +1,12 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: OrganizationMembership +metadata: + name: user-admin-membership + namespace: organization-test-project-deletion-org + labels: + test.miloapis.com/scenario: "project-deletion" +spec: + organizationRef: + name: test-project-deletion-org 
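+  # userRef references the User created in 02-test-user.yaml.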
+ userRef: + name: "user-admin" diff --git a/test/resource-management/project-deletion/04-test-project.yaml b/test/resource-management/project-deletion/04-test-project.yaml new file mode 100644 index 00000000..f5c554a0 --- /dev/null +++ b/test/resource-management/project-deletion/04-test-project.yaml @@ -0,0 +1,10 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Project +metadata: + name: project-deletion-test + labels: + test.miloapis.com/scenario: "project-deletion" +spec: + ownerRef: + kind: Organization + name: test-project-deletion-org diff --git a/test/resource-management/project-deletion/assertions/assert-cleanup-complete.yaml b/test/resource-management/project-deletion/assertions/assert-cleanup-complete.yaml new file mode 100644 index 00000000..849922d9 --- /dev/null +++ b/test/resource-management/project-deletion/assertions/assert-cleanup-complete.yaml @@ -0,0 +1,9 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Project +metadata: + name: project-deletion-test +status: + conditions: + - type: ResourceCleanup + status: "False" + reason: CleanupComplete diff --git a/test/resource-management/project-deletion/assertions/assert-project-ready.yaml b/test/resource-management/project-deletion/assertions/assert-project-ready.yaml new file mode 100644 index 00000000..e987b55c --- /dev/null +++ b/test/resource-management/project-deletion/assertions/assert-project-ready.yaml @@ -0,0 +1,21 @@ +apiVersion: resourcemanager.miloapis.com/v1alpha1 +kind: Project +metadata: + name: project-deletion-test + labels: + test.miloapis.com/scenario: "project-deletion" + resourcemanager.miloapis.com/organization-name: test-project-deletion-org + ownerReferences: + - apiVersion: resourcemanager.miloapis.com/v1alpha1 + kind: Organization + name: test-project-deletion-org +spec: + ownerRef: + kind: Organization + name: test-project-deletion-org +status: + conditions: + - type: Ready + status: "True" + reason: Ready + message: Project is ready diff --git a/test/resource-management/project-deletion/chainsaw-test.yaml b/test/resource-management/project-deletion/chainsaw-test.yaml new file mode 100644 index 00000000..e252b083 --- /dev/null +++ b/test/resource-management/project-deletion/chainsaw-test.yaml @@ -0,0 +1,91 @@ +apiVersion: chainsaw.kyverno.io/v1alpha1 +kind: Test +metadata: + name: project-deletion +spec: + description: | + Tests Project deletion and resource cleanup. 
+ + This test verifies: + - A project can be deleted after reaching Ready status + - The ResourceCleanup condition progresses through the expected states + - The project is fully removed from both organization and main cluster contexts + + clusters: + main: + kubeconfig: kubeconfig-main + org: + kubeconfig: kubeconfig-org-template + + steps: + - name: setup-organization + description: Create Organization, User, and OrganizationMembership for project testing + try: + - apply: + file: 01-test-organization.yaml + - wait: + apiVersion: v1 + kind: Namespace + name: organization-test-project-deletion-org + timeout: 30s + for: + jsonPath: + path: '{.status.phase}' + value: Active + - apply: + file: 02-test-user.yaml + - wait: + apiVersion: iam.miloapis.com/v1alpha1 + kind: User + name: "user-admin" + timeout: 30s + for: + condition: + name: Ready + value: 'True' + - apply: + file: 03-organization-membership.yaml + + - name: create-project-and-wait-for-ready + description: Create Project in organization context and verify it reaches Ready status + cluster: org + try: + - apply: + file: 04-test-project.yaml + - wait: + apiVersion: resourcemanager.miloapis.com/v1alpha1 + kind: Project + name: project-deletion-test + timeout: 60s + for: + condition: + name: Ready + value: 'True' + - assert: + file: assertions/assert-project-ready.yaml + + - name: delete-project + description: Delete the project and verify cleanup completes + cluster: org + try: + - delete: + ref: + apiVersion: resourcemanager.miloapis.com/v1alpha1 + kind: Project + name: project-deletion-test + - wait: + apiVersion: resourcemanager.miloapis.com/v1alpha1 + kind: Project + name: project-deletion-test + timeout: 120s + for: + deletion: {} + + - name: verify-project-gone-from-main-cluster + description: Verify the project no longer exists in the main cluster + cluster: main + try: + - script: + timeout: 30s + content: | + kubectl --kubeconfig kubeconfig-main get project project-deletion-test 2>&1 | grep -q "NotFound" diff --git a/test/resource-management/project-deletion/kubeconfig-main b/test/resource-management/project-deletion/kubeconfig-main new file mode 100644 index 00000000..c771bb28 --- /dev/null +++ b/test/resource-management/project-deletion/kubeconfig-main @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443 + name: milo-main +contexts: +- context: + cluster: milo-main + user: test-admin + name: milo-main +current-context: milo-main +kind: Config +preferences: {} +users: +- name: test-admin + user: + token: test-admin-token diff --git a/test/resource-management/project-deletion/kubeconfig-org-template b/test/resource-management/project-deletion/kubeconfig-org-template new file mode 100644 index 00000000..4a8464ca --- /dev/null +++ b/test/resource-management/project-deletion/kubeconfig-org-template @@ -0,0 +1,18 @@ +apiVersion: v1 +clusters: +- cluster: + insecure-skip-tls-verify: true + server: https://localhost:30443/apis/resourcemanager.miloapis.com/v1alpha1/organizations/test-project-deletion-org/control-plane + name: org-test-project-deletion-org +contexts: +- context: + cluster: org-test-project-deletion-org + user: user-admin + name: org-test-project-deletion-org +current-context: org-test-project-deletion-org +kind: Config +preferences: {} +users: +- name: user-admin + user: + token: test-admin-token
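
A minimal table-driven sketch of isServerGone's error-classification contract,
for reviewers who want to exercise it locally. The file name and its placement
in package projectpurge are assumptions, not part of this series:

// purge_gone_test.go (hypothetical): checks that isServerGone treats
// connection-refused and not-found as "server gone" and everything else
// as retryable.
package projectpurge

import (
	"fmt"
	"net"
	"syscall"
	"testing"

	apierrors "k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/runtime/schema"
)

func TestIsServerGone(t *testing.T) {
	nsRes := schema.GroupResource{Resource: "namespaces"}
	cases := []struct {
		name string
		err  error
		want bool
	}{
		// Connection refused as the transport typically surfaces it,
		// wrapped in a net.OpError.
		{"conn refused via OpError", &net.OpError{Op: "dial", Err: syscall.ECONNREFUSED}, true},
		// Connection refused behind fmt.Errorf wrapping; errors.Is unwraps it.
		{"wrapped conn refused", fmt.Errorf("list namespaces: %w", syscall.ECONNREFUSED), true},
		// The API path itself no longer exists.
		{"not found", apierrors.NewNotFound(nsRes, ""), true},
		// Transient failures must map to "retry", never "complete".
		{"timeout", apierrors.NewTimeoutError("request timed out", 1), false},
		{"throttled", apierrors.NewTooManyRequests("slow down", 1), false},
		{"forbidden", apierrors.NewForbidden(nsRes, "", nil), false},
	}
	for _, tc := range cases {
		if got := isServerGone(tc.err); got != tc.want {
			t.Errorf("%s: isServerGone() = %v, want %v", tc.name, got, tc.want)
		}
	}
}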