🌱 cluster workspace rename #2558

Merged
2 changes: 1 addition & 1 deletion pkg/admission/kubequota/kubequota_admission.go
@@ -144,7 +144,7 @@ func (k *KubeResourceQuota) Validate(ctx context.Context, a admission.Attributes
}

k.clusterWorkspaceDeletionMonitorStarter.Do(func() {
m := newClusterWorkspaceDeletionMonitor(k.logicalClusterInformer, k.stopQuotaAdmissionForCluster)
m := newLogicalClusterDeletionMonitor(k.logicalClusterInformer, k.stopQuotaAdmissionForCluster)
go m.Start(k.serverDone)
})

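Note: the monitor is started lazily from the admission plugin's Validate path. A minimal sketch of that start-once wiring, assuming the clusterWorkspaceDeletionMonitorStarter field is a sync.Once (its type is not shown in this diff); names here are illustrative, not kcp's actual API:

```go
package kubequota

import "sync"

// admissionPlugin is an illustrative stand-in for the quota admission plugin.
type admissionPlugin struct {
	monitorStarter sync.Once       // assumed type of clusterWorkspaceDeletionMonitorStarter
	serverDone     <-chan struct{} // closed when the server shuts down
}

// ensureMonitorStarted starts the deletion monitor exactly once, regardless of
// how many admission requests race through Validate.
func (k *admissionPlugin) ensureMonitorStarted(start func(stop <-chan struct{})) {
	k.monitorStarter.Do(func() {
		go start(k.serverDone)
	})
}
```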
30 changes: 15 additions & 15 deletions pkg/admission/kubequota/kubequota_clusterworkspace_monitor.go
@@ -32,21 +32,21 @@ import (
corev1alpha1informers "github.com/kcp-dev/kcp/pkg/client/informers/externalversions/core/v1alpha1"
)

const clusterWorkspaceDeletionMonitorControllerName = "kcp-kubequota-cluster-workspace-deletion-monitor"
const logicalClusterDeletionMonitorControllerName = "kcp-kubequota-logical-cluster-deletion-monitor"

// clusterWorkspaceDeletionMonitor monitors LogicalClusters and terminates QuotaAdmission for a logical cluster
// logicalClusterDeletionMonitor monitors LogicalClusters and terminates QuotaAdmission for a logical cluster
// when its corresponding ClusterWorkspace is deleted.
type clusterWorkspaceDeletionMonitor struct {
type logicalClusterDeletionMonitor struct {
queue workqueue.RateLimitingInterface
stopFunc func(name logicalcluster.Name)
}

func newClusterWorkspaceDeletionMonitor(
func newLogicalClusterDeletionMonitor(
workspaceInformer corev1alpha1informers.LogicalClusterClusterInformer,
stopFunc func(logicalcluster.Name),
) *clusterWorkspaceDeletionMonitor {
m := &clusterWorkspaceDeletionMonitor{
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), clusterWorkspaceDeletionMonitorControllerName),
) *logicalClusterDeletionMonitor {
m := &logicalClusterDeletionMonitor{
queue: workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), logicalClusterDeletionMonitorControllerName),
stopFunc: stopFunc,
}

@@ -59,7 +59,7 @@ func newClusterWorkspaceDeletionMonitor(
return m
}

func (m *clusterWorkspaceDeletionMonitor) enqueue(obj interface{}) {
func (m *logicalClusterDeletionMonitor) enqueue(obj interface{}) {
key, err := kcpcache.DeletionHandlingMetaClusterNamespaceKeyFunc(obj)
if err != nil {
runtime.HandleError(err)
@@ -69,24 +69,24 @@ func (m *clusterWorkspaceDeletionMonitor) enqueue(obj interface{}) {
m.queue.Add(key)
}

func (m *clusterWorkspaceDeletionMonitor) Start(stop <-chan struct{}) {
func (m *logicalClusterDeletionMonitor) Start(stop <-chan struct{}) {
defer runtime.HandleCrash()
defer m.queue.ShutDown()

klog.Infof("Starting %s controller", clusterWorkspaceDeletionMonitorControllerName)
defer klog.Infof("Shutting down %s controller", clusterWorkspaceDeletionMonitorControllerName)
klog.Infof("Starting %s controller", logicalClusterDeletionMonitorControllerName)
defer klog.Infof("Shutting down %s controller", logicalClusterDeletionMonitorControllerName)

go wait.Until(m.startWorker, time.Second, stop)

<-stop
}

func (m *clusterWorkspaceDeletionMonitor) startWorker() {
func (m *logicalClusterDeletionMonitor) startWorker() {
for m.processNextWorkItem() {
}
}

func (m *clusterWorkspaceDeletionMonitor) processNextWorkItem() bool {
func (m *logicalClusterDeletionMonitor) processNextWorkItem() bool {
// Wait until there is a new item in the working queue
k, quit := m.queue.Get()
if quit {
@@ -99,7 +99,7 @@ func (m *clusterWorkspaceDeletionMonitor) processNextWorkItem() bool {
defer m.queue.Done(key)

if err := m.process(key); err != nil {
runtime.HandleError(fmt.Errorf("clusterWorkspaceDeletionMonitor failed to sync %q, err: %w", key, err))
runtime.HandleError(fmt.Errorf("logicalClusterDeletionMonitor failed to sync %q, err: %w", key, err))

m.queue.AddRateLimited(key)

@@ -112,7 +112,7 @@ func (m *clusterWorkspaceDeletionMonitor) processNextWorkItem() bool {
return true
}

func (m *clusterWorkspaceDeletionMonitor) process(key string) error {
func (m *logicalClusterDeletionMonitor) process(key string) error {
clusterName, _, _, err := kcpcache.SplitMetaClusterNamespaceKey(key)
if err != nil {
runtime.HandleError(err)
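Note: the tail of process is collapsed above. Below is a self-contained sketch of the monitor's work loop, assuming process ultimately calls stopFunc with the cluster name parsed from the queue key; plain strings stand in for logicalcluster.Name and the informer wiring is omitted:

```go
package kubequota

import (
	"fmt"
	"time"

	"k8s.io/apimachinery/pkg/util/runtime"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/util/workqueue"
)

// logicalClusterDeletionMonitor drains a rate-limited queue of deleted-cluster
// keys and invokes stopFunc for each one.
type logicalClusterDeletionMonitor struct {
	queue    workqueue.RateLimitingInterface
	stopFunc func(clusterName string)
}

func newLogicalClusterDeletionMonitor(name string, stopFunc func(string)) *logicalClusterDeletionMonitor {
	return &logicalClusterDeletionMonitor{
		queue:    workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), name),
		stopFunc: stopFunc,
	}
}

// Start runs a single worker until stop is closed.
func (m *logicalClusterDeletionMonitor) Start(stop <-chan struct{}) {
	defer runtime.HandleCrash()
	defer m.queue.ShutDown()

	go wait.Until(func() {
		for m.processNextWorkItem() {
		}
	}, time.Second, stop)

	<-stop
}

func (m *logicalClusterDeletionMonitor) processNextWorkItem() bool {
	k, quit := m.queue.Get()
	if quit {
		return false
	}
	key := k.(string)
	defer m.queue.Done(key)

	if err := m.process(key); err != nil {
		runtime.HandleError(fmt.Errorf("logicalClusterDeletionMonitor failed to sync %q: %w", key, err))
		m.queue.AddRateLimited(key)
		return true
	}
	m.queue.Forget(key)
	return true
}

// process treats the key as the cluster name and tears down per-cluster work.
// kcp's version first splits the key with kcpcache.SplitMetaClusterNamespaceKey.
func (m *logicalClusterDeletionMonitor) process(key string) error {
	m.stopFunc(key)
	return nil
}
```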
6 changes: 3 additions & 3 deletions pkg/proxy/index/index_controller.go
@@ -52,12 +52,12 @@ type Index interface {
LookupURL(path logicalcluster.Path) (url string, found bool)
}

type ClusterWorkspaceClientGetter func(shard *corev1alpha1.Shard) (kcpclientset.ClusterInterface, error)
type ClusterClientGetter func(shard *corev1alpha1.Shard) (kcpclientset.ClusterInterface, error)

func NewController(
ctx context.Context,
shardInformer corev1alpha1informers.ShardInformer,
clientGetter ClusterWorkspaceClientGetter,
clientGetter ClusterClientGetter,
) *Controller {
queue := workqueue.NewNamedRateLimitingQueue(workqueue.DefaultControllerRateLimiter(), controllerName)

@@ -112,7 +112,7 @@ func NewController(
type Controller struct {
queue workqueue.RateLimitingInterface

clientGetter ClusterWorkspaceClientGetter
clientGetter ClusterClientGetter

shardIndexer cache.Indexer
shardLister corev1alpha1listers.ShardLister
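Note: the renamed ClusterClientGetter is a factory from a Shard to a cluster-aware clientset. One possible implementation is sketched below; the Spec.BaseURL field, the kcpclientset import path, and the NewForConfig constructor are all assumptions, not verified against kcp's generated client:

```go
package index

import (
	corev1alpha1 "github.com/kcp-dev/kcp/pkg/apis/core/v1alpha1"
	kcpclientset "github.com/kcp-dev/kcp/pkg/client/clientset/versioned/cluster"
	"k8s.io/client-go/rest"
)

// NewShardClientGetter returns a ClusterClientGetter that points a copy of the
// base rest.Config at each shard's URL. Field and constructor names are
// assumptions, not verified against kcp's API.
func NewShardClientGetter(base *rest.Config) ClusterClientGetter {
	return func(shard *corev1alpha1.Shard) (kcpclientset.ClusterInterface, error) {
		cfg := rest.CopyConfig(base)
		cfg.Host = shard.Spec.BaseURL // assumed field on the Shard spec
		return kcpclientset.NewForConfig(cfg)
	}
}
```

Such a getter would then be handed to NewController as the clientGetter argument.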
@@ -196,7 +196,7 @@ func (c *Controller) process(ctx context.Context, key string) error {
ws, err := c.logicalClusterLister.Cluster(clusterName).Get(name)
if err != nil {
if kerrors.IsNotFound(err) {
logger.V(2).Info("ClusterWorkspace not found - stopping garbage collector controller for it (if needed)")
logger.V(2).Info("LogicalCluster not found - stopping garbage collector controller for it (if needed)")

c.lock.Lock()
cancel, ok := c.cancelFuncs[clusterName]
@@ -230,15 +230,15 @@ func (c *Controller) process(ctx context.Context, key string) error {
ctx = klog.NewContext(ctx, logger)
c.cancelFuncs[clusterName] = cancel

if err := c.startGarbageCollectorForClusterWorkspace(ctx, clusterName); err != nil {
if err := c.startGarbageCollectorForLogicalCluster(ctx, clusterName); err != nil {
cancel()
return fmt.Errorf("error starting garbage collector controller for cluster %q: %w", clusterName, err)
}

return nil
}

func (c *Controller) startGarbageCollectorForClusterWorkspace(ctx context.Context, clusterName logicalcluster.Name) error {
func (c *Controller) startGarbageCollectorForLogicalCluster(ctx context.Context, clusterName logicalcluster.Name) error {
logger := klog.FromContext(ctx)

kubeClient := c.kubeClusterClient.Cluster(clusterName.Path())
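Note: the not-found branch above is cut off by the collapsed context. The usual completion of this pattern, cancelling and forgetting the per-cluster context, is sketched below with illustrative names and plain strings in place of logicalcluster.Name:

```go
package garbagecollector

import (
	"context"
	"sync"
)

// perClusterCancels tracks a cancel func per logical cluster so that work can
// be torn down when the LogicalCluster disappears.
type perClusterCancels struct {
	lock        sync.Mutex
	cancelFuncs map[string]context.CancelFunc
}

// stop cancels and forgets the per-cluster context, if one was started. This
// mirrors what the collapsed not-found branch above is assumed to do.
func (c *perClusterCancels) stop(clusterName string) {
	c.lock.Lock()
	defer c.lock.Unlock()

	if cancel, ok := c.cancelFuncs[clusterName]; ok {
		cancel()
		delete(c.cancelFuncs, clusterName)
	}
}
```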
4 changes: 2 additions & 2 deletions pkg/reconciler/kubequota/kubequota_controller.go
@@ -239,15 +239,15 @@ func (c *Controller) process(ctx context.Context, key string) error {
ctx = klog.NewContext(ctx, logger)
c.cancelFuncs[clusterName] = cancel

if err := c.startQuotaForClusterWorkspace(ctx, clusterName); err != nil {
if err := c.startQuotaForLogicalCluster(ctx, clusterName); err != nil {
cancel()
return fmt.Errorf("error starting quota controller for cluster %q: %w", clusterName, err)
}

return nil
}

func (c *Controller) startQuotaForClusterWorkspace(ctx context.Context, clusterName logicalcluster.Name) error {
func (c *Controller) startQuotaForLogicalCluster(ctx context.Context, clusterName logicalcluster.Name) error {
logger := klog.FromContext(ctx)
resourceQuotaControllerClient := c.kubeClusterClient.Cluster(clusterName.Path())

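Note: the garbage collector and quota controllers share the start side of this pattern: register a cancel func for the cluster, kick off the per-cluster controller, and undo the registration if startup fails. A condensed sketch with illustrative names follows; it is not kcp's exact code:

```go
package kubequota

import (
	"context"
	"fmt"
	"sync"
)

// clusterStarter starts one controller instance per logical cluster and keeps
// its cancel func so the instance can be stopped when the cluster goes away.
type clusterStarter struct {
	lock        sync.Mutex
	cancelFuncs map[string]context.CancelFunc

	// start runs the per-cluster controller until ctx is cancelled.
	start func(ctx context.Context, clusterName string) error
}

func (c *clusterStarter) ensureStarted(ctx context.Context, clusterName string) error {
	c.lock.Lock()
	defer c.lock.Unlock()

	if _, started := c.cancelFuncs[clusterName]; started {
		return nil // already running for this cluster
	}

	ctx, cancel := context.WithCancel(ctx)
	c.cancelFuncs[clusterName] = cancel

	if err := c.start(ctx, clusterName); err != nil {
		cancel()
		delete(c.cancelFuncs, clusterName)
		return fmt.Errorf("error starting quota controller for cluster %q: %w", clusterName, err)
	}
	return nil
}
```

The stop side is the lookup-and-cancel shown in the earlier sketch; together the two halves bracket the lifetime of each per-cluster controller.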
4 changes: 2 additions & 2 deletions pkg/virtual/initializingworkspaces/builder/build.go
@@ -127,7 +127,7 @@ func BuildVirtualWorkspace(
dynamicClusterClient: dynamicClusterClient,
exposeSubresources: false,
resource: &logicalClusterResource,
storageProvider: provideFilteredClusterWorkspacesReadOnlyRestStorage(getTenancyIdentity),
storageProvider: provideFilteredLogicalClusterReadOnlyRestStorage(getTenancyIdentity),
}, nil
},
}
@@ -164,7 +164,7 @@ func BuildVirtualWorkspace(
dynamicClusterClient: dynamicClusterClient,
exposeSubresources: true,
resource: &logicalClusterResource,
storageProvider: provideDelegatingClusterWorkspacesRestStorage(getTenancyIdentity),
storageProvider: provideDelegatingLogicalClusterRestStorage(getTenancyIdentity),
}, nil
},
}
4 changes: 2 additions & 2 deletions pkg/virtual/initializingworkspaces/builder/forwarding.go
@@ -60,7 +60,7 @@ func initializingWorkspaceRequirements(initializer corev1alpha1.LogicalClusterIn
return requirements, nil
}

func provideFilteredClusterWorkspacesReadOnlyRestStorage(getTenancyIdentity func() (string, error)) func(
func provideFilteredLogicalClusterReadOnlyRestStorage(getTenancyIdentity func() (string, error)) func(
ctx context.Context,
clusterClient kcpdynamic.ClusterInterface,
initializer corev1alpha1.LogicalClusterInitializer,
@@ -89,7 +89,7 @@ func provideFilteredClusterWorkspacesReadOnlyRestStorage(getTenancyIdentity func
}
}

func provideDelegatingClusterWorkspacesRestStorage(getTenancyIdentity func() (string, error)) func(
func provideDelegatingLogicalClusterRestStorage(getTenancyIdentity func() (string, error)) func(
ctx context.Context,
clusterClient kcpdynamic.ClusterInterface,
initializer corev1alpha1.LogicalClusterInitializer,
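Note: both providers share the same shape: capture what is known at build time (the tenancy-identity getter) and return a closure that builds the REST storage later. The closure's real parameter and result types are cut off above, so this sketch narrows them to a hypothetical restStorage placeholder and a ctx-only closure:

```go
package builder

import "context"

// restStorage is a hypothetical placeholder for whatever storage type the real
// providers return; the actual return type is not visible in this diff.
type restStorage interface{}

// provideStorage captures getTenancyIdentity up front and defers everything
// request-scoped to the returned closure, which is the shape both providers in
// forwarding.go share.
func provideStorage(
	getTenancyIdentity func() (string, error),
	build func(ctx context.Context, identity string) (restStorage, error),
) func(ctx context.Context) (restStorage, error) {
	return func(ctx context.Context) (restStorage, error) {
		identity, err := getTenancyIdentity()
		if err != nil {
			return nil, err
		}
		return build(ctx, identity)
	}
}
```

BuildVirtualWorkspace then picks between the filtered read-only and the delegating variant per registration, as shown in the build.go hunks above.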