diff --git a/.github/workflows/operator-ci.yml b/.github/workflows/operator-ci.yml
index 1bb7330e0..548653a17 100644
--- a/.github/workflows/operator-ci.yml
+++ b/.github/workflows/operator-ci.yml
@@ -187,3 +187,12 @@ jobs:
chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/setup --config .chainsaw.yaml
chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/test-scenarios --config .chainsaw.yaml
chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/cleanup --config .chainsaw.yaml
+
+ - name: Install Ginkgo CLI
+ run: |
+ go install github.com/onsi/ginkgo/v2/ginkgo@latest
+
+ - name: Run Ginkgo tests
+ run: |
+ go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
+ KUBEBUILDER_ASSETS="$(setup-envtest use 1.28.0 -p path)" ginkgo --github-output test/e2e/operator
diff --git a/cmd/thv-operator/Taskfile.yml b/cmd/thv-operator/Taskfile.yml
index e36667bc2..2b92ff3a4 100644
--- a/cmd/thv-operator/Taskfile.yml
+++ b/cmd/thv-operator/Taskfile.yml
@@ -198,7 +198,16 @@ tasks:
- go install sigs.k8s.io/controller-runtime/tools/setup-envtest@latest
- KUBEBUILDER_ASSETS="$(shell setup-envtest use 1.28.0 -p path)" go test ./cmd/thv-operator/... -coverprofile cover.out
+ operator-e2e-test-ginkgo:
+ desc: Run E2E tests for the operator with Ginkgo
+ cmds:
+ - KUBEBUILDER_ASSETS="$(setup-envtest use 1.28.0 -p path)" ginkgo -v test/e2e/operator
+
+ # Backwards compatibility
operator-e2e-test:
+ deps: [operator-e2e-test-chainsaw]
+
+ operator-e2e-test-chainsaw:
desc: Run E2E tests for the operator
cmds:
- |
diff --git a/cmd/thv-operator/api/v1alpha1/mcpregistry_types.go b/cmd/thv-operator/api/v1alpha1/mcpregistry_types.go
index 40478afc1..6b7708b83 100644
--- a/cmd/thv-operator/api/v1alpha1/mcpregistry_types.go
+++ b/cmd/thv-operator/api/v1alpha1/mcpregistry_types.go
@@ -97,7 +97,7 @@ type GitSource struct {
// Repository is the Git repository URL (HTTP/HTTPS/SSH)
// +kubebuilder:validation:Required
// +kubebuilder:validation:MinLength=1
- // +kubebuilder:validation:Pattern="^(https?://|git@|ssh://|git://).*"
+ // +kubebuilder:validation:Pattern="^(file:///|https?://|git@|ssh://|git://).*"
Repository string `json:"repository"`
// Branch is the Git branch to use (mutually exclusive with Tag and Commit)
@@ -186,6 +186,10 @@ type MCPRegistryStatus struct {
// +optional
APIStatus *APIStatus `json:"apiStatus,omitempty"`
+ // LastAppliedFilterHash is the hash of the last applied filter
+ // +optional
+ LastAppliedFilterHash string `json:"lastAppliedFilterHash,omitempty"`
+
// StorageRef is a reference to the internal storage location
// +optional
StorageRef *StorageReference `json:"storageRef,omitempty"`
diff --git a/cmd/thv-operator/controllers/mcpregistry_controller.go b/cmd/thv-operator/controllers/mcpregistry_controller.go
index 92e5091aa..58b3b2ad8 100644
--- a/cmd/thv-operator/controllers/mcpregistry_controller.go
+++ b/cmd/thv-operator/controllers/mcpregistry_controller.go
@@ -2,6 +2,9 @@ package controllers
import (
"context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
"fmt"
"time"
@@ -208,7 +211,7 @@ func (r *MCPRegistryReconciler) Reconcile(ctx context.Context, req ctrl.Request)
r.deriveOverallStatus(ctx, mcpRegistry, statusManager, statusDeriver)
// 8. Apply all status changes in a single batch update
- if statusUpdateErr := statusManager.Apply(ctx, r.Client); statusUpdateErr != nil {
+ if statusUpdateErr := r.applyStatusUpdates(ctx, r.Client, mcpRegistry, statusManager); statusUpdateErr != nil {
ctxLogger.Error(statusUpdateErr, "Failed to apply batched status update")
// Return the status update error only if there was no main reconciliation error
if syncErr == nil {
@@ -386,20 +389,23 @@ func (*MCPRegistryReconciler) deriveOverallStatus(
statusManager mcpregistrystatus.StatusManager, statusDeriver mcpregistrystatus.StatusDeriver) {
ctxLogger := log.FromContext(ctx)
+ syncStatus := statusManager.Sync().Status()
+ if syncStatus == nil {
+ syncStatus = mcpRegistry.Status.SyncStatus
+ }
+ apiStatus := statusManager.API().Status()
+ if apiStatus == nil {
+ apiStatus = mcpRegistry.Status.APIStatus
+ }
// Use the StatusDeriver to determine the overall phase and message
// based on current sync and API statuses
- derivedPhase, derivedMessage := statusDeriver.DeriveOverallStatus(
- mcpRegistry.Status.SyncStatus,
- mcpRegistry.Status.APIStatus,
- )
+ derivedPhase, derivedMessage := statusDeriver.DeriveOverallStatus(syncStatus, apiStatus)
// Only update phase and message if they've changed
statusManager.SetOverallStatus(derivedPhase, derivedMessage)
- ctxLogger.Info("Updated overall status",
- "oldPhase", mcpRegistry.Status.Phase,
- "newPhase", derivedPhase,
- "oldMessage", mcpRegistry.Status.Message,
- "newMessage", derivedMessage)
+ ctxLogger.Info("Updated overall status", "syncStatus", syncStatus, "apiStatus", apiStatus,
+ "oldPhase", mcpRegistry.Status.Phase, "newPhase", derivedPhase,
+ "oldMessage", mcpRegistry.Status.Message, "newMessage", derivedMessage)
}
// SetupWithManager sets up the controller with the Manager.
@@ -411,3 +417,86 @@ func (r *MCPRegistryReconciler) SetupWithManager(mgr ctrl.Manager) error {
Owns(&corev1.ConfigMap{}).
Complete(r)
}
+
+// Apply applies all collected status changes in a single batch update.
+// Only actual changes are applied to the status to avoid unnecessary reconciliations
+func (r *MCPRegistryReconciler) applyStatusUpdates(
+ ctx context.Context, k8sClient client.Client,
+ mcpRegistry *mcpv1alpha1.MCPRegistry, statusManager mcpregistrystatus.StatusManager) error {
+
+ ctxLogger := log.FromContext(ctx)
+
+ // Refetch the latest version of the resource to avoid conflicts
+ latestRegistry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(mcpRegistry), latestRegistry); err != nil {
+ ctxLogger.Error(err, "Failed to fetch latest MCPRegistry version for status update")
+ return fmt.Errorf("failed to fetch latest MCPRegistry version: %w", err)
+ }
+ latestRegistryStatus := latestRegistry.Status
+ hasUpdates := false
+
+ // Apply manual sync trigger change if necessary
+ if mcpRegistry.Annotations != nil {
+ if triggerValue := mcpRegistry.Annotations[mcpregistrystatus.SyncTriggerAnnotation]; triggerValue != "" {
+ if latestRegistryStatus.LastManualSyncTrigger != triggerValue {
+ latestRegistryStatus.LastManualSyncTrigger = triggerValue
+ hasUpdates = true
+ ctxLogger.Info("Updated LastManualSyncTrigger", "trigger", triggerValue)
+ }
+ }
+ }
+
+ // Apply filter change if necessary
+ currentFilterJSON, err := json.Marshal(mcpRegistry.Spec.Filter)
+ if err != nil {
+ ctxLogger.Error(err, "Failed to marshal current filter")
+ return fmt.Errorf("failed to marshal current filter: %w", err)
+ }
+ currentFilterHash := sha256.Sum256(currentFilterJSON)
+ currentFilterHashStr := hex.EncodeToString(currentFilterHash[:])
+ if latestRegistryStatus.LastAppliedFilterHash != currentFilterHashStr {
+ latestRegistryStatus.LastAppliedFilterHash = currentFilterHashStr
+ hasUpdates = true
+ ctxLogger.Info("Updated LastAppliedFilterHash", "hash", currentFilterHashStr)
+ }
+
+ // Update storage reference if necessary
+ storageRef := r.storageManager.GetStorageReference(latestRegistry)
+ if storageRef != nil {
+ if latestRegistryStatus.StorageRef == nil || latestRegistryStatus.StorageRef.ConfigMapRef.Name != storageRef.ConfigMapRef.Name {
+ latestRegistryStatus.StorageRef = storageRef
+ hasUpdates = true
+ ctxLogger.Info("Updated StorageRef", "storageRef", storageRef)
+ }
+ }
+
+ // Apply status changes from status manager
+ hasUpdates = statusManager.UpdateStatus(ctx, &latestRegistryStatus) || hasUpdates
+
+ // Single status update using the latest version
+ if hasUpdates {
+ latestRegistry.Status = latestRegistryStatus
+ if err := k8sClient.Status().Update(ctx, latestRegistry); err != nil {
+ ctxLogger.Error(err, "Failed to apply batched status update")
+ return fmt.Errorf("failed to apply batched status update: %w", err)
+ }
+ var syncPhase mcpv1alpha1.SyncPhase
+ if latestRegistryStatus.SyncStatus != nil {
+ syncPhase = latestRegistryStatus.SyncStatus.Phase
+ }
+ var apiPhase string
+ if latestRegistryStatus.APIStatus != nil {
+ apiPhase = string(latestRegistryStatus.APIStatus.Phase)
+ }
+ ctxLogger.V(1).Info("Applied batched status updates",
+ "phase", latestRegistryStatus.Phase,
+ "syncPhase", syncPhase,
+ "apiPhase", apiPhase,
+ "message", latestRegistryStatus.Message,
+ "conditionsCount", len(latestRegistryStatus.Conditions))
+ } else {
+ ctxLogger.V(1).Info("No batched status updates applied")
+ }
+
+ return nil
+}
diff --git a/cmd/thv-operator/pkg/mcpregistrystatus/collector.go b/cmd/thv-operator/pkg/mcpregistrystatus/collector.go
index 7cdb443bf..f6cc9efcb 100644
--- a/cmd/thv-operator/pkg/mcpregistrystatus/collector.go
+++ b/cmd/thv-operator/pkg/mcpregistrystatus/collector.go
@@ -3,11 +3,9 @@ package mcpregistrystatus
import (
"context"
- "fmt"
"k8s.io/apimachinery/pkg/api/meta"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/log"
mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
@@ -122,58 +120,46 @@ func (s *StatusCollector) SetAPIStatus(phase mcpv1alpha1.APIPhase, message strin
s.hasChanges = true
}
-// Apply applies all collected status changes in a single batch update.
-func (s *StatusCollector) Apply(ctx context.Context, k8sClient client.Client) error {
- if !s.hasChanges {
- return nil
- }
+// UpdateStatus applies all collected status changes in a single batch update.
+// Requires the MCPRegistryStatus being the updated version from the cluster
+func (s *StatusCollector) UpdateStatus(ctx context.Context, mcpRegistryStatus *mcpv1alpha1.MCPRegistryStatus) bool {
ctxLogger := log.FromContext(ctx)
- // Refetch the latest version of the resource to avoid conflicts
- latestRegistry := &mcpv1alpha1.MCPRegistry{}
- if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(s.mcpRegistry), latestRegistry); err != nil {
- ctxLogger.Error(err, "Failed to fetch latest MCPRegistry version for status update")
- return fmt.Errorf("failed to fetch latest MCPRegistry version: %w", err)
- }
-
- // Apply phase change
- if s.phase != nil {
- latestRegistry.Status.Phase = *s.phase
- }
-
- // Apply message change
- if s.message != nil {
- latestRegistry.Status.Message = *s.message
+ if s.hasChanges {
+ // Apply phase change
+ if s.phase != nil {
+ mcpRegistryStatus.Phase = *s.phase
+ }
+
+ // Apply message change
+ if s.message != nil {
+ mcpRegistryStatus.Message = *s.message
+ }
+
+ // Apply sync status change
+ if s.syncStatus != nil {
+ mcpRegistryStatus.SyncStatus = s.syncStatus
+ }
+
+ // Apply API status change
+ if s.apiStatus != nil {
+ mcpRegistryStatus.APIStatus = s.apiStatus
+ }
+
+ // Apply condition changes
+ for _, condition := range s.conditions {
+ meta.SetStatusCondition(&mcpRegistryStatus.Conditions, condition)
+ }
+
+ ctxLogger.V(1).Info("Batched status update applied",
+ "phase", s.phase,
+ "message", s.message,
+ "conditionsCount", len(s.conditions))
+ return true
}
-
- // Apply sync status change
- if s.syncStatus != nil {
- latestRegistry.Status.SyncStatus = s.syncStatus
- }
-
- // Apply API status change
- if s.apiStatus != nil {
- latestRegistry.Status.APIStatus = s.apiStatus
- }
-
- // Apply condition changes
- for _, condition := range s.conditions {
- meta.SetStatusCondition(&latestRegistry.Status.Conditions, condition)
- }
-
- // Single status update using the latest version
- if err := k8sClient.Status().Update(ctx, latestRegistry); err != nil {
- ctxLogger.Error(err, "Failed to apply batched status update")
- return fmt.Errorf("failed to apply batched status update: %w", err)
- }
-
- ctxLogger.V(1).Info("Applied batched status update",
- "phase", s.phase,
- "message", s.message,
- "conditionsCount", len(s.conditions))
-
- return nil
+ ctxLogger.V(1).Info("No batched status update needed")
+ return false
}
// StatusManager interface methods
@@ -196,6 +182,11 @@ func (s *StatusCollector) SetOverallStatus(phase mcpv1alpha1.MCPRegistryPhase, m
// SyncStatusCollector implementation
+// Status returns the sync status
+func (sc *syncStatusCollector) Status() *mcpv1alpha1.SyncStatus {
+ return sc.parent.syncStatus
+}
+
// SetSyncCondition sets a sync-related condition
func (sc *syncStatusCollector) SetSyncCondition(condition metav1.Condition) {
sc.parent.conditions[condition.Type] = condition
@@ -210,6 +201,11 @@ func (sc *syncStatusCollector) SetSyncStatus(phase mcpv1alpha1.SyncPhase, messag
// APIStatusCollector implementation
+// Status returns the API status
+func (ac *apiStatusCollector) Status() *mcpv1alpha1.APIStatus {
+ return ac.parent.apiStatus
+}
+
// SetAPIStatus delegates to the parent's SetAPIStatus method
func (ac *apiStatusCollector) SetAPIStatus(phase mcpv1alpha1.APIPhase, message string, endpoint string) {
ac.parent.SetAPIStatus(phase, message, endpoint)
diff --git a/cmd/thv-operator/pkg/mcpregistrystatus/collector_test.go b/cmd/thv-operator/pkg/mcpregistrystatus/collector_test.go
index 86e778757..8dbdbbf1a 100644
--- a/cmd/thv-operator/pkg/mcpregistrystatus/collector_test.go
+++ b/cmd/thv-operator/pkg/mcpregistrystatus/collector_test.go
@@ -322,9 +322,8 @@ func TestStatusCollector_Apply(t *testing.T) {
t.Parallel()
collector := NewStatusManager(registry).(*StatusCollector)
- err := collector.Apply(ctx, k8sClient)
-
- assert.NoError(t, err)
+ hasUpdates := collector.UpdateStatus(ctx, &registry.Status)
+ assert.False(t, hasUpdates)
})
t.Run("verifies hasChanges behavior", func(t *testing.T) {
@@ -394,7 +393,7 @@ func TestStatusCollector_MultipleConditions(t *testing.T) {
assert.Contains(t, collector.conditions, mcpv1alpha1.ConditionAPIReady)
}
-func TestStatusCollector_ApplyErrors(t *testing.T) {
+func TestStatusCollector_NoUpdates(t *testing.T) {
t.Parallel()
ctx := context.Background()
@@ -406,9 +405,6 @@ func TestStatusCollector_ApplyErrors(t *testing.T) {
t.Run("error fetching latest registry", func(t *testing.T) {
t.Parallel()
- // Create client that will fail on Get
- k8sClient := fake.NewClientBuilder().WithScheme(scheme).Build()
-
// Create collector with registry that doesn't exist in client
registry := &mcpv1alpha1.MCPRegistry{
ObjectMeta: metav1.ObjectMeta{
@@ -417,12 +413,10 @@ func TestStatusCollector_ApplyErrors(t *testing.T) {
},
}
- collector := newStatusCollector(registry)
- collector.SetPhase(mcpv1alpha1.MCPRegistryPhaseReady) // Make some changes
+ collector := newStatusCollector(registry) // No changes
+ hasUpdates := collector.UpdateStatus(ctx, &registry.Status)
+ assert.False(t, hasUpdates)
- err := collector.Apply(ctx, k8sClient)
- assert.Error(t, err)
- assert.Contains(t, err.Error(), "failed to fetch latest MCPRegistry version")
})
}
diff --git a/cmd/thv-operator/pkg/sync/constants.go b/cmd/thv-operator/pkg/mcpregistrystatus/constants.go
similarity index 86%
rename from cmd/thv-operator/pkg/sync/constants.go
rename to cmd/thv-operator/pkg/mcpregistrystatus/constants.go
index ef0b7bc57..ee958f5ca 100644
--- a/cmd/thv-operator/pkg/sync/constants.go
+++ b/cmd/thv-operator/pkg/mcpregistrystatus/constants.go
@@ -1,4 +1,4 @@
-package sync
+package mcpregistrystatus
const (
// SyncTriggerAnnotation is the annotation key used to trigger registry synchronization
diff --git a/cmd/thv-operator/pkg/mcpregistrystatus/types.go b/cmd/thv-operator/pkg/mcpregistrystatus/types.go
index c1d8c7311..174da8d62 100644
--- a/cmd/thv-operator/pkg/mcpregistrystatus/types.go
+++ b/cmd/thv-operator/pkg/mcpregistrystatus/types.go
@@ -5,7 +5,6 @@ import (
"context"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
- "sigs.k8s.io/controller-runtime/pkg/client"
mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
)
@@ -26,10 +25,13 @@ func (e *Error) Unwrap() error {
return e.Err
}
-//go:generate mockgen -destination=mocks/mock_status.go -package=mocks -source=types.go SyncStatusCollector,APIStatusCollector,StatusDeriver,StatusManager
+//go:generate mockgen -destination=mocks/mock_collector.go -package=mocks -source=types.go SyncStatusCollector,APIStatusCollector,StatusDeriver,StatusManager
// SyncStatusCollector handles sync-related status updates
type SyncStatusCollector interface {
+ // Status returns the sync status
+ Status() *mcpv1alpha1.SyncStatus
+
// SetSyncStatus sets the detailed sync status
SetSyncStatus(phase mcpv1alpha1.SyncPhase, message string, attemptCount int,
lastSyncTime *metav1.Time, lastSyncHash string, serverCount int)
@@ -40,6 +42,9 @@ type SyncStatusCollector interface {
// APIStatusCollector handles API-related status updates
type APIStatusCollector interface {
+ // Status returns the API status
+ Status() *mcpv1alpha1.APIStatus
+
// SetAPIStatus sets the detailed API status
SetAPIStatus(phase mcpv1alpha1.APIPhase, message string, endpoint string)
@@ -67,6 +72,6 @@ type StatusManager interface {
// SetCondition sets a general condition
SetCondition(conditionType, reason, message string, status metav1.ConditionStatus)
- // Apply applies all collected status changes in a single batch update
- Apply(ctx context.Context, k8sClient client.Client) error
+ // UpdateStatus updates the status of the MCPRegistry if any change happened
+ UpdateStatus(ctx context.Context, mcpRegistryStatus *mcpv1alpha1.MCPRegistryStatus) bool
}
diff --git a/cmd/thv-operator/pkg/sync/detectors.go b/cmd/thv-operator/pkg/sync/detectors.go
index 4fc7fb240..2fe84ed3b 100644
--- a/cmd/thv-operator/pkg/sync/detectors.go
+++ b/cmd/thv-operator/pkg/sync/detectors.go
@@ -7,6 +7,7 @@ import (
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+ "github.com/stacklok/toolhive/cmd/thv-operator/pkg/mcpregistrystatus"
"github.com/stacklok/toolhive/cmd/thv-operator/pkg/sources"
)
@@ -54,7 +55,7 @@ func (*DefaultManualSyncChecker) IsManualSyncRequested(mcpRegistry *mcpv1alpha1.
return false, ManualSyncReasonNoAnnotations
}
- triggerValue := mcpRegistry.Annotations[SyncTriggerAnnotation]
+ triggerValue := mcpRegistry.Annotations[mcpregistrystatus.SyncTriggerAnnotation]
if triggerValue == "" {
return false, ManualSyncReasonNoTrigger
}
@@ -86,7 +87,7 @@ func (*DefaultAutomaticSyncChecker) IsIntervalSyncNeeded(mcpRegistry *mcpv1alpha
// Check for last sync time in syncStatus first, then fallback
var lastSyncTime *metav1.Time
if mcpRegistry.Status.SyncStatus != nil {
- lastSyncTime = mcpRegistry.Status.SyncStatus.LastSyncTime
+ lastSyncTime = mcpRegistry.Status.SyncStatus.LastAttempt
}
// If we don't have a last sync time, sync is needed
diff --git a/cmd/thv-operator/pkg/sync/detectors_test.go b/cmd/thv-operator/pkg/sync/detectors_test.go
index 63ce68023..4e825b280 100644
--- a/cmd/thv-operator/pkg/sync/detectors_test.go
+++ b/cmd/thv-operator/pkg/sync/detectors_test.go
@@ -13,6 +13,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+ "github.com/stacklok/toolhive/cmd/thv-operator/pkg/mcpregistrystatus"
"github.com/stacklok/toolhive/cmd/thv-operator/pkg/sources"
)
@@ -236,7 +237,7 @@ func TestDefaultManualSyncChecker_IsManualSyncRequested(t *testing.T) {
mcpRegistry: &mcpv1alpha1.MCPRegistry{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
- SyncTriggerAnnotation: "",
+ mcpregistrystatus.SyncTriggerAnnotation: "",
},
},
},
@@ -248,7 +249,7 @@ func TestDefaultManualSyncChecker_IsManualSyncRequested(t *testing.T) {
mcpRegistry: &mcpv1alpha1.MCPRegistry{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
- SyncTriggerAnnotation: "trigger-123",
+ mcpregistrystatus.SyncTriggerAnnotation: "trigger-123",
},
},
Status: mcpv1alpha1.MCPRegistryStatus{
@@ -263,7 +264,7 @@ func TestDefaultManualSyncChecker_IsManualSyncRequested(t *testing.T) {
mcpRegistry: &mcpv1alpha1.MCPRegistry{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
- SyncTriggerAnnotation: "trigger-456",
+ mcpregistrystatus.SyncTriggerAnnotation: "trigger-456",
},
},
Status: mcpv1alpha1.MCPRegistryStatus{
@@ -278,7 +279,7 @@ func TestDefaultManualSyncChecker_IsManualSyncRequested(t *testing.T) {
mcpRegistry: &mcpv1alpha1.MCPRegistry{
ObjectMeta: metav1.ObjectMeta{
Annotations: map[string]string{
- SyncTriggerAnnotation: "first-trigger",
+ mcpregistrystatus.SyncTriggerAnnotation: "first-trigger",
},
},
Status: mcpv1alpha1.MCPRegistryStatus{
@@ -382,6 +383,7 @@ func TestDefaultAutomaticSyncChecker_IsIntervalSyncNeeded(t *testing.T) {
},
Status: mcpv1alpha1.MCPRegistryStatus{
SyncStatus: &mcpv1alpha1.SyncStatus{
+ LastAttempt: &metav1.Time{Time: now.Add(-30 * time.Minute)}, // 30 minutes ago
LastSyncTime: &metav1.Time{Time: now.Add(-30 * time.Minute)}, // 30 minutes ago
},
},
diff --git a/cmd/thv-operator/pkg/sync/manager.go b/cmd/thv-operator/pkg/sync/manager.go
index 05a2a4755..3683b53ce 100644
--- a/cmd/thv-operator/pkg/sync/manager.go
+++ b/cmd/thv-operator/pkg/sync/manager.go
@@ -2,10 +2,12 @@ package sync
import (
"context"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
"fmt"
"time"
- metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/client"
@@ -30,6 +32,9 @@ const (
ReasonRegistryNotReady = "registry-not-ready"
ReasonRequeueTimeNotElapsed = "requeue-time-not-elapsed"
+ // Filter change related reasons
+ ReasonFilterChanged = "filter-changed"
+
// Data change related reasons
ReasonSourceDataChanged = "source-data-changed"
ReasonErrorCheckingChanges = "error-checking-data-changes"
@@ -139,6 +144,7 @@ func NewDefaultSyncManager(k8sClient client.Client, scheme *runtime.Scheme,
}
// ShouldSync determines if a sync operation is needed and when the next sync should occur
+// nolint:gocyclo
func (s *DefaultSyncManager) ShouldSync(
ctx context.Context,
mcpRegistry *mcpv1alpha1.MCPRegistry) (bool, string, *time.Time) {
@@ -149,52 +155,77 @@ func (s *DefaultSyncManager) ShouldSync(
return false, ReasonAlreadyInProgress, nil
}
- // Check if sync is needed based on registry state
- if syncNeeded := s.isSyncNeededForState(mcpRegistry); syncNeeded {
- if requeueElapsed := s.isRequeueElapsed(mcpRegistry); requeueElapsed {
- return true, ReasonRegistryNotReady, nil
- }
- ctxLogger.Info("Sync not needed because requeue time not elapsed",
- "requeueTime", DefaultSyncRequeueAfter, "lastAttempt", mcpRegistry.Status.SyncStatus.LastAttempt)
- return false, ReasonRequeueTimeNotElapsed, nil
- }
-
- // Check if source data has changed by comparing hash
- dataChanged, err := s.dataChangeDetector.IsDataChanged(ctx, mcpRegistry)
- if err != nil {
- ctxLogger.Error(err, "Failed to determine if data has changed")
- return true, ReasonErrorCheckingChanges, nil
- }
+ // Check if requeue time has elapsed and pre-compute next sync time
+ requeueElapsed, nextSyncTime := s.calculateNextSyncTime(ctx, mcpRegistry)
+ // Check if sync is needed based on registry state
+ syncNeededForState := s.isSyncNeededForState(mcpRegistry)
// Check for manual sync trigger first (always update trigger tracking)
manualSyncRequested, _ := s.manualSyncChecker.IsManualSyncRequested(mcpRegistry)
- // Manual sync was requested - but only sync if data has actually changed
- if manualSyncRequested {
- if dataChanged {
- return true, ReasonManualWithChanges, nil
+ // Check if filter has changed
+ filterChanged := s.isFilterChanged(ctx, mcpRegistry)
+
+ shouldSync := false
+ reason := ReasonUpToDateNoPolicy
+
+ if syncNeededForState {
+ if !requeueElapsed {
+ ctxLogger.Info("Sync not needed because requeue time not elapsed",
+ "requeueTime", DefaultSyncRequeueAfter, "lastAttempt", mcpRegistry.Status.SyncStatus.LastAttempt)
+ reason = ReasonRequeueTimeNotElapsed
+ } else {
+ shouldSync = true
}
- // Manual sync requested but no data changes - update trigger tracking only
- return true, ReasonManualNoChanges, nil
}
- if dataChanged {
- return true, ReasonSourceDataChanged, nil
+ if !shouldSync && manualSyncRequested {
+ // Manual sync requested
+ shouldSync = true
+ nextSyncTime = nil
}
- // Data hasn't changed - check if we need to schedule future checks
- if mcpRegistry.Spec.SyncPolicy != nil {
- _, nextSyncTime, err := s.automaticSyncChecker.IsIntervalSyncNeeded(mcpRegistry)
+ if !shouldSync && filterChanged {
+ // Filter changed
+ shouldSync = true
+ reason = ReasonFilterChanged
+ } else if shouldSync || requeueElapsed {
+ // Check if source data has changed by comparing hash
+ dataChanged, err := s.dataChangeDetector.IsDataChanged(ctx, mcpRegistry)
if err != nil {
- ctxLogger.Error(err, "Failed to determine if interval sync is needed")
- return true, ReasonErrorParsingInterval, nil
+ ctxLogger.Error(err, "Failed to determine if data has changed")
+ shouldSync = true
+ reason = ReasonErrorCheckingChanges
+ } else {
+ ctxLogger.Info("Checked data changes", "dataChanged", dataChanged)
+ if dataChanged {
+ shouldSync = true
+ if syncNeededForState {
+ reason = ReasonRegistryNotReady
+ } else if manualSyncRequested {
+ reason = ReasonManualWithChanges
+ } else {
+ reason = ReasonSourceDataChanged
+ }
+ } else {
+ shouldSync = false
+ if syncNeededForState {
+ reason = ReasonUpToDateWithPolicy
+ } else {
+ reason = ReasonManualNoChanges
+ }
+ }
}
-
- // No sync needed since data hasn't changed, but schedule next check
- return false, ReasonUpToDateWithPolicy, &nextSyncTime
}
- // No automatic sync policy, registry is up-to-date
- return false, ReasonUpToDateNoPolicy, nil
+ ctxLogger.Info("ShouldSync", "syncNeededForState", syncNeededForState, "filterChanged", filterChanged,
+ "requeueElapsed", requeueElapsed, "manualSyncRequested", manualSyncRequested, "nextSyncTime",
+ nextSyncTime)
+ ctxLogger.Info("ShouldSync returning", "shouldSync", shouldSync, "reason", reason, "nextSyncTime", nextSyncTime)
+
+ if shouldSync {
+ return shouldSync, reason, nil
+ }
+ return shouldSync, reason, nextSyncTime
}
// isSyncNeededForState checks if sync is needed based on the registry's current state
@@ -214,28 +245,62 @@ func (*DefaultSyncManager) isSyncNeededForState(mcpRegistry *mcpv1alpha1.MCPRegi
return false
}
- // Fallback to old behavior when sync status is not available
- if mcpRegistry.Status.Phase == mcpv1alpha1.MCPRegistryPhaseFailed {
- return true
- }
- // If phase is Pending but we have LastSyncTime, sync was completed before
- var lastSyncTime *metav1.Time
- if mcpRegistry.Status.SyncStatus != nil {
- lastSyncTime = mcpRegistry.Status.SyncStatus.LastSyncTime
+ // If we don't have sync status, sync is needed
+ return true
+}
+
+// isFilterChanged checks if the filter has changed compared to the last applied configuration
+func (*DefaultSyncManager) isFilterChanged(ctx context.Context, mcpRegistry *mcpv1alpha1.MCPRegistry) bool {
+ logger := log.FromContext(ctx)
+
+ currentFilter := mcpRegistry.Spec.Filter
+ currentFilterJSON, err := json.Marshal(currentFilter)
+ if err != nil {
+ logger.Error(err, "Failed to marshal current filter")
+ return false
}
- if mcpRegistry.Status.Phase == mcpv1alpha1.MCPRegistryPhasePending && lastSyncTime == nil {
- return true
+ currentFilterHash := sha256.Sum256(currentFilterJSON)
+ currentHashStr := hex.EncodeToString(currentFilterHash[:])
+
+ lastHash := mcpRegistry.Status.LastAppliedFilterHash
+ if lastHash == "" {
+ // First time - no change
+ return false
}
- // For all other cases (Ready, or Pending with LastSyncTime), no sync needed based on state
- return false
+
+ logger.V(1).Info("Current filter hash", "currentFilterHash", currentHashStr)
+ logger.V(1).Info("Last applied filter hash", "lastHash", lastHash)
+ return currentHashStr != lastHash
}
-// isRequeueElapsed checks if the requeue time has elapsed
-func (*DefaultSyncManager) isRequeueElapsed(mcpRegistry *mcpv1alpha1.MCPRegistry) bool {
- if mcpRegistry.Status.SyncStatus != nil && mcpRegistry.Status.SyncStatus.LastAttempt != nil {
- return time.Now().After(mcpRegistry.Status.SyncStatus.LastAttempt.Add(DefaultSyncRequeueAfter))
+// calculateNextSyncTime checks if the requeue or sync policy time has elapsed and calculates the next requeue time
+func (s *DefaultSyncManager) calculateNextSyncTime(ctx context.Context, mcpRegistry *mcpv1alpha1.MCPRegistry) (bool, *time.Time) {
+ ctxLogger := log.FromContext(ctx)
+
+ // First consider the requeue time
+ requeueElapsed := false
+ var nextSyncTime time.Time
+ if mcpRegistry.Status.SyncStatus != nil {
+ if mcpRegistry.Status.SyncStatus.LastAttempt != nil {
+ nextSyncTime = mcpRegistry.Status.SyncStatus.LastAttempt.Add(DefaultSyncRequeueAfter)
+ }
}
- return true
+
+ // If we have a sync policy, check if the next automatic sync time is sooner than the next requeue time
+ if mcpRegistry.Spec.SyncPolicy != nil {
+ autoSyncNeeded, nextAutomaticSyncTime, err := s.automaticSyncChecker.IsIntervalSyncNeeded(mcpRegistry)
+ if err != nil {
+ ctxLogger.Error(err, "Failed to determine if interval sync is needed")
+ }
+
+ // Resync at the earlier time between the next sync time and the next automatic sync time
+ if autoSyncNeeded && nextSyncTime.After(nextAutomaticSyncTime) {
+ nextSyncTime = nextAutomaticSyncTime
+ }
+ }
+
+ requeueElapsed = time.Now().After(nextSyncTime)
+ return requeueElapsed, &nextSyncTime
}
// PerformSync performs the complete sync operation for the MCPRegistry
@@ -254,11 +319,6 @@ func (s *DefaultSyncManager) PerformSync(
return ctrl.Result{RequeueAfter: DefaultSyncRequeueAfter}, nil, err
}
- // Update the core registry fields that sync manager owns
- if err := s.updateCoreRegistryFields(ctx, mcpRegistry, fetchResult); err != nil {
- return ctrl.Result{}, nil, err
- }
-
// Return sync result with data for status collector
syncResult := &Result{
Hash: fetchResult.Hash,
@@ -281,7 +341,7 @@ func (s *DefaultSyncManager) UpdateManualSyncTriggerOnly(
// Update manual sync trigger tracking
if mcpRegistry.Annotations != nil {
- if triggerValue := mcpRegistry.Annotations[SyncTriggerAnnotation]; triggerValue != "" {
+ if triggerValue := mcpRegistry.Annotations[mcpregistrystatus.SyncTriggerAnnotation]; triggerValue != "" {
mcpRegistry.Status.LastManualSyncTrigger = triggerValue
ctxLogger.Info("Manual sync trigger processed (no data changes)", "trigger", triggerValue)
}
@@ -419,56 +479,3 @@ func (s *DefaultSyncManager) storeRegistryData(
return nil
}
-
-// updateCoreRegistryFields updates the core registry fields after a successful sync
-// Note: Does not update phase, sync status, or API status - those are handled by the controller operation
-func (s *DefaultSyncManager) updateCoreRegistryFields(
- ctx context.Context,
- mcpRegistry *mcpv1alpha1.MCPRegistry,
- fetchResult *sources.FetchResult) *mcpregistrystatus.Error {
- ctxLogger := log.FromContext(ctx)
-
- // Refresh the object to get latest resourceVersion before final update
- if err := s.client.Get(ctx, client.ObjectKeyFromObject(mcpRegistry), mcpRegistry); err != nil {
- ctxLogger.Error(err, "Failed to refresh MCPRegistry object")
- return &mcpregistrystatus.Error{
- Err: err,
- Message: fmt.Sprintf("Failed to refresh MCPRegistry object: %v", err),
- ConditionType: mcpv1alpha1.ConditionSyncSuccessful,
- ConditionReason: "ObjectRefreshFailed",
- }
- }
-
- // Get storage reference
- storageRef := s.storageManager.GetStorageReference(mcpRegistry)
-
- // Update storage reference only - status fields are now handled by status collector
- if storageRef != nil {
- mcpRegistry.Status.StorageRef = storageRef
- }
-
- // Update manual sync trigger tracking if annotation exists
- if mcpRegistry.Annotations != nil {
- if triggerValue := mcpRegistry.Annotations[SyncTriggerAnnotation]; triggerValue != "" {
- mcpRegistry.Status.LastManualSyncTrigger = triggerValue
- ctxLogger.Info("Manual sync trigger processed", "trigger", triggerValue)
- }
- }
-
- // Single final status update
- if err := s.client.Status().Update(ctx, mcpRegistry); err != nil {
- ctxLogger.Error(err, "Failed to update core registry fields")
- return &mcpregistrystatus.Error{
- Err: err,
- Message: fmt.Sprintf("Failed to update core registry fields: %v", err),
- ConditionType: mcpv1alpha1.ConditionSyncSuccessful,
- ConditionReason: "StatusUpdateFailed",
- }
- }
-
- ctxLogger.Info("MCPRegistry sync completed successfully",
- "serverCount", fetchResult.ServerCount,
- "hash", fetchResult.Hash)
-
- return nil
-}
diff --git a/cmd/thv-operator/pkg/sync/manager_additional_test.go b/cmd/thv-operator/pkg/sync/manager_additional_test.go
index c78eed19d..a2ffe314c 100644
--- a/cmd/thv-operator/pkg/sync/manager_additional_test.go
+++ b/cmd/thv-operator/pkg/sync/manager_additional_test.go
@@ -89,14 +89,14 @@ func TestDefaultSyncManager_isSyncNeededForState(t *testing.T) {
description: "Should not need sync when sync complete but overall pending (waiting for API)",
},
{
- name: "sync not needed when no sync status and ready phase",
+ name: "sync needed when no sync status and ready phase",
mcpRegistry: &mcpv1alpha1.MCPRegistry{
Status: mcpv1alpha1.MCPRegistryStatus{
Phase: mcpv1alpha1.MCPRegistryPhaseReady,
},
},
- expected: false,
- description: "Should not need sync when overall phase is ready",
+ expected: true,
+ description: "Should need sync when no sync status",
},
}
@@ -172,7 +172,7 @@ func TestDefaultSyncManager_isSyncNeededForState_EdgeCases(t *testing.T) {
// This should not panic but return sensible default
result := manager.isSyncNeededForState(&mcpv1alpha1.MCPRegistry{})
- assert.False(t, result, "Should not need sync for empty registry")
+ assert.True(t, result, "Should need sync for empty registry")
})
t.Run("handles registry with empty status", func(t *testing.T) {
@@ -182,7 +182,7 @@ func TestDefaultSyncManager_isSyncNeededForState_EdgeCases(t *testing.T) {
Status: mcpv1alpha1.MCPRegistryStatus{},
}
result := manager.isSyncNeededForState(registry)
- assert.False(t, result, "Should not need sync for empty status")
+ assert.True(t, result, "Should need sync for empty status")
})
t.Run("handles registry with sync status but empty phase", func(t *testing.T) {
diff --git a/cmd/thv-operator/pkg/sync/manager_test.go b/cmd/thv-operator/pkg/sync/manager_test.go
index b4764c87a..e9358984a 100644
--- a/cmd/thv-operator/pkg/sync/manager_test.go
+++ b/cmd/thv-operator/pkg/sync/manager_test.go
@@ -14,6 +14,7 @@ import (
"sigs.k8s.io/controller-runtime/pkg/client/fake"
mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+ "github.com/stacklok/toolhive/cmd/thv-operator/pkg/mcpregistrystatus"
"github.com/stacklok/toolhive/cmd/thv-operator/pkg/sources"
)
@@ -129,14 +130,14 @@ func TestDefaultSyncManager_ShouldSync(t *testing.T) {
expectedNextTime: false,
},
{
- name: "manual sync requested with new trigger value",
+ name: "manual sync not needed with new trigger value and same hash",
mcpRegistry: &mcpv1alpha1.MCPRegistry{
ObjectMeta: metav1.ObjectMeta{
Name: "test-registry",
Namespace: "test-namespace",
UID: types.UID("test-uid"),
Annotations: map[string]string{
- SyncTriggerAnnotation: "manual-sync-123",
+ mcpregistrystatus.SyncTriggerAnnotation: "manual-sync-123",
},
},
Spec: mcpv1alpha1.MCPRegistrySpec{
@@ -167,7 +168,7 @@ func TestDefaultSyncManager_ShouldSync(t *testing.T) {
"registry.json": "test", // This will produce the same hash as above
},
},
- expectedSyncNeeded: true,
+ expectedSyncNeeded: false,
expectedReason: ReasonManualNoChanges, // No data changes but manual trigger
expectedNextTime: false,
},
@@ -198,10 +199,10 @@ func TestDefaultSyncManager_ShouldSync(t *testing.T) {
// We expect some errors for ConfigMap not found, but that's okay for this test
if tt.expectedSyncNeeded {
- assert.True(t, syncNeeded, "Expected sync to be needed")
+ assert.True(t, syncNeeded, "Expected sync to be needed for "+tt.name)
assert.Equal(t, tt.expectedReason, reason, "Expected specific sync reason")
} else {
- assert.False(t, syncNeeded, "Expected sync not to be needed")
+ assert.False(t, syncNeeded, "Expected sync not to be needed for "+tt.name)
assert.Equal(t, tt.expectedReason, reason, "Expected specific sync reason")
}
@@ -307,7 +308,7 @@ func TestDefaultSyncManager_PerformSync(t *testing.T) {
Namespace: "test-namespace",
UID: types.UID("test-uid"),
Annotations: map[string]string{
- SyncTriggerAnnotation: "manual-123",
+ mcpregistrystatus.SyncTriggerAnnotation: "manual-123",
},
},
Spec: mcpv1alpha1.MCPRegistrySpec{
@@ -521,10 +522,10 @@ func TestDefaultSyncManager_PerformSync(t *testing.T) {
assert.Len(t, tt.mcpRegistry.Status.Conditions, 0, "Sync manager should not set conditions")
}
- // Verify manual sync trigger is processed if annotation exists (this is still done by sync manager)
+ // Verify the manual sync trigger annotation is NOT copied into status (the sync manager no longer does this)
if tt.mcpRegistry.Annotations != nil {
- if triggerValue := tt.mcpRegistry.Annotations[SyncTriggerAnnotation]; triggerValue != "" {
- assert.Equal(t, triggerValue, tt.mcpRegistry.Status.LastManualSyncTrigger)
+ if triggerValue := tt.mcpRegistry.Annotations[mcpregistrystatus.SyncTriggerAnnotation]; triggerValue != "" {
+ assert.NotEqual(t, triggerValue, tt.mcpRegistry.Status.LastManualSyncTrigger)
}
}
})
@@ -552,7 +553,7 @@ func TestDefaultSyncManager_UpdateManualSyncTriggerOnly(t *testing.T) {
Namespace: "test-namespace",
UID: types.UID("test-uid"),
Annotations: map[string]string{
- SyncTriggerAnnotation: "manual-trigger-123",
+ mcpregistrystatus.SyncTriggerAnnotation: "manual-trigger-123",
},
},
Spec: mcpv1alpha1.MCPRegistrySpec{
diff --git a/deploy/charts/operator-crds/Chart.yaml b/deploy/charts/operator-crds/Chart.yaml
index 28e807727..e3a13ae36 100644
--- a/deploy/charts/operator-crds/Chart.yaml
+++ b/deploy/charts/operator-crds/Chart.yaml
@@ -2,5 +2,5 @@ apiVersion: v2
name: toolhive-operator-crds
description: A Helm chart for installing the ToolHive Operator CRDs into Kubernetes.
type: application
-version: 0.0.32
+version: 0.0.33
appVersion: "0.0.1"
diff --git a/deploy/charts/operator-crds/README.md b/deploy/charts/operator-crds/README.md
index a85c9a955..059ffd648 100644
--- a/deploy/charts/operator-crds/README.md
+++ b/deploy/charts/operator-crds/README.md
@@ -1,7 +1,7 @@
# ToolHive Operator CRDs Helm Chart
-
+

A Helm chart for installing the ToolHive Operator CRDs into Kubernetes.
diff --git a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpregistries.yaml b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpregistries.yaml
index 82fdbca38..e3c2a30e3 100644
--- a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpregistries.yaml
+++ b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpregistries.yaml
@@ -160,7 +160,7 @@ spec:
repository:
description: Repository is the Git repository URL (HTTP/HTTPS/SSH)
minLength: 1
- pattern: ^(https?://|git@|ssh://|git://).*
+ pattern: ^(file:///|https?://|git@|ssh://|git://).*
type: string
tag:
description: Tag is the Git tag to use (mutually exclusive
@@ -297,6 +297,10 @@ spec:
x-kubernetes-list-map-keys:
- type
x-kubernetes-list-type: map
+ lastAppliedFilterHash:
+ description: LastAppliedFilterHash is the hash of the last applied
+ filter
+ type: string
lastManualSyncTrigger:
description: |-
LastManualSyncTrigger tracks the last processed manual sync annotation value
diff --git a/docs/operator/crd-api.md b/docs/operator/crd-api.md
index b287d5e73..32e18dfea 100644
--- a/docs/operator/crd-api.md
+++ b/docs/operator/crd-api.md
@@ -174,7 +174,7 @@ _Appears in:_
| Field | Description | Default | Validation |
| --- | --- | --- | --- |
-| `repository` _string_ | Repository is the Git repository URL (HTTP/HTTPS/SSH) | | MinLength: 1
Pattern: `^(https?://\|git@\|ssh://\|git://).*`
Required: \{\}
|
+| `repository` _string_ | Repository is the Git repository URL (HTTP/HTTPS/SSH) | | MinLength: 1
Pattern: `^(file:///\|https?://\|git@\|ssh://\|git://).*`
Required: \{\}
|
| `branch` _string_ | Branch is the Git branch to use (mutually exclusive with Tag and Commit) | | MinLength: 1
|
| `tag` _string_ | Tag is the Git tag to use (mutually exclusive with Branch and Commit) | | MinLength: 1
|
| `commit` _string_ | Commit is the Git commit SHA to use (mutually exclusive with Branch and Tag) | | MinLength: 1
|
@@ -364,6 +364,7 @@ _Appears in:_
| `message` _string_ | Message provides additional information about the current phase | | |
| `syncStatus` _[SyncStatus](#syncstatus)_ | SyncStatus provides detailed information about data synchronization | | |
| `apiStatus` _[APIStatus](#apistatus)_ | APIStatus provides detailed information about the API service | | |
+| `lastAppliedFilterHash` _string_ | LastAppliedFilterHash is the hash of the last applied filter | | |
| `storageRef` _[StorageReference](#storagereference)_ | StorageRef is a reference to the internal storage location | | |
| `lastManualSyncTrigger` _string_ | LastManualSyncTrigger tracks the last processed manual sync annotation value
Used to detect new manual sync requests via toolhive.stacklok.dev/sync-trigger annotation | | |
| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#condition-v1-meta) array_ | Conditions represent the latest available observations of the MCPRegistry's state | | |
diff --git a/test/e2e/operator/configmap_helpers.go b/test/e2e/operator/configmap_helpers.go
new file mode 100644
index 000000000..b08e85945
--- /dev/null
+++ b/test/e2e/operator/configmap_helpers.go
@@ -0,0 +1,215 @@
+package operator_test
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+
+ ginkgo "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// ConfigMapTestHelper provides utilities for ConfigMap testing and validation
+type ConfigMapTestHelper struct {
+ Client client.Client
+ Context context.Context
+ Namespace string
+}
+
+// NewConfigMapTestHelper creates a new test helper for ConfigMap operations
+func NewConfigMapTestHelper(ctx context.Context, k8sClient client.Client, namespace string) *ConfigMapTestHelper {
+ return &ConfigMapTestHelper{
+ Client: k8sClient,
+ Context: ctx,
+ Namespace: namespace,
+ }
+}
+
+// RegistryServer represents a server definition in the registry
+type RegistryServer struct {
+ Name string `json:"name"`
+ Description string `json:"description,omitempty"`
+ Tier string `json:"tier"`
+ Status string `json:"status"`
+ Transport string `json:"transport"`
+ Tools []string `json:"tools"`
+ Image string `json:"image"`
+ Tags []string `json:"tags,omitempty"`
+}
+
+// ToolHiveRegistryData represents the ToolHive registry format
+type ToolHiveRegistryData struct {
+ Version string `json:"version"`
+ LastUpdated string `json:"last_updated"`
+ Servers map[string]RegistryServer `json:"servers"`
+ RemoteServers map[string]RegistryServer `json:"remoteServers"`
+}
+
+// UpstreamRegistryData represents the upstream MCP registry format
+type UpstreamRegistryData struct {
+ Servers map[string]RegistryServer `json:"servers"`
+}
+
+// ConfigMapBuilder provides a fluent interface for building ConfigMaps
+type ConfigMapBuilder struct {
+ configMap *corev1.ConfigMap
+}
+
+// NewConfigMapBuilder creates a new ConfigMap builder
+func (h *ConfigMapTestHelper) NewConfigMapBuilder(name string) *ConfigMapBuilder {
+ return &ConfigMapBuilder{
+ configMap: &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: h.Namespace,
+ Labels: map[string]string{
+ "test.toolhive.io/suite": "operator-e2e",
+ },
+ },
+ Data: make(map[string]string),
+ },
+ }
+}
+
+// WithLabel adds a label to the ConfigMap
+func (cb *ConfigMapBuilder) WithLabel(key, value string) *ConfigMapBuilder {
+ if cb.configMap.Labels == nil {
+ cb.configMap.Labels = make(map[string]string)
+ }
+ cb.configMap.Labels[key] = value
+ return cb
+}
+
+// WithData adds arbitrary data to the ConfigMap
+func (cb *ConfigMapBuilder) WithData(key, value string) *ConfigMapBuilder {
+ cb.configMap.Data[key] = value
+ return cb
+}
+
+// WithToolHiveRegistry adds ToolHive format registry data
+func (cb *ConfigMapBuilder) WithToolHiveRegistry(key string, servers []RegistryServer) *ConfigMapBuilder {
+ // Convert slice to map using server names as keys
+ serverMap := make(map[string]RegistryServer)
+ for _, server := range servers {
+ serverMap[server.Name] = server
+ }
+
+ registryData := ToolHiveRegistryData{
+ Version: "1.0.0",
+ LastUpdated: "2025-01-15T10:30:00Z",
+ Servers: serverMap,
+ RemoteServers: make(map[string]RegistryServer),
+ }
+ jsonData, err := json.MarshalIndent(registryData, "", " ")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to marshal ToolHive registry data")
+ cb.configMap.Data[key] = string(jsonData)
+ return cb
+}
+
+// WithUpstreamRegistry adds upstream MCP format registry data
+func (cb *ConfigMapBuilder) WithUpstreamRegistry(key string, servers map[string]RegistryServer) *ConfigMapBuilder {
+ registryData := UpstreamRegistryData{Servers: servers}
+ jsonData, err := json.MarshalIndent(registryData, "", " ")
+ gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to marshal upstream registry data")
+ cb.configMap.Data[key] = string(jsonData)
+ return cb
+}
+
+// Build returns the constructed ConfigMap
+func (cb *ConfigMapBuilder) Build() *corev1.ConfigMap {
+ return cb.configMap.DeepCopy()
+}
+
+// Create builds and creates the ConfigMap in the cluster
+func (cb *ConfigMapBuilder) Create(h *ConfigMapTestHelper) *corev1.ConfigMap {
+ configMap := cb.Build()
+ err := h.Client.Create(h.Context, configMap)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create ConfigMap")
+ return configMap
+}
+
+// CreateSampleToolHiveRegistry creates a ConfigMap with sample ToolHive registry data
+func (h *ConfigMapTestHelper) CreateSampleToolHiveRegistry(name string) (*corev1.ConfigMap, int) {
+ servers := []RegistryServer{
+ {
+ Name: "filesystem",
+ Description: "File system operations for secure file access",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"filesystem_tool"},
+ Image: "filesystem/server:latest",
+ Tags: []string{"filesystem", "files"},
+ },
+ {
+ Name: "fetch",
+ Description: "Web content fetching with readability processing",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"fetch_tool"},
+ Image: "fetch/server:latest",
+ Tags: []string{"web", "fetch", "readability"},
+ },
+ }
+
+ return h.NewConfigMapBuilder(name).
+ WithToolHiveRegistry("registry.json", servers).
+ Create(h), len(servers)
+}
+
+// GetConfigMap retrieves a ConfigMap by name
+func (h *ConfigMapTestHelper) GetConfigMap(name string) (*corev1.ConfigMap, error) {
+ cm := &corev1.ConfigMap{}
+ err := h.Client.Get(h.Context, types.NamespacedName{
+ Namespace: h.Namespace,
+ Name: name,
+ }, cm)
+ return cm, err
+}
+
+// UpdateConfigMap updates an existing ConfigMap
+func (h *ConfigMapTestHelper) UpdateConfigMap(configMap *corev1.ConfigMap) error {
+ return h.Client.Update(h.Context, configMap)
+}
+
+// DeleteConfigMap deletes a ConfigMap by name
+func (h *ConfigMapTestHelper) DeleteConfigMap(name string) error {
+ cm := &corev1.ConfigMap{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: h.Namespace,
+ },
+ }
+ return h.Client.Delete(h.Context, cm)
+}
+
+// ListConfigMaps returns all ConfigMaps in the namespace
+func (h *ConfigMapTestHelper) ListConfigMaps() (*corev1.ConfigMapList, error) {
+ cmList := &corev1.ConfigMapList{}
+ err := h.Client.List(h.Context, cmList, client.InNamespace(h.Namespace))
+ return cmList, err
+}
+
+// CleanupConfigMaps deletes all test ConfigMaps in the namespace
+func (h *ConfigMapTestHelper) CleanupConfigMaps() error {
+ cmList, err := h.ListConfigMaps()
+ if err != nil {
+ return err
+ }
+
+ for _, cm := range cmList.Items {
+ // Only delete ConfigMaps with our test label
+ if cm.Labels != nil && cm.Labels["test.toolhive.io/suite"] == "operator-e2e" {
+ ginkgo.By(fmt.Sprintf("deleting ConfigMap %s", cm.Name))
+ if err := h.Client.Delete(h.Context, &cm); err != nil {
+ return err
+ }
+ }
+ }
+ return nil
+}
diff --git a/test/e2e/operator/doc.go b/test/e2e/operator/doc.go
new file mode 100644
index 000000000..2004d4acf
--- /dev/null
+++ b/test/e2e/operator/doc.go
@@ -0,0 +1,3 @@
+// Package operator_test provides end-to-end tests for the ToolHive operator controllers.
+// This package tests MCPRegistry and other operator functionality using Ginkgo and Kubernetes APIs.
+package operator_test
diff --git a/test/e2e/operator/factories.go b/test/e2e/operator/factories.go
new file mode 100644
index 000000000..df608ae85
--- /dev/null
+++ b/test/e2e/operator/factories.go
@@ -0,0 +1,448 @@
+package operator_test
+
+import (
+ "context"
+ "crypto/rand"
+ "fmt"
+ "math/big"
+ "time"
+
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+// TestDataFactory provides utilities for generating test data and resources
+type TestDataFactory struct {
+ Client client.Client
+ Context context.Context
+ Namespace string
+}
+
+// NewTestDataFactory creates a new test data factory
+func NewTestDataFactory(ctx context.Context, k8sClient client.Client, namespace string) *TestDataFactory {
+ return &TestDataFactory{
+ Client: k8sClient,
+ Context: ctx,
+ Namespace: namespace,
+ }
+}
+
+// MCPRegistryTemplate represents a template for creating MCPRegistry instances
+type MCPRegistryTemplate struct {
+ NamePrefix string
+ ConfigMapPrefix string
+ SyncInterval string
+ Format string
+ Labels map[string]string
+ Annotations map[string]string
+ ServerCount int
+ WithSyncPolicy bool
+ WithFilter bool
+}
+
+// DefaultMCPRegistryTemplate returns a default template for MCPRegistry creation
+func (*TestDataFactory) DefaultMCPRegistryTemplate() MCPRegistryTemplate {
+ return MCPRegistryTemplate{
+ NamePrefix: "test-registry",
+ ConfigMapPrefix: "test-data",
+ SyncInterval: "1h",
+ Format: mcpv1alpha1.RegistryFormatToolHive,
+ Labels: map[string]string{
+ "test.toolhive.io/suite": "operator-e2e",
+ },
+ Annotations: make(map[string]string),
+ ServerCount: 2,
+ WithSyncPolicy: true,
+ WithFilter: false,
+ }
+}
+
+// CreateMCPRegistryFromTemplate creates an MCPRegistry based on a template
+func (f *TestDataFactory) CreateMCPRegistryFromTemplate(template MCPRegistryTemplate) (
+ *mcpv1alpha1.MCPRegistry, *corev1.ConfigMap, error) {
+ // Generate unique names
+ registryName := f.GenerateUniqueName(template.NamePrefix)
+ configMapName := f.GenerateUniqueName(template.ConfigMapPrefix)
+
+ // Create ConfigMap with test data
+ configMap, err := f.CreateTestConfigMap(configMapName, template.Format, template.ServerCount)
+ if err != nil {
+ return nil, nil, fmt.Errorf("failed to create test ConfigMap: %w", err)
+ }
+
+ // Create MCPRegistry
+ registry := &mcpv1alpha1.MCPRegistry{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: registryName,
+ Namespace: f.Namespace,
+ Labels: copyMap(template.Labels),
+ Annotations: copyMap(template.Annotations),
+ },
+ Spec: mcpv1alpha1.MCPRegistrySpec{
+ Source: mcpv1alpha1.MCPRegistrySource{
+ Type: mcpv1alpha1.RegistrySourceTypeConfigMap,
+ Format: template.Format,
+ ConfigMap: &mcpv1alpha1.ConfigMapSource{
+ Name: configMapName,
+ Key: "registry.json",
+ },
+ },
+ },
+ }
+
+ // Add sync policy if requested
+ if template.WithSyncPolicy {
+ registry.Spec.SyncPolicy = &mcpv1alpha1.SyncPolicy{
+ Interval: template.SyncInterval,
+ }
+ }
+
+ // Add filter if requested
+ if template.WithFilter {
+ registry.Spec.Filter = &mcpv1alpha1.RegistryFilter{
+ NameFilters: &mcpv1alpha1.NameFilter{
+ Include: []string{"*"},
+ Exclude: []string{"test-*"},
+ },
+ }
+ }
+
+ // Create the registry
+ if err := f.Client.Create(f.Context, registry); err != nil {
+ // Clean up ConfigMap if registry creation fails
+ _ = f.Client.Delete(f.Context, configMap)
+ return nil, nil, fmt.Errorf("failed to create MCPRegistry: %w", err)
+ }
+
+ return registry, configMap, nil
+}
+
+// CreateTestConfigMap creates a ConfigMap with test registry data
+func (f *TestDataFactory) CreateTestConfigMap(name, format string, serverCount int) (*corev1.ConfigMap, error) {
+ configMapHelper := NewConfigMapTestHelper(f.Context, f.Client, f.Namespace)
+
+ switch format {
+ case mcpv1alpha1.RegistryFormatToolHive:
+ servers := f.GenerateTestServers(serverCount)
+ return configMapHelper.NewConfigMapBuilder(name).
+ WithToolHiveRegistry("registry.json", servers).
+ Create(configMapHelper), nil
+
+ case mcpv1alpha1.RegistryFormatUpstream:
+ servers := f.GenerateTestServersMap(serverCount)
+ return configMapHelper.NewConfigMapBuilder(name).
+ WithUpstreamRegistry("registry.json", servers).
+ Create(configMapHelper), nil
+
+ default:
+ return nil, fmt.Errorf("unsupported registry format: %s", format)
+ }
+}
+
+// GenerateTestServers generates a slice of test servers for ToolHive format
+func (f *TestDataFactory) GenerateTestServers(count int) []RegistryServer {
+ servers := make([]RegistryServer, count)
+ for i := 0; i < count; i++ {
+ servers[i] = f.GenerateTestServer(i)
+ }
+ return servers
+}
+
+// GenerateTestServersMap generates a map of test servers for upstream format
+func (f *TestDataFactory) GenerateTestServersMap(count int) map[string]RegistryServer {
+ servers := make(map[string]RegistryServer)
+ for i := 0; i < count; i++ {
+ server := f.GenerateTestServer(i)
+ servers[server.Name] = server
+ }
+ return servers
+}
+
+// GenerateTestServer generates a single test server
+func (*TestDataFactory) GenerateTestServer(index int) RegistryServer {
+ serverTypes := []string{"filesystem", "fetch", "database", "search", "email"}
+ transports := []string{"stdio", "sse", "http"}
+
+ serverType := serverTypes[index%len(serverTypes)]
+ transport := transports[index%len(transports)]
+
+ return RegistryServer{
+ Name: fmt.Sprintf("%s-server-%d", serverType, index),
+ Description: fmt.Sprintf("Test %s server for e2e testing", serverType),
+ Tier: "Community",
+ Status: "Active",
+ Transport: transport,
+ Tools: []string{fmt.Sprintf("%s_tool", serverType)},
+ Image: fmt.Sprintf("%s/server:1.%d.0", serverType, index),
+ Tags: []string{serverType, "test", fmt.Sprintf("v1-%d", index)},
+ }
+}
+
+// GenerateUniqueName generates a unique name with timestamp and random suffix
+func (*TestDataFactory) GenerateUniqueName(prefix string) string {
+ timestamp := time.Now().Unix()
+ // Use crypto/rand for secure random number generation
+ randomBig, _ := rand.Int(rand.Reader, big.NewInt(1000))
+ randomSuffix := randomBig.Int64()
+ return fmt.Sprintf("%s-%d-%d", prefix, timestamp, randomSuffix)
+}
+
+// CreateTestSecret creates a test secret for authentication
+func (f *TestDataFactory) CreateTestSecret(name string, data map[string][]byte) (*corev1.Secret, error) {
+ secret := &corev1.Secret{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: f.Namespace,
+ Labels: map[string]string{
+ "test.toolhive.io/suite": "operator-e2e",
+ },
+ },
+ Data: data,
+ }
+
+ if err := f.Client.Create(f.Context, secret); err != nil {
+ return nil, fmt.Errorf("failed to create secret: %w", err)
+ }
+
+ return secret, nil
+}
+
+// TestScenario represents a complete test scenario with multiple resources
+type TestScenario struct {
+ Name string
+ Description string
+ Registries []MCPRegistryTemplate
+ ConfigMaps []string
+ Secrets []string
+}
+
+// CommonTestScenarios returns a set of common test scenarios
+func (f *TestDataFactory) CommonTestScenarios() map[string]TestScenario {
+ return map[string]TestScenario{
+ "basic-registry": {
+ Name: "Basic Registry",
+ Description: "Single registry with ConfigMap source and sync policy",
+ Registries: []MCPRegistryTemplate{
+ f.DefaultMCPRegistryTemplate(),
+ },
+ },
+ "manual-sync-registry": {
+ Name: "Manual Sync Registry",
+ Description: "Registry without automatic sync policy",
+ Registries: []MCPRegistryTemplate{
+ func() MCPRegistryTemplate {
+ template := f.DefaultMCPRegistryTemplate()
+ template.WithSyncPolicy = false
+ return template
+ }(),
+ },
+ },
+ "upstream-format-registry": {
+ Name: "Upstream Format Registry",
+ Description: "Registry using upstream MCP format",
+ Registries: []MCPRegistryTemplate{
+ func() MCPRegistryTemplate {
+ template := f.DefaultMCPRegistryTemplate()
+ template.Format = mcpv1alpha1.RegistryFormatUpstream
+ return template
+ }(),
+ },
+ },
+ "filtered-registry": {
+ Name: "Filtered Registry",
+ Description: "Registry with content filtering",
+ Registries: []MCPRegistryTemplate{
+ func() MCPRegistryTemplate {
+ template := f.DefaultMCPRegistryTemplate()
+ template.WithFilter = true
+ return template
+ }(),
+ },
+ },
+ "multiple-registries": {
+ Name: "Multiple Registries",
+ Description: "Multiple registries with different configurations",
+ Registries: []MCPRegistryTemplate{
+ f.DefaultMCPRegistryTemplate(),
+ func() MCPRegistryTemplate {
+ template := f.DefaultMCPRegistryTemplate()
+ template.NamePrefix = "secondary-registry"
+ template.Format = mcpv1alpha1.RegistryFormatUpstream
+ template.SyncInterval = "30m"
+ return template
+ }(),
+ },
+ },
+ }
+}
+
+// CreateTestScenario creates all resources for a test scenario
+func (f *TestDataFactory) CreateTestScenario(scenario TestScenario) (*TestScenarioResources, error) {
+ resources := &TestScenarioResources{
+ Registries: make([]*mcpv1alpha1.MCPRegistry, 0),
+ ConfigMaps: make([]*corev1.ConfigMap, 0),
+ Secrets: make([]*corev1.Secret, 0),
+ }
+
+ // Create registries
+ for _, template := range scenario.Registries {
+ registry, configMap, err := f.CreateMCPRegistryFromTemplate(template)
+ if err != nil {
+ // Clean up already created resources
+ _ = f.CleanupTestScenarioResources(resources)
+ return nil, fmt.Errorf("failed to create registry from template: %w", err)
+ }
+ resources.Registries = append(resources.Registries, registry)
+ resources.ConfigMaps = append(resources.ConfigMaps, configMap)
+ }
+
+ return resources, nil
+}
+
+// TestScenarioResources holds all resources created for a test scenario
+type TestScenarioResources struct {
+ Registries []*mcpv1alpha1.MCPRegistry
+ ConfigMaps []*corev1.ConfigMap
+ Secrets []*corev1.Secret
+}
+
+// CleanupTestScenarioResources cleans up all resources in a test scenario
+func (f *TestDataFactory) CleanupTestScenarioResources(resources *TestScenarioResources) error {
+ var errors []error
+
+ // Delete registries
+ for _, registry := range resources.Registries {
+ if err := f.Client.Delete(f.Context, registry); err != nil {
+ errors = append(errors, fmt.Errorf("failed to delete registry %s: %w", registry.Name, err))
+ }
+ }
+
+ // Delete ConfigMaps
+ for _, cm := range resources.ConfigMaps {
+ if err := f.Client.Delete(f.Context, cm); err != nil {
+ errors = append(errors, fmt.Errorf("failed to delete ConfigMap %s: %w", cm.Name, err))
+ }
+ }
+
+ // Delete Secrets
+ for _, secret := range resources.Secrets {
+ if err := f.Client.Delete(f.Context, secret); err != nil {
+ errors = append(errors, fmt.Errorf("failed to delete Secret %s: %w", secret.Name, err))
+ }
+ }
+
+ if len(errors) > 0 {
+ return fmt.Errorf("cleanup errors: %v", errors)
+ }
+
+ return nil
+}
+
+// RandomRegistryData generates random registry data for stress testing
+func (f *TestDataFactory) RandomRegistryData(serverCount int) []RegistryServer {
+ servers := make([]RegistryServer, serverCount)
+
+ for i := 0; i < serverCount; i++ {
+ serverName := f.randomServerName()
+ servers[i] = RegistryServer{
+ Name: serverName,
+ Description: f.randomDescription(),
+ Tier: f.randomTier(),
+ Status: "Active",
+ Transport: f.randomTransport(),
+ Tools: []string{fmt.Sprintf("%s_tool", serverName)},
+ Image: fmt.Sprintf("%s/server:%s", serverName, f.randomVersion()),
+ Tags: f.randomTags(),
+ }
+ }
+
+ return servers
+}
+
+// Helper functions for random data generation
+func (*TestDataFactory) randomServerName() string {
+ prefixes := []string{"test", "demo", "sample", "mock", "fake"}
+ suffixes := []string{"server", "service", "tool", "handler", "processor"}
+
+ prefixBig, _ := rand.Int(rand.Reader, big.NewInt(int64(len(prefixes))))
+ suffixBig, _ := rand.Int(rand.Reader, big.NewInt(int64(len(suffixes))))
+ numBig, _ := rand.Int(rand.Reader, big.NewInt(1000))
+
+ prefix := prefixes[prefixBig.Int64()]
+ suffix := suffixes[suffixBig.Int64()]
+
+ return fmt.Sprintf("%s-%s-%d", prefix, suffix, numBig.Int64())
+}
+
+func (*TestDataFactory) randomDescription() string {
+ templates := []string{
+ "A test server for %s operations",
+ "Mock %s implementation for testing",
+ "Sample %s service with basic functionality",
+ "Demo %s tool for development purposes",
+ }
+
+ operations := []string{"file", "network", "database", "authentication", "processing"}
+
+ templateBig, _ := rand.Int(rand.Reader, big.NewInt(int64(len(templates))))
+ operationBig, _ := rand.Int(rand.Reader, big.NewInt(int64(len(operations))))
+
+ template := templates[templateBig.Int64()]
+ operation := operations[operationBig.Int64()]
+
+ return fmt.Sprintf(template, operation)
+}
+
+func (*TestDataFactory) randomVersion() string {
+ majorBig, _ := rand.Int(rand.Reader, big.NewInt(3))
+ minorBig, _ := rand.Int(rand.Reader, big.NewInt(10))
+ patchBig, _ := rand.Int(rand.Reader, big.NewInt(20))
+
+ major := majorBig.Int64() + 1
+ minor := minorBig.Int64()
+ patch := patchBig.Int64()
+
+ return fmt.Sprintf("%d.%d.%d", major, minor, patch)
+}
+
+func (*TestDataFactory) randomTransport() string {
+ transports := []string{"stdio", "sse", "http"}
+ transportBig, _ := rand.Int(rand.Reader, big.NewInt(int64(len(transports))))
+ return transports[transportBig.Int64()]
+}
+
+func (*TestDataFactory) randomTier() string {
+ tiers := []string{"Community", "Official", "Enterprise"}
+ tierBig, _ := rand.Int(rand.Reader, big.NewInt(int64(len(tiers))))
+ return tiers[tierBig.Int64()]
+}
+
+func (*TestDataFactory) randomTags() []string {
+ allTags := []string{"test", "demo", "sample", "mock", "development", "staging", "production"}
+ countBig, _ := rand.Int(rand.Reader, big.NewInt(3))
+ count := int(countBig.Int64()) + 1
+
+ tags := make([]string, count)
+ for i := 0; i < count; i++ {
+ tagBig, _ := rand.Int(rand.Reader, big.NewInt(int64(len(allTags))))
+ tags[i] = allTags[tagBig.Int64()]
+ }
+
+ return tags
+}
+
+// Utility function to copy maps
+func copyMap(m map[string]string) map[string]string {
+ if m == nil {
+ return nil
+ }
+
+ result := make(map[string]string)
+ for k, v := range m {
+ result[k] = v
+ }
+ return result
+}
diff --git a/test/e2e/operator/git_test_helpers.go b/test/e2e/operator/git_test_helpers.go
new file mode 100644
index 000000000..7bb831a2b
--- /dev/null
+++ b/test/e2e/operator/git_test_helpers.go
@@ -0,0 +1,145 @@
+package operator_test
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "time"
+
+ "github.com/onsi/gomega"
+)
+
+// GitTestHelper creates and tracks local Git repositories under one temp directory for tests
+type GitTestHelper struct {
+	ctx          context.Context
+	tempDir      string
+	repositories []*GitTestRepository
+}
+
+// GitTestRepository describes one local test repository and its file:// clone URL
+type GitTestRepository struct {
+	Name     string
+	Path     string
+	CloneURL string
+}
+
+// NewGitTestHelper creates a helper rooted at a fresh temp dir; call CleanupRepositories to remove it
+func NewGitTestHelper(ctx context.Context) *GitTestHelper {
+	tempDir, err := os.MkdirTemp("", "git-test-repos-*")
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	return &GitTestHelper{
+		ctx:          ctx,
+		tempDir:      tempDir,
+		repositories: make([]*GitTestRepository, 0),
+	}
+}
+
+// CreateRepository creates a local Git repository (main branch, one initial commit) and registers it for cleanup
+func (g *GitTestHelper) CreateRepository(name string) *GitTestRepository {
+	repoPath := filepath.Join(g.tempDir, name)
+	err := os.MkdirAll(repoPath, 0750)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	// Initialize the repository with an explicit main branch and a local test identity
+	g.runGitCommand(repoPath, "init", "--initial-branch=main")
+	g.runGitCommand(repoPath, "config", "user.name", "Test User")
+	g.runGitCommand(repoPath, "config", "user.email", "test@example.com")
+
+	// Commit a README so the main branch actually exists
+	initialFile := filepath.Join(repoPath, "README.md")
+	err = os.WriteFile(initialFile, []byte("# Test Repository\n"), 0600)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	g.runGitCommand(repoPath, "add", "README.md")
+	g.runGitCommand(repoPath, "commit", "-m", "Initial commit")
+
+	repo := &GitTestRepository{
+		Name:     name,
+		Path:     repoPath,
+		CloneURL: fmt.Sprintf("file://%s", repoPath), // file:// URL so cloning needs no network or auth
+	}
+
+	g.repositories = append(g.repositories, repo)
+	return repo
+}
+
+// CommitRegistryData writes a ToolHive registry JSON file at the repository root and commits it
+func (g *GitTestHelper) CommitRegistryData(
+	repo *GitTestRepository, filename string, servers []RegistryServer, commitMessage string) {
+	registryData := ToolHiveRegistryData{
+		Version:     "1.0.0",
+		LastUpdated: time.Now().Format(time.RFC3339),
+		Servers:     make(map[string]RegistryServer),
+	}
+
+	for _, server := range servers {
+		registryData.Servers[server.Name] = server
+	}
+
+	jsonData, err := json.MarshalIndent(registryData, "", " ")
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	filePath := filepath.Join(repo.Path, filename)
+	err = os.WriteFile(filePath, jsonData, 0600)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	g.runGitCommand(repo.Path, "add", filename)
+	g.runGitCommand(repo.Path, "commit", "-m", commitMessage)
+}
+
+// CommitRegistryDataAtPath is like CommitRegistryData but writes to a nested path, creating parent dirs
+func (g *GitTestHelper) CommitRegistryDataAtPath(
+	repo *GitTestRepository, filePath string, servers []RegistryServer, commitMessage string) {
+	registryData := ToolHiveRegistryData{
+		Version:     "1.0.0",
+		LastUpdated: time.Now().Format(time.RFC3339),
+		Servers:     make(map[string]RegistryServer),
+	}
+
+	for _, server := range servers {
+		registryData.Servers[server.Name] = server
+	}
+
+	jsonData, err := json.MarshalIndent(registryData, "", " ")
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	fullPath := filepath.Join(repo.Path, filePath)
+	dir := filepath.Dir(fullPath)
+	err = os.MkdirAll(dir, 0750)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	err = os.WriteFile(fullPath, jsonData, 0600)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred())
+
+	g.runGitCommand(repo.Path, "add", filePath)
+	g.runGitCommand(repo.Path, "commit", "-m", commitMessage)
+}
+
+// CreateBranch creates branchName and checks it out (git checkout -b)
+func (g *GitTestHelper) CreateBranch(repo *GitTestRepository, branchName string) {
+	g.runGitCommand(repo.Path, "checkout", "-b", branchName)
+}
+
+// SwitchBranch checks out an already-existing branch in the repository
+func (g *GitTestHelper) SwitchBranch(repo *GitTestRepository, branchName string) {
+	g.runGitCommand(repo.Path, "checkout", branchName)
+}
+
+// CleanupRepositories removes the temp directory and every repository this helper created
+func (g *GitTestHelper) CleanupRepositories() error {
+	return os.RemoveAll(g.tempDir)
+}
+
+// runGitCommand runs a Git command in dir and fails the current test,
+// including the full command line and combined output, on a non-zero exit.
+func (*GitTestHelper) runGitCommand(dir string, args ...string) {
+	cmd := exec.Command("git", args...)
+	cmd.Dir = dir
+	output, err := cmd.CombinedOutput()
+	// No if-guard needed: the expectation passes trivially when err is nil.
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Git command failed: %s\nOutput: %s", cmd.String(), string(output))
+}
diff --git a/test/e2e/operator/helpers_test.go b/test/e2e/operator/helpers_test.go
new file mode 100644
index 000000000..43dcc78af
--- /dev/null
+++ b/test/e2e/operator/helpers_test.go
@@ -0,0 +1,243 @@
+package operator_test
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ apierrors "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+// KubernetesTestHelper bundles the suite's client, context and a target namespace for CRUD and wait helpers
+type KubernetesTestHelper struct {
+	Client    client.Client
+	Context   context.Context
+	Namespace string
+}
+
+// NewKubernetesTestHelper wires the helper to the package-level k8sClient and ctx for the given namespace
+func NewKubernetesTestHelper(namespace string) *KubernetesTestHelper {
+	return &KubernetesTestHelper{
+		Client:    k8sClient,
+		Context:   ctx,
+		Namespace: namespace,
+	}
+}
+
+// CreateMCPRegistry creates an MCPRegistry with the given spec in the helper namespace, failing the test on error
+func (h *KubernetesTestHelper) CreateMCPRegistry(name string, spec mcpv1alpha1.MCPRegistrySpec) *mcpv1alpha1.MCPRegistry {
+	registry := &mcpv1alpha1.MCPRegistry{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: h.Namespace,
+		},
+		Spec: spec,
+	}
+
+	err := h.Client.Create(h.Context, registry)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create MCPRegistry")
+
+	return registry
+}
+
+// GetMCPRegistry fetches the named MCPRegistry from the helper namespace
+func (h *KubernetesTestHelper) GetMCPRegistry(name string) (*mcpv1alpha1.MCPRegistry, error) {
+	registry := &mcpv1alpha1.MCPRegistry{}
+	err := h.Client.Get(h.Context, types.NamespacedName{
+		Namespace: h.Namespace,
+		Name:      name,
+	}, registry)
+	return registry, err
+}
+
+// UpdateMCPRegistry persists modifications to an existing MCPRegistry
+func (h *KubernetesTestHelper) UpdateMCPRegistry(registry *mcpv1alpha1.MCPRegistry) error {
+	return h.Client.Update(h.Context, registry)
+}
+
+// DeleteMCPRegistry deletes the named MCPRegistry from the helper namespace
+func (h *KubernetesTestHelper) DeleteMCPRegistry(name string) error {
+	registry := &mcpv1alpha1.MCPRegistry{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: h.Namespace,
+		},
+	}
+	return h.Client.Delete(h.Context, registry)
+}
+
+// WaitForMCPRegistryPhase polls once per second until the registry reports the given phase or the timeout elapses
+func (h *KubernetesTestHelper) WaitForMCPRegistryPhase(name string, phase mcpv1alpha1.MCPRegistryPhase, timeout time.Duration) {
+	gomega.Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+		registry, err := h.GetMCPRegistry(name)
+		if err != nil {
+			return ""
+		}
+		return registry.Status.Phase
+	}, timeout, time.Second).Should(gomega.Equal(phase), "MCPRegistry should reach phase %s", phase)
+}
+
+// WaitForMCPRegistryCondition polls until the named condition reaches the given status (Unknown while absent or on Get errors)
+func (h *KubernetesTestHelper) WaitForMCPRegistryCondition(
+	name string, conditionType string, status metav1.ConditionStatus, timeout time.Duration) {
+	gomega.Eventually(func() metav1.ConditionStatus {
+		registry, err := h.GetMCPRegistry(name)
+		if err != nil {
+			return metav1.ConditionUnknown
+		}
+
+		for _, condition := range registry.Status.Conditions {
+			if condition.Type == conditionType {
+				return condition.Status
+			}
+		}
+		return metav1.ConditionUnknown
+	},
+		timeout, time.Second).Should(
+		gomega.Equal(status), "MCPRegistry should have condition %s with status %s", conditionType, status)
+}
+
+// WaitForMCPRegistryDeletion polls until a Get for the registry returns NotFound
+func (h *KubernetesTestHelper) WaitForMCPRegistryDeletion(name string, timeout time.Duration) {
+	gomega.Eventually(func() bool {
+		_, err := h.GetMCPRegistry(name)
+		return apierrors.IsNotFound(err)
+	}, timeout, time.Second).Should(gomega.BeTrue(), "MCPRegistry should be deleted")
+}
+
+// CreateConfigMap creates a ConfigMap with the given data in the helper namespace, failing the test on error
+func (h *KubernetesTestHelper) CreateConfigMap(name string, data map[string]string) *corev1.ConfigMap {
+	cm := &corev1.ConfigMap{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: h.Namespace,
+		},
+		Data: data,
+	}
+
+	err := h.Client.Create(h.Context, cm)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create ConfigMap")
+
+	return cm
+}
+
+// GetConfigMap fetches a ConfigMap by name from the helper namespace
+func (h *KubernetesTestHelper) GetConfigMap(name string) (*corev1.ConfigMap, error) {
+	cm := &corev1.ConfigMap{}
+	err := h.Client.Get(h.Context, types.NamespacedName{
+		Namespace: h.Namespace,
+		Name:      name,
+	}, cm)
+	return cm, err
+}
+
+// WaitForConfigMap polls until the named ConfigMap exists, then returns it
+func (h *KubernetesTestHelper) WaitForConfigMap(name string, timeout time.Duration) *corev1.ConfigMap {
+	var cm *corev1.ConfigMap
+	gomega.Eventually(func() error {
+		var err error
+		cm, err = h.GetConfigMap(name)
+		return err
+	}, timeout, time.Second).Should(gomega.Succeed(), "ConfigMap should be created")
+	return cm
+}
+
+// WaitForConfigMapData polls until the ConfigMap's key holds expectedValue
+func (h *KubernetesTestHelper) WaitForConfigMapData(name, key, expectedValue string, timeout time.Duration) {
+	gomega.Eventually(func() string {
+		cm, err := h.GetConfigMap(name)
+		if err != nil {
+			return ""
+		}
+		return cm.Data[key]
+	}, timeout, time.Second).Should(gomega.Equal(expectedValue), "ConfigMap should contain expected data")
+}
+
+// CreateSecret creates a Secret with the given data in the helper namespace, failing the test on error
+func (h *KubernetesTestHelper) CreateSecret(name string, data map[string][]byte) *corev1.Secret {
+	secret := &corev1.Secret{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: h.Namespace,
+		},
+		Data: data,
+	}
+
+	err := h.Client.Create(h.Context, secret)
+	gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create Secret")
+
+	return secret
+}
+
+// CleanupResources removes all test resources in the namespace
+func (h *KubernetesTestHelper) CleanupResources() {
+ // Delete all MCPRegistries
+ registryList := &mcpv1alpha1.MCPRegistryList{}
+ err := h.Client.List(h.Context, registryList, client.InNamespace(h.Namespace))
+ if err == nil {
+ for _, registry := range registryList.Items {
+ _ = h.Client.Delete(h.Context, ®istry)
+ }
+ }
+
+ // Delete all ConfigMaps
+ cmList := &corev1.ConfigMapList{}
+ err = h.Client.List(h.Context, cmList, client.InNamespace(h.Namespace))
+ if err == nil {
+ for _, cm := range cmList.Items {
+ _ = h.Client.Delete(h.Context, &cm)
+ }
+ }
+
+ // Delete all Secrets
+ secretList := &corev1.SecretList{}
+ err = h.Client.List(h.Context, secretList, client.InNamespace(h.Namespace))
+ if err == nil {
+ for _, secret := range secretList.Items {
+ _ = h.Client.Delete(h.Context, &secret)
+ }
+ }
+}
+
+// TriggerManualSync stamps the manual-sync annotation with the current Unix time to request a fresh sync
+func (h *KubernetesTestHelper) TriggerManualSync(registryName string) error {
+	registry, err := h.GetMCPRegistry(registryName)
+	if err != nil {
+		return err
+	}
+
+	if registry.Annotations == nil {
+		registry.Annotations = make(map[string]string)
+	}
+	registry.Annotations["toolhive.stacklok.dev/manual-sync"] = fmt.Sprintf("%d", time.Now().Unix())
+
+	return h.UpdateMCPRegistry(registry)
+}
+
+// WaitForSyncCompletion polls until no Syncing condition is True and the registry settles in Ready or Failed
+func (h *KubernetesTestHelper) WaitForSyncCompletion(registryName string, timeout time.Duration) {
+	gomega.Eventually(func() bool {
+		registry, err := h.GetMCPRegistry(registryName)
+		if err != nil {
+			return false
+		}
+
+		// A True "Syncing" condition means the operation is still running
+		for _, condition := range registry.Status.Conditions {
+			if condition.Type == "Syncing" && condition.Status == metav1.ConditionTrue {
+				return false // Still syncing
+			}
+		}
+
+		// Sync should be complete (either success or failure)
+		return registry.Status.Phase == mcpv1alpha1.MCPRegistryPhaseReady ||
+			registry.Status.Phase == mcpv1alpha1.MCPRegistryPhaseFailed
+	}, timeout, time.Second).Should(gomega.BeTrue(), "Sync operation should complete")
+}
diff --git a/test/e2e/operator/k8s_helpers.go b/test/e2e/operator/k8s_helpers.go
new file mode 100644
index 000000000..e4532ae34
--- /dev/null
+++ b/test/e2e/operator/k8s_helpers.go
@@ -0,0 +1,83 @@
+package operator_test
+
+import (
+ "context"
+
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// K8sResourceTestHelper provides read-only lookups of core Kubernetes resources in a single namespace
+type K8sResourceTestHelper struct {
+	ctx       context.Context
+	k8sClient client.Client
+	namespace string
+}
+
+// NewK8sResourceTestHelper builds a helper bound to the given client, context and namespace
+func NewK8sResourceTestHelper(ctx context.Context, k8sClient client.Client, namespace string) *K8sResourceTestHelper {
+	return &K8sResourceTestHelper{
+		ctx:       ctx,
+		k8sClient: k8sClient,
+		namespace: namespace,
+	}
+}
+
+// GetDeployment fetches a Deployment by name from the helper namespace
+func (h *K8sResourceTestHelper) GetDeployment(name string) (*appsv1.Deployment, error) {
+	deployment := &appsv1.Deployment{}
+	err := h.k8sClient.Get(h.ctx, types.NamespacedName{
+		Namespace: h.namespace,
+		Name:      name,
+	}, deployment)
+	return deployment, err
+}
+
+// GetService fetches a Service by name from the helper namespace
+func (h *K8sResourceTestHelper) GetService(name string) (*corev1.Service, error) {
+	service := &corev1.Service{}
+	err := h.k8sClient.Get(h.ctx, types.NamespacedName{
+		Namespace: h.namespace,
+		Name:      name,
+	}, service)
+	return service, err
+}
+
+// GetConfigMap fetches a ConfigMap by name from the helper namespace
+func (h *K8sResourceTestHelper) GetConfigMap(name string) (*corev1.ConfigMap, error) {
+	configMap := &corev1.ConfigMap{}
+	err := h.k8sClient.Get(h.ctx, types.NamespacedName{
+		Namespace: h.namespace,
+		Name:      name,
+	}, configMap)
+	return configMap, err
+}
+
+// DeploymentExists reports whether GetDeployment succeeds (any error, not only NotFound, counts as absent)
+func (h *K8sResourceTestHelper) DeploymentExists(name string) bool {
+	_, err := h.GetDeployment(name)
+	return err == nil
+}
+
+// ServiceExists reports whether GetService succeeds (any error counts as absent)
+func (h *K8sResourceTestHelper) ServiceExists(name string) bool {
+	_, err := h.GetService(name)
+	return err == nil
+}
+
+// IsDeploymentReady reports whether the deployment desires at least one replica and all of them are Ready
+func (h *K8sResourceTestHelper) IsDeploymentReady(name string) bool {
+	deployment, err := h.GetDeployment(name)
+	if err != nil {
+		return false
+	}
+
+	// Require a desired replica count, then compare ReadyReplicas (not AvailableReplicas) against it
+	if deployment.Spec.Replicas == nil || *deployment.Spec.Replicas == 0 {
+		return false
+	}
+
+	return deployment.Status.ReadyReplicas == *deployment.Spec.Replicas
+}
diff --git a/test/e2e/operator/registry_automatic_sync_test.go b/test/e2e/operator/registry_automatic_sync_test.go
new file mode 100644
index 000000000..9c48356ad
--- /dev/null
+++ b/test/e2e/operator/registry_automatic_sync_test.go
@@ -0,0 +1,400 @@
+package operator_test
+
+import (
+ "context"
+ "encoding/json"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+ "github.com/stacklok/toolhive/cmd/thv-operator/controllers"
+ "github.com/stacklok/toolhive/cmd/thv-operator/pkg/sync"
+)
+
+var _ = Describe("MCPRegistry Automatic Sync with ConfigMap", Label("k8s", "registry"), func() {
+ var (
+ ctx context.Context
+ registryHelper *MCPRegistryTestHelper
+ configMapHelper *ConfigMapTestHelper
+ statusHelper *StatusTestHelper
+ testNamespace string
+ originalSyncRequeue time.Duration
+ originalControllerRetry time.Duration
+ )
+ const (
+ shortSyncRequeue = time.Second * 10
+ shortControllerRetry = time.Second * 10
+ )
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ testNamespace = createTestNamespace(ctx)
+
+ // Initialize helpers
+ registryHelper = NewMCPRegistryTestHelper(ctx, k8sClient, testNamespace)
+ configMapHelper = NewConfigMapTestHelper(ctx, k8sClient, testNamespace)
+ statusHelper = NewStatusTestHelper(ctx, k8sClient, testNamespace)
+
+ // Store original values to restore later
+ originalSyncRequeue = sync.DefaultSyncRequeueAfter
+ originalControllerRetry = controllers.DefaultControllerRetryAfter
+
+ By("Setting shorter retry interval for faster testing")
+ // Set shorter intervals for faster test execution
+ sync.DefaultSyncRequeueAfter = shortSyncRequeue
+ controllers.DefaultControllerRetryAfter = shortControllerRetry
+ })
+
+ AfterEach(func() {
+ // Clean up test resources
+ Expect(registryHelper.CleanupRegistries()).To(Succeed())
+ Expect(configMapHelper.CleanupConfigMaps()).To(Succeed())
+ deleteTestNamespace(ctx, testNamespace)
+ // Restore original values when test completes
+ sync.DefaultSyncRequeueAfter = originalSyncRequeue
+ controllers.DefaultControllerRetryAfter = originalControllerRetry
+ })
+
+ Context("Automatic Sync Scenarios", func() {
+ var (
+ registryName string
+ configMapName string
+ originalServers []RegistryServer
+ updatedServers []RegistryServer
+ )
+
+ BeforeEach(func() {
+ names := NewUniqueNames("auto-sync")
+ registryName = names.RegistryName
+ configMapName = names.ConfigMapName
+
+ // Create test registry data
+ originalServers = CreateOriginalTestServers()
+ // Create updated registry data (for later tests)
+ updatedServers = CreateUpdatedTestServers()
+ })
+
+ It("should perform automatic sync at configured intervals", func() {
+ By("Creating a ConfigMap with registry data")
+ configMap := configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", originalServers).
+ Create(configMapHelper)
+
+ By("Creating an MCPRegistry with short sync interval")
+ mcpRegistry := CreateMCPRegistryWithSyncPolicy(registryName, testNamespace,
+ "Auto Sync Test Registry", configMapName, "10s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for initial sync to complete")
+ statusHelper.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, 30*time.Second)
+ // Capture first sync time
+ firstSyncRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, firstSyncRegistry)).To(Succeed())
+
+ Expect(firstSyncRegistry.Status).NotTo(BeNil())
+ Expect(firstSyncRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ firstSyncTime := firstSyncRegistry.Status.SyncStatus.LastSyncTime
+ Expect(firstSyncTime).NotTo(BeNil())
+ serverCount := firstSyncRegistry.Status.SyncStatus.ServerCount
+ Expect(serverCount).To(Equal(1)) // Original registry has 1 server
+
+ By("Verifying initial storage ConfigMap was created")
+ storageConfigMapName := firstSyncRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ By("Verifying storage data matches original ConfigMap")
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+
+ By("Updating the source ConfigMap")
+ Expect(UpdateConfigMapWithServers(configMap, updatedServers)).To(Succeed())
+ Expect(k8sClient.Update(ctx, configMap)).To(Succeed())
+
+ By("Waiting for automatic re-sync (should happen within 15s)")
+ Eventually(func() bool {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return false
+ }
+
+ // Check if sync time was updated and server count changed
+ if registry.Status.SyncStatus == nil {
+ return false
+ }
+
+ newSyncTime := registry.Status.SyncStatus.LastSyncTime
+ newServerCount := registry.Status.SyncStatus.ServerCount
+
+ return newSyncTime != nil &&
+ newSyncTime.After(firstSyncTime.Time) &&
+ newServerCount == 2 // Updated registry has 2 servers
+ }, 20*time.Second, 2*time.Second).Should(BeTrue(), "Registry should automatically re-sync within interval")
+
+ By("Verifying updated storage data matches new ConfigMap")
+ updatedStorageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, updatedStorageConfigMap)).To(Succeed())
+
+ var newStoredRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(updatedStorageConfigMap.Data["registry.json"]), &newStoredRegistry)).To(Succeed())
+
+ By("Storage should contain updated registry data")
+ verifyServerContent(newStoredRegistry, updatedServers)
+ })
+
+ It("should retry failed syncs and increment attempt counter", func() {
+ By("Creating an MCPRegistry without the source ConfigMap (sync will fail)")
+ mcpRegistry := CreateMCPRegistryWithSyncPolicy(registryName, testNamespace,
+ "Retry Test Registry", configMapName, "5s") // This ConfigMap doesn't exist yet
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to fail")
+ statusHelper.WaitForPhase(registryName, mcpv1alpha1.MCPRegistryPhaseFailed, 30*time.Second)
+
+ // Verify attempt counter incremented
+ failedRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, failedRegistry)).To(Succeed())
+
+ Expect(failedRegistry.Status.Phase).To(Equal(mcpv1alpha1.MCPRegistryPhaseFailed))
+ Expect(failedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseFailed))
+ initialAttemptCount := failedRegistry.Status.SyncStatus.AttemptCount
+ Expect(initialAttemptCount).To(BeNumerically(">", 0))
+
+ By("Waiting for retry attempt and verifying attempt counter increments")
+ Eventually(func() int {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return -1
+ }
+
+ if registry.Status.SyncStatus == nil {
+ return -1
+ }
+
+ return registry.Status.SyncStatus.AttemptCount
+ }, 15*time.Second, 2*time.Second).Should(BeNumerically(">", initialAttemptCount),
+ "Attempt count should increment on retry")
+
+ By("Creating the missing ConfigMap")
+ _ = configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", originalServers).
+ Create(configMapHelper)
+
+ By("Waiting for sync to succeed after ConfigMap creation")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ By("Verifying sync data is now correct")
+ successRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, successRegistry)).To(Succeed())
+
+ Expect(successRegistry.Status.SyncStatus.ServerCount).To(Equal(1))
+ Expect(successRegistry.Status.SyncStatus.LastSyncTime).NotTo(BeNil())
+
+ By("Verifying storage ConfigMap was created with correct data")
+ storageConfigMapName := successRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+ })
+
+ It("should fail sync when source ConfigMap is deleted after successful sync", func() {
+ By("Creating a ConfigMap with registry data")
+ configMap := configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", originalServers).
+ Create(configMapHelper)
+
+ By("Creating an MCPRegistry with automatic sync")
+ mcpRegistry := CreateMCPRegistryWithSyncPolicy(registryName, testNamespace,
+ "ConfigMap Deletion Test Registry", configMapName, "8s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for initial sync to complete")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ // Capture successful sync state
+ successRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, successRegistry)).To(Succeed())
+
+ successSyncTime := successRegistry.Status.SyncStatus.LastSyncTime
+ successServerCount := successRegistry.Status.SyncStatus.ServerCount
+ successSyncHash := successRegistry.Status.SyncStatus.LastSyncHash
+
+ Expect(successServerCount).To(Equal(1))
+ Expect(successSyncTime).NotTo(BeNil())
+ Expect(successSyncHash).NotTo(BeEmpty())
+
+ By("Verifying storage ConfigMap exists with correct data")
+ storageConfigMapName := successRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+
+ By("Deleting the source ConfigMap")
+ Expect(k8sClient.Delete(ctx, configMap)).To(Succeed())
+
+ By("Waiting for sync to fail due to missing ConfigMap")
+ statusHelper.WaitForPhase(registryName, mcpv1alpha1.MCPRegistryPhaseFailed, 20*time.Second)
+
+ By("Verifying sync failure preserves previous successful sync data")
+ failedRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, failedRegistry)).To(Succeed())
+
+ // Previous sync data should be preserved
+ Expect(failedRegistry.Status.SyncStatus.LastSyncTime).To(Equal(successSyncTime))
+ Expect(failedRegistry.Status.SyncStatus.LastSyncHash).To(Equal(successSyncHash))
+ Expect(failedRegistry.Status.SyncStatus.ServerCount).To(Equal(successServerCount))
+ Expect(failedRegistry.Status.SyncStatus.AttemptCount).To(BeNumerically(">", 0))
+
+ By("Verifying storage ConfigMap still exists with previous data")
+ preservedStorageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, preservedStorageConfigMap)).To(Succeed())
+
+ var preservedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(preservedStorageConfigMap.Data["registry.json"]), &preservedRegistry)).To(Succeed())
+ verifyServerContent(preservedRegistry, originalServers)
+
+ By("Verifying overall registry phase reflects the failure")
+ Expect(failedRegistry.Status.Phase).To(Equal(mcpv1alpha1.MCPRegistryPhaseFailed))
+ })
+
+ It("should verify persistence data matches original ConfigMap content exactly", func() {
+ By("Creating a complex ConfigMap with multiple servers and metadata")
+ complexServers := CreateComplexTestServers()
+
+ _ = configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", complexServers).
+ Create(configMapHelper)
+
+ By("Creating an MCPRegistry")
+ mcpRegistry := CreateMCPRegistryWithSyncPolicy(registryName, testNamespace,
+ "Content Verification Registry", configMapName, "30s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to complete")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ By("Retrieving and verifying storage ConfigMap content")
+ registry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry)).To(Succeed())
+
+ storageConfigMapName := registry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+
+ By("Verifying exact content match")
+ Expect(registry.Status.SyncStatus.ServerCount).To(Equal(2))
+ verifyServerContent(storedRegistry, complexServers)
+
+ // Verify metadata
+ Expect(storedRegistry.Version).To(Equal("1.0.0"))
+
+ By("Verifying hash consistency")
+ Expect(registry.Status.SyncStatus.LastSyncHash).NotTo(BeEmpty())
+
+ By("Verifying timing constants are accessible and configurable from test code")
+ // These variables can be used in tests to adjust timeout expectations and behavior
+ syncRequeueTime := sync.DefaultSyncRequeueAfter
+ controllerRetryTime := controllers.DefaultControllerRetryAfter
+
+ // Verify default values
+ Expect(syncRequeueTime).To(Equal(shortSyncRequeue))
+ Expect(controllerRetryTime).To(Equal(shortControllerRetry))
+
+ // Verify constants are also available
+ Expect(sync.DefaultSyncRequeueAfterConstant).To(Equal(5 * time.Minute))
+ Expect(controllers.DefaultControllerRetryAfterConstant).To(Equal(5 * time.Minute))
+ })
+ })
+})
diff --git a/test/e2e/operator/registry_filtering_test.go b/test/e2e/operator/registry_filtering_test.go
new file mode 100644
index 000000000..317f7a9ac
--- /dev/null
+++ b/test/e2e/operator/registry_filtering_test.go
@@ -0,0 +1,408 @@
+package operator_test
+
+import (
+ "context"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+var _ = Describe("MCPRegistry Filtering", Label("k8s", "registry"), func() {
+ var (
+ ctx context.Context
+ registryHelper *MCPRegistryTestHelper
+ configMapHelper *ConfigMapTestHelper
+ statusHelper *StatusTestHelper
+ timingHelper *TimingTestHelper
+ k8sHelper *K8sResourceTestHelper
+ testNamespace string
+ )
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ testNamespace = createTestNamespace(ctx)
+
+ // Initialize helpers
+ registryHelper = NewMCPRegistryTestHelper(ctx, k8sClient, testNamespace)
+ configMapHelper = NewConfigMapTestHelper(ctx, k8sClient, testNamespace)
+ statusHelper = NewStatusTestHelper(ctx, k8sClient, testNamespace)
+ timingHelper = NewTimingTestHelper(ctx, k8sClient)
+ k8sHelper = NewK8sResourceTestHelper(ctx, k8sClient, testNamespace)
+ })
+
+ AfterEach(func() {
+ // Clean up test resources
+ Expect(registryHelper.CleanupRegistries()).To(Succeed())
+ Expect(configMapHelper.CleanupConfigMaps()).To(Succeed())
+ deleteTestNamespace(ctx, testNamespace)
+ })
+
+ Context("Name-based filtering", func() {
+ var configMap *corev1.ConfigMap
+
+ BeforeEach(func() {
+ // Create ConfigMap with multiple servers for filtering tests
+ configMap = configMapHelper.NewConfigMapBuilder("filter-test-config").
+ WithToolHiveRegistry("registry.json", []RegistryServer{
+ {
+ Name: "production-server",
+ Description: "Production server",
+ Tier: "Official",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"prod_tool"},
+ Image: "test/prod:1.0.0",
+ Tags: []string{"production", "stable"},
+ },
+ {
+ Name: "test-server-alpha",
+ Description: "Test server alpha",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "streamable-http",
+ Tools: []string{"test_tool_alpha"},
+ Image: "test/alpha:1.0.0",
+ Tags: []string{"testing", "experimental"},
+ },
+ {
+ Name: "test-server-beta",
+ Description: "Test server beta",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"test_tool_beta"},
+ Image: "test/beta:1.0.0",
+ Tags: []string{"testing", "beta"},
+ },
+ {
+ Name: "dev-server",
+ Description: "Development server",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "sse",
+ Tools: []string{"dev_tool"},
+ Image: "test/dev:1.0.0",
+ Tags: []string{"development", "unstable"},
+ },
+ }).
+ Create(configMapHelper)
+ })
+
+ It("should apply name include filters correctly", func() {
+ // Create registry with name include filter
+ registry := registryHelper.NewRegistryBuilder("name-include-test").
+ WithConfigMapSource(configMap.Name, "registry.json").
+ WithNameIncludeFilter([]string{"production-*", "dev-*"}).
+ Create(registryHelper)
+
+ // Wait for registry initialization
+ registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)
+
+ // Verify filtering applied - should include only production-server and dev-server
+ updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
+ Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(2)) // Only production-server and dev-server
+
+ // Verify storage contains filtered content
+ storageConfigMapName := updatedRegistry.Status.StorageRef.ConfigMapRef.Name
+ storageConfigMap, err := k8sHelper.GetConfigMap(storageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+
+ filteredData := storageConfigMap.Data["registry.json"]
+ Expect(filteredData).To(ContainSubstring("production-server"))
+ Expect(filteredData).To(ContainSubstring("dev-server"))
+ Expect(filteredData).NotTo(ContainSubstring("test-server-alpha"))
+ Expect(filteredData).NotTo(ContainSubstring("test-server-beta"))
+ })
+
+ It("should apply name exclude filters correctly", func() {
+ // Create registry with name exclude filter
+ registry := registryHelper.NewRegistryBuilder("name-exclude-test").
+ WithConfigMapSource(configMap.Name, "registry.json").
+ WithNameExcludeFilter([]string{"test-*"}).
+ Create(registryHelper)
+
+ // Wait for registry initialization
+ registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)
+
+ // Verify filtering applied - should exclude test-server-alpha and test-server-beta
+ updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
+ Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(2)) // Only production-server and dev-server
+
+ // Verify storage contains filtered content
+ storageConfigMapName := updatedRegistry.Status.StorageRef.ConfigMapRef.Name
+ storageConfigMap, err := k8sHelper.GetConfigMap(storageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+
+ filteredData := storageConfigMap.Data["registry.json"]
+ Expect(filteredData).To(ContainSubstring("production-server"))
+ Expect(filteredData).To(ContainSubstring("dev-server"))
+ Expect(filteredData).NotTo(ContainSubstring("test-server-alpha"))
+ Expect(filteredData).NotTo(ContainSubstring("test-server-beta"))
+ })
+
+ It("should apply both name include and exclude filters correctly", func() {
+ // Create registry with both include and exclude filters
+ registry := registryHelper.NewRegistryBuilder("name-include-exclude-test").
+ WithConfigMapSource(configMap.Name, "registry.json").
+ WithNameIncludeFilter([]string{"*-server*"}). // Include all servers
+ WithNameExcludeFilter([]string{"test-*", "dev-*"}). // Exclude test and dev servers
+ Create(registryHelper)
+
+ // Wait for registry initialization
+ registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)
+
+ // Verify filtering applied - should only include production-server
+ updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
+ Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(1)) // Only production-server
+
+ // Verify storage contains filtered content
+ storageConfigMapName := updatedRegistry.Status.StorageRef.ConfigMapRef.Name
+ storageConfigMap, err := k8sHelper.GetConfigMap(storageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+
+ filteredData := storageConfigMap.Data["registry.json"]
+ Expect(filteredData).To(ContainSubstring("production-server"))
+ Expect(filteredData).NotTo(ContainSubstring("test-server-alpha"))
+ Expect(filteredData).NotTo(ContainSubstring("test-server-beta"))
+ Expect(filteredData).NotTo(ContainSubstring("dev-server"))
+ })
+ })
+
+ Context("Tag-based filtering", func() {
+ var configMap *corev1.ConfigMap
+
+ BeforeEach(func() {
+ // Create ConfigMap with servers having different tags
+ configMap = configMapHelper.NewConfigMapBuilder("tag-filter-config").
+ WithToolHiveRegistry("registry.json", []RegistryServer{
+ {
+ Name: "stable-server",
+ Description: "Stable production server",
+ Tier: "Official",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"stable_tool"},
+ Image: "test/stable:1.0.0",
+ Tags: []string{"production", "stable", "verified"},
+ },
+ {
+ Name: "beta-server",
+ Description: "Beta testing server",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "streamable-http",
+ Tools: []string{"beta_tool"},
+ Image: "test/beta:1.0.0",
+ Tags: []string{"testing", "beta"},
+ },
+ {
+ Name: "experimental-server",
+ Description: "Experimental server",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"experimental_tool"},
+ Image: "test/experimental:1.0.0",
+ Tags: []string{"experimental", "unstable"},
+ },
+ }).
+ Create(configMapHelper)
+ })
+
+ It("should apply tag include filters correctly", func() {
+ // Create registry with tag include filter
+ registry := registryHelper.NewRegistryBuilder("tag-include-test").
+ WithConfigMapSource(configMap.Name, "registry.json").
+ WithTagIncludeFilter([]string{"production", "testing"}).
+ Create(registryHelper)
+
+ // Wait for registry initialization
+ registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)
+
+ // Verify filtering applied - should include stable-server and beta-server
+ updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
+ Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(2)) // stable-server and beta-server
+
+ // Verify storage contains filtered content
+ storageConfigMapName := updatedRegistry.Status.StorageRef.ConfigMapRef.Name
+ storageConfigMap, err := k8sHelper.GetConfigMap(storageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+
+ filteredData := storageConfigMap.Data["registry.json"]
+ Expect(filteredData).To(ContainSubstring("stable-server"))
+ Expect(filteredData).To(ContainSubstring("beta-server"))
+ Expect(filteredData).NotTo(ContainSubstring("experimental-server"))
+ })
+
+ It("should apply tag exclude filters correctly", func() {
+ // Create registry with tag exclude filter
+ registry := registryHelper.NewRegistryBuilder("tag-exclude-test").
+ WithConfigMapSource(configMap.Name, "registry.json").
+ WithTagExcludeFilter([]string{"experimental", "unstable"}).
+ Create(registryHelper)
+
+ // Wait for registry initialization
+ registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)
+
+ // Verify filtering applied - should exclude experimental-server
+ updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+
+ Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
+ Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(2)) // stable-server and beta-server
+
+ // Verify storage contains filtered content
+ storageConfigMapName := updatedRegistry.Status.StorageRef.ConfigMapRef.Name
+ storageConfigMap, err := k8sHelper.GetConfigMap(storageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+
+ filteredData := storageConfigMap.Data["registry.json"]
+ Expect(filteredData).To(ContainSubstring("stable-server"))
+ Expect(filteredData).To(ContainSubstring("beta-server"))
+ Expect(filteredData).NotTo(ContainSubstring("experimental-server"))
+ })
+ })
+
+ Context("Filter updates", func() {
+ var configMap *corev1.ConfigMap
+ var registry *mcpv1alpha1.MCPRegistry
+
+ BeforeEach(func() {
+ // Create ConfigMap with multiple servers
+ configMap = configMapHelper.NewConfigMapBuilder("update-filter-config").
+ WithToolHiveRegistry("registry.json", []RegistryServer{
+ {
+ Name: "server-alpha",
+ Description: "Server alpha",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"alpha_tool"},
+ Image: "test/alpha:1.0.0",
+ Tags: []string{"alpha", "testing"},
+ },
+ {
+ Name: "server-beta",
+ Description: "Server beta",
+ Tier: "Community",
+ Status: "Active",
+ Transport: "streamable-http",
+ Tools: []string{"beta_tool"},
+ Image: "test/beta:1.0.0",
+ Tags: []string{"beta", "testing"},
+ },
+ {
+ Name: "server-prod",
+ Description: "Production server",
+ Tier: "Official",
+ Status: "Active",
+ Transport: "stdio",
+ Tools: []string{"prod_tool"},
+ Image: "test/prod:1.0.0",
+ Tags: []string{"production", "stable"},
+ },
+ }).
+ Create(configMapHelper)
+
+ // Create registry without any sync policy (manual sync only)
+ registry = registryHelper.NewRegistryBuilder("filter-update-test").
+ WithConfigMapSource(configMap.Name, "registry.json").
+ WithNameIncludeFilter([]string{"server-alpha", "server-beta"}). // Initially include alpha and beta
+ Create(registryHelper)
+
+ // Wait for initial sync
+ registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)
+ })
+
+ It("should update storage content when filters are changed", func() {
+ // Verify initial filtering - should have 2 servers
+ updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+ Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(2))
+
+ // Get initial storage content
+ initialStorageConfigMapName := updatedRegistry.Status.StorageRef.ConfigMapRef.Name
+ initialStorageConfigMap, err := k8sHelper.GetConfigMap(initialStorageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+ initialData := initialStorageConfigMap.Data["registry.json"]
+ Expect(initialData).To(ContainSubstring("server-alpha"))
+ Expect(initialData).To(ContainSubstring("server-beta"))
+ Expect(initialData).NotTo(ContainSubstring("server-prod"))
+
+ By("updating the filter to include all servers")
+ // Update registry filter to include all servers
+ updatedRegistry.Spec.Filter.NameFilters.Include = []string{"*"}
+ Expect(registryHelper.UpdateRegistry(updatedRegistry)).To(Succeed())
+
+ // Wait for sync to complete with new filter
+ timingHelper.WaitForControllerReconciliation(func() interface{} {
+ currentRegistry, err := registryHelper.GetRegistry(registry.Name)
+ if err != nil {
+ return false
+ }
+				return currentRegistry.Status.SyncStatus != nil && currentRegistry.Status.SyncStatus.ServerCount == 3 // All 3 servers now included
+ }).Should(BeTrue(), "Registry should sync with updated filter")
+
+ By("verifying storage content reflects the filter change")
+ // Verify updated storage content
+ finalRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+ finalStorageConfigMapName := finalRegistry.Status.StorageRef.ConfigMapRef.Name
+ finalStorageConfigMap, err := k8sHelper.GetConfigMap(finalStorageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+ finalData := finalStorageConfigMap.Data["registry.json"]
+ Expect(finalData).To(ContainSubstring("server-alpha"))
+ Expect(finalData).To(ContainSubstring("server-beta"))
+ Expect(finalData).To(ContainSubstring("server-prod")) // Now included
+
+ By("updating the filter to exclude beta and alpha servers")
+ // Update filter again to exclude alpha and beta
+ finalRegistry.Spec.Filter.NameFilters = &mcpv1alpha1.NameFilter{
+ Include: []string{"*"},
+ Exclude: []string{"*-alpha", "*-beta"},
+ }
+ Expect(registryHelper.UpdateRegistry(finalRegistry)).To(Succeed())
+
+ // Wait for sync to complete with new exclusion filter
+ timingHelper.WaitForControllerReconciliation(func() interface{} {
+ currentRegistry, err := registryHelper.GetRegistry(registry.Name)
+ if err != nil {
+ return false
+ }
+				return currentRegistry.Status.SyncStatus != nil && currentRegistry.Status.SyncStatus.ServerCount == 1 // Only server-prod
+ }).Should(BeTrue(), "Registry should sync with updated exclusion filter")
+
+ By("verifying final storage content reflects the exclusion")
+ // Verify final storage content
+ endRegistry, err := registryHelper.GetRegistry(registry.Name)
+ Expect(err).NotTo(HaveOccurred())
+ endStorageConfigMapName := endRegistry.Status.StorageRef.ConfigMapRef.Name
+ endStorageConfigMap, err := k8sHelper.GetConfigMap(endStorageConfigMapName)
+ Expect(err).NotTo(HaveOccurred())
+ endData := endStorageConfigMap.Data["registry.json"]
+ Expect(endData).NotTo(ContainSubstring("server-alpha"))
+ Expect(endData).NotTo(ContainSubstring("server-beta"))
+ Expect(endData).To(ContainSubstring("server-prod")) // Only this remains
+ })
+ })
+})
diff --git a/test/e2e/operator/registry_git_automatic_sync_test.go b/test/e2e/operator/registry_git_automatic_sync_test.go
new file mode 100644
index 000000000..5bc72dd4d
--- /dev/null
+++ b/test/e2e/operator/registry_git_automatic_sync_test.go
@@ -0,0 +1,421 @@
+package operator_test
+
+import (
+ "context"
+ "encoding/json"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+ "github.com/stacklok/toolhive/cmd/thv-operator/controllers"
+ "github.com/stacklok/toolhive/cmd/thv-operator/pkg/sync"
+)
+
+var _ = Describe("MCPRegistry Git Automatic Sync", Label("k8s", "registry"), func() {
+ var (
+ ctx context.Context
+ registryHelper *MCPRegistryTestHelper
+ gitHelper *GitTestHelper
+ statusHelper *StatusTestHelper
+ testNamespace string
+ originalSyncRequeue time.Duration
+ originalControllerRetry time.Duration
+ )
+ const (
+ shortSyncRequeue = time.Second * 10
+ shortControllerRetry = time.Second * 10
+ )
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ testNamespace = createTestNamespace(ctx)
+
+ // Initialize helpers
+ registryHelper = NewMCPRegistryTestHelper(ctx, k8sClient, testNamespace)
+ gitHelper = NewGitTestHelper(ctx)
+ statusHelper = NewStatusTestHelper(ctx, k8sClient, testNamespace)
+
+ // Store original values to restore later
+ originalSyncRequeue = sync.DefaultSyncRequeueAfter
+ originalControllerRetry = controllers.DefaultControllerRetryAfter
+
+ By("Setting shorter retry interval for faster testing")
+ // Set shorter intervals for faster test execution
+ sync.DefaultSyncRequeueAfter = shortSyncRequeue
+ controllers.DefaultControllerRetryAfter = shortControllerRetry
+ })
+
+ AfterEach(func() {
+ // Clean up test resources
+ Expect(registryHelper.CleanupRegistries()).To(Succeed())
+ Expect(gitHelper.CleanupRepositories()).To(Succeed())
+ deleteTestNamespace(ctx, testNamespace)
+ // Restore original values when test completes
+ sync.DefaultSyncRequeueAfter = originalSyncRequeue
+ controllers.DefaultControllerRetryAfter = originalControllerRetry
+ })
+
+ Context("Git Automatic Sync Scenarios", func() {
+ var (
+ registryName string
+ gitRepo *GitTestRepository
+ originalServers []RegistryServer
+ updatedServers []RegistryServer
+ )
+
+ BeforeEach(func() {
+ names := NewUniqueNames("git-auto-sync")
+ registryName = names.RegistryName
+
+ // Create test registry data
+ originalServers = CreateOriginalTestServers()
+ updatedServers = CreateUpdatedTestServers()
+ })
+
+ It("should perform automatic sync at configured intervals from Git repository", func() {
+ By("Creating a Git repository with registry data")
+ gitRepo = gitHelper.CreateRepository("test-registry-repo")
+ gitHelper.CommitRegistryData(gitRepo, "registry.json", originalServers, "Initial registry data")
+
+ By("Creating an MCPRegistry with short sync interval and Git source")
+ mcpRegistry := CreateMCPRegistryWithGitSource(registryName, testNamespace,
+ "Git Auto Sync Test Registry", gitRepo.CloneURL, "main", "registry.json", "10s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for initial sync to complete")
+ statusHelper.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, 30*time.Second)
+
+ // Capture first sync time
+ firstSyncRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, firstSyncRegistry)).To(Succeed())
+
+			Expect(firstSyncRegistry.Status.SyncStatus).NotTo(BeNil())
+ Expect(firstSyncRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ firstSyncTime := firstSyncRegistry.Status.SyncStatus.LastSyncTime
+ Expect(firstSyncTime).NotTo(BeNil())
+ serverCount := firstSyncRegistry.Status.SyncStatus.ServerCount
+ Expect(serverCount).To(Equal(1)) // Original registry has 1 server
+
+ By("Verifying initial storage ConfigMap was created")
+ storageConfigMapName := firstSyncRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ By("Verifying storage data matches original Git repository content")
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+
+ By("Updating the Git repository with new data")
+ gitHelper.CommitRegistryData(gitRepo, "registry.json", updatedServers, "Updated registry with 2 servers")
+
+ By("Waiting for automatic re-sync (should happen within 15s)")
+ Eventually(func() bool {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return false
+ }
+
+ // Check if sync time was updated and server count changed
+ if registry.Status.SyncStatus == nil {
+ return false
+ }
+
+ newSyncTime := registry.Status.SyncStatus.LastSyncTime
+ newServerCount := registry.Status.SyncStatus.ServerCount
+
+ return newSyncTime != nil &&
+ newSyncTime.After(firstSyncTime.Time) &&
+ newServerCount == 2 // Updated registry has 2 servers
+ }, 20*time.Second, 2*time.Second).Should(BeTrue(), "Registry should automatically re-sync within interval")
+
+ By("Verifying updated storage data matches new Git repository content")
+ updatedStorageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, updatedStorageConfigMap)).To(Succeed())
+
+ var newStoredRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(updatedStorageConfigMap.Data["registry.json"]), &newStoredRegistry)).To(Succeed())
+
+ By("Storage should contain updated registry data from Git")
+ verifyServerContent(newStoredRegistry, updatedServers)
+ })
+
+ It("should retry failed syncs when Git repository becomes accessible", func() {
+ By("Creating an MCPRegistry with inaccessible Git repository (sync will fail)")
+ mcpRegistry := CreateMCPRegistryWithGitSource(registryName, testNamespace,
+ "Git Retry Test Registry", "https://invalid-git-repo.example.com/repo.git", "main", "registry.json", "5s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to fail")
+ statusHelper.WaitForPhase(registryName, mcpv1alpha1.MCPRegistryPhaseFailed, 30*time.Second)
+
+ // Verify attempt counter incremented
+ failedRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, failedRegistry)).To(Succeed())
+
+ Expect(failedRegistry.Status.Phase).To(Equal(mcpv1alpha1.MCPRegistryPhaseFailed))
+ Expect(failedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseFailed))
+ initialAttemptCount := failedRegistry.Status.SyncStatus.AttemptCount
+ Expect(initialAttemptCount).To(BeNumerically(">", 0))
+
+ By("Waiting for retry attempt and verifying attempt counter increments")
+ Eventually(func() int {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return -1
+ }
+
+ if registry.Status.SyncStatus == nil {
+ return -1
+ }
+
+ return registry.Status.SyncStatus.AttemptCount
+ }, 15*time.Second, 2*time.Second).Should(BeNumerically(">", initialAttemptCount),
+ "Attempt count should increment on retry")
+
+ By("Updating MCPRegistry with valid Git repository")
+ gitRepo = gitHelper.CreateRepository("valid-test-repo")
+ gitHelper.CommitRegistryData(gitRepo, "registry.json", originalServers, "Initial registry data")
+
+ // Update the registry spec to point to valid repository
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, failedRegistry)).To(Succeed())
+
+ failedRegistry.Spec.Source.Git.Repository = gitRepo.CloneURL
+ Expect(k8sClient.Update(ctx, failedRegistry)).To(Succeed())
+
+ By("Waiting for sync to succeed after Git repository becomes accessible")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ By("Verifying sync data is now correct")
+ successRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, successRegistry)).To(Succeed())
+
+ Expect(successRegistry.Status.SyncStatus.ServerCount).To(Equal(1))
+ Expect(successRegistry.Status.SyncStatus.LastSyncTime).NotTo(BeNil())
+
+ By("Verifying storage ConfigMap was created with correct data from Git")
+ storageConfigMapName := successRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+ })
+
+ It("should handle different Git branches and tags", func() {
+ By("Creating a Git repository with registry data on main branch")
+ gitRepo = gitHelper.CreateRepository("multi-branch-repo")
+ gitHelper.CommitRegistryData(gitRepo, "registry.json", originalServers, "Initial data on main")
+
+ By("Creating a feature branch with updated data")
+ gitHelper.CreateBranch(gitRepo, "feature/updated-registry")
+ gitHelper.CommitRegistryData(gitRepo, "registry.json", updatedServers, "Updated data on feature branch")
+
+ By("Creating an MCPRegistry pointing to the feature branch")
+ mcpRegistry := CreateMCPRegistryWithGitSource(registryName, testNamespace,
+ "Git Branch Test Registry", gitRepo.CloneURL, "feature/updated-registry", "registry.json", "30s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to complete")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ By("Verifying data comes from the feature branch")
+ registry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry)).To(Succeed())
+
+ Expect(registry.Status.SyncStatus.ServerCount).To(Equal(2)) // Feature branch has 2 servers
+
+ storageConfigMapName := registry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, updatedServers)
+ })
+
+ It("should handle Git repository with different file paths", func() {
+ By("Creating a Git repository with registry data in subdirectory")
+ gitRepo = gitHelper.CreateRepository("nested-path-repo")
+ gitHelper.CommitRegistryDataAtPath(gitRepo, "configs/registries/registry.json", originalServers, "Registry in nested path")
+
+ By("Creating an MCPRegistry pointing to the nested file path")
+ mcpRegistry := CreateMCPRegistryWithGitSource(registryName, testNamespace,
+ "Git Path Test Registry", gitRepo.CloneURL, "main", "configs/registries/registry.json", "30s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to complete")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ By("Verifying data was correctly fetched from nested path")
+ registry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry)).To(Succeed())
+
+ Expect(registry.Status.SyncStatus.ServerCount).To(Equal(1))
+
+ storageConfigMapName := registry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+ })
+
+ It("should verify Git sync with complex registry content", func() {
+ By("Creating a Git repository with complex server configurations")
+ complexServers := CreateComplexTestServers()
+ gitRepo = gitHelper.CreateRepository("complex-content-repo")
+ gitHelper.CommitRegistryData(gitRepo, "registry.json", complexServers, "Complex registry with multiple server types")
+
+ By("Creating an MCPRegistry")
+ mcpRegistry := CreateMCPRegistryWithGitSource(registryName, testNamespace,
+ "Git Complex Content Test Registry", gitRepo.CloneURL, "main", "registry.json", "30s")
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to complete")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ By("Retrieving and verifying storage ConfigMap content")
+ registry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry)).To(Succeed())
+
+ storageConfigMapName := registry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+
+ By("Verifying exact content match from Git repository")
+ Expect(registry.Status.SyncStatus.ServerCount).To(Equal(2))
+ verifyServerContent(storedRegistry, complexServers)
+
+ // Verify metadata
+ Expect(storedRegistry.Version).To(Equal("1.0.0"))
+
+ By("Verifying hash consistency")
+ Expect(registry.Status.SyncStatus.LastSyncHash).NotTo(BeEmpty())
+
+ By("Verifying timing constants are still configurable for Git tests")
+ syncRequeueTime := sync.DefaultSyncRequeueAfter
+ controllerRetryTime := controllers.DefaultControllerRetryAfter
+
+ // Verify test values
+ Expect(syncRequeueTime).To(Equal(shortSyncRequeue))
+ Expect(controllerRetryTime).To(Equal(shortControllerRetry))
+
+ // Verify constants are still available
+ Expect(sync.DefaultSyncRequeueAfterConstant).To(Equal(5 * time.Minute))
+ Expect(controllers.DefaultControllerRetryAfterConstant).To(Equal(5 * time.Minute))
+ })
+
+ It("should handle Git authentication and private repositories", func() {
+ Skip("Private repository authentication tests require additional Git server setup")
+
+ // This test would cover:
+ // - SSH key-based authentication
+ // - HTTPS token-based authentication
+ // - Kubernetes Secret integration for credentials
+ // - Authentication failure scenarios
+ })
+ })
+})
diff --git a/test/e2e/operator/registry_helpers.go b/test/e2e/operator/registry_helpers.go
new file mode 100644
index 000000000..a0a2ecabb
--- /dev/null
+++ b/test/e2e/operator/registry_helpers.go
@@ -0,0 +1,329 @@
+package operator_test
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/gomega"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
// MCPRegistryTestHelper provides specialized utilities for MCPRegistry testing
type MCPRegistryTestHelper struct {
	// Client is the controller-runtime client used for every API operation.
	Client client.Client
	// Context is applied to all requests issued by this helper.
	// NOTE(review): contexts are normally passed per call rather than stored
	// in a struct; kept here for test ergonomics.
	Context context.Context
	// Namespace scopes all helper operations to a single test namespace.
	Namespace string
}
+
+// NewMCPRegistryTestHelper creates a new test helper for MCPRegistry operations
+func NewMCPRegistryTestHelper(ctx context.Context, k8sClient client.Client, namespace string) *MCPRegistryTestHelper {
+ return &MCPRegistryTestHelper{
+ Client: k8sClient,
+ Context: ctx,
+ Namespace: namespace,
+ }
+}
+
// RegistryBuilder provides a fluent interface for building MCPRegistry objects
type RegistryBuilder struct {
	// registry is the object under construction; Build returns a deep copy
	// so the builder can be reused safely.
	registry *mcpv1alpha1.MCPRegistry
}
+
+// NewRegistryBuilder creates a new MCPRegistry builder
+func (h *MCPRegistryTestHelper) NewRegistryBuilder(name string) *RegistryBuilder {
+ return &RegistryBuilder{
+ registry: &mcpv1alpha1.MCPRegistry{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: h.Namespace,
+ Labels: map[string]string{
+ "test.toolhive.io/suite": "operator-e2e",
+ },
+ },
+ Spec: mcpv1alpha1.MCPRegistrySpec{},
+ },
+ }
+}
+
+// WithConfigMapSource configures the registry with a ConfigMap source
+func (rb *RegistryBuilder) WithConfigMapSource(configMapName, key string) *RegistryBuilder {
+ rb.registry.Spec.Source = mcpv1alpha1.MCPRegistrySource{
+ Type: mcpv1alpha1.RegistrySourceTypeConfigMap,
+ Format: mcpv1alpha1.RegistryFormatToolHive,
+ ConfigMap: &mcpv1alpha1.ConfigMapSource{
+ Name: configMapName,
+ Key: key,
+ },
+ }
+ return rb
+}
+
+// WithUpstreamFormat configures the registry to use upstream MCP format
+func (rb *RegistryBuilder) WithUpstreamFormat() *RegistryBuilder {
+ rb.registry.Spec.Source.Format = mcpv1alpha1.RegistryFormatUpstream
+ return rb
+}
+
+// WithSyncPolicy configures the sync policy
+func (rb *RegistryBuilder) WithSyncPolicy(interval string) *RegistryBuilder {
+ rb.registry.Spec.SyncPolicy = &mcpv1alpha1.SyncPolicy{
+ Interval: interval,
+ }
+ return rb
+}
+
+// WithAnnotation adds an annotation to the registry
+func (rb *RegistryBuilder) WithAnnotation(key, value string) *RegistryBuilder {
+ if rb.registry.Annotations == nil {
+ rb.registry.Annotations = make(map[string]string)
+ }
+ rb.registry.Annotations[key] = value
+ return rb
+}
+
+// WithLabel adds a label to the registry
+func (rb *RegistryBuilder) WithLabel(key, value string) *RegistryBuilder {
+ if rb.registry.Labels == nil {
+ rb.registry.Labels = make(map[string]string)
+ }
+ rb.registry.Labels[key] = value
+ return rb
+}
+
+// WithNameIncludeFilter sets name include patterns for filtering
+func (rb *RegistryBuilder) WithNameIncludeFilter(patterns []string) *RegistryBuilder {
+ if rb.registry.Spec.Filter == nil {
+ rb.registry.Spec.Filter = &mcpv1alpha1.RegistryFilter{}
+ }
+ if rb.registry.Spec.Filter.NameFilters == nil {
+ rb.registry.Spec.Filter.NameFilters = &mcpv1alpha1.NameFilter{}
+ }
+ rb.registry.Spec.Filter.NameFilters.Include = patterns
+ return rb
+}
+
+// WithNameExcludeFilter sets name exclude patterns for filtering
+func (rb *RegistryBuilder) WithNameExcludeFilter(patterns []string) *RegistryBuilder {
+ if rb.registry.Spec.Filter == nil {
+ rb.registry.Spec.Filter = &mcpv1alpha1.RegistryFilter{}
+ }
+ if rb.registry.Spec.Filter.NameFilters == nil {
+ rb.registry.Spec.Filter.NameFilters = &mcpv1alpha1.NameFilter{}
+ }
+ rb.registry.Spec.Filter.NameFilters.Exclude = patterns
+ return rb
+}
+
+// WithTagIncludeFilter sets tag include patterns for filtering
+func (rb *RegistryBuilder) WithTagIncludeFilter(tags []string) *RegistryBuilder {
+ if rb.registry.Spec.Filter == nil {
+ rb.registry.Spec.Filter = &mcpv1alpha1.RegistryFilter{}
+ }
+ if rb.registry.Spec.Filter.Tags == nil {
+ rb.registry.Spec.Filter.Tags = &mcpv1alpha1.TagFilter{}
+ }
+ rb.registry.Spec.Filter.Tags.Include = tags
+ return rb
+}
+
+// WithTagExcludeFilter sets tag exclude patterns for filtering
+func (rb *RegistryBuilder) WithTagExcludeFilter(tags []string) *RegistryBuilder {
+ if rb.registry.Spec.Filter == nil {
+ rb.registry.Spec.Filter = &mcpv1alpha1.RegistryFilter{}
+ }
+ if rb.registry.Spec.Filter.Tags == nil {
+ rb.registry.Spec.Filter.Tags = &mcpv1alpha1.TagFilter{}
+ }
+ rb.registry.Spec.Filter.Tags.Exclude = tags
+ return rb
+}
+
// Build returns the constructed MCPRegistry
// A deep copy is returned so later builder mutations (or further Create
// calls) do not affect previously built objects.
func (rb *RegistryBuilder) Build() *mcpv1alpha1.MCPRegistry {
	return rb.registry.DeepCopy()
}
+
+// Create builds and creates the MCPRegistry in the cluster
+func (rb *RegistryBuilder) Create(h *MCPRegistryTestHelper) *mcpv1alpha1.MCPRegistry {
+ registry := rb.Build()
+ err := h.Client.Create(h.Context, registry)
+ gomega.Expect(err).NotTo(gomega.HaveOccurred(), "Failed to create MCPRegistry")
+ return registry
+}
+
+// CreateBasicConfigMapRegistry creates a simple MCPRegistry with ConfigMap source
+func (h *MCPRegistryTestHelper) CreateBasicConfigMapRegistry(name, configMapName string) *mcpv1alpha1.MCPRegistry {
+ return h.NewRegistryBuilder(name).
+ WithConfigMapSource(configMapName, "registry.json").
+ WithSyncPolicy("1h").
+ Create(h)
+}
+
+// CreateManualSyncRegistry creates an MCPRegistry with manual sync only
+func (h *MCPRegistryTestHelper) CreateManualSyncRegistry(name, configMapName string) *mcpv1alpha1.MCPRegistry {
+ return h.NewRegistryBuilder(name).
+ WithConfigMapSource(configMapName, "registry.json").
+ Create(h)
+}
+
+// CreateUpstreamFormatRegistry creates an MCPRegistry with upstream format
+func (h *MCPRegistryTestHelper) CreateUpstreamFormatRegistry(name, configMapName string) *mcpv1alpha1.MCPRegistry {
+ return h.NewRegistryBuilder(name).
+ WithConfigMapSource(configMapName, "registry.json").
+ WithUpstreamFormat().
+ WithSyncPolicy("30m").
+ Create(h)
+}
+
+// GetRegistry retrieves an MCPRegistry by name
+func (h *MCPRegistryTestHelper) GetRegistry(name string) (*mcpv1alpha1.MCPRegistry, error) {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ err := h.Client.Get(h.Context, types.NamespacedName{
+ Namespace: h.Namespace,
+ Name: name,
+ }, registry)
+ return registry, err
+}
+
// UpdateRegistry updates an existing MCPRegistry
// The object must carry a current resourceVersion (e.g. obtained via
// GetRegistry) or the API server rejects the update with a conflict.
func (h *MCPRegistryTestHelper) UpdateRegistry(registry *mcpv1alpha1.MCPRegistry) error {
	return h.Client.Update(h.Context, registry)
}
+
+// PatchRegistry patches an MCPRegistry with the given patch
+func (h *MCPRegistryTestHelper) PatchRegistry(name string, patch client.Patch) error {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ registry.Name = name
+ registry.Namespace = h.Namespace
+ return h.Client.Patch(h.Context, registry, patch)
+}
+
+// DeleteRegistry deletes an MCPRegistry by name
+func (h *MCPRegistryTestHelper) DeleteRegistry(name string) error {
+ registry := &mcpv1alpha1.MCPRegistry{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ Namespace: h.Namespace,
+ },
+ }
+ return h.Client.Delete(h.Context, registry)
+}
+
+// TriggerManualSync adds the manual sync annotation to trigger a sync
+func (h *MCPRegistryTestHelper) TriggerManualSync(name string) error {
+ registry, err := h.GetRegistry(name)
+ if err != nil {
+ return err
+ }
+
+ if registry.Annotations == nil {
+ registry.Annotations = make(map[string]string)
+ }
+ registry.Annotations["toolhive.stacklok.dev/manual-sync"] = fmt.Sprintf("%d", time.Now().Unix())
+
+ return h.UpdateRegistry(registry)
+}
+
+// GetRegistryStatus returns the current status of an MCPRegistry
+func (h *MCPRegistryTestHelper) GetRegistryStatus(name string) (*mcpv1alpha1.MCPRegistryStatus, error) {
+ registry, err := h.GetRegistry(name)
+ if err != nil {
+ return nil, err
+ }
+ return ®istry.Status, nil
+}
+
+// GetRegistryPhase returns the current phase of an MCPRegistry
+func (h *MCPRegistryTestHelper) GetRegistryPhase(name string) (mcpv1alpha1.MCPRegistryPhase, error) {
+ status, err := h.GetRegistryStatus(name)
+ if err != nil {
+ return "", err
+ }
+ return status.Phase, nil
+}
+
+// GetRegistryCondition returns a specific condition from the registry status
+func (h *MCPRegistryTestHelper) GetRegistryCondition(name, conditionType string) (*metav1.Condition, error) {
+ status, err := h.GetRegistryStatus(name)
+ if err != nil {
+ return nil, err
+ }
+
+ for _, condition := range status.Conditions {
+ if condition.Type == conditionType {
+ return &condition, nil
+ }
+ }
+ return nil, fmt.Errorf("condition %s not found", conditionType)
+}
+
// ListRegistries returns all MCPRegistries in the namespace
// The list is scoped to the helper's namespace only.
func (h *MCPRegistryTestHelper) ListRegistries() (*mcpv1alpha1.MCPRegistryList, error) {
	registryList := &mcpv1alpha1.MCPRegistryList{}
	err := h.Client.List(h.Context, registryList, client.InNamespace(h.Namespace))
	return registryList, err
}
+
+// CleanupRegistries deletes all MCPRegistries in the namespace
+func (h *MCPRegistryTestHelper) CleanupRegistries() error {
+ registryList, err := h.ListRegistries()
+ if err != nil {
+ return err
+ }
+
+ for _, registry := range registryList.Items {
+ if err := h.Client.Delete(h.Context, ®istry); err != nil {
+ return err
+ }
+
+ // Wait for registry to be actually deleted
+ ginkgo.By(fmt.Sprintf("waiting for registry %s to be deleted", registry.Name))
+ gomega.Eventually(func() bool {
+ _, err := h.GetRegistry(registry.Name)
+ return err != nil && errors.IsNotFound(err)
+ }, LongTimeout, DefaultPollingInterval).Should(gomega.BeTrue())
+ }
+ return nil
+}
+
+// WaitForRegistryInitialization waits for common initialization steps after registry creation:
+// 1. Wait for finalizer to be added
+// 2. Wait for controller to process the registry into an acceptable initial phase
+func (h *MCPRegistryTestHelper) WaitForRegistryInitialization(registryName string,
+ timingHelper *TimingTestHelper, statusHelper *StatusTestHelper) {
+ // Wait for finalizer to be added
+ ginkgo.By("waiting for finalizer to be added")
+ timingHelper.WaitForControllerReconciliation(func() interface{} {
+ updatedRegistry, err := h.GetRegistry(registryName)
+ if err != nil {
+ return false
+ }
+ return containsFinalizer(updatedRegistry.Finalizers, "mcpregistry.toolhive.stacklok.dev/finalizer")
+ }).Should(gomega.BeTrue())
+
+ // Wait for controller to process and verify initial status
+ ginkgo.By("waiting for controller to process and verify initial status")
+ statusHelper.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{
+ mcpv1alpha1.MCPRegistryPhasePending,
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhaseSyncing,
+ }, MediumTimeout)
+}
+
// containsFinalizer reports whether the given finalizer name is present in
// the finalizer list.
//
// Fix: the original ignored its second parameter (declared "_") and always
// compared against a hard-coded constant, silently discarding whatever name
// callers passed. All existing callers pass that same constant, so honoring
// the parameter is behavior-identical for them while making the helper
// correct for any other finalizer.
func containsFinalizer(finalizers []string, name string) bool {
	for _, f := range finalizers {
		if f == name {
			return true
		}
	}
	return false
}
diff --git a/test/e2e/operator/registry_lifecycle_test.go b/test/e2e/operator/registry_lifecycle_test.go
new file mode 100644
index 000000000..fe2a1e6eb
--- /dev/null
+++ b/test/e2e/operator/registry_lifecycle_test.go
@@ -0,0 +1,530 @@
+package operator_test
+
+import (
+ "context"
+ "fmt"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
const (
	// registryFinalizerName is the finalizer the MCPRegistry controller is
	// expected to attach to every registry it manages.
	registryFinalizerName = "mcpregistry.toolhive.stacklok.dev/finalizer"
)
+
// This suite exercises the full lifecycle of an MCPRegistry resource against
// envtest: creation and initial status, finalizer handling, deletion with
// cleanup, spec validation, and multi-registry coexistence. Note that in
// envtest no kubelet runs, so API deployments never become Ready — the specs
// below accept Pending/Deploying phases accordingly.
var _ = Describe("MCPRegistry Lifecycle Management", Label("k8s", "registry"), func() {
	var (
		ctx             context.Context
		registryHelper  *MCPRegistryTestHelper
		configMapHelper *ConfigMapTestHelper
		statusHelper    *StatusTestHelper
		timingHelper    *TimingTestHelper
		k8sHelper       *K8sResourceTestHelper
		testNamespace   string
	)

	BeforeEach(func() {
		// Each spec runs in its own generated namespace for isolation.
		ctx = context.Background()
		testNamespace = createTestNamespace(ctx)

		// Initialize helpers
		registryHelper = NewMCPRegistryTestHelper(ctx, k8sClient, testNamespace)
		configMapHelper = NewConfigMapTestHelper(ctx, k8sClient, testNamespace)
		statusHelper = NewStatusTestHelper(ctx, k8sClient, testNamespace)
		timingHelper = NewTimingTestHelper(ctx, k8sClient)
		k8sHelper = NewK8sResourceTestHelper(ctx, k8sClient, testNamespace)
	})

	AfterEach(func() {
		// Clean up test resources before tearing down the namespace so
		// finalizers are processed while the controller still reconciles.
		Expect(registryHelper.CleanupRegistries()).To(Succeed())
		Expect(configMapHelper.CleanupConfigMaps()).To(Succeed())
		deleteTestNamespace(ctx, testNamespace)
	})

	Context("Basic Registry Creation", func() {
		It("should create MCPRegistry with correct initial status", func() {
			// Create test ConfigMap
			configMap, numServers := configMapHelper.CreateSampleToolHiveRegistry("test-config")

			// Create MCPRegistry
			registry := registryHelper.NewRegistryBuilder("test-registry").
				WithConfigMapSource(configMap.Name, "registry.json").
				WithSyncPolicy("1h").
				Create(registryHelper)

			// Verify registry was created
			Expect(registry.Name).To(Equal("test-registry"))
			Expect(registry.Namespace).To(Equal(testNamespace))

			// Verify initial spec
			Expect(registry.Spec.Source.Type).To(Equal(mcpv1alpha1.RegistrySourceTypeConfigMap))
			Expect(registry.Spec.Source.ConfigMap.Name).To(Equal(configMap.Name))
			Expect(registry.Spec.SyncPolicy.Interval).To(Equal("1h"))

			// Wait for registry initialization (finalizer + initial status)
			registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)

			By("verifying storage ConfigMap is defined in status and exists")
			updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
			Expect(err).NotTo(HaveOccurred())

			// Verify storage reference is set in status
			Expect(updatedRegistry.Status.StorageRef).NotTo(BeNil())
			Expect(updatedRegistry.Status.StorageRef.Type).To(Equal("configmap"))
			Expect(updatedRegistry.Status.StorageRef.ConfigMapRef).NotTo(BeNil())
			Expect(updatedRegistry.Status.StorageRef.ConfigMapRef.Name).NotTo(BeEmpty())

			// Verify the storage ConfigMap actually exists
			storageConfigMapName := updatedRegistry.Status.StorageRef.ConfigMapRef.Name
			storageConfigMap, err := k8sHelper.GetConfigMap(storageConfigMapName)
			Expect(err).NotTo(HaveOccurred())
			Expect(storageConfigMap.Name).To(Equal(storageConfigMapName))
			Expect(storageConfigMap.Namespace).To(Equal(testNamespace))

			// Verify it has the registry.json key
			Expect(storageConfigMap.Data).To(HaveKey("registry.json"))
			Expect(storageConfigMap.Data["registry.json"]).NotTo(BeEmpty())

			By("verifying Registry API Service and Deployment exist")
			apiResourceName := updatedRegistry.GetAPIResourceName()

			// Wait for Service to be created
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				return k8sHelper.ServiceExists(apiResourceName)
			}).Should(BeTrue(), "Registry API Service should exist")

			// Wait for Deployment to be created
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				return k8sHelper.DeploymentExists(apiResourceName)
			}).Should(BeTrue(), "Registry API Deployment should exist")

			// Verify the Service has correct configuration
			service, err := k8sHelper.GetService(apiResourceName)
			Expect(err).NotTo(HaveOccurred())
			Expect(service.Name).To(Equal(apiResourceName))
			Expect(service.Namespace).To(Equal(testNamespace))
			Expect(service.Spec.Ports).To(HaveLen(1))
			Expect(service.Spec.Ports[0].Name).To(Equal("http"))

			// Verify the Deployment has correct configuration
			deployment, err := k8sHelper.GetDeployment(apiResourceName)
			Expect(err).NotTo(HaveOccurred())
			Expect(deployment.Name).To(Equal(apiResourceName))
			Expect(deployment.Namespace).To(Equal(testNamespace))
			Expect(deployment.Spec.Template.Spec.Containers).To(HaveLen(1))
			Expect(deployment.Spec.Template.Spec.Containers[0].Name).To(Equal("registry-api"))

			By("verifying deployment has proper ownership")
			// Owner reference enables cascade deletion with the registry.
			Expect(deployment.OwnerReferences).To(HaveLen(1))
			Expect(deployment.OwnerReferences[0].Kind).To(Equal("MCPRegistry"))
			Expect(deployment.OwnerReferences[0].Name).To(Equal(registry.Name))

			By("verifying registry status")
			updatedRegistry, err = registryHelper.GetRegistry(registry.Name)
			Expect(err).NotTo(HaveOccurred())
			// In envtest, the deployment won't actually be ready, so expect Pending phase
			// but verify that sync is complete and API deployment is in progress
			Expect(updatedRegistry.Status.Phase).To(BeElementOf(
				mcpv1alpha1.MCPRegistryPhasePending, // API deployment in progress
				mcpv1alpha1.MCPRegistryPhaseReady,   // If somehow API becomes ready
			))

			// Verify sync is complete
			Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
			Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
			Expect(updatedRegistry.Status.SyncStatus.AttemptCount).To(Equal(0))
			Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(numServers))

			// Verify API status exists and shows deployment
			Expect(updatedRegistry.Status.APIStatus).NotTo(BeNil())
			Expect(updatedRegistry.Status.APIStatus.Phase).To(BeElementOf(
				mcpv1alpha1.APIPhaseDeploying, // Deployment created but not ready
				mcpv1alpha1.APIPhaseReady,     // If somehow becomes ready
			))
			if updatedRegistry.Status.APIStatus.Phase == mcpv1alpha1.APIPhaseReady {
				Expect(updatedRegistry.Status.APIStatus.Endpoint).To(Equal(fmt.Sprintf("http://%s.%s.svc.cluster.local:8080", apiResourceName, testNamespace)))
			}
			// NOTE(review): leftover debug marker — consider removing.
			By("BYE")
		})

		It("should handle registry with minimal configuration", func() {
			// Create minimal ConfigMap
			configMap := configMapHelper.NewConfigMapBuilder("minimal-config").
				WithToolHiveRegistry("registry.json", []RegistryServer{
					{
						Name:        "test-server",
						Description: "Test server",
						Tier:        "Community",
						Status:      "Active",
						Transport:   "stdio",
						Tools:       []string{"test_tool"},
						Image:       "test/server:1.0.0",
					},
				}).
				Create(configMapHelper)

			// Create minimal registry (no sync policy)
			registry := registryHelper.NewRegistryBuilder("minimal-registry").
				WithConfigMapSource(configMap.Name, "registry.json").
				Create(registryHelper)

			// Verify creation
			Expect(registry.Spec.SyncPolicy).To(BeNil())

			// Wait for registry initialization (finalizer + initial status)
			registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)

			updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
			Expect(err).NotTo(HaveOccurred())

			// Verify sync status is complete
			Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
			Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
			Expect(updatedRegistry.Status.SyncStatus.ServerCount).To(Equal(1))
		})

		It("should set correct metadata labels and annotations", func() {
			configMap, _ := configMapHelper.CreateSampleToolHiveRegistry("labeled-config")

			registry := registryHelper.NewRegistryBuilder("labeled-registry").
				WithConfigMapSource(configMap.Name, "registry.json").
				WithLabel("app", "test").
				WithLabel("version", "1.0").
				WithAnnotation("description", "Test registry").
				Create(registryHelper)

			registryHelper.WaitForRegistryInitialization(registry.Name, timingHelper, statusHelper)

			// Verify labels and annotations
			Expect(registry.Labels).To(HaveKeyWithValue("app", "test"))
			Expect(registry.Labels).To(HaveKeyWithValue("version", "1.0"))
			Expect(registry.Annotations).To(HaveKeyWithValue("description", "Test registry"))
		})
	})

	Context("Finalizer Management", func() {
		It("should add finalizer on creation", func() {
			configMap, _ := configMapHelper.CreateSampleToolHiveRegistry("finalizer-config")

			registry := registryHelper.NewRegistryBuilder("finalizer-test").
				WithConfigMapSource(configMap.Name, "registry.json").
				Create(registryHelper)

			// Wait for finalizer to be added
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
				if err != nil {
					return false
				}
				return containsFinalizer(updatedRegistry.Finalizers, registryFinalizerName)
			}).Should(BeTrue())
		})

		It("should remove finalizer during deletion", func() {
			configMap, _ := configMapHelper.CreateSampleToolHiveRegistry("deletion-config")

			registry := registryHelper.NewRegistryBuilder("deletion-test").
				WithConfigMapSource(configMap.Name, "registry.json").
				Create(registryHelper)

			// Wait for finalizer to be added
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
				if err != nil {
					return false
				}
				return containsFinalizer(updatedRegistry.Finalizers, registryFinalizerName)
			}).Should(BeTrue())

			// Delete the registry
			Expect(registryHelper.DeleteRegistry(registry.Name)).To(Succeed())

			// Verify registry enters terminating phase
			By("waiting for registry to enter terminating phase")
			statusHelper.WaitForPhase(registry.Name, mcpv1alpha1.MCPRegistryPhaseTerminating, MediumTimeout)

			By("waiting for finalizer to be removed")
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
				if err != nil {
					return true // Registry might be deleted, which means finalizer was removed
				}
				return !containsFinalizer(updatedRegistry.Finalizers, registryFinalizerName)
			}).Should(BeTrue())

			// Verify registry is eventually deleted (finalizer removed)
			By("waiting for registry to be deleted")
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				_, err := registryHelper.GetRegistry(registry.Name)
				return errors.IsNotFound(err)
			}).Should(BeTrue())
		})
	})

	Context("Deletion Handling", func() {
		It("should perform graceful deletion with cleanup", func() {
			configMap, _ := configMapHelper.CreateSampleToolHiveRegistry("cleanup-config")

			registry := registryHelper.NewRegistryBuilder("cleanup-test").
				WithConfigMapSource(configMap.Name, "registry.json").
				WithSyncPolicy("30m").
				Create(registryHelper)

			// Wait for registry to be ready
			statusHelper.WaitForPhaseAny(registry.Name, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, MediumTimeout)

			// Store initial storage reference for cleanup verification
			status, err := registryHelper.GetRegistryStatus(registry.Name)
			Expect(err).NotTo(HaveOccurred())
			initialStorageRef := status.StorageRef

			// Delete the registry
			Expect(registryHelper.DeleteRegistry(registry.Name)).To(Succeed())

			// Verify graceful deletion process
			statusHelper.WaitForPhase(registry.Name, mcpv1alpha1.MCPRegistryPhaseTerminating, QuickTimeout)

			// Verify cleanup of associated resources (if any storage was created)
			if initialStorageRef != nil && initialStorageRef.ConfigMapRef != nil {
				timingHelper.WaitForControllerReconciliation(func() interface{} {
					_, err := configMapHelper.GetConfigMap(initialStorageRef.ConfigMapRef.Name)
					// Storage ConfigMap should be cleaned up or marked for deletion
					return errors.IsNotFound(err)
				}).Should(BeTrue())
			}

			// Verify complete deletion
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				_, err := registryHelper.GetRegistry(registry.Name)
				return errors.IsNotFound(err)
			}).Should(BeTrue())
		})

		It("should handle deletion when source ConfigMap is missing", func() {
			configMap, _ := configMapHelper.CreateSampleToolHiveRegistry("missing-config")

			registry := registryHelper.NewRegistryBuilder("missing-source-test").
				WithConfigMapSource(configMap.Name, "registry.json").
				Create(registryHelper)

			// Delete the source ConfigMap first
			Expect(configMapHelper.DeleteConfigMap(configMap.Name)).To(Succeed())

			// Now delete the registry - should still succeed
			Expect(registryHelper.DeleteRegistry(registry.Name)).To(Succeed())

			// Verify deletion completes despite missing source
			timingHelper.WaitForControllerReconciliation(func() interface{} {
				_, err := registryHelper.GetRegistry(registry.Name)
				return errors.IsNotFound(err)
			}).Should(BeTrue())
		})
	})

	Context("Spec Validation", func() {
		It("should reject invalid source configuration", func() {
			// Try to create registry with missing ConfigMap reference
			invalidRegistry := &mcpv1alpha1.MCPRegistry{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "invalid-registry",
					Namespace: testNamespace,
				},
				Spec: mcpv1alpha1.MCPRegistrySpec{
					Source: mcpv1alpha1.MCPRegistrySource{
						Type: mcpv1alpha1.RegistrySourceTypeConfigMap,
						// Missing ConfigMap field
					},
				},
			}

			// Should fail validation
			err := k8sClient.Create(ctx, invalidRegistry)
			By("verifying validation error")
			Expect(err).To(HaveOccurred())
			Expect(err.Error()).To(ContainSubstring("configMap field is required"))
		})

		It("should reject invalid sync interval", func() {
			configMap, _ := configMapHelper.CreateSampleToolHiveRegistry("interval-config")

			invalidRegistry := &mcpv1alpha1.MCPRegistry{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "invalid-interval",
					Namespace: testNamespace,
				},
				Spec: mcpv1alpha1.MCPRegistrySpec{
					Source: mcpv1alpha1.MCPRegistrySource{
						Type: mcpv1alpha1.RegistrySourceTypeConfigMap,
						ConfigMap: &mcpv1alpha1.ConfigMapSource{
							Name: configMap.Name,
							Key:  "registry.json",
						},
					},
					SyncPolicy: &mcpv1alpha1.SyncPolicy{
						Interval: "invalid-duration",
					},
				},
			}

			// Should fail validation
			err := k8sClient.Create(ctx, invalidRegistry)
			Expect(err).To(HaveOccurred())
		})

		It("should handle missing source ConfigMap gracefully", func() {
			registry := registryHelper.NewRegistryBuilder("missing-configmap").
				WithConfigMapSource("nonexistent-configmap", "registry.json").
				Create(registryHelper)

			By("waiting for registry to enter failed state")
			// Should enter failed state due to missing source
			statusHelper.WaitForPhase(registry.Name, mcpv1alpha1.MCPRegistryPhaseFailed, MediumTimeout)

			// Check condition reflects the problem
			statusHelper.WaitForCondition(registry.Name, mcpv1alpha1.ConditionSyncSuccessful,
				metav1.ConditionFalse, MediumTimeout)

			updatedRegistry, err := registryHelper.GetRegistry(registry.Name)
			Expect(err).NotTo(HaveOccurred())

			By("verifying sync status")
			Expect(updatedRegistry.Status.SyncStatus).NotTo(BeNil())
			Expect(updatedRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseFailed))
			Expect(updatedRegistry.Status.SyncStatus.AttemptCount).To(Equal(1))

			By("verifying API status")
			Expect(updatedRegistry.Status.APIStatus).NotTo(BeNil())
			Expect(updatedRegistry.Status.APIStatus.Phase).To(Equal(mcpv1alpha1.APIPhaseDeploying))
			Expect(updatedRegistry.Status.APIStatus.Endpoint).To(BeEmpty())
		})
	})

	Context("Multiple Registry Management", func() {
		var configMap1, configMap2 *corev1.ConfigMap
		It("should handle multiple registries in same namespace", func() {
			// Create multiple ConfigMaps
			configMap1, _ = configMapHelper.CreateSampleToolHiveRegistry("config-1")
			configMap2, _ = configMapHelper.CreateSampleToolHiveRegistry("config-2")

			// Create multiple registries
			registry1 := registryHelper.NewRegistryBuilder("registry-1").
				WithConfigMapSource(configMap1.Name, "registry.json").
				WithSyncPolicy("1h").
				Create(registryHelper)

			registry2 := registryHelper.NewRegistryBuilder("registry-2").
				WithConfigMapSource(configMap2.Name, "registry.json").
				// WithUpstreamFormat().
				WithSyncPolicy("30m").
				Create(registryHelper)

			// Both should become ready independently
			statusHelper.WaitForPhaseAny(registry1.Name, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, MediumTimeout)
			statusHelper.WaitForPhaseAny(registry2.Name, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, MediumTimeout)

			// Verify they operate independently
			Expect(registry1.Spec.SyncPolicy.Interval).To(Equal("1h"))
			Expect(registry2.Spec.SyncPolicy.Interval).To(Equal("30m"))
			Expect(registry1.Spec.Source.Format).To(Equal(mcpv1alpha1.RegistryFormatToolHive))
			Expect(registry2.Spec.Source.Format).To(Equal(mcpv1alpha1.RegistryFormatToolHive))
		})

		It("should allow multiple registries with same ConfigMap source", func() {
			// Create shared ConfigMap
			sharedConfigMap, _ := configMapHelper.CreateSampleToolHiveRegistry("shared-config")

			// Create multiple registries using same source
			registry1 := registryHelper.NewRegistryBuilder("shared-registry-1").
				WithConfigMapSource(sharedConfigMap.Name, "registry.json").
				WithSyncPolicy("1h").
				Create(registryHelper)

			registry2 := registryHelper.NewRegistryBuilder("shared-registry-2").
				WithConfigMapSource(sharedConfigMap.Name, "registry.json").
				WithSyncPolicy("2h").
				Create(registryHelper)

			// Both should become ready
			statusHelper.WaitForPhaseAny(registry1.Name, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, MediumTimeout)
			statusHelper.WaitForPhaseAny(registry2.Name, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, MediumTimeout)

			// Both should have same server count from shared source
			sharedNumServers := 2 // Sample ToolHive registry has 2 servers
			statusHelper.WaitForServerCount(registry1.Name, sharedNumServers, MediumTimeout)
			statusHelper.WaitForServerCount(registry2.Name, sharedNumServers, MediumTimeout)
		})

		It("should handle registry name conflicts gracefully", func() {
			configMap, _ := configMapHelper.CreateSampleToolHiveRegistry("conflict-config")

			// Create first registry
			registry1 := registryHelper.NewRegistryBuilder("conflict-registry").
				WithConfigMapSource(configMap.Name, "registry.json").
				Create(registryHelper)

			// Try to create second registry with same name - should fail
			duplicateRegistry := &mcpv1alpha1.MCPRegistry{
				ObjectMeta: metav1.ObjectMeta{
					Name:      "conflict-registry",
					Namespace: testNamespace,
				},
				Spec: mcpv1alpha1.MCPRegistrySpec{
					Source: mcpv1alpha1.MCPRegistrySource{
						Type: mcpv1alpha1.RegistrySourceTypeConfigMap,
						ConfigMap: &mcpv1alpha1.ConfigMapSource{
							Name: configMap.Name,
							Key:  "registry.json",
						},
					},
				},
			}

			err := k8sClient.Create(ctx, duplicateRegistry)
			Expect(err).To(HaveOccurred())
			Expect(errors.IsAlreadyExists(err)).To(BeTrue())

			// Original registry should still be functional
			statusHelper.WaitForPhaseAny(registry1.Name, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, MediumTimeout)
		})
	})
})
+
+// Helper function to create test namespace
+func createTestNamespace(ctx context.Context) string {
+ namespace := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ GenerateName: "test-registry-lifecycle-",
+ Labels: map[string]string{
+ "test.toolhive.io/suite": "operator-e2e",
+ },
+ },
+ }
+
+ Expect(k8sClient.Create(ctx, namespace)).To(Succeed())
+ return namespace.Name
+}
+
+// Helper function to delete test namespace
+func deleteTestNamespace(ctx context.Context, name string) {
+ namespace := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: name,
+ },
+ }
+
+ By(fmt.Sprintf("deleting namespace %s", name))
+ _ = k8sClient.Delete(ctx, namespace)
+ By(fmt.Sprintf("deleted namespace %s", name))
+
+ // Wait for namespace deletion
+ // Eventually(func() bool {
+ // err := k8sClient.Get(ctx, types.NamespacedName{Name: name}, namespace)
+ // return errors.IsNotFound(err)
+ // }, LongTimeout, DefaultPollingInterval).Should(BeTrue())
+}
diff --git a/test/e2e/operator/registry_manual_sync_test.go b/test/e2e/operator/registry_manual_sync_test.go
new file mode 100644
index 000000000..6ce066968
--- /dev/null
+++ b/test/e2e/operator/registry_manual_sync_test.go
@@ -0,0 +1,473 @@
+package operator_test
+
+import (
+ "context"
+ "encoding/json"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/types"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+ "github.com/stacklok/toolhive/cmd/thv-operator/pkg/mcpregistrystatus"
+)
+
+var _ = Describe("MCPRegistry Manual Sync with ConfigMap", Label("k8s", "registry"), func() {
+ var (
+ ctx context.Context
+ registryHelper *MCPRegistryTestHelper
+ configMapHelper *ConfigMapTestHelper
+ statusHelper *StatusTestHelper
+ testNamespace string
+ )
+
+ BeforeEach(func() {
+ ctx = context.Background()
+ testNamespace = createTestNamespace(ctx)
+
+ // Initialize helpers
+ registryHelper = NewMCPRegistryTestHelper(ctx, k8sClient, testNamespace)
+ configMapHelper = NewConfigMapTestHelper(ctx, k8sClient, testNamespace)
+ statusHelper = NewStatusTestHelper(ctx, k8sClient, testNamespace)
+ })
+
+ AfterEach(func() {
+ // Clean up test resources
+ Expect(registryHelper.CleanupRegistries()).To(Succeed())
+ Expect(configMapHelper.CleanupConfigMaps()).To(Succeed())
+ deleteTestNamespace(ctx, testNamespace)
+ })
+
+ Context("Manual Sync Trigger Scenarios", func() {
+ var (
+ registryName string
+ configMapName string
+ originalServers []RegistryServer
+ updatedServers []RegistryServer
+ )
+
+ BeforeEach(func() {
+ names := NewUniqueNames("manual-sync")
+ registryName = names.RegistryName
+ configMapName = names.ConfigMapName
+
+ // Create test registry data
+ originalServers = CreateOriginalTestServers()
+ // Create updated registry data (for later tests)
+ updatedServers = CreateUpdatedTestServers()
+ })
+
+ It("should trigger sync when manual sync annotation is added", func() {
+ By("Creating a ConfigMap with registry data")
+ configMap := configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", originalServers).
+ Create(configMapHelper)
+
+ By("Creating an MCPRegistry without automatic sync policy")
+ mcpRegistry := CreateMCPRegistryManualOnly(registryName, testNamespace,
+ "Manual Sync Test Registry", configMapName) // No SyncPolicy - manual sync only
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for initial sync to complete")
+ statusHelper.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, 30*time.Second)
+
+ // Capture first sync time
+ firstSyncRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, firstSyncRegistry)).To(Succeed())
+
+ Expect(firstSyncRegistry.Status).NotTo(BeNil())
+ Expect(firstSyncRegistry.Status.SyncStatus.Phase).To(Equal(mcpv1alpha1.SyncPhaseComplete))
+ firstSyncTime := firstSyncRegistry.Status.SyncStatus.LastSyncTime
+ Expect(firstSyncTime).NotTo(BeNil())
+ serverCount := firstSyncRegistry.Status.SyncStatus.ServerCount
+ Expect(serverCount).To(Equal(1)) // Original registry has 1 server
+
+ By("Verifying initial storage ConfigMap was created")
+ storageConfigMapName := firstSyncRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ By("Verifying storage data matches original ConfigMap")
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+
+ By("Updating the source ConfigMap")
+ Expect(UpdateConfigMapWithServers(configMap, updatedServers)).To(Succeed())
+ Expect(k8sClient.Update(ctx, configMap)).To(Succeed())
+
+ By("Adding manual sync trigger annotation")
+ names := NewUniqueNames("manual-sync")
+ triggerValue := names.GenerateTriggerValue("manual-sync")
+ // Refresh the registry object first
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, mcpRegistry)).To(Succeed())
+
+ AddManualSyncTrigger(mcpRegistry, triggerValue, mcpregistrystatus.SyncTriggerAnnotation)
+ Expect(k8sClient.Update(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for manual sync to complete")
+ Eventually(func() bool {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return false
+ }
+
+ // Check if sync time was updated and server count changed
+ if registry.Status.SyncStatus == nil {
+ return false
+ }
+
+ newSyncTime := registry.Status.SyncStatus.LastSyncTime
+ newServerCount := registry.Status.SyncStatus.ServerCount
+
+ return newSyncTime != nil &&
+ newSyncTime.After(firstSyncTime.Time) &&
+ newServerCount == 2 && // Updated registry has 2 servers
+ registry.Status.LastManualSyncTrigger == triggerValue // Trigger was processed
+ }, 30*time.Second, 2*time.Second).Should(BeTrue(), "Registry should sync when manual trigger annotation is added")
+
+ By("Verifying updated storage data matches new ConfigMap")
+ updatedStorageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, updatedStorageConfigMap)).To(Succeed())
+
+ var newStoredRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(updatedStorageConfigMap.Data["registry.json"]), &newStoredRegistry)).To(Succeed())
+
+ By("Storage should contain updated registry data")
+ verifyServerContent(newStoredRegistry, updatedServers)
+ })
+
+ It("should handle manual sync with no data changes", func() {
+ By("Creating a ConfigMap with registry data")
+ _ = configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", originalServers).
+ Create(configMapHelper)
+
+ By("Creating an MCPRegistry")
+ mcpRegistry := CreateMCPRegistryManualOnly(registryName, testNamespace,
+ "Manual Sync No Changes Test Registry", configMapName)
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for initial sync to complete")
+ statusHelper.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, 30*time.Second)
+
+ // Capture initial sync state
+ initialRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, initialRegistry)).To(Succeed())
+
+ initialSyncTime := initialRegistry.Status.SyncStatus.LastSyncTime
+ initialSyncHash := initialRegistry.Status.SyncStatus.LastSyncHash
+ initialServerCount := initialRegistry.Status.SyncStatus.ServerCount
+
+ By("Triggering manual sync without data changes")
+ names := NewUniqueNames("no-changes-sync")
+ triggerValue := names.GenerateTriggerValue("no-changes-sync")
+ AddManualSyncTrigger(initialRegistry, triggerValue, mcpregistrystatus.SyncTriggerAnnotation)
+ Expect(k8sClient.Update(ctx, initialRegistry)).To(Succeed())
+
+ By("Waiting for manual sync trigger to be processed")
+ Eventually(func() bool {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return false
+ }
+
+ // Check if trigger was processed (should update LastManualSyncTrigger)
+ return registry.Status.LastManualSyncTrigger == triggerValue
+ }, 20*time.Second, 2*time.Second).Should(BeTrue(), "Manual sync trigger should be processed even with no data changes")
+
+ By("Verifying sync data remains unchanged")
+ finalRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, finalRegistry)).To(Succeed())
+
+ // Sync data should remain the same since no changes occurred
+ Expect(finalRegistry.Status.SyncStatus.LastSyncTime).To(Equal(initialSyncTime))
+ Expect(finalRegistry.Status.SyncStatus.LastSyncHash).To(Equal(initialSyncHash))
+ Expect(finalRegistry.Status.SyncStatus.ServerCount).To(Equal(initialServerCount))
+ })
+
+ It("should retry failed manual syncs when source becomes available", func() {
+ By("Creating an MCPRegistry without the source ConfigMap (sync will fail)")
+ mcpRegistry := CreateMCPRegistryManualOnly(registryName, testNamespace,
+ "Manual Retry Test Registry", configMapName) // This ConfigMap doesn't exist yet
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to fail")
+ statusHelper.WaitForPhase(registryName, mcpv1alpha1.MCPRegistryPhaseFailed, 30*time.Second)
+
+ By("Triggering manual sync while source is still missing")
+ names1 := NewUniqueNames("manual-retry-1")
+ triggerValue1 := names1.GenerateTriggerValue("manual-retry-1")
+ // Refresh the registry object first
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, mcpRegistry)).To(Succeed())
+
+ AddManualSyncTrigger(mcpRegistry, triggerValue1, mcpregistrystatus.SyncTriggerAnnotation)
+ Expect(k8sClient.Update(ctx, mcpRegistry)).To(Succeed())
+
+ By("Verifying manual sync also fails")
+ Eventually(func() bool {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return false
+ }
+
+ return registry.Status.Phase == mcpv1alpha1.MCPRegistryPhaseFailed &&
+ registry.Status.LastManualSyncTrigger == triggerValue1
+ }, 20*time.Second, 2*time.Second).Should(BeTrue(), "Manual sync should also fail when source is missing")
+
+ By("Creating the missing ConfigMap")
+ _ = configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", originalServers).
+ Create(configMapHelper)
+
+ By("Triggering manual sync after ConfigMap creation")
+ names2 := NewUniqueNames("manual-retry-2")
+ triggerValue2 := names2.GenerateTriggerValue("manual-retry-2")
+ // Refresh the registry object first
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, mcpRegistry)).To(Succeed())
+
+ AddManualSyncTrigger(mcpRegistry, triggerValue2, mcpregistrystatus.SyncTriggerAnnotation)
+ Expect(k8sClient.Update(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for manual sync to succeed after ConfigMap creation")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 30*time.Second, 2*time.Second).Should(BeElementOf(
+ mcpv1alpha1.MCPRegistryPhaseReady,
+ mcpv1alpha1.MCPRegistryPhasePending,
+ ))
+
+ By("Verifying sync data is now correct")
+ successRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, successRegistry)).To(Succeed())
+
+ Expect(successRegistry.Status.SyncStatus.ServerCount).To(Equal(1))
+ Expect(successRegistry.Status.SyncStatus.LastSyncTime).NotTo(BeNil())
+ Expect(successRegistry.Status.LastManualSyncTrigger).To(Equal(triggerValue2))
+
+ By("Verifying storage ConfigMap was created with correct data")
+ storageConfigMapName := successRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+ })
+
+ It("should fail manual sync when source ConfigMap is deleted after successful sync", func() {
+ By("Creating a ConfigMap with registry data")
+ configMap := configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", originalServers).
+ Create(configMapHelper)
+
+ By("Creating an MCPRegistry")
+ mcpRegistry := CreateMCPRegistryManualOnly(registryName, testNamespace,
+ "Manual ConfigMap Deletion Test Registry", configMapName)
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for initial sync to complete")
+ statusHelper.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, 30*time.Second)
+
+ // Capture successful sync state
+ successRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, successRegistry)).To(Succeed())
+
+ successSyncTime := successRegistry.Status.SyncStatus.LastSyncTime
+ successServerCount := successRegistry.Status.SyncStatus.ServerCount
+ successSyncHash := successRegistry.Status.SyncStatus.LastSyncHash
+
+ Expect(successServerCount).To(Equal(1))
+ Expect(successSyncTime).NotTo(BeNil())
+ Expect(successSyncHash).NotTo(BeEmpty())
+
+ By("Verifying storage ConfigMap exists with correct data")
+ storageConfigMapName := successRegistry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+ verifyServerContent(storedRegistry, originalServers)
+
+ By("Deleting the source ConfigMap")
+ Expect(k8sClient.Delete(ctx, configMap)).To(Succeed())
+
+ By("Triggering manual sync after ConfigMap deletion")
+ names := NewUniqueNames("manual-after-deletion")
+ triggerValue := names.GenerateTriggerValue("manual-after-deletion")
+ // Refresh the registry object first
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, mcpRegistry)).To(Succeed())
+
+ AddManualSyncTrigger(mcpRegistry, triggerValue, mcpregistrystatus.SyncTriggerAnnotation)
+ Expect(k8sClient.Update(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for manual sync to fail due to missing ConfigMap")
+ Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.Phase
+ }, 20*time.Second, 2*time.Second).Should(Equal(mcpv1alpha1.MCPRegistryPhaseFailed))
+
+ By("Verifying manual sync failure preserves previous successful sync data")
+ failedRegistry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, failedRegistry)).To(Succeed())
+
+ // Previous sync data should be preserved
+ Expect(failedRegistry.Status.SyncStatus.LastSyncTime).To(Equal(successSyncTime))
+ Expect(failedRegistry.Status.SyncStatus.LastSyncHash).To(Equal(successSyncHash))
+ Expect(failedRegistry.Status.SyncStatus.ServerCount).To(Equal(successServerCount))
+ Expect(failedRegistry.Status.LastManualSyncTrigger).To(Equal(triggerValue))
+
+ By("Verifying storage ConfigMap still exists with previous data")
+ preservedStorageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, preservedStorageConfigMap)).To(Succeed())
+
+ var preservedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(preservedStorageConfigMap.Data["registry.json"]), &preservedRegistry)).To(Succeed())
+ verifyServerContent(preservedRegistry, originalServers)
+
+ By("Verifying overall registry phase reflects the failure")
+ Expect(failedRegistry.Status.Phase).To(Equal(mcpv1alpha1.MCPRegistryPhaseFailed))
+ })
+
+ It("should verify manual sync triggers work with complex registry content", func() {
+ By("Creating a complex ConfigMap with multiple servers and metadata")
+ complexServers := CreateComplexTestServers()
+
+ _ = configMapHelper.NewConfigMapBuilder(configMapName).
+ WithToolHiveRegistry("registry.json", complexServers).
+ Create(configMapHelper)
+
+ By("Creating an MCPRegistry")
+ mcpRegistry := CreateMCPRegistryManualOnly(registryName, testNamespace,
+ "Manual Complex Content Test Registry", configMapName)
+ Expect(k8sClient.Create(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for initial sync to complete")
+ statusHelper.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{mcpv1alpha1.MCPRegistryPhaseReady, mcpv1alpha1.MCPRegistryPhasePending}, 30*time.Second)
+
+ By("Triggering manual sync")
+ names := NewUniqueNames("complex-manual-sync")
+ triggerValue := names.GenerateTriggerValue("complex-manual-sync")
+ // Refresh the registry object first
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, mcpRegistry)).To(Succeed())
+
+ AddManualSyncTrigger(mcpRegistry, triggerValue, mcpregistrystatus.SyncTriggerAnnotation)
+ Expect(k8sClient.Update(ctx, mcpRegistry)).To(Succeed())
+
+ By("Waiting for sync to complete")
+ Eventually(func() string {
+ registry := &mcpv1alpha1.MCPRegistry{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry); err != nil {
+ return ""
+ }
+ return registry.Status.LastManualSyncTrigger
+ }, 30*time.Second, 2*time.Second).Should(Equal(triggerValue))
+
+ By("Retrieving and verifying storage ConfigMap content")
+ registry := &mcpv1alpha1.MCPRegistry{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: registryName,
+ Namespace: testNamespace,
+ }, registry)).To(Succeed())
+
+ storageConfigMapName := registry.GetStorageName()
+ storageConfigMap := &corev1.ConfigMap{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: storageConfigMapName,
+ Namespace: testNamespace,
+ }, storageConfigMap)).To(Succeed())
+
+ var storedRegistry ToolHiveRegistryData
+ Expect(json.Unmarshal([]byte(storageConfigMap.Data["registry.json"]), &storedRegistry)).To(Succeed())
+
+ By("Verifying exact content match")
+ Expect(registry.Status.SyncStatus.ServerCount).To(Equal(2))
+ verifyServerContent(storedRegistry, complexServers)
+
+ // Verify metadata
+ Expect(storedRegistry.Version).To(Equal("1.0.0"))
+
+ By("Verifying hash consistency")
+ Expect(registry.Status.SyncStatus.LastSyncHash).NotTo(BeEmpty())
+
+ By("Verifying manual sync trigger was processed")
+ Expect(registry.Status.LastManualSyncTrigger).To(Equal(triggerValue))
+ })
+ })
+})
diff --git a/test/e2e/operator/registry_sync_test_utils.go b/test/e2e/operator/registry_sync_test_utils.go
new file mode 100644
index 000000000..28220fbf3
--- /dev/null
+++ b/test/e2e/operator/registry_sync_test_utils.go
@@ -0,0 +1,223 @@
+package operator_test
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+// Common test data builders for registry sync tests
+
+// CreateOriginalTestServers creates the standard original test server data:
+// a single Community-tier stdio server used as the baseline registry
+// content (specs assert a server count of 1 against it).
+func CreateOriginalTestServers() []RegistryServer {
+	baseline := RegistryServer{
+		Name:        "test-server-1",
+		Description: "Test server 1",
+		Tier:        "Community",
+		Status:      "Active",
+		Transport:   "stdio",
+		Tools:       []string{"test_tool_1"},
+		Image:       "docker.io/test/server1:latest",
+		Tags:        []string{"testing", "original"},
+	}
+	return []RegistryServer{baseline}
+}
+
+// CreateUpdatedTestServers creates the standard updated test server data:
+// test-server-1 with changed description/tools/image plus a brand-new
+// test-server-2, so a sync against it must report a server count of 2.
+func CreateUpdatedTestServers() []RegistryServer {
+	return []RegistryServer{
+		{
+			Name:        "test-server-1",
+			Description: "Test server 1 updated",
+			Tier:        "Community",
+			Status:      "Active",
+			Transport:   "stdio",
+			Tools:       []string{"test_tool_1", "test_tool_2"},
+			Image:       "docker.io/test/server1:v1.1",
+			Tags:        []string{"testing", "updated"},
+		},
+		{
+			Name:        "test-server-2",
+			Description: "Test server 2",
+			Tier:        "Official",
+			Status:      "Active",
+			Transport:   "sse",
+			Tools:       []string{"test_tool_3"},
+			Image:       "docker.io/test/server2:latest",
+			Tags:        []string{"testing", "new"},
+		},
+	}
+}
+
+// CreateComplexTestServers creates complex test server data with multiple
+// server types: an Official SSE database server and a Community stdio file
+// server, exercising mixed tiers, transports, and multi-tool entries.
+func CreateComplexTestServers() []RegistryServer {
+	return []RegistryServer{
+		{
+			Name:        "database-server",
+			Description: "PostgreSQL database connector",
+			Tier:        "Official",
+			Status:      "Active",
+			Transport:   "sse",
+			Tools:       []string{"execute_query", "list_tables", "backup_db"},
+			Image:       "docker.io/postgres/mcp-server:v1.2.0",
+			Tags:        []string{"database", "postgresql", "production"},
+		},
+		{
+			Name:        "file-manager",
+			Description: "File system operations",
+			Tier:        "Community",
+			Status:      "Active",
+			Transport:   "stdio",
+			Tools:       []string{"read_file", "write_file", "list_dir"},
+			Image:       "docker.io/mcp/filesystem:latest",
+			Tags:        []string{"filesystem", "files", "utility"},
+		},
+	}
+}
+
+// UpdateConfigMapWithServers replaces the "registry.json" payload of the
+// given ConfigMap with a ToolHive registry document containing the provided
+// servers. Version is bumped to "1.0.1" to distinguish updated documents
+// from the initial data. Returns an error only if JSON marshalling fails;
+// the caller is still responsible for persisting the ConfigMap via the
+// Kubernetes client.
+func UpdateConfigMapWithServers(configMap *corev1.ConfigMap, servers []RegistryServer) error {
+	updatedRegistryData := ToolHiveRegistryData{
+		Version:     "1.0.1",
+		LastUpdated: time.Now().Format(time.RFC3339),
+		Servers:     make(map[string]RegistryServer, len(servers)),
+	}
+	for _, server := range servers {
+		updatedRegistryData.Servers[server.Name] = server
+	}
+	jsonData, err := json.MarshalIndent(updatedRegistryData, "", " ")
+	if err != nil {
+		return err
+	}
+	// Guard against a ConfigMap created without any Data entries:
+	// assigning into a nil map would panic.
+	if configMap.Data == nil {
+		configMap.Data = make(map[string]string, 1)
+	}
+	configMap.Data["registry.json"] = string(jsonData)
+	return nil
+}
+
+// CreateBasicMCPRegistrySpec builds a ConfigMap-backed MCPRegistry spec in
+// ToolHive format, reading the "registry.json" key of the named ConfigMap.
+// syncPolicy may be nil, which leaves the registry in manual-sync-only mode.
+func CreateBasicMCPRegistrySpec(displayName, configMapName string,
+	syncPolicy *mcpv1alpha1.SyncPolicy) mcpv1alpha1.MCPRegistrySpec {
+	return mcpv1alpha1.MCPRegistrySpec{
+		DisplayName: displayName,
+		Source: mcpv1alpha1.MCPRegistrySource{
+			Type:   mcpv1alpha1.RegistrySourceTypeConfigMap,
+			Format: mcpv1alpha1.RegistryFormatToolHive,
+			ConfigMap: &mcpv1alpha1.ConfigMapSource{
+				Name: configMapName,
+				Key:  "registry.json",
+			},
+		},
+		// A nil policy is equivalent to leaving the field unset, so no
+		// conditional assignment is needed.
+		SyncPolicy: syncPolicy,
+	}
+}
+
+// CreateMCPRegistryWithSyncPolicy creates an MCPRegistry with automatic sync policy.
+// interval is the requested sync period as a duration string (e.g. "1m") —
+// NOTE(review): exact accepted format is defined by the SyncPolicy API type.
+func CreateMCPRegistryWithSyncPolicy(name, namespace, displayName, configMapName, interval string) *mcpv1alpha1.MCPRegistry {
+	return &mcpv1alpha1.MCPRegistry{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: CreateBasicMCPRegistrySpec(displayName, configMapName, &mcpv1alpha1.SyncPolicy{
+			Interval: interval,
+		}),
+	}
+}
+
+// CreateMCPRegistryManualOnly creates an MCPRegistry without automatic sync policy (manual only).
+// With a nil SyncPolicy the specs in this package observe an initial sync on
+// creation, and subsequent syncs only via the manual sync-trigger annotation.
+func CreateMCPRegistryManualOnly(name, namespace, displayName, configMapName string) *mcpv1alpha1.MCPRegistry {
+	return &mcpv1alpha1.MCPRegistry{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: CreateBasicMCPRegistrySpec(displayName, configMapName, nil),
+	}
+}
+
+// CreateMCPRegistryWithGitSource creates an MCPRegistry with Git source and automatic sync policy.
+// repository, branch, and path select the registry document within the Git
+// repo; interval is the automatic sync period as a duration string.
+func CreateMCPRegistryWithGitSource(
+	name, namespace, displayName, repository,
+	branch, path, interval string) *mcpv1alpha1.MCPRegistry {
+	return &mcpv1alpha1.MCPRegistry{
+		ObjectMeta: metav1.ObjectMeta{
+			Name:      name,
+			Namespace: namespace,
+		},
+		Spec: mcpv1alpha1.MCPRegistrySpec{
+			DisplayName: displayName,
+			Source: mcpv1alpha1.MCPRegistrySource{
+				Type:   mcpv1alpha1.RegistrySourceTypeGit,
+				Format: mcpv1alpha1.RegistryFormatToolHive,
+				Git: &mcpv1alpha1.GitSource{
+					Repository: repository,
+					Branch:     branch,
+					Path:       path,
+				},
+			},
+			SyncPolicy: &mcpv1alpha1.SyncPolicy{
+				Interval: interval,
+			},
+		},
+	}
+}
+
+// AddManualSyncTrigger adds a manual sync trigger annotation to an MCPRegistry.
+// It only mutates the in-memory object; the caller must still Update() the
+// resource for the controller to observe the trigger.
+func AddManualSyncTrigger(mcpRegistry *mcpv1alpha1.MCPRegistry, triggerValue string, syncTriggerAnnotation string) {
+	// Annotations may be nil on freshly fetched objects.
+	if mcpRegistry.Annotations == nil {
+		mcpRegistry.Annotations = make(map[string]string)
+	}
+	mcpRegistry.Annotations[syncTriggerAnnotation] = triggerValue
+}
+
+// UniqueNames is a struct that contains unique names for test resources
+type UniqueNames struct {
+	RegistryName  string // MCPRegistry name, "<prefix>-registry-<timestamp>"
+	ConfigMapName string // source ConfigMap name, "<prefix>-data-<timestamp>"
+	Timestamp     int64  // timestamp shared by both names and trigger values
+}
+
+// NewUniqueNames creates a new set of unique names for test resources.
+// A nanosecond-resolution timestamp is used so that multiple calls within
+// the same test (or specs running in the same wall-clock second) still
+// produce distinct names; second-resolution Unix() was prone to collisions.
+func NewUniqueNames(prefix string) *UniqueNames {
+	timestamp := time.Now().UnixNano()
+	return &UniqueNames{
+		RegistryName:  fmt.Sprintf("%s-registry-%d", prefix, timestamp),
+		ConfigMapName: fmt.Sprintf("%s-data-%d", prefix, timestamp),
+		Timestamp:     timestamp,
+	}
+}
+
+// GenerateTriggerValue generates a unique trigger value for manual sync by
+// combining the operation name with this instance's timestamp. Values from
+// the same UniqueNames differ only when the operation name differs.
+func (u *UniqueNames) GenerateTriggerValue(operation string) string {
+	return fmt.Sprintf("%s-%d", operation, u.Timestamp)
+}
+
+// verifyServerContent is a helper function to verify that stored registry server content
+// matches the expected servers array. It performs comprehensive field-by-field comparison.
+// The length check combined with a HaveKey check per expected name proves the
+// two server sets are identical, since stored servers are keyed by name.
+func verifyServerContent(storedRegistry ToolHiveRegistryData, expectedServers []RegistryServer) {
+	gomega.Expect(storedRegistry.Servers).To(gomega.HaveLen(len(expectedServers)))
+
+	for _, expectedServer := range expectedServers {
+		serverName := expectedServer.Name
+		gomega.Expect(storedRegistry.Servers).To(gomega.HaveKey(serverName))
+
+		actualServer := storedRegistry.Servers[serverName]
+		gomega.Expect(actualServer.Name).To(gomega.Equal(expectedServer.Name))
+		gomega.Expect(actualServer.Description).To(gomega.Equal(expectedServer.Description))
+		gomega.Expect(actualServer.Tier).To(gomega.Equal(expectedServer.Tier))
+		gomega.Expect(actualServer.Status).To(gomega.Equal(expectedServer.Status))
+		gomega.Expect(actualServer.Transport).To(gomega.Equal(expectedServer.Transport))
+		gomega.Expect(actualServer.Image).To(gomega.Equal(expectedServer.Image))
+		gomega.Expect(actualServer.Tools).To(gomega.Equal(expectedServer.Tools))
+		gomega.Expect(actualServer.Tags).To(gomega.Equal(expectedServer.Tags))
+	}
+}
diff --git a/test/e2e/operator/status_helpers.go b/test/e2e/operator/status_helpers.go
new file mode 100644
index 000000000..cce389cc2
--- /dev/null
+++ b/test/e2e/operator/status_helpers.go
@@ -0,0 +1,139 @@
+package operator_test
+
+import (
+ "context"
+ "fmt"
+ "time"
+
+ "github.com/onsi/ginkgo/v2"
+ "github.com/onsi/gomega"
+ "k8s.io/apimachinery/pkg/api/errors"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+// StatusTestHelper provides utilities for MCPRegistry status testing and validation
+type StatusTestHelper struct {
+	registryHelper *MCPRegistryTestHelper // accessor for registry objects, status, and conditions
+}
+
+// NewStatusTestHelper creates a new test helper for status operations,
+// scoped to the given namespace and backed by the provided client.
+func NewStatusTestHelper(ctx context.Context, k8sClient client.Client, namespace string) *StatusTestHelper {
+	return &StatusTestHelper{
+		registryHelper: NewMCPRegistryTestHelper(ctx, k8sClient, namespace),
+	}
+}
+
+// WaitForPhase waits for an MCPRegistry to reach the specified phase.
+// Thin wrapper over WaitForPhaseAny with a single-element phase list.
+func (h *StatusTestHelper) WaitForPhase(registryName string, expectedPhase mcpv1alpha1.MCPRegistryPhase, timeout time.Duration) {
+	h.WaitForPhaseAny(registryName, []mcpv1alpha1.MCPRegistryPhase{expectedPhase}, timeout)
+}
+
+// WaitForPhaseAny waits for an MCPRegistry to reach any of the specified phases.
+// A registry that does not exist is reported as Terminating, so callers can
+// also use this helper to wait for deletion; any other Get error yields an
+// empty phase and simply keeps the poll running until the timeout.
+func (h *StatusTestHelper) WaitForPhaseAny(registryName string,
+	expectedPhases []mcpv1alpha1.MCPRegistryPhase, timeout time.Duration) {
+	gomega.Eventually(func() mcpv1alpha1.MCPRegistryPhase {
+		// Logged on every poll iteration to make slow transitions visible.
+		ginkgo.By(fmt.Sprintf("waiting for registry %s to reach one of phases %v", registryName, expectedPhases))
+		registry, err := h.registryHelper.GetRegistry(registryName)
+		if err != nil {
+			if errors.IsNotFound(err) {
+				ginkgo.By(fmt.Sprintf("registry %s not found", registryName))
+				return mcpv1alpha1.MCPRegistryPhaseTerminating
+			}
+			return ""
+		}
+		return registry.Status.Phase
+	}, timeout, time.Second).Should(gomega.BeElementOf(expectedPhases),
+		"MCPRegistry %s should reach one of phases %v", registryName, expectedPhases)
+}
+
+// WaitForCondition waits for a specific condition to have the expected status.
+// Get errors (including a missing condition, if GetRegistryCondition reports
+// that as an error) map to ConditionUnknown, keeping the poll running.
+func (h *StatusTestHelper) WaitForCondition(registryName, conditionType string,
+	expectedStatus metav1.ConditionStatus, timeout time.Duration) {
+	gomega.Eventually(func() metav1.ConditionStatus {
+		condition, err := h.registryHelper.GetRegistryCondition(registryName, conditionType)
+		if err != nil {
+			return metav1.ConditionUnknown
+		}
+		return condition.Status
+	}, timeout, time.Second).Should(gomega.Equal(expectedStatus),
+		"MCPRegistry %s should have condition %s with status %s", registryName, conditionType, expectedStatus)
+}
+
+// WaitForConditionReason waits for a condition to have a specific reason.
+// Get errors map to an empty reason string, which keeps the poll running
+// until the condition appears with the expected reason or the timeout hits.
+func (h *StatusTestHelper) WaitForConditionReason(registryName, conditionType, expectedReason string, timeout time.Duration) {
+	gomega.Eventually(func() string {
+		condition, err := h.registryHelper.GetRegistryCondition(registryName, conditionType)
+		if err != nil {
+			return ""
+		}
+		return condition.Reason
+	}, timeout, time.Second).Should(gomega.Equal(expectedReason),
+		"MCPRegistry %s condition %s should have reason %s", registryName, conditionType, expectedReason)
+}
+
+// WaitForServerCount waits for the registry to report a specific server count.
+// -1 is returned while the registry is unreadable or its SyncStatus has not
+// been populated yet, so Eventually keeps polling instead of panicking on a
+// nil SyncStatus pointer during early reconciliation.
+func (h *StatusTestHelper) WaitForServerCount(registryName string, expectedCount int, timeout time.Duration) {
+	gomega.Eventually(func() int {
+		status, err := h.registryHelper.GetRegistryStatus(registryName)
+		if err != nil || status.SyncStatus == nil {
+			return -1
+		}
+		return status.SyncStatus.ServerCount
+	}, timeout, time.Second).Should(gomega.Equal(expectedCount),
+		"MCPRegistry %s should have server count %d", registryName, expectedCount)
+}
+
+// WaitForLastSyncTime waits for the registry to update its last sync time
+// to a value strictly after afterTime. A nil SyncStatus (not yet populated
+// by the controller) is treated like a missing sync time rather than being
+// dereferenced, so early polls cannot panic.
+func (h *StatusTestHelper) WaitForLastSyncTime(registryName string, afterTime time.Time, timeout time.Duration) {
+	gomega.Eventually(func() bool {
+		status, err := h.registryHelper.GetRegistryStatus(registryName)
+		if err != nil || status.SyncStatus == nil || status.SyncStatus.LastSyncTime == nil {
+			return false
+		}
+		return status.SyncStatus.LastSyncTime.After(afterTime)
+	}, timeout, time.Second).Should(gomega.BeTrue(),
+		"MCPRegistry %s should update last sync time after %s", registryName, afterTime)
+}
+
+// WaitForLastSyncHash waits for the registry to have a non-empty last sync
+// hash. A nil SyncStatus is reported as an empty hash (keeping the poll
+// alive) instead of being dereferenced, which would panic before the first
+// sync completes.
+func (h *StatusTestHelper) WaitForLastSyncHash(registryName string, timeout time.Duration) {
+	gomega.Eventually(func() string {
+		status, err := h.registryHelper.GetRegistryStatus(registryName)
+		if err != nil || status.SyncStatus == nil {
+			return ""
+		}
+		return status.SyncStatus.LastSyncHash
+	}, timeout, time.Second).ShouldNot(gomega.BeEmpty(),
+		"MCPRegistry %s should have a last sync hash", registryName)
+}
+
+// WaitForSyncCompletion waits for a sync operation to complete (either success or failure).
+// Ready and Failed are both treated as terminal phases for a sync attempt;
+// any other phase (or a Get error) keeps the poll running.
+func (h *StatusTestHelper) WaitForSyncCompletion(registryName string, timeout time.Duration) {
+	gomega.Eventually(func() bool {
+		registry, err := h.registryHelper.GetRegistry(registryName)
+		if err != nil {
+			return false
+		}
+
+		// Check if sync is no longer in progress
+		phase := registry.Status.Phase
+		return phase == mcpv1alpha1.MCPRegistryPhaseReady ||
+			phase == mcpv1alpha1.MCPRegistryPhaseFailed
+	}, timeout, time.Second).Should(gomega.BeTrue(),
+		"MCPRegistry %s sync operation should complete", registryName)
+}
+
+// WaitForManualSyncProcessed waits for a manual sync annotation to be processed,
+// i.e. until the registry status echoes the trigger value that was written
+// to the sync-trigger annotation. Note this only proves the trigger was
+// observed, not that the resulting sync succeeded.
+func (h *StatusTestHelper) WaitForManualSyncProcessed(registryName, triggerValue string, timeout time.Duration) {
+	gomega.Eventually(func() string {
+		status, err := h.registryHelper.GetRegistryStatus(registryName)
+		if err != nil {
+			return ""
+		}
+		return status.LastManualSyncTrigger
+	}, timeout, time.Second).Should(gomega.Equal(triggerValue),
+		"MCPRegistry %s should process manual sync trigger %s", registryName, triggerValue)
+}
diff --git a/test/e2e/operator/suite_test.go b/test/e2e/operator/suite_test.go
new file mode 100644
index 000000000..563a30d4c
--- /dev/null
+++ b/test/e2e/operator/suite_test.go
@@ -0,0 +1,197 @@
+package operator_test
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"testing"
+	"time"
+
+	. "github.com/onsi/ginkgo/v2"
+	. "github.com/onsi/gomega"
+	corev1 "k8s.io/api/core/v1"
+	apierrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/client-go/kubernetes/scheme"
+	"k8s.io/client-go/rest"
+	ctrl "sigs.k8s.io/controller-runtime"
+	"sigs.k8s.io/controller-runtime/pkg/client"
+	"sigs.k8s.io/controller-runtime/pkg/envtest"
+	logf "sigs.k8s.io/controller-runtime/pkg/log"
+	"sigs.k8s.io/controller-runtime/pkg/log/zap"
+	metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+
+	mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+	"github.com/stacklok/toolhive/cmd/thv-operator/controllers"
+)
+
+var (
+	// cfg is the REST config returned by the envtest environment.
+	cfg       *rest.Config
+	// k8sClient is the controller-runtime client shared by all specs.
+	k8sClient client.Client
+	// testEnv is the envtest environment started in BeforeSuite.
+	testEnv   *envtest.Environment
+	// testMgr is the controller manager running the reconcilers under test.
+	testMgr   ctrl.Manager
+	// ctx and cancel bound the lifetime of the manager and all suite requests.
+	ctx       context.Context
+	cancel    context.CancelFunc
+)
+
+// TestOperatorE2E is the single `go test` entry point that hands control to
+// the Ginkgo runner for the whole operator E2E suite.
+func TestOperatorE2E(t *testing.T) { //nolint:paralleltest // E2E tests should not run in parallel
+	RegisterFailHandler(Fail)
+	RunSpecs(t, "Operator E2E Suite")
+}
+
+// BeforeSuite boots the test environment: it starts envtest (or attaches to an
+// existing cluster), installs the CRDs, builds the shared client, and — for
+// envtest only — runs the MCPRegistry controller under a background manager.
+var _ = BeforeSuite(func() {
+	logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true)))
+
+	// Root context for the manager and all suite requests; cancelled in AfterSuite.
+	ctx, cancel = context.WithCancel(context.Background())
+
+	// Enable experimental features for MCPRegistry controller
+	By("enabling experimental features")
+	err := os.Setenv("ENABLE_EXPERIMENTAL_FEATURES", "true")
+	Expect(err).NotTo(HaveOccurred())
+
+	By("bootstrapping test environment")
+
+	// Check if we should use an existing cluster (for CI/CD)
+	useExistingCluster := os.Getenv("USE_EXISTING_CLUSTER") == "true"
+
+	// Get kubebuilder assets path (set by setup-envtest); empty means envtest
+	// falls back to its default binary lookup and may fail.
+	kubebuilderAssets := os.Getenv("KUBEBUILDER_ASSETS")
+
+	if !useExistingCluster {
+		By(fmt.Sprintf("using kubebuilder assets from: %s", kubebuilderAssets))
+		if kubebuilderAssets == "" {
+			By("WARNING: no kubebuilder assets found, test may fail")
+		}
+	}
+
+	testEnv = &envtest.Environment{
+		UseExistingCluster: &useExistingCluster,
+		CRDDirectoryPaths: []string{
+			filepath.Join("..", "..", "..", "deploy", "charts", "operator-crds", "crds"),
+		},
+		ErrorIfCRDPathMissing: true,
+		BinaryAssetsDirectory: kubebuilderAssets,
+	}
+
+	cfg, err = testEnv.Start()
+	Expect(err).NotTo(HaveOccurred())
+	Expect(cfg).NotTo(BeNil())
+
+	// Add MCPRegistry scheme
+	err = mcpv1alpha1.AddToScheme(scheme.Scheme)
+	Expect(err).NotTo(HaveOccurred())
+
+	// Create controller-runtime client
+	k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+	Expect(err).NotTo(HaveOccurred())
+	Expect(k8sClient).NotTo(BeNil())
+
+	// Verify MCPRegistry CRD is available: once the CRD is served, a Get for a
+	// nonexistent object returns a "not found" error rather than a
+	// "no matches for kind" error.
+	By("verifying MCPRegistry CRD is available")
+	Eventually(func() error {
+		mcpRegistry := &mcpv1alpha1.MCPRegistry{}
+		return k8sClient.Get(ctx, client.ObjectKey{
+			Namespace: "default",
+			Name:      "test-availability-check",
+		}, mcpRegistry)
+	}, time.Minute, time.Second).Should(MatchError(ContainSubstring("not found")))
+
+	// Set up the manager for controllers (only for envtest, not existing cluster)
+	if !useExistingCluster {
+		By("setting up controller manager for envtest")
+		testMgr, err = ctrl.NewManager(cfg, ctrl.Options{
+			Scheme: scheme.Scheme,
+			Metrics: metricsserver.Options{
+				BindAddress: "0", // Disable metrics server for tests
+			},
+			HealthProbeBindAddress: "0", // Disable health probe for tests
+		})
+		Expect(err).NotTo(HaveOccurred())
+
+		// Set up MCPRegistry controller
+		By("setting up MCPRegistry controller")
+		err = controllers.NewMCPRegistryReconciler(testMgr.GetClient(), testMgr.GetScheme()).SetupWithManager(testMgr)
+		Expect(err).NotTo(HaveOccurred())
+
+		// Start the manager in the background. Do NOT assign to the outer `err`
+		// here: the goroutine's write would race with the suite goroutine.
+		By("starting controller manager")
+		go func() {
+			defer GinkgoRecover()
+			Expect(testMgr.Start(ctx)).To(Succeed(), "failed to run manager")
+		}()
+
+		// Wait for the manager to be ready
+		By("waiting for controller manager to be ready")
+		Eventually(func() bool {
+			return testMgr.GetCache().WaitForCacheSync(ctx)
+		}, time.Minute, time.Second).Should(BeTrue())
+	}
+})
+
+// AfterSuite stops the background manager (via context cancellation) and then
+// tears down the envtest environment.
+var _ = AfterSuite(func() {
+	// Cancel first so the manager and in-flight reconciles stop before teardown.
+	cancel()
+	By("tearing down the test environment")
+	err := testEnv.Stop()
+	Expect(err).NotTo(HaveOccurred())
+})
+
+// TestNamespace represents a test namespace with automatic cleanup
+type TestNamespace struct {
+	// Name is the generated, unique namespace name.
+	Name      string
+	// Namespace is the in-memory namespace object submitted by Create.
+	Namespace *corev1.Namespace
+	// Client is the controller-runtime client used for all operations.
+	Client    client.Client
+	// ctx bounds all requests made through this helper.
+	ctx       context.Context
+}
+
+// NewTestNamespace creates a new test namespace helper with a unique name.
+//
+// The name is namePrefix plus a nanosecond timestamp: the previous
+// second-granularity suffix let two namespaces created with the same prefix in
+// the same second collide. The namespace is only created in the cluster when
+// Create is called.
+func NewTestNamespace(namePrefix string) *TestNamespace {
+	name := fmt.Sprintf("%s-%d", namePrefix, time.Now().UnixNano())
+
+	ns := &corev1.Namespace{
+		ObjectMeta: metav1.ObjectMeta{
+			Name: name,
+			Labels: map[string]string{
+				"test.toolhive.io/suite":  "operator-e2e",
+				"test.toolhive.io/prefix": namePrefix,
+			},
+		},
+	}
+
+	return &TestNamespace{
+		Name:      name,
+		Namespace: ns,
+		Client:    k8sClient,
+		ctx:       ctx,
+	}
+}
+
+// Create submits the namespace object to the cluster, returning any API error.
+func (tn *TestNamespace) Create() error {
+	return tn.Client.Create(tn.ctx, tn.Namespace)
+}
+
+// Delete requests deletion of the namespace (and thus everything in it).
+// Deletion is asynchronous; pair with WaitForDeletion to block until done.
+func (tn *TestNamespace) Delete() error {
+	return tn.Client.Delete(tn.ctx, tn.Namespace)
+}
+
+// WaitForDeletion waits for the namespace to be fully deleted.
+//
+// Only a NotFound error counts as "deleted"; transient API errors (timeouts,
+// throttling) keep the poll going instead of being misread as success, which
+// a bare `err != nil` check would allow.
+func (tn *TestNamespace) WaitForDeletion(timeout time.Duration) {
+	Eventually(func() bool {
+		ns := &corev1.Namespace{}
+		err := tn.Client.Get(tn.ctx, client.ObjectKey{Name: tn.Name}, ns)
+		return apierrors.IsNotFound(err)
+	}, timeout, time.Second).Should(BeTrue(), "namespace should be deleted")
+}
+
+// GetClient returns the client used by this helper. Note it is the suite-wide
+// client, not scoped to the namespace — callers must pass the namespace name.
+func (tn *TestNamespace) GetClient() client.Client {
+	return tn.Client
+}
+
+// GetContext returns the context bounding this helper's operations.
+func (tn *TestNamespace) GetContext() context.Context {
+	return tn.ctx
+}
diff --git a/test/e2e/operator/timing_helpers.go b/test/e2e/operator/timing_helpers.go
new file mode 100644
index 000000000..1a0bf9a7b
--- /dev/null
+++ b/test/e2e/operator/timing_helpers.go
@@ -0,0 +1,149 @@
+package operator_test
+
+import (
+ "context"
+ "time"
+
+ "github.com/onsi/gomega"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+)
+
+// TimingTestHelper provides utilities for timing and synchronization in async operations
+type TimingTestHelper struct {
+	// Client is a controller-runtime client; none of the methods below use it
+	// directly — kept for helpers that need direct API access.
+	Client  client.Client
+	// Context bounds operations issued through this helper.
+	Context context.Context
+}
+
+// NewTimingTestHelper creates a new test helper for timing operations, bound
+// to the given context and client.
+func NewTimingTestHelper(ctx context.Context, k8sClient client.Client) *TimingTestHelper {
+	helper := &TimingTestHelper{}
+	helper.Client = k8sClient
+	helper.Context = ctx
+	return helper
+}
+
+// Common timeout values for different types of operations. Pick the smallest
+// tier that plausibly covers the operation to keep failures fast.
+const (
+	// QuickTimeout for operations that should complete quickly (e.g., resource creation)
+	QuickTimeout = 10 * time.Second
+
+	// MediumTimeout for operations that may take some time (e.g., controller reconciliation)
+	MediumTimeout = 30 * time.Second
+
+	// LongTimeout for operations that may take a while (e.g., sync operations)
+	LongTimeout = 2 * time.Minute
+
+	// ExtraLongTimeout for operations that may take very long (e.g., complex e2e scenarios)
+	ExtraLongTimeout = 5 * time.Minute
+
+	// DefaultPollingInterval for Eventually/Consistently checks
+	DefaultPollingInterval = 1 * time.Second
+
+	// FastPollingInterval for operations that need frequent checks
+	FastPollingInterval = 200 * time.Millisecond
+
+	// SlowPollingInterval for operations that don't need frequent checks
+	SlowPollingInterval = 5 * time.Second
+)
+
+// EventuallyWithTimeout runs an Eventually check with a caller-chosen timeout
+// and polling interval.
+func (*TimingTestHelper) EventuallyWithTimeout(assertion func() interface{},
+	timeout, polling time.Duration) gomega.AsyncAssertion {
+	pending := gomega.Eventually(assertion, timeout, polling)
+	return pending
+}
+
+// ConsistentlyWithTimeout runs a Consistently check with a caller-chosen
+// duration and polling interval.
+func (*TimingTestHelper) ConsistentlyWithTimeout(assertion func() interface{},
+	duration, polling time.Duration) gomega.AsyncAssertion {
+	pending := gomega.Consistently(assertion, duration, polling)
+	return pending
+}
+
+// WaitForResourceCreation waits for a resource to be created with quick timeout
+func (*TimingTestHelper) WaitForResourceCreation(assertion func() interface{}) gomega.AsyncAssertion {
+ return gomega.Eventually(assertion, QuickTimeout, FastPollingInterval)
+}
+
+// WaitForControllerReconciliation waits for controller to reconcile changes
+func (*TimingTestHelper) WaitForControllerReconciliation(assertion func() interface{}) gomega.AsyncAssertion {
+ return gomega.Eventually(assertion, MediumTimeout, DefaultPollingInterval)
+}
+
+// WaitForSyncOperation waits for a sync operation to complete
+func (*TimingTestHelper) WaitForSyncOperation(assertion func() interface{}) gomega.AsyncAssertion {
+ return gomega.Eventually(assertion, LongTimeout, DefaultPollingInterval)
+}
+
+// WaitForComplexOperation waits for complex multi-step operations
+func (*TimingTestHelper) WaitForComplexOperation(assertion func() interface{}) gomega.AsyncAssertion {
+ return gomega.Eventually(assertion, ExtraLongTimeout, SlowPollingInterval)
+}
+
+// EnsureStableState asserts a condition holds continuously for the given
+// duration, checked at the default polling interval.
+func (*TimingTestHelper) EnsureStableState(assertion func() interface{}, duration time.Duration) gomega.AsyncAssertion {
+	pending := gomega.Consistently(assertion, duration, DefaultPollingInterval)
+	return pending
+}
+
+// EnsureQuickStability ensures a condition remains stable for a short period
+func (h *TimingTestHelper) EnsureQuickStability(assertion func() interface{}) gomega.AsyncAssertion {
+ return h.EnsureStableState(assertion, 5*time.Second)
+}
+
+// TimeoutConfig represents timeout configuration for different scenarios
+type TimeoutConfig struct {
+	// Timeout is the total time an Eventually/Consistently check may run.
+	Timeout         time.Duration
+	// PollingInterval is the delay between successive condition checks.
+	PollingInterval time.Duration
+	// Description is a human-readable label for the operation class.
+	Description     string
+}
+
+// GetTimeoutForOperation returns the timeout configuration matching the given
+// operation type. Unrecognized types fall back to a medium-timeout default.
+func (*TimingTestHelper) GetTimeoutForOperation(operationType string) TimeoutConfig {
+	known := map[string]TimeoutConfig{
+		"create":        {Timeout: QuickTimeout, PollingInterval: FastPollingInterval, Description: "Resource creation"},
+		"reconcile":     {Timeout: MediumTimeout, PollingInterval: DefaultPollingInterval, Description: "Controller reconciliation"},
+		"sync":          {Timeout: LongTimeout, PollingInterval: DefaultPollingInterval, Description: "Sync operation"},
+		"complex":       {Timeout: ExtraLongTimeout, PollingInterval: SlowPollingInterval, Description: "Complex operation"},
+		"delete":        {Timeout: MediumTimeout, PollingInterval: DefaultPollingInterval, Description: "Resource deletion"},
+		"status-update": {Timeout: MediumTimeout, PollingInterval: FastPollingInterval, Description: "Status update"},
+	}
+	if tc, ok := known[operationType]; ok {
+		return tc
+	}
+	return TimeoutConfig{
+		Timeout:         MediumTimeout,
+		PollingInterval: DefaultPollingInterval,
+		Description:     "Default operation",
+	}
+}
+
+// WaitWithCustomTimeout runs an Eventually check using the timeout and polling
+// interval carried by the supplied configuration.
+func (*TimingTestHelper) WaitWithCustomTimeout(assertion func() interface{}, config TimeoutConfig) gomega.AsyncAssertion {
+	pending := gomega.Eventually(assertion, config.Timeout, config.PollingInterval)
+	return pending
+}