diff --git a/cmd/thv-operator/Taskfile.yml b/cmd/thv-operator/Taskfile.yml
index a2c6580ed..b5356b06b 100644
--- a/cmd/thv-operator/Taskfile.yml
+++ b/cmd/thv-operator/Taskfile.yml
@@ -220,7 +220,7 @@ tasks:
- chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/setup
- chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/test-scenarios
- chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/cleanup
-
+
operator-run:
desc: Run the operator controller locally
cmds:
diff --git a/cmd/thv-operator/api/v1alpha1/mcpgroup_types.go b/cmd/thv-operator/api/v1alpha1/mcpgroup_types.go
new file mode 100644
index 000000000..ab90ecbde
--- /dev/null
+++ b/cmd/thv-operator/api/v1alpha1/mcpgroup_types.go
@@ -0,0 +1,89 @@
+package v1alpha1
+
+import (
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+)
+
+// MCPGroupSpec defines the desired state of MCPGroup
+type MCPGroupSpec struct {
+ // Description provides human-readable context
+ // +optional
+ Description string `json:"description,omitempty"`
+}
+
+// MCPGroupStatus defines observed state
+type MCPGroupStatus struct {
+ // Phase indicates current state
+ // +optional
+ // +kubebuilder:default=Pending
+ Phase MCPGroupPhase `json:"phase,omitempty"`
+
+ // Servers lists server names in this group
+ // +optional
+ Servers []string `json:"servers"`
+
+ // ServerCount is the number of servers
+ // +optional
+ ServerCount int `json:"serverCount"`
+
+ // Conditions represent observations
+ // +optional
+ Conditions []metav1.Condition `json:"conditions,omitempty"`
+}
+
+// MCPGroupPhase represents the lifecycle phase of an MCPGroup
+// +kubebuilder:validation:Enum=Ready;Pending;Failed
+type MCPGroupPhase string
+
+const (
+ // MCPGroupPhaseReady indicates the MCPGroup is ready
+ MCPGroupPhaseReady MCPGroupPhase = "Ready"
+
+ // MCPGroupPhasePending indicates the MCPGroup is pending
+ MCPGroupPhasePending MCPGroupPhase = "Pending"
+
+ // MCPGroupPhaseFailed indicates the MCPGroup has failed
+ MCPGroupPhaseFailed MCPGroupPhase = "Failed"
+)
+
+// Condition types for MCPGroup
+const (
+ ConditionTypeMCPServersChecked = "MCPServersChecked"
+)
+
+// Condition reasons for MCPGroup
+const (
+ ConditionReasonListMCPServersFailed = "ListMCPServersCheckFailed"
+ ConditionReasonListMCPServersSucceeded = "ListMCPServersCheckSucceeded"
+)
+
+//+kubebuilder:object:root=true
+//+kubebuilder:subresource:status
+//+kubebuilder:resource:shortName=mcpg;mcpgroup
+//nolint:lll
+//+kubebuilder:printerColumn:name="Servers",type="integer",JSONPath=".status.serverCount",description="The number of MCPServers in this group"
+//+kubebuilder:printerColumn:name="Phase",type="string",JSONPath=".status.phase",description="The phase of the MCPGroup"
+//+kubebuilder:printerColumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the MCPGroup"
+//+kubebuilder:printcolumn:name="Ready",type="string",JSONPath=".status.conditions[?(@.type=='MCPServersChecked')].status"
+
+// MCPGroup is the Schema for the mcpgroups API
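+//
+// A minimal example manifest (the name, namespace, and description are
+// illustrative placeholders; status is populated by the controller):
+//
+//  apiVersion: toolhive.stacklok.dev/v1alpha1
+//  kind: MCPGroup
+//  metadata:
+//    name: example-group
+//    namespace: default
+//  spec:
+//    description: "MCP servers for one team"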
+type MCPGroup struct {
+ metav1.TypeMeta `json:",inline"` //nolint:revive
+ metav1.ObjectMeta `json:"metadata,omitempty"`
+
+ Spec MCPGroupSpec `json:"spec,omitempty"`
+ Status MCPGroupStatus `json:"status,omitempty"`
+}
+
+//+kubebuilder:object:root=true
+
+// MCPGroupList contains a list of MCPGroup
+type MCPGroupList struct {
+ metav1.TypeMeta `json:",inline"` //nolint:revive
+ metav1.ListMeta `json:"metadata,omitempty"`
+ Items []MCPGroup `json:"items"`
+}
+
+func init() {
+ SchemeBuilder.Register(&MCPGroup{}, &MCPGroupList{})
+}
diff --git a/cmd/thv-operator/api/v1alpha1/mcpserver_types.go b/cmd/thv-operator/api/v1alpha1/mcpserver_types.go
index 9668acf92..976e1ab48 100644
--- a/cmd/thv-operator/api/v1alpha1/mcpserver_types.go
+++ b/cmd/thv-operator/api/v1alpha1/mcpserver_types.go
@@ -9,6 +9,9 @@ import (
const (
// ConditionImageValidated indicates whether this image is fine to be used
ConditionImageValidated = "ImageValidated"
+
+ // ConditionGroupRefValidated indicates whether the GroupRef is valid
+ ConditionGroupRefValidated = "GroupRefValidated"
)
const (
@@ -22,6 +25,17 @@ const (
ConditionReasonImageValidationSkipped = "ImageValidationSkipped"
)
+const (
+ // ConditionReasonGroupRefValidated indicates the GroupRef is valid
+ ConditionReasonGroupRefValidated = "GroupRefIsValid"
+
+ // ConditionReasonGroupRefNotFound indicates the GroupRef is invalid
+ ConditionReasonGroupRefNotFound = "GroupRefNotFound"
+
+ // ConditionReasonGroupRefNotReady indicates the referenced MCPGroup is not in the Ready state
+ ConditionReasonGroupRefNotReady = "GroupRefNotReady"
+)
+
// MCPServerSpec defines the desired state of MCPServer
type MCPServerSpec struct {
// Image is the container image for the MCP server
@@ -131,6 +145,11 @@ type MCPServerSpec struct {
// +kubebuilder:default=false
// +optional
TrustProxyHeaders bool `json:"trustProxyHeaders,omitempty"`
+
+ // GroupRef is the name of the MCPGroup this server belongs to
+ // Must reference an existing MCPGroup in the same namespace
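+ // For example, `groupRef: example-group` places this server in the
+ // MCPGroup named "example-group" in the same namespace (illustrative name).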
+ // +optional
+ GroupRef string `json:"groupRef,omitempty"`
}
// ResourceOverrides defines overrides for annotations and labels on created resources
diff --git a/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go b/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go
index 5a1368a58..1150b1aee 100644
--- a/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go
+++ b/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go
@@ -329,6 +329,107 @@ func (in *MCPExternalAuthConfigStatus) DeepCopy() *MCPExternalAuthConfigStatus {
return out
}
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MCPGroup) DeepCopyInto(out *MCPGroup) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ObjectMeta.DeepCopyInto(&out.ObjectMeta)
+ out.Spec = in.Spec
+ in.Status.DeepCopyInto(&out.Status)
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroup.
+func (in *MCPGroup) DeepCopy() *MCPGroup {
+ if in == nil {
+ return nil
+ }
+ out := new(MCPGroup)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MCPGroup) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MCPGroupList) DeepCopyInto(out *MCPGroupList) {
+ *out = *in
+ out.TypeMeta = in.TypeMeta
+ in.ListMeta.DeepCopyInto(&out.ListMeta)
+ if in.Items != nil {
+ in, out := &in.Items, &out.Items
+ *out = make([]MCPGroup, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroupList.
+func (in *MCPGroupList) DeepCopy() *MCPGroupList {
+ if in == nil {
+ return nil
+ }
+ out := new(MCPGroupList)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
+func (in *MCPGroupList) DeepCopyObject() runtime.Object {
+ if c := in.DeepCopy(); c != nil {
+ return c
+ }
+ return nil
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MCPGroupSpec) DeepCopyInto(out *MCPGroupSpec) {
+ *out = *in
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroupSpec.
+func (in *MCPGroupSpec) DeepCopy() *MCPGroupSpec {
+ if in == nil {
+ return nil
+ }
+ out := new(MCPGroupSpec)
+ in.DeepCopyInto(out)
+ return out
+}
+
+// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
+func (in *MCPGroupStatus) DeepCopyInto(out *MCPGroupStatus) {
+ *out = *in
+ if in.Servers != nil {
+ in, out := &in.Servers, &out.Servers
+ *out = make([]string, len(*in))
+ copy(*out, *in)
+ }
+ if in.Conditions != nil {
+ in, out := &in.Conditions, &out.Conditions
+ *out = make([]v1.Condition, len(*in))
+ for i := range *in {
+ (*in)[i].DeepCopyInto(&(*out)[i])
+ }
+ }
+}
+
+// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroupStatus.
+func (in *MCPGroupStatus) DeepCopy() *MCPGroupStatus {
+ if in == nil {
+ return nil
+ }
+ out := new(MCPGroupStatus)
+ in.DeepCopyInto(out)
+ return out
+}
+
// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *MCPRegistry) DeepCopyInto(out *MCPRegistry) {
*out = *in
diff --git a/cmd/thv-operator/controllers/mcpgroup_controller.go b/cmd/thv-operator/controllers/mcpgroup_controller.go
new file mode 100644
index 000000000..55cc03f82
--- /dev/null
+++ b/cmd/thv-operator/controllers/mcpgroup_controller.go
@@ -0,0 +1,259 @@
+package controllers
+
+import (
+ "context"
+ "sort"
+ "time"
+
+ "k8s.io/apimachinery/pkg/api/errors"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/controller/controllerutil"
+ "sigs.k8s.io/controller-runtime/pkg/handler"
+ "sigs.k8s.io/controller-runtime/pkg/log"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+const (
+ // MCPGroupFinalizerName is the name of the finalizer for MCPGroup
+ MCPGroupFinalizerName = "toolhive.stacklok.dev/mcpgroup-finalizer"
+)
+
+// MCPGroupReconciler reconciles a MCPGroup object
+type MCPGroupReconciler struct {
+ client.Client
+}
+
+// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpgroups,verbs=get;list;watch;create;update;patch;delete
+// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpgroups/status,verbs=get;update;patch
+// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpgroups/finalizers,verbs=update
+// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpservers,verbs=get;list;watch
+// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpservers/status,verbs=get;update;patch
+
+// Reconcile is part of the main kubernetes reconciliation loop
+// which aims to move the current state of the cluster closer to the desired state.
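+// The flow: fetch the MCPGroup, route deletions through the finalizer,
+// ensure the finalizer is present, list the MCPServers whose spec.groupRef
+// matches via the "spec.groupRef" field index, and publish the server
+// names, count, phase, and conditions through the status subresource.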
+func (r *MCPGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
+ ctxLogger := log.FromContext(ctx)
+ ctxLogger.Info("Reconciling MCPGroup", "mcpgroup", req.NamespacedName)
+
+ // Fetch the MCPGroup instance
+ mcpGroup := &mcpv1alpha1.MCPGroup{}
+ err := r.Get(ctx, req.NamespacedName, mcpGroup)
+ if err != nil {
+ if errors.IsNotFound(err) {
+ // Request object not found, could have been deleted after reconcile request.
+ // Return and don't requeue
+ ctxLogger.Info("MCPGroup resource not found. Ignoring since object must be deleted.")
+ return ctrl.Result{}, nil
+ }
+ // Error reading the object - requeue the request.
+ ctxLogger.Error(err, "Failed to get MCPGroup", "mcpgroup", req.NamespacedName)
+ return ctrl.Result{}, err
+ }
+
+ // Check if the MCPGroup is being deleted
+ if !mcpGroup.DeletionTimestamp.IsZero() {
+ return r.handleDeletion(ctx, mcpGroup)
+ }
+
+ // Add finalizer if it doesn't exist
+ if !controllerutil.ContainsFinalizer(mcpGroup, MCPGroupFinalizerName) {
+ controllerutil.AddFinalizer(mcpGroup, MCPGroupFinalizerName)
+ if err := r.Update(ctx, mcpGroup); err != nil {
+ ctxLogger.Error(err, "Failed to add finalizer")
+ return ctrl.Result{}, err
+ }
+ // Requeue to continue processing after finalizer is added
+ return ctrl.Result{RequeueAfter: 500 * time.Millisecond}, nil
+ }
+
+ // Find MCPServers that reference this MCPGroup
+ mcpServers, err := r.findReferencingMCPServers(ctx, mcpGroup)
+ if err != nil {
+ ctxLogger.Error(err, "Failed to list MCPServers")
+ mcpGroup.Status.Phase = mcpv1alpha1.MCPGroupPhaseFailed
+ meta.SetStatusCondition(&mcpGroup.Status.Conditions, metav1.Condition{
+ Type: mcpv1alpha1.ConditionTypeMCPServersChecked,
+ Status: metav1.ConditionFalse,
+ Reason: mcpv1alpha1.ConditionReasonListMCPServersFailed,
+ Message: "Failed to list MCPServers in namespace",
+ ObservedGeneration: mcpGroup.Generation,
+ })
+ mcpGroup.Status.ServerCount = 0
+ mcpGroup.Status.Servers = nil
+ // Update the MCPGroup status to reflect the failure
+ if updateErr := r.Status().Update(ctx, mcpGroup); updateErr != nil {
+ if errors.IsConflict(updateErr) {
+ // Requeue to retry with fresh data
+ return ctrl.Result{Requeue: true}, nil
+ }
+ ctxLogger.Error(updateErr, "Failed to update MCPGroup status after list failure")
+ }
+ return ctrl.Result{}, nil
+ }
+
+ meta.SetStatusCondition(&mcpGroup.Status.Conditions, metav1.Condition{
+ Type: mcpv1alpha1.ConditionTypeMCPServersChecked,
+ Status: metav1.ConditionTrue,
+ Reason: mcpv1alpha1.ConditionReasonListMCPServersSucceeded,
+ Message: "Successfully listed MCPServers in namespace",
+ ObservedGeneration: mcpGroup.Generation,
+ })
+
+ // Set MCPGroup status fields
+ mcpGroup.Status.ServerCount = len(mcpServers)
+ if len(mcpServers) == 0 {
+ mcpGroup.Status.Servers = []string{}
+ } else {
+ // If there are servers, extract their names
+ mcpGroup.Status.Servers = make([]string, len(mcpServers))
+ for i, server := range mcpServers {
+ mcpGroup.Status.Servers[i] = server.Name
+ }
+ sort.Strings(mcpGroup.Status.Servers)
+ }
+
+ mcpGroup.Status.Phase = mcpv1alpha1.MCPGroupPhaseReady
+
+ // Update the MCPGroup status
+ if err := r.Status().Update(ctx, mcpGroup); err != nil {
+ if errors.IsConflict(err) {
+ // Requeue to retry with fresh data
+ return ctrl.Result{Requeue: true}, nil
+ }
+ ctxLogger.Error(err, "Failed to update MCPGroup status")
+ return ctrl.Result{}, err
+ }
+
+ ctxLogger.Info("Successfully reconciled MCPGroup", "serverCount", mcpGroup.Status.ServerCount)
+ return ctrl.Result{}, nil
+}
+
+// handleDeletion handles the deletion of an MCPGroup
+func (r *MCPGroupReconciler) handleDeletion(ctx context.Context, mcpGroup *mcpv1alpha1.MCPGroup) (ctrl.Result, error) {
+ ctxLogger := log.FromContext(ctx)
+
+ if controllerutil.ContainsFinalizer(mcpGroup, MCPGroupFinalizerName) {
+ // Find all MCPServers that reference this group
+ referencingServers, err := r.findReferencingMCPServers(ctx, mcpGroup)
+ if err != nil {
+ ctxLogger.Error(err, "Failed to find referencing MCPServers during deletion")
+ return ctrl.Result{}, err
+ }
+
+ // Update conditions on all referencing MCPServers to indicate the group is being deleted
+ if len(referencingServers) > 0 {
+ ctxLogger.Info("Updating conditions on referencing MCPServers", "count", len(referencingServers))
+ r.updateReferencingServersOnDeletion(ctx, referencingServers, mcpGroup.Name)
+ }
+
+ // Remove the finalizer to allow deletion
+ controllerutil.RemoveFinalizer(mcpGroup, MCPGroupFinalizerName)
+ if err := r.Update(ctx, mcpGroup); err != nil {
+ if errors.IsConflict(err) {
+ // Requeue to retry with fresh data
+ return ctrl.Result{Requeue: true}, nil
+ }
+ ctxLogger.Error(err, "Failed to remove finalizer")
+ return ctrl.Result{}, err
+ }
+ ctxLogger.Info("Removed finalizer from MCPGroup", "mcpgroup", mcpGroup.Name)
+ }
+
+ return ctrl.Result{}, nil
+}
+
+// findReferencingMCPServers finds all MCPServers that reference the given MCPGroup
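+// It relies on the "spec.groupRef" field index (registered on the manager
+// in main.go and mirrored on the fake clients in tests); listing by that
+// field fails if the index is not registered.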
+func (r *MCPGroupReconciler) findReferencingMCPServers(
+ ctx context.Context, mcpGroup *mcpv1alpha1.MCPGroup) ([]mcpv1alpha1.MCPServer, error) {
+
+ mcpServerList := &mcpv1alpha1.MCPServerList{}
+ listOpts := []client.ListOption{
+ client.InNamespace(mcpGroup.Namespace),
+ client.MatchingFields{"spec.groupRef": mcpGroup.Name},
+ }
+ if err := r.List(ctx, mcpServerList, listOpts...); err != nil {
+ return nil, err
+ }
+
+ return mcpServerList.Items, nil
+}
+
+// updateReferencingServersOnDeletion updates the conditions on MCPServers to indicate their group is being deleted
+func (r *MCPGroupReconciler) updateReferencingServersOnDeletion(
+ ctx context.Context, servers []mcpv1alpha1.MCPServer, groupName string) {
+ ctxLogger := log.FromContext(ctx)
+
+ for _, server := range servers {
+ // Update the condition to indicate the group is being deleted
+ meta.SetStatusCondition(&server.Status.Conditions, metav1.Condition{
+ Type: mcpv1alpha1.ConditionGroupRefValidated,
+ Status: metav1.ConditionFalse,
+ Reason: mcpv1alpha1.ConditionReasonGroupRefNotFound,
+ Message: "Referenced MCPGroup is being deleted",
+ ObservedGeneration: server.Generation,
+ })
+
+ // Update the server status
+ if err := r.Status().Update(ctx, &server); err != nil {
+ ctxLogger.Error(err, "Failed to update MCPServer condition during group deletion",
+ "mcpserver", server.Name, "mcpgroup", groupName)
+ // Continue with other servers even if one fails
+ continue
+ }
+ ctxLogger.Info("Updated MCPServer condition for group deletion",
+ "mcpserver", server.Name, "mcpgroup", groupName)
+ }
+}
+
+func (r *MCPGroupReconciler) findMCPGroupForMCPServer(ctx context.Context, obj client.Object) []ctrl.Request {
+ ctxLogger := log.FromContext(ctx)
+
+ // Get the MCPServer object
+ mcpServer, ok := obj.(*mcpv1alpha1.MCPServer)
+ if !ok {
+ ctxLogger.Error(nil, "Object is not an MCPServer", "object", obj.GetName())
+ return []ctrl.Request{}
+ }
+ if mcpServer.Spec.GroupRef == "" {
+ // No MCPGroup reference, nothing to do
+ return []ctrl.Request{}
+ }
+
+ // Find which MCPGroup this MCPServer belongs to
+ ctxLogger.Info(
+ "Finding MCPGroup for MCPServer",
+ "namespace",
+ obj.GetNamespace(),
+ "mcpserver",
+ obj.GetName(),
+ "groupRef",
+ mcpServer.Spec.GroupRef)
+ group := &mcpv1alpha1.MCPGroup{}
+ if err := r.Get(ctx, types.NamespacedName{Namespace: obj.GetNamespace(), Name: mcpServer.Spec.GroupRef}, group); err != nil {
+ ctxLogger.Error(err, "Failed to get MCPGroup for MCPServer", "namespace", obj.GetNamespace(), "name", mcpServer.Spec.GroupRef)
+ return []ctrl.Request{}
+ }
+ return []ctrl.Request{
+ {
+ NamespacedName: types.NamespacedName{
+ Namespace: obj.GetNamespace(),
+ Name: group.Name,
+ },
+ },
+ }
+}
+
+// SetupWithManager sets up the controller with the Manager.
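+// MCPServer events are mapped back to the MCPGroup they reference via
+// findMCPGroupForMCPServer, so a group's status is refreshed whenever its
+// membership changes.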
+func (r *MCPGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
+ return ctrl.NewControllerManagedBy(mgr).
+ For(&mcpv1alpha1.MCPGroup{}).
+ Watches(
+ &mcpv1alpha1.MCPServer{}, handler.EnqueueRequestsFromMapFunc(r.findMCPGroupForMCPServer),
+ ).
+ Complete(r)
+}
diff --git a/cmd/thv-operator/controllers/mcpgroup_controller_test.go b/cmd/thv-operator/controllers/mcpgroup_controller_test.go
new file mode 100644
index 000000000..13f58b8a6
--- /dev/null
+++ b/cmd/thv-operator/controllers/mcpgroup_controller_test.go
@@ -0,0 +1,1000 @@
+package controllers
+
+import (
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "k8s.io/apimachinery/pkg/types"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+ "sigs.k8s.io/controller-runtime/pkg/reconcile"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+// TestMCPGroupReconciler_Reconcile_BasicLogic tests the core reconciliation logic
+// using a fake client to avoid needing a real Kubernetes cluster
+func TestMCPGroupReconciler_Reconcile_BasicLogic(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ mcpGroup *mcpv1alpha1.MCPGroup
+ mcpServers []*mcpv1alpha1.MCPServer
+ expectedServerCount int
+ expectedServerNames []string
+ expectedPhase mcpv1alpha1.MCPGroupPhase
+ }{
+ {
+ name: "group with two running servers should be ready",
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ Status: mcpv1alpha1.MCPServerStatus{
+ Phase: mcpv1alpha1.MCPServerPhaseRunning,
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server2",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ Status: mcpv1alpha1.MCPServerStatus{
+ Phase: mcpv1alpha1.MCPServerPhaseRunning,
+ },
+ },
+ },
+ expectedServerCount: 2,
+ expectedServerNames: []string{"server1", "server2"},
+ expectedPhase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ {
+ name: "group with servers regardless of status should be ready",
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ Status: mcpv1alpha1.MCPServerStatus{
+ Phase: mcpv1alpha1.MCPServerPhaseRunning,
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server2",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ Status: mcpv1alpha1.MCPServerStatus{
+ Phase: mcpv1alpha1.MCPServerPhaseFailed,
+ },
+ },
+ },
+ expectedServerCount: 2,
+ expectedServerNames: []string{"server1", "server2"},
+ expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, // Controller doesn't check individual server phases
+ },
+ {
+ name: "group with mixed server phases should be ready",
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ Status: mcpv1alpha1.MCPServerStatus{
+ Phase: mcpv1alpha1.MCPServerPhaseRunning,
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server2",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ Status: mcpv1alpha1.MCPServerStatus{
+ Phase: mcpv1alpha1.MCPServerPhasePending,
+ },
+ },
+ },
+ expectedServerCount: 2,
+ expectedServerNames: []string{"server1", "server2"},
+ expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, // Controller doesn't check individual server phases
+ },
+ {
+ name: "group with no servers should be ready",
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ mcpServers: []*mcpv1alpha1.MCPServer{},
+ expectedServerCount: 0,
+ expectedServerNames: []string{},
+ expectedPhase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ // Create fake client with objects
+ objs := []client.Object{tt.mcpGroup}
+ for _, server := range tt.mcpServers {
+ objs = append(objs, server)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithStatusSubresource(&mcpv1alpha1.MCPGroup{}).
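+ // Mirror the spec.groupRef field index that main.go registers on the
+ // manager, so MatchingFields lookups work against the fake client.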
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ // Reconcile
+ req := reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: tt.mcpGroup.Name,
+ Namespace: tt.mcpGroup.Namespace,
+ },
+ }
+
+ // First reconcile adds the finalizer
+ result, err := r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.True(t, result.RequeueAfter > 0, "Should requeue after adding finalizer")
+
+ // Second reconcile processes normally
+ result, err = r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.False(t, result.RequeueAfter > 0, "Should not requeue")
+
+ // Check the updated MCPGroup
+ var updatedGroup mcpv1alpha1.MCPGroup
+ err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup)
+ require.NoError(t, err)
+
+ assert.Equal(t, tt.expectedServerCount, updatedGroup.Status.ServerCount)
+ assert.Equal(t, tt.expectedPhase, updatedGroup.Status.Phase)
+ assert.ElementsMatch(t, tt.expectedServerNames, updatedGroup.Status.Servers)
+ })
+ }
+}
+
+// TestMCPGroupReconciler_ServerFiltering tests the logic for filtering servers by groupRef
+func TestMCPGroupReconciler_ServerFiltering(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ groupName string
+ namespace string
+ mcpServers []*mcpv1alpha1.MCPServer
+ expectedServerNames []string
+ expectedCount int
+ }{
+ {
+ name: "filters servers by exact groupRef match",
+ groupName: "test-group",
+ namespace: "default",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "other-group"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server3", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ },
+ expectedServerNames: []string{"server1", "server3"},
+ expectedCount: 2,
+ },
+ {
+ name: "excludes servers without groupRef",
+ groupName: "test-group",
+ namespace: "default",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test"},
+ },
+ },
+ expectedServerNames: []string{"server1"},
+ expectedCount: 1,
+ },
+ {
+ name: "excludes servers from different namespaces",
+ groupName: "test-group",
+ namespace: "namespace-a",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "namespace-a"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "namespace-b"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ },
+ expectedServerNames: []string{"server1"},
+ expectedCount: 1,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ mcpGroup := &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: tt.groupName,
+ Namespace: tt.namespace,
+ },
+ }
+
+ objs := []client.Object{mcpGroup}
+ for _, server := range tt.mcpServers {
+ objs = append(objs, server)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithStatusSubresource(&mcpv1alpha1.MCPGroup{}).
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ req := reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: tt.groupName,
+ Namespace: tt.namespace,
+ },
+ }
+
+ // First reconcile adds the finalizer
+ result, err := r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.True(t, result.RequeueAfter > 0, "Should requeue after adding finalizer")
+
+ // Second reconcile processes normally
+ result, err = r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.False(t, result.RequeueAfter > 0, "Should not requeue")
+
+ var updatedGroup mcpv1alpha1.MCPGroup
+ err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup)
+ require.NoError(t, err)
+
+ assert.Equal(t, tt.expectedCount, updatedGroup.Status.ServerCount)
+ assert.ElementsMatch(t, tt.expectedServerNames, updatedGroup.Status.Servers)
+ })
+ }
+}
+
+// TestMCPGroupReconciler_findMCPGroupForMCPServer tests the watch mapping function
+func TestMCPGroupReconciler_findMCPGroupForMCPServer(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ mcpServer *mcpv1alpha1.MCPServer
+ mcpGroups []*mcpv1alpha1.MCPGroup
+ expectedRequests int
+ expectedGroupName string
+ }{
+ {
+ name: "server with groupRef finds matching group",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ },
+ expectedRequests: 1,
+ expectedGroupName: "test-group",
+ },
+ {
+ name: "server without groupRef returns empty",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ // No GroupRef
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ },
+ expectedRequests: 0,
+ },
+ {
+ name: "server with non-existent groupRef returns empty",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "non-existent-group",
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ },
+ expectedRequests: 0,
+ },
+ {
+ name: "server finds correct group among multiple groups",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "group-b",
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "group-a",
+ Namespace: "default",
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "group-b",
+ Namespace: "default",
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "group-c",
+ Namespace: "default",
+ },
+ },
+ },
+ expectedRequests: 1,
+ expectedGroupName: "group-b",
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ // Create fake client with objects
+ objs := []client.Object{}
+ for _, group := range tt.mcpGroups {
+ objs = append(objs, group)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ requests := r.findMCPGroupForMCPServer(ctx, tt.mcpServer)
+
+ assert.Len(t, requests, tt.expectedRequests)
+ if tt.expectedRequests > 0 {
+ assert.Equal(t, tt.expectedGroupName, requests[0].Name)
+ assert.Equal(t, tt.mcpServer.Namespace, requests[0].Namespace)
+ }
+ })
+ }
+}
+
+// TestMCPGroupReconciler_GroupNotFound tests handling of non-existent groups
+func TestMCPGroupReconciler_GroupNotFound(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ // Reconcile a non-existent group
+ req := reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: "non-existent-group",
+ Namespace: "default",
+ },
+ }
+
+ result, err := r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.False(t, result.RequeueAfter > 0, "Should not requeue for non-existent group")
+}
+
+// TestMCPGroupReconciler_Conditions tests the MCPServersChecked condition
+func TestMCPGroupReconciler_Conditions(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ mcpGroup *mcpv1alpha1.MCPGroup
+ mcpServers []*mcpv1alpha1.MCPServer
+ expectedConditionStatus metav1.ConditionStatus
+ expectedConditionReason string
+ expectedPhase mcpv1alpha1.MCPGroupPhase
+ }{
+ {
+ name: "MCPServersChecked condition is True when listing succeeds",
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ },
+ expectedConditionStatus: metav1.ConditionTrue,
+ expectedConditionReason: mcpv1alpha1.ConditionReasonListMCPServersSucceeded,
+ expectedPhase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ {
+ name: "MCPServersChecked condition is True even with no servers",
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ },
+ mcpServers: []*mcpv1alpha1.MCPServer{},
+ expectedConditionStatus: metav1.ConditionTrue,
+ expectedConditionReason: mcpv1alpha1.ConditionReasonListMCPServersSucceeded,
+ expectedPhase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ objs := []client.Object{tt.mcpGroup}
+ for _, server := range tt.mcpServers {
+ objs = append(objs, server)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithStatusSubresource(&mcpv1alpha1.MCPGroup{}).
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ req := reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: tt.mcpGroup.Name,
+ Namespace: tt.mcpGroup.Namespace,
+ },
+ }
+
+ // First reconcile adds the finalizer
+ result, err := r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.True(t, result.RequeueAfter > 0, "Should requeue after adding finalizer")
+
+ // Second reconcile processes normally
+ result, err = r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.False(t, result.RequeueAfter > 0, "Should not requeue")
+
+ var updatedGroup mcpv1alpha1.MCPGroup
+ err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup)
+ require.NoError(t, err)
+
+ assert.Equal(t, tt.expectedPhase, updatedGroup.Status.Phase)
+
+ // Check the MCPServersChecked condition
+ var condition *metav1.Condition
+ for i := range updatedGroup.Status.Conditions {
+ if updatedGroup.Status.Conditions[i].Type == mcpv1alpha1.ConditionTypeMCPServersChecked {
+ condition = &updatedGroup.Status.Conditions[i]
+ break
+ }
+ }
+
+ require.NotNil(t, condition, "MCPServersChecked condition should be present")
+ assert.Equal(t, tt.expectedConditionStatus, condition.Status)
+ if tt.expectedConditionReason != "" {
+ assert.Equal(t, tt.expectedConditionReason, condition.Reason)
+ }
+ })
+ }
+}
+
+// TestMCPGroupReconciler_Finalizer tests finalizer addition and behavior
+func TestMCPGroupReconciler_Finalizer(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ mcpGroup := &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(mcpGroup).
+ WithStatusSubresource(&mcpv1alpha1.MCPGroup{}, &mcpv1alpha1.MCPServer{}).
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ req := reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: mcpGroup.Name,
+ Namespace: mcpGroup.Namespace,
+ },
+ }
+
+ // First reconcile should add the finalizer
+ result, err := r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.True(t, result.RequeueAfter > 0, "Should requeue after adding finalizer")
+
+ // Verify finalizer was added
+ var updatedGroup mcpv1alpha1.MCPGroup
+ err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup)
+ require.NoError(t, err)
+ assert.Contains(t, updatedGroup.Finalizers, MCPGroupFinalizerName)
+
+ // Second reconcile should proceed with normal logic
+ result, err = r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.False(t, result.RequeueAfter > 0, "Should not requeue")
+}
+
+// TestMCPGroupReconciler_Deletion tests deletion with finalizer cleanup
+func TestMCPGroupReconciler_Deletion(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ mcpServers []*mcpv1alpha1.MCPServer
+ expectedServerConditionType string
+ shouldUpdateServers bool
+ }{
+ {
+ name: "deletion updates referencing servers",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server2",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ },
+ expectedServerConditionType: mcpv1alpha1.ConditionGroupRefValidated,
+ shouldUpdateServers: true,
+ },
+ {
+ name: "deletion with no referencing servers succeeds",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "other-group",
+ },
+ },
+ },
+ shouldUpdateServers: false,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ // Create group with finalizer and deletion timestamp
+ now := metav1.Now()
+ mcpGroup := &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ Finalizers: []string{MCPGroupFinalizerName},
+ DeletionTimestamp: &now,
+ },
+ }
+
+ objs := []client.Object{mcpGroup}
+ for _, server := range tt.mcpServers {
+ objs = append(objs, server)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithStatusSubresource(&mcpv1alpha1.MCPGroup{}, &mcpv1alpha1.MCPServer{}).
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ req := reconcile.Request{
+ NamespacedName: types.NamespacedName{
+ Name: mcpGroup.Name,
+ Namespace: mcpGroup.Namespace,
+ },
+ }
+
+ // Reconcile should handle deletion
+ result, err := r.Reconcile(ctx, req)
+ require.NoError(t, err)
+ assert.False(t, result.RequeueAfter > 0, "Should not requeue on deletion")
+
+ // Verify finalizer was removed (group might already be deleted by fake client)
+ var updatedGroup mcpv1alpha1.MCPGroup
+ err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup)
+ // If the group still exists, verify finalizer was removed
+ if err == nil {
+ assert.NotContains(t, updatedGroup.Finalizers, MCPGroupFinalizerName)
+ }
+
+ // If servers should be updated, verify their conditions
+ if tt.shouldUpdateServers {
+ for _, server := range tt.mcpServers {
+ if server.Spec.GroupRef == "test-group" {
+ var updatedServer mcpv1alpha1.MCPServer
+ err = fakeClient.Get(ctx, types.NamespacedName{
+ Name: server.Name,
+ Namespace: server.Namespace,
+ }, &updatedServer)
+ require.NoError(t, err)
+
+ // Check that the GroupRefValidated condition was set to False
+ var condition *metav1.Condition
+ for i := range updatedServer.Status.Conditions {
+ if updatedServer.Status.Conditions[i].Type == tt.expectedServerConditionType {
+ condition = &updatedServer.Status.Conditions[i]
+ break
+ }
+ }
+
+ require.NotNil(t, condition, "GroupRefValidated condition should be present")
+ assert.Equal(t, metav1.ConditionFalse, condition.Status)
+ assert.Equal(t, mcpv1alpha1.ConditionReasonGroupRefNotFound, condition.Reason)
+ assert.Contains(t, condition.Message, "being deleted")
+ }
+ }
+ }
+ })
+ }
+}
+
+// TestMCPGroupReconciler_findReferencingMCPServers tests finding servers that reference a group
+func TestMCPGroupReconciler_findReferencingMCPServers(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ groupName string
+ namespace string
+ mcpServers []*mcpv1alpha1.MCPServer
+ expectedCount int
+ expectedNames []string
+ }{
+ {
+ name: "finds servers with matching groupRef",
+ groupName: "test-group",
+ namespace: "default",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "other-group"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server3", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ },
+ expectedCount: 2,
+ expectedNames: []string{"server1", "server3"},
+ },
+ {
+ name: "returns empty when no servers reference the group",
+ groupName: "test-group",
+ namespace: "default",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "default"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "other-group"},
+ },
+ },
+ expectedCount: 0,
+ expectedNames: []string{},
+ },
+ {
+ name: "excludes servers from different namespaces",
+ groupName: "test-group",
+ namespace: "namespace-a",
+ mcpServers: []*mcpv1alpha1.MCPServer{
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "namespace-a"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ {
+ ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "namespace-b"},
+ Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"},
+ },
+ },
+ expectedCount: 1,
+ expectedNames: []string{"server1"},
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := t.Context()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ mcpGroup := &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: tt.groupName,
+ Namespace: tt.namespace,
+ },
+ }
+
+ objs := []client.Object{}
+ for _, server := range tt.mcpServers {
+ objs = append(objs, server)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithIndex(&mcpv1alpha1.MCPServer{}, "spec.groupRef", func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ }).
+ Build()
+
+ r := &MCPGroupReconciler{
+ Client: fakeClient,
+ }
+
+ servers, err := r.findReferencingMCPServers(ctx, mcpGroup)
+ require.NoError(t, err)
+ assert.Len(t, servers, tt.expectedCount)
+
+ if tt.expectedCount > 0 {
+ serverNames := make([]string, len(servers))
+ for i, s := range servers {
+ serverNames[i] = s.Name
+ }
+ assert.ElementsMatch(t, tt.expectedNames, serverNames)
+ }
+ })
+ }
+}
diff --git a/cmd/thv-operator/controllers/mcpserver_controller.go b/cmd/thv-operator/controllers/mcpserver_controller.go
index bd516e0db..863d5254b 100644
--- a/cmd/thv-operator/controllers/mcpserver_controller.go
+++ b/cmd/thv-operator/controllers/mcpserver_controller.go
@@ -159,6 +159,9 @@ func (r *MCPServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{Requeue: true}, nil
}
+ // Validate the GroupRef if one is specified
+ r.validateGroupRef(ctx, mcpServer)
+
// Check if MCPToolConfig is referenced and handle it
if err := r.handleToolConfig(ctx, mcpServer); err != nil {
ctxLogger.Error(err, "Failed to handle MCPToolConfig")
@@ -379,6 +382,49 @@ func (r *MCPServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) (
return ctrl.Result{}, nil
}
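+
+// validateGroupRef sets the GroupRefValidated condition on the MCPServer:
+// False/GroupRefNotFound when the referenced MCPGroup cannot be fetched,
+// False/GroupRefNotReady when the group exists but is not Ready, and
+// True when it is. The status update is then persisted immediately.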
+func (r *MCPServerReconciler) validateGroupRef(ctx context.Context, mcpServer *mcpv1alpha1.MCPServer) {
+ if mcpServer.Spec.GroupRef == "" {
+ // No group reference, nothing to validate
+ return
+ }
+
+ ctxLogger := log.FromContext(ctx)
+
+ // Find the referenced MCPGroup
+ group := &mcpv1alpha1.MCPGroup{}
+ if err := r.Get(ctx, types.NamespacedName{Namespace: mcpServer.Namespace, Name: mcpServer.Spec.GroupRef}, group); err != nil {
+ ctxLogger.Error(err, "Failed to validate GroupRef")
+ meta.SetStatusCondition(&mcpServer.Status.Conditions, metav1.Condition{
+ Type: mcpv1alpha1.ConditionGroupRefValidated,
+ Status: metav1.ConditionFalse,
+ Reason: mcpv1alpha1.ConditionReasonGroupRefNotFound,
+ Message: err.Error(),
+ ObservedGeneration: mcpServer.Generation,
+ })
+ } else if group.Status.Phase != mcpv1alpha1.MCPGroupPhaseReady {
+ meta.SetStatusCondition(&mcpServer.Status.Conditions, metav1.Condition{
+ Type: mcpv1alpha1.ConditionGroupRefValidated,
+ Status: metav1.ConditionFalse,
+ Reason: mcpv1alpha1.ConditionReasonGroupRefNotReady,
+ Message: "GroupRef is not in Ready state",
+ ObservedGeneration: mcpServer.Generation,
+ })
+ } else {
+ meta.SetStatusCondition(&mcpServer.Status.Conditions, metav1.Condition{
+ Type: mcpv1alpha1.ConditionGroupRefValidated,
+ Status: metav1.ConditionTrue,
+ Reason: mcpv1alpha1.ConditionReasonGroupRefValidated,
+ Message: "GroupRef is valid and in Ready state",
+ ObservedGeneration: mcpServer.Generation,
+ })
+ }
+
+ if err := r.Status().Update(ctx, mcpServer); err != nil {
+ ctxLogger.Error(err, "Failed to update MCPServer status after GroupRef validation")
+ }
+
+}
+
// setImageValidationCondition is a helper function to set the image validation status condition
// This reduces code duplication in the image validation logic
func setImageValidationCondition(mcpServer *mcpv1alpha1.MCPServer, status metav1.ConditionStatus, reason, message string) {
diff --git a/cmd/thv-operator/controllers/mcpserver_groupref_test.go b/cmd/thv-operator/controllers/mcpserver_groupref_test.go
new file mode 100644
index 000000000..e3e186139
--- /dev/null
+++ b/cmd/thv-operator/controllers/mcpserver_groupref_test.go
@@ -0,0 +1,346 @@
+package controllers
+
+import (
+ "context"
+ "testing"
+
+ "github.com/stretchr/testify/assert"
+ "github.com/stretchr/testify/require"
+ corev1 "k8s.io/api/core/v1"
+ "k8s.io/apimachinery/pkg/api/meta"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/client/fake"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+// TestMCPServerReconciler_ValidateGroupRef tests the validateGroupRef function
+func TestMCPServerReconciler_ValidateGroupRef(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ mcpServer *mcpv1alpha1.MCPServer
+ mcpGroups []*mcpv1alpha1.MCPGroup
+ expectedConditionStatus metav1.ConditionStatus
+ expectedConditionReason string
+ expectedConditionMsg string
+ }{
+ {
+ name: "GroupRef validated when group exists and is Ready",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ Status: mcpv1alpha1.MCPGroupStatus{
+ Phase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ },
+ },
+ expectedConditionStatus: metav1.ConditionTrue,
+ expectedConditionReason: "",
+ },
+ {
+ name: "GroupRef not validated when group does not exist",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "non-existent-group",
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{},
+ expectedConditionStatus: metav1.ConditionFalse,
+ expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotFound,
+ },
+ {
+ name: "GroupRef not validated when group is Pending",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ Status: mcpv1alpha1.MCPGroupStatus{
+ Phase: mcpv1alpha1.MCPGroupPhasePending,
+ },
+ },
+ },
+ expectedConditionStatus: metav1.ConditionFalse,
+ expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotReady,
+ expectedConditionMsg: "GroupRef is not in Ready state",
+ },
+ {
+ name: "GroupRef not validated when group is Failed",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ Status: mcpv1alpha1.MCPGroupStatus{
+ Phase: mcpv1alpha1.MCPGroupPhaseFailed,
+ },
+ },
+ },
+ expectedConditionStatus: metav1.ConditionFalse,
+ expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotReady,
+ expectedConditionMsg: "GroupRef is not in Ready state",
+ },
+ {
+ name: "No validation when GroupRef is empty",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ // No GroupRef
+ },
+ },
+ mcpGroups: []*mcpv1alpha1.MCPGroup{
+ {
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ Status: mcpv1alpha1.MCPGroupStatus{
+ Phase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ },
+ },
+ expectedConditionStatus: "", // No condition should be set
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ objs := []client.Object{}
+ for _, group := range tt.mcpGroups {
+ objs = append(objs, group)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithStatusSubresource(&mcpv1alpha1.MCPGroup{}).
+ Build()
+
+ r := &MCPServerReconciler{
+ Client: fakeClient,
+ Scheme: scheme,
+ }
+
+ r.validateGroupRef(ctx, tt.mcpServer)
+
+ // Check the condition if we expected one
+ if tt.expectedConditionStatus != "" {
+ condition := meta.FindStatusCondition(tt.mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated)
+ require.NotNil(t, condition, "GroupRefValidated condition should be present")
+ assert.Equal(t, tt.expectedConditionStatus, condition.Status)
+ if tt.expectedConditionReason != "" {
+ assert.Equal(t, tt.expectedConditionReason, condition.Reason)
+ }
+ if tt.expectedConditionMsg != "" {
+ assert.Equal(t, tt.expectedConditionMsg, condition.Message)
+ }
+ } else {
+ // No condition should be set when GroupRef is empty
+ condition := meta.FindStatusCondition(tt.mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated)
+ assert.Nil(t, condition, "GroupRefValidated condition should not be present when GroupRef is empty")
+ }
+ })
+ }
+}
+
+// TestMCPServerReconciler_GroupRefValidation_Integration tests GroupRef validation in the context of reconciliation
+func TestMCPServerReconciler_GroupRefValidation_Integration(t *testing.T) {
+ t.Parallel()
+
+ tests := []struct {
+ name string
+ mcpServer *mcpv1alpha1.MCPServer
+ mcpGroup *mcpv1alpha1.MCPGroup
+ expectedConditionStatus metav1.ConditionStatus
+ expectedConditionReason string
+ }{
+ {
+ name: "Server with valid GroupRef gets validated condition",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ Status: mcpv1alpha1.MCPGroupStatus{
+ Phase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ },
+ expectedConditionStatus: metav1.ConditionTrue,
+ },
+ {
+ name: "Server with GroupRef to non-Ready group gets failed condition",
+ mcpServer: &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "default",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ },
+ mcpGroup: &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "default",
+ },
+ Status: mcpv1alpha1.MCPGroupStatus{
+ Phase: mcpv1alpha1.MCPGroupPhasePending,
+ },
+ },
+ expectedConditionStatus: metav1.ConditionFalse,
+ expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotReady,
+ },
+ }
+
+ for _, tt := range tests {
+ t.Run(tt.name, func(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ objs := []client.Object{tt.mcpServer}
+ if tt.mcpGroup != nil {
+ objs = append(objs, tt.mcpGroup)
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(objs...).
+ WithStatusSubresource(&mcpv1alpha1.MCPServer{}, &mcpv1alpha1.MCPGroup{}).
+ Build()
+
+ r := &MCPServerReconciler{
+ Client: fakeClient,
+ Scheme: scheme,
+ }
+
+ r.validateGroupRef(ctx, tt.mcpServer)
+
+ condition := meta.FindStatusCondition(tt.mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated)
+ require.NotNil(t, condition, "GroupRefValidated condition should be present")
+ assert.Equal(t, tt.expectedConditionStatus, condition.Status)
+ if tt.expectedConditionReason != "" {
+ assert.Equal(t, tt.expectedConditionReason, condition.Reason)
+ }
+ })
+ }
+}
+
+// TestMCPServerReconciler_GroupRefCrossNamespace tests that GroupRef only works within same namespace
+func TestMCPServerReconciler_GroupRefCrossNamespace(t *testing.T) {
+ t.Parallel()
+
+ ctx := context.Background()
+ scheme := runtime.NewScheme()
+ require.NoError(t, mcpv1alpha1.AddToScheme(scheme))
+ require.NoError(t, corev1.AddToScheme(scheme))
+
+ mcpServer := &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-server",
+ Namespace: "namespace-a",
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "test-image",
+ GroupRef: "test-group",
+ },
+ }
+
+ mcpGroup := &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "test-group",
+ Namespace: "namespace-b", // Different namespace
+ },
+ Status: mcpv1alpha1.MCPGroupStatus{
+ Phase: mcpv1alpha1.MCPGroupPhaseReady,
+ },
+ }
+
+ fakeClient := fake.NewClientBuilder().
+ WithScheme(scheme).
+ WithObjects(mcpServer, mcpGroup).
+ WithStatusSubresource(&mcpv1alpha1.MCPServer{}, &mcpv1alpha1.MCPGroup{}).
+ Build()
+
+ r := &MCPServerReconciler{
+ Client: fakeClient,
+ Scheme: scheme,
+ }
+
+ r.validateGroupRef(ctx, mcpServer)
+
+ // Should fail to find the group because it's in a different namespace
+ condition := meta.FindStatusCondition(mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated)
+ require.NotNil(t, condition, "GroupRefValidated condition should be present")
+ assert.Equal(t, metav1.ConditionFalse, condition.Status)
+ assert.Equal(t, mcpv1alpha1.ConditionReasonGroupRefNotFound, condition.Reason)
+}
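
For context, the behavior these unit tests pin down implies a fairly small helper. The following is a minimal sketch of validateGroupRef inferred purely from the assertions above; the real implementation lives in the controllers package and is not part of this diff, and the reason string used in the success case is an assumption:

// Sketch only, inferred from the test assertions; not the shipped code.
func (r *MCPServerReconciler) validateGroupRef(ctx context.Context, m *mcpv1alpha1.MCPServer) {
	if m.Spec.GroupRef == "" {
		return // nothing to validate
	}
	group := &mcpv1alpha1.MCPGroup{}
	err := r.Get(ctx, types.NamespacedName{Name: m.Spec.GroupRef, Namespace: m.Namespace}, group)
	switch {
	case err != nil:
		// Covers both a missing group and a group that lives in another
		// namespace, matching the cross-namespace test above.
		meta.SetStatusCondition(&m.Status.Conditions, metav1.Condition{
			Type:   mcpv1alpha1.ConditionGroupRefValidated,
			Status: metav1.ConditionFalse,
			Reason: mcpv1alpha1.ConditionReasonGroupRefNotFound,
		})
	case group.Status.Phase != mcpv1alpha1.MCPGroupPhaseReady:
		meta.SetStatusCondition(&m.Status.Conditions, metav1.Condition{
			Type:   mcpv1alpha1.ConditionGroupRefValidated,
			Status: metav1.ConditionFalse,
			Reason: mcpv1alpha1.ConditionReasonGroupRefNotReady,
		})
	default:
		meta.SetStatusCondition(&m.Status.Conditions, metav1.Condition{
			Type:   mcpv1alpha1.ConditionGroupRefValidated,
			Status: metav1.ConditionTrue,
			Reason: "GroupRefValidated", // assumed; the tests only assert the failure reasons
		})
	}
}
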
diff --git a/cmd/thv-operator/main.go b/cmd/thv-operator/main.go
index 2b3b70260..fdaafb803 100644
--- a/cmd/thv-operator/main.go
+++ b/cmd/thv-operator/main.go
@@ -3,6 +3,7 @@
package main
import (
+ "context"
"flag"
"os"
"strings"
@@ -15,6 +16,7 @@ import (
_ "k8s.io/client-go/plugin/pkg/client/auth"
ctrl "sigs.k8s.io/controller-runtime"
"sigs.k8s.io/controller-runtime/pkg/cache"
+ "sigs.k8s.io/controller-runtime/pkg/client"
"sigs.k8s.io/controller-runtime/pkg/healthz"
"sigs.k8s.io/controller-runtime/pkg/log"
metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" // Import for metricsserver
@@ -75,9 +77,25 @@ func main() {
os.Exit(1)
}
+ // Set up field indexing for MCPServer.Spec.GroupRef
+ if err := mgr.GetFieldIndexer().IndexField(
+ context.Background(),
+ &mcpv1alpha1.MCPServer{},
+ "spec.groupRef",
+ func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ },
+ ); err != nil {
+ setupLog.Error(err, "unable to create field index for spec.groupRef")
+ os.Exit(1)
+ }
+
// Create a shared platform detector for all controllers
sharedPlatformDetector := controllers.NewSharedPlatformDetector()
-
rec := &controllers.MCPServerReconciler{
Client: mgr.GetClient(),
Scheme: mgr.GetScheme(),
@@ -115,6 +133,14 @@ func main() {
setupLog.Error(err, "unable to create controller", "controller", "MCPRegistry")
os.Exit(1)
}
+
+ // Set up MCPGroup controller
+ if err = (&controllers.MCPGroupReconciler{
+ Client: mgr.GetClient(),
+ }).SetupWithManager(mgr); err != nil {
+ setupLog.Error(err, "unable to create controller", "controller", "MCPGroup")
+ os.Exit(1)
+ }
//+kubebuilder:scaffold:builder
if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil {
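
The spec.groupRef index registered above is what lets controllers list the MCPServers belonging to a group without scanning every server in the namespace. A hedged usage sketch, assuming c is a client backed by the manager's cache and group is an *mcpv1alpha1.MCPGroup; the field key must match the string passed to IndexField exactly:

var servers mcpv1alpha1.MCPServerList
err := c.List(ctx, &servers,
	client.InNamespace(group.Namespace),                // MCPGroups are namespace-scoped
	client.MatchingFields{"spec.groupRef": group.Name}, // same key as the IndexField call
)
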
diff --git a/cmd/thv-operator/test-integration/mcp-group/mcpgroup_controller_integration_test.go b/cmd/thv-operator/test-integration/mcp-group/mcpgroup_controller_integration_test.go
new file mode 100644
index 000000000..bef345415
--- /dev/null
+++ b/cmd/thv-operator/test-integration/mcp-group/mcpgroup_controller_integration_test.go
@@ -0,0 +1,718 @@
+package operator_test
+
+import (
+ "fmt"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ corev1 "k8s.io/api/core/v1"
+ metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+ "k8s.io/apimachinery/pkg/types"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+)
+
+var _ = Describe("MCPGroup Controller Integration Tests", func() {
+ const (
+ timeout = time.Second * 30
+ interval = time.Millisecond * 250
+ )
+
+ Context("When creating an MCPGroup with existing MCPServers", Ordered, func() {
+ var (
+ namespace string
+ mcpGroupName string
+ mcpGroup *mcpv1alpha1.MCPGroup
+ server1 *mcpv1alpha1.MCPServer
+ server2 *mcpv1alpha1.MCPServer
+ serverNoGroup *mcpv1alpha1.MCPServer
+ )
+
+ BeforeAll(func() {
+ namespace = fmt.Sprintf("test-mcpgroup-%d", time.Now().Unix())
+ mcpGroupName = "test-group"
+
+ // Create namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Create(ctx, ns)).Should(Succeed())
+
+ // Create MCPServers first
+ server1 = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, server1)).Should(Succeed())
+
+ server2 = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server2",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, server2)).Should(Succeed())
+
+ serverNoGroup = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server-no-group",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ // No GroupRef
+ },
+ }
+ Expect(k8sClient.Create(ctx, serverNoGroup)).Should(Succeed())
+
+ // Update server statuses to Running
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ // Verify the statuses were updated
+ Eventually(func() bool {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil {
+ return false
+ }
+ return freshServer.Status.Phase == mcpv1alpha1.MCPServerPhaseRunning
+ }, timeout, interval).Should(BeTrue())
+
+ Eventually(func() bool {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil {
+ return false
+ }
+ return freshServer.Status.Phase == mcpv1alpha1.MCPServerPhaseRunning
+ }, timeout, interval).Should(BeTrue())
+
+ // Now create the MCPGroup
+ mcpGroup = &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPGroupSpec{
+ Description: "Test group for integration tests",
+ },
+ }
+ Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed())
+ })
+
+ AfterAll(func() {
+ // Clean up
+ Expect(k8sClient.Delete(ctx, server1)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, server2)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, serverNoGroup)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed())
+
+ // Delete namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Delete(ctx, ns)).Should(Succeed())
+ })
+
+ It("Should find existing MCPServers and update status", func() {
+ // Check that the group found both servers
+ Eventually(func() int {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup); err != nil {
+ return -1
+ }
+ return updatedGroup.Status.ServerCount
+ }, timeout, interval).Should(Equal(2))
+
+ // The group should be Ready after successful reconciliation
+ Eventually(func() mcpv1alpha1.MCPGroupPhase {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup); err != nil {
+ return ""
+ }
+ return updatedGroup.Status.Phase
+ }, timeout, interval).Should(Equal(mcpv1alpha1.MCPGroupPhaseReady))
+
+ // Verify the servers are in the group
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup)).Should(Succeed())
+
+ Expect(updatedGroup.Status.Servers).To(ContainElements("server1", "server2"))
+ Expect(updatedGroup.Status.Servers).NotTo(ContainElement("server-no-group"))
+ })
+ })
+
+ Context("When creating a new MCPServer with groupRef", Ordered, func() {
+ var (
+ namespace string
+ mcpGroupName string
+ mcpGroup *mcpv1alpha1.MCPGroup
+ newServer *mcpv1alpha1.MCPServer
+ )
+
+ BeforeAll(func() {
+ namespace = fmt.Sprintf("test-new-server-%d", time.Now().Unix())
+ mcpGroupName = "test-group-new-server"
+
+ // Create namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Create(ctx, ns)).Should(Succeed())
+
+ // Create MCPGroup first
+ mcpGroup = &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPGroupSpec{
+ Description: "Test group for new server",
+ },
+ }
+ Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed())
+
+ // Wait for initial reconciliation
+ Eventually(func() bool {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup)
+ return err == nil && updatedGroup.Status.Phase == mcpv1alpha1.MCPGroupPhaseReady
+ }, timeout, interval).Should(BeTrue())
+ })
+
+ AfterAll(func() {
+ // Clean up
+ if newServer != nil {
+ Expect(k8sClient.Delete(ctx, newServer)).Should(Succeed())
+ }
+ Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed())
+
+ // Delete namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Delete(ctx, ns)).Should(Succeed())
+ })
+
+ It("Should trigger MCPGroup reconciliation when server is created", func() {
+ // Create new server with groupRef
+ newServer = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "new-server",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, newServer)).Should(Succeed())
+
+ // Update server status to Running
+ Eventually(func() error {
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: newServer.Name, Namespace: namespace}, newServer); err != nil {
+ return err
+ }
+ newServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, newServer)
+ }, timeout, interval).Should(Succeed())
+
+ // Wait for MCPGroup to be updated
+ Eventually(func() bool {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup)
+ if err != nil {
+ return false
+ }
+
+ return updatedGroup.Status.ServerCount == 1 &&
+ updatedGroup.Status.Phase == mcpv1alpha1.MCPGroupPhaseReady
+ }, timeout, interval).Should(BeTrue())
+
+ // Verify the server is in the group
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup)).Should(Succeed())
+
+ Expect(updatedGroup.Status.Servers).To(ContainElement("new-server"))
+ })
+ })
+
+ Context("When deleting an MCPServer from a group", Ordered, func() {
+ var (
+ namespace string
+ mcpGroupName string
+ mcpGroup *mcpv1alpha1.MCPGroup
+ server1 *mcpv1alpha1.MCPServer
+ server2 *mcpv1alpha1.MCPServer
+ )
+
+ BeforeAll(func() {
+ namespace = fmt.Sprintf("test-delete-server-%d", time.Now().Unix())
+ mcpGroupName = "test-group-delete"
+
+ // Create namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Create(ctx, ns)).Should(Succeed())
+
+ // Create MCPServers
+ server1 = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, server1)).Should(Succeed())
+
+ server2 = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server2",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, server2)).Should(Succeed())
+
+ // Update server statuses to Running
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ // Create MCPGroup
+ mcpGroup = &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPGroupSpec{
+ Description: "Test group for server deletion",
+ },
+ }
+ Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed())
+
+ // Wait for initial reconciliation with both servers
+ Eventually(func() bool {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup)
+ return err == nil && updatedGroup.Status.ServerCount == 2
+ }, timeout, interval).Should(BeTrue())
+ })
+
+ AfterAll(func() {
+ // Clean up remaining resources. The existence check below is defensive:
+ // server1 is only deleted here if it is still present.
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, server1); err == nil {
+ Expect(k8sClient.Delete(ctx, server1)).Should(Succeed())
+ }
+ Expect(k8sClient.Delete(ctx, server2)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed())
+
+ // Delete namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Delete(ctx, ns)).Should(Succeed())
+ })
+
+ It("Should remain Ready after checking servers in namespace", func() {
+ // The MCPGroup should remain Ready because it can successfully list servers
+ // in the namespace. The MCPGroup phase is based on the ability to query
+ // servers, not on the state or count of servers.
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup)).Should(Succeed())
+
+ // The MCPGroup should be Ready with 2 servers
+ Expect(updatedGroup.Status.Phase).To(Equal(mcpv1alpha1.MCPGroupPhaseReady))
+ Expect(updatedGroup.Status.ServerCount).To(Equal(2))
+
+ // Trigger a reconciliation by updating the MCPGroup spec
+ Eventually(func() error {
+ freshGroup := &mcpv1alpha1.MCPGroup{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: mcpGroupName, Namespace: namespace}, freshGroup); err != nil {
+ return err
+ }
+ freshGroup.Spec.Description = "Test group for server deletion - updated"
+ return k8sClient.Update(ctx, freshGroup)
+ }, timeout, interval).Should(Succeed())
+
+ // After reconciliation, the MCPGroup should still be Ready
+ Eventually(func() mcpv1alpha1.MCPGroupPhase {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup); err != nil {
+ return ""
+ }
+ return updatedGroup.Status.Phase
+ }, timeout, interval).Should(Equal(mcpv1alpha1.MCPGroupPhaseReady))
+ })
+ })
+
+ Context("When an MCPServer changes state", Ordered, func() {
+ var (
+ namespace string
+ mcpGroupName string
+ mcpGroup *mcpv1alpha1.MCPGroup
+ server1 *mcpv1alpha1.MCPServer
+ server2 *mcpv1alpha1.MCPServer
+ )
+
+ BeforeAll(func() {
+ namespace = fmt.Sprintf("test-server-state-%d", time.Now().Unix())
+ mcpGroupName = "test-group-state"
+
+ // Create namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Create(ctx, ns)).Should(Succeed())
+
+ // Create MCPServers
+ server1 = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server1",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, server1)).Should(Succeed())
+
+ server2 = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server2",
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, server2)).Should(Succeed())
+
+ // Update server statuses to Running
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ // Create MCPGroup
+ mcpGroup = &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ },
+ Spec: mcpv1alpha1.MCPGroupSpec{
+ Description: "Test group for state changes",
+ },
+ }
+ Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed())
+
+ // Wait for initial reconciliation - the group should find the servers
+ Eventually(func() int {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup); err != nil {
+ return -1
+ }
+ return updatedGroup.Status.ServerCount
+ }, timeout, interval).Should(Equal(2))
+ })
+
+ AfterAll(func() {
+ // Clean up
+ Expect(k8sClient.Delete(ctx, server1)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, server2)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed())
+
+ // Delete namespace
+ ns := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespace,
+ },
+ }
+ Expect(k8sClient.Delete(ctx, ns)).Should(Succeed())
+ })
+
+ It("Should remain Ready when reconciled after server status changes", func() {
+ // Update server1 status to Failed
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseFailed
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ // Status changes don't trigger MCPGroup reconciliation, so we need to trigger it
+ // by updating the MCPGroup spec (e.g., adding/updating description)
+ Eventually(func() error {
+ freshGroup := &mcpv1alpha1.MCPGroup{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: mcpGroupName, Namespace: namespace}, freshGroup); err != nil {
+ return err
+ }
+ freshGroup.Spec.Description = "Test group for state changes - updated"
+ return k8sClient.Update(ctx, freshGroup)
+ }, timeout, interval).Should(Succeed())
+
+ // The MCPGroup should still be Ready because it doesn't check individual server phases
+ // (it only checks if servers exist). This reflects the simplified controller logic.
+ Eventually(func() mcpv1alpha1.MCPGroupPhase {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup); err != nil {
+ return ""
+ }
+ return updatedGroup.Status.Phase
+ }, timeout, interval).Should(Equal(mcpv1alpha1.MCPGroupPhaseReady))
+
+ // Verify both servers are still counted
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespace,
+ }, updatedGroup)).Should(Succeed())
+ Expect(updatedGroup.Status.ServerCount).To(Equal(2))
+ })
+ })
+
+ Context("When testing namespace isolation", Ordered, func() {
+ var (
+ namespaceA string
+ namespaceB string
+ mcpGroupName string
+ mcpGroupA *mcpv1alpha1.MCPGroup
+ serverA *mcpv1alpha1.MCPServer
+ serverB *mcpv1alpha1.MCPServer
+ )
+
+ BeforeAll(func() {
+ namespaceA = fmt.Sprintf("test-ns-a-%d", time.Now().Unix())
+ namespaceB = fmt.Sprintf("test-ns-b-%d", time.Now().Unix())
+ mcpGroupName = "test-group"
+
+ // Create namespaces
+ nsA := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespaceA,
+ },
+ }
+ Expect(k8sClient.Create(ctx, nsA)).Should(Succeed())
+
+ nsB := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespaceB,
+ },
+ }
+ Expect(k8sClient.Create(ctx, nsB)).Should(Succeed())
+
+ // Create server in namespace A
+ serverA = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server-a",
+ Namespace: namespaceA,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName,
+ },
+ }
+ Expect(k8sClient.Create(ctx, serverA)).Should(Succeed())
+
+ // Create server in namespace B with same group name
+ serverB = &mcpv1alpha1.MCPServer{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: "server-b",
+ Namespace: namespaceB,
+ },
+ Spec: mcpv1alpha1.MCPServerSpec{
+ Image: "example/mcp-server:latest",
+ GroupRef: mcpGroupName, // Same group name, different namespace
+ },
+ }
+ Expect(k8sClient.Create(ctx, serverB)).Should(Succeed())
+
+ // Update server statuses
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: serverA.Name, Namespace: namespaceA}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ Eventually(func() error {
+ freshServer := &mcpv1alpha1.MCPServer{}
+ if err := k8sClient.Get(ctx, types.NamespacedName{Name: serverB.Name, Namespace: namespaceB}, freshServer); err != nil {
+ return err
+ }
+ freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+ return k8sClient.Status().Update(ctx, freshServer)
+ }, timeout, interval).Should(Succeed())
+
+ // Create MCPGroup in namespace A
+ mcpGroupA = &mcpv1alpha1.MCPGroup{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: mcpGroupName,
+ Namespace: namespaceA,
+ },
+ Spec: mcpv1alpha1.MCPGroupSpec{
+ Description: "Test group in namespace A",
+ },
+ }
+ Expect(k8sClient.Create(ctx, mcpGroupA)).Should(Succeed())
+ })
+
+ AfterAll(func() {
+ // Clean up
+ Expect(k8sClient.Delete(ctx, serverA)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, serverB)).Should(Succeed())
+ Expect(k8sClient.Delete(ctx, mcpGroupA)).Should(Succeed())
+
+ // Delete namespaces
+ nsA := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespaceA,
+ },
+ }
+ Expect(k8sClient.Delete(ctx, nsA)).Should(Succeed())
+
+ nsB := &corev1.Namespace{
+ ObjectMeta: metav1.ObjectMeta{
+ Name: namespaceB,
+ },
+ }
+ Expect(k8sClient.Delete(ctx, nsB)).Should(Succeed())
+ })
+
+ It("Should only include servers from the same namespace", func() {
+ // Wait for reconciliation
+ Eventually(func() bool {
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ err := k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespaceA,
+ }, updatedGroup)
+ return err == nil && updatedGroup.Status.ServerCount > 0
+ }, timeout, interval).Should(BeTrue())
+
+ // Verify only server-a is in the group
+ updatedGroup := &mcpv1alpha1.MCPGroup{}
+ Expect(k8sClient.Get(ctx, types.NamespacedName{
+ Name: mcpGroupName,
+ Namespace: namespaceA,
+ }, updatedGroup)).Should(Succeed())
+
+ Expect(updatedGroup.Status.ServerCount).To(Equal(1))
+ Expect(updatedGroup.Status.Servers).To(ContainElement("server-a"))
+ Expect(updatedGroup.Status.Servers).NotTo(ContainElement("server-b"))
+ })
+ })
+})
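
The "Should trigger MCPGroup reconciliation when server is created" case above only passes if the MCPGroup controller watches MCPServer objects and maps each event back to the referenced group. The actual SetupWithManager is not shown in this diff; the following is one plausible wiring with controller-runtime, where the handler and import set are assumptions:

// Sketch only: maps MCPServer events to the MCPGroup they reference.
func (r *MCPGroupReconciler) SetupWithManager(mgr ctrl.Manager) error {
	return ctrl.NewControllerManagedBy(mgr).
		For(&mcpv1alpha1.MCPGroup{}).
		Watches(&mcpv1alpha1.MCPServer{}, handler.EnqueueRequestsFromMapFunc(
			func(_ context.Context, obj client.Object) []reconcile.Request {
				server, ok := obj.(*mcpv1alpha1.MCPServer)
				if !ok || server.Spec.GroupRef == "" {
					return nil // servers without a group never re-queue anything
				}
				// Re-queue the group named by the server, in the same namespace.
				return []reconcile.Request{{NamespacedName: types.NamespacedName{
					Name:      server.Spec.GroupRef,
					Namespace: server.GetNamespace(),
				}}}
			})).
		Complete(r)
}
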
diff --git a/cmd/thv-operator/test-integration/mcp-group/suite_test.go b/cmd/thv-operator/test-integration/mcp-group/suite_test.go
new file mode 100644
index 000000000..db8d42f53
--- /dev/null
+++ b/cmd/thv-operator/test-integration/mcp-group/suite_test.go
@@ -0,0 +1,135 @@
+package operator_test
+
+import (
+ "context"
+ "path/filepath"
+ "testing"
+ "time"
+
+ . "github.com/onsi/ginkgo/v2"
+ . "github.com/onsi/gomega"
+ "go.uber.org/zap/zapcore"
+ appsv1 "k8s.io/api/apps/v1"
+ corev1 "k8s.io/api/core/v1"
+ rbacv1 "k8s.io/api/rbac/v1"
+ "k8s.io/client-go/kubernetes/scheme"
+ "k8s.io/client-go/rest"
+ ctrl "sigs.k8s.io/controller-runtime"
+ "sigs.k8s.io/controller-runtime/pkg/client"
+ "sigs.k8s.io/controller-runtime/pkg/envtest"
+ logf "sigs.k8s.io/controller-runtime/pkg/log"
+ "sigs.k8s.io/controller-runtime/pkg/log/zap"
+ metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server"
+
+ mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1"
+ "github.com/stacklok/toolhive/cmd/thv-operator/controllers"
+)
+
+// These tests use Ginkgo (BDD-style Go testing framework). Refer to
+// http://onsi.github.io/ginkgo/ to learn more about Ginkgo.
+
+var (
+ cfg *rest.Config
+ k8sClient client.Client
+ testEnv *envtest.Environment
+ ctx context.Context
+ cancel context.CancelFunc
+)
+
+func TestControllers(t *testing.T) {
+ t.Parallel()
+ RegisterFailHandler(Fail)
+ RunSpecs(t, "MCPGroup Controller Integration Suite")
+}
+
+var _ = BeforeSuite(func() {
+ logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.Level(zapcore.DebugLevel)))
+
+ ctx, cancel = context.WithCancel(context.TODO())
+
+ By("bootstrapping test environment")
+ testEnv = &envtest.Environment{
+ CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "deploy", "charts", "operator-crds", "crds")},
+ ErrorIfCRDPathMissing: true,
+ }
+
+ var err error
+ // cfg is defined in this file globally.
+ cfg, err = testEnv.Start()
+ Expect(err).NotTo(HaveOccurred())
+ Expect(cfg).NotTo(BeNil())
+
+ err = mcpv1alpha1.AddToScheme(scheme.Scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ // Add other schemes that the controllers use
+ err = appsv1.AddToScheme(scheme.Scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = corev1.AddToScheme(scheme.Scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ err = rbacv1.AddToScheme(scheme.Scheme)
+ Expect(err).NotTo(HaveOccurred())
+
+ //+kubebuilder:scaffold:scheme
+
+ k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme})
+ Expect(err).NotTo(HaveOccurred())
+ Expect(k8sClient).NotTo(BeNil())
+
+ // Start the controller manager
+ k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
+ Scheme: scheme.Scheme,
+ Metrics: metricsserver.Options{
+ BindAddress: "0", // Disable metrics server for tests to avoid port conflicts
+ },
+ HealthProbeBindAddress: "0", // Disable health probe for tests
+ })
+ Expect(err).ToNot(HaveOccurred())
+
+ // Set up field indexing for MCPServer.Spec.GroupRef
+ err = k8sManager.GetFieldIndexer().IndexField(
+ context.Background(),
+ &mcpv1alpha1.MCPServer{},
+ "spec.groupRef",
+ func(obj client.Object) []string {
+ mcpServer := obj.(*mcpv1alpha1.MCPServer)
+ if mcpServer.Spec.GroupRef == "" {
+ return nil
+ }
+ return []string{mcpServer.Spec.GroupRef}
+ },
+ )
+ Expect(err).ToNot(HaveOccurred())
+
+ // Register the MCPGroup controller
+ err = (&controllers.MCPGroupReconciler{
+ Client: k8sManager.GetClient(),
+ }).SetupWithManager(k8sManager)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Register the MCPServer controller (needed for watch tests)
+ err = (&controllers.MCPServerReconciler{
+ Client: k8sManager.GetClient(),
+ Scheme: k8sManager.GetScheme(),
+ }).SetupWithManager(k8sManager)
+ Expect(err).ToNot(HaveOccurred())
+
+ // Start the manager in a goroutine
+ go func() {
+ defer GinkgoRecover()
+ err = k8sManager.Start(ctx)
+ Expect(err).ToNot(HaveOccurred(), "failed to run manager")
+ }()
+
+})
+
+var _ = AfterSuite(func() {
+ By("tearing down the test environment")
+ cancel()
+ // Give it some time to shut down gracefully
+ time.Sleep(100 * time.Millisecond)
+ err := testEnv.Stop()
+ Expect(err).NotTo(HaveOccurred())
+})
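
Taken together, the suite asserts a narrow reconciler contract: status.servers lists member names, serverCount matches, and phase reflects only whether the server list could be computed, never the members' own phases. A sketch consistent with those assertions (not the shipped implementation; it assumes Client is embedded in MCPGroupReconciler and that the spec.groupRef index is registered as above):

// Sketch only: the observable behavior the integration tests require.
func (r *MCPGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) {
	group := &mcpv1alpha1.MCPGroup{}
	if err := r.Get(ctx, req.NamespacedName, group); err != nil {
		return ctrl.Result{}, client.IgnoreNotFound(err)
	}

	var servers mcpv1alpha1.MCPServerList
	if err := r.List(ctx, &servers,
		client.InNamespace(req.Namespace),
		client.MatchingFields{"spec.groupRef": group.Name},
	); err != nil {
		// Failing to list servers is the one path that is not Ready.
		group.Status.Phase = mcpv1alpha1.MCPGroupPhaseFailed
		_ = r.Status().Update(ctx, group)
		return ctrl.Result{}, err
	}

	names := make([]string, 0, len(servers.Items))
	for _, s := range servers.Items {
		names = append(names, s.Name)
	}
	group.Status.Servers = names
	group.Status.ServerCount = len(names)
	group.Status.Phase = mcpv1alpha1.MCPGroupPhaseReady
	return ctrl.Result{}, r.Status().Update(ctx, group)
}
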
diff --git a/deploy/charts/operator-crds/Chart.yaml b/deploy/charts/operator-crds/Chart.yaml
index 255f4a91e..78fe9643a 100644
--- a/deploy/charts/operator-crds/Chart.yaml
+++ b/deploy/charts/operator-crds/Chart.yaml
@@ -2,5 +2,5 @@ apiVersion: v2
name: toolhive-operator-crds
description: A Helm chart for installing the ToolHive Operator CRDs into Kubernetes.
type: application
-version: 0.0.36
+version: 0.0.37
appVersion: "0.0.1"
diff --git a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml
new file mode 100644
index 000000000..28257e381
--- /dev/null
+++ b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml
@@ -0,0 +1,134 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+ annotations:
+ controller-gen.kubebuilder.io/version: v0.17.3
+ name: mcpgroups.toolhive.stacklok.dev
+spec:
+ group: toolhive.stacklok.dev
+ names:
+ kind: MCPGroup
+ listKind: MCPGroupList
+ plural: mcpgroups
+ shortNames:
+ - mcpg
+ - mcpgroup
+ singular: mcpgroup
+ scope: Namespaced
+ versions:
+ - additionalPrinterColumns:
+ - jsonPath: .status.conditions[?(@.type=='MCPServersChecked')].status
+ name: Ready
+ type: string
+ name: v1alpha1
+ schema:
+ openAPIV3Schema:
+ description: MCPGroup is the Schema for the mcpgroups API
+ properties:
+ apiVersion:
+ description: |-
+ APIVersion defines the versioned schema of this representation of an object.
+ Servers should convert recognized schemas to the latest internal value, and
+ may reject unrecognized values.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+ type: string
+ kind:
+ description: |-
+ Kind is a string value representing the REST resource this object represents.
+ Servers may infer this from the endpoint the client submits requests to.
+ Cannot be updated.
+ In CamelCase.
+ More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+ type: string
+ metadata:
+ type: object
+ spec:
+ description: MCPGroupSpec defines the desired state of MCPGroup
+ properties:
+ description:
+ description: Description provides human-readable context
+ type: string
+ type: object
+ status:
+ description: MCPGroupStatus defines observed state
+ properties:
+ conditions:
+ description: Conditions represent observations
+ items:
+ description: Condition contains details for one aspect of the current
+ state of this API Resource.
+ properties:
+ lastTransitionTime:
+ description: |-
+ lastTransitionTime is the last time the condition transitioned from one status to another.
+ This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+ format: date-time
+ type: string
+ message:
+ description: |-
+ message is a human readable message indicating details about the transition.
+ This may be an empty string.
+ maxLength: 32768
+ type: string
+ observedGeneration:
+ description: |-
+ observedGeneration represents the .metadata.generation that the condition was set based upon.
+ For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+ with respect to the current state of the instance.
+ format: int64
+ minimum: 0
+ type: integer
+ reason:
+ description: |-
+ reason contains a programmatic identifier indicating the reason for the condition's last transition.
+ Producers of specific condition types may define expected values and meanings for this field,
+ and whether the values are considered a guaranteed API.
+ The value should be a CamelCase string.
+ This field may not be empty.
+ maxLength: 1024
+ minLength: 1
+ pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+ type: string
+ status:
+ description: status of the condition, one of True, False, Unknown.
+ enum:
+ - "True"
+ - "False"
+ - Unknown
+ type: string
+ type:
+ description: type of condition in CamelCase or in foo.example.com/CamelCase.
+ maxLength: 316
+ pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+ type: string
+ required:
+ - lastTransitionTime
+ - message
+ - reason
+ - status
+ - type
+ type: object
+ type: array
+ phase:
+ default: Pending
+ description: Phase indicates current state
+ enum:
+ - Ready
+ - Pending
+ - Failed
+ type: string
+ serverCount:
+ description: ServerCount is the number of servers
+ type: integer
+ servers:
+ description: Servers lists server names in this group
+ items:
+ type: string
+ type: array
+ type: object
+ type: object
+ served: true
+ storage: true
+ subresources:
+ status: {}
diff --git a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml
index 44808fcab..2eb9089b3 100644
--- a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml
+++ b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml
@@ -142,6 +142,11 @@ spec:
required:
- name
type: object
+ groupRef:
+ description: |-
+ GroupRef is the name of the MCPGroup this server belongs to
+ Must reference an existing MCPGroup in the same namespace
+ type: string
image:
description: Image is the container image for the MCP server
type: string
diff --git a/deploy/charts/operator/Chart.yaml b/deploy/charts/operator/Chart.yaml
index d80c2330a..bc9c283da 100644
--- a/deploy/charts/operator/Chart.yaml
+++ b/deploy/charts/operator/Chart.yaml
@@ -2,5 +2,5 @@ apiVersion: v2
name: toolhive-operator
description: A Helm chart for deploying the ToolHive Operator into Kubernetes.
type: application
-version: 0.2.23
+version: 0.2.25
appVersion: "0.3.7"
diff --git a/deploy/charts/operator/templates/clusterrole/role.yaml b/deploy/charts/operator/templates/clusterrole/role.yaml
index 5685e52d1..3cdbae899 100644
--- a/deploy/charts/operator/templates/clusterrole/role.yaml
+++ b/deploy/charts/operator/templates/clusterrole/role.yaml
@@ -101,6 +101,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs
+ - mcpgroups
- mcpregistries
- mcpservers
- mcptoolconfigs
@@ -116,6 +117,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs/finalizers
+ - mcpgroups/finalizers
- mcpregistries/finalizers
- mcpservers/finalizers
- mcptoolconfigs/finalizers
@@ -125,6 +127,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs/status
+ - mcpgroups/status
- mcpregistries/status
- mcpservers/status
- mcptoolconfigs/status
diff --git a/docs/operator/crd-api.md b/docs/operator/crd-api.md
index b824fc196..f911ab18d 100644
--- a/docs/operator/crd-api.md
+++ b/docs/operator/crd-api.md
@@ -11,6 +11,8 @@ Package v1alpha1 contains API Schema definitions for the toolhive v1alpha1 API g
### Resource Types
- [MCPExternalAuthConfig](#mcpexternalauthconfig)
- [MCPExternalAuthConfigList](#mcpexternalauthconfiglist)
+- [MCPGroup](#mcpgroup)
+- [MCPGroupList](#mcpgrouplist)
- [MCPRegistry](#mcpregistry)
- [MCPRegistryList](#mcpregistrylist)
- [MCPRemoteProxy](#mcpremoteproxy)
@@ -350,6 +352,102 @@ _Appears in:_
| `referencingServers` _string array_ | ReferencingServers is a list of MCPServer resources that reference this MCPExternalAuthConfig. This helps track which servers need to be reconciled when this config changes | | |
+#### MCPGroup
+
+
+
+MCPGroup is the Schema for the mcpgroups API
+
+
+
+_Appears in:_
+- [MCPGroupList](#mcpgrouplist)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `toolhive.stacklok.dev/v1alpha1` | | |
+| `kind` _string_ | `MCPGroup` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ObjectMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#objectmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `spec` _[MCPGroupSpec](#mcpgroupspec)_ | | | |
+| `status` _[MCPGroupStatus](#mcpgroupstatus)_ | | | |
+
+
+#### MCPGroupList
+
+
+
+MCPGroupList contains a list of MCPGroup
+
+
+
+
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `apiVersion` _string_ | `toolhive.stacklok.dev/v1alpha1` | | |
+| `kind` _string_ | `MCPGroupList` | | |
+| `kind` _string_ | Kind is a string value representing the REST resource this object represents. Servers may infer this from the endpoint the client submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds | | |
+| `apiVersion` _string_ | APIVersion defines the versioned schema of this representation of an object. Servers should convert recognized schemas to the latest internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources | | |
+| `metadata` _[ListMeta](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#listmeta-v1-meta)_ | Refer to Kubernetes API documentation for fields of `metadata`. | | |
+| `items` _[MCPGroup](#mcpgroup) array_ | | | |
+
+
+#### MCPGroupPhase
+
+_Underlying type:_ _string_
+
+MCPGroupPhase represents the lifecycle phase of an MCPGroup
+
+_Validation:_
+- Enum: [Ready Pending Failed]
+
+_Appears in:_
+- [MCPGroupStatus](#mcpgroupstatus)
+
+| Field | Description |
+| --- | --- |
+| `Ready` | MCPGroupPhaseReady indicates the MCPGroup is ready |
+| `Pending` | MCPGroupPhasePending indicates the MCPGroup is pending |
+| `Failed` | MCPGroupPhaseFailed indicates the MCPGroup has failed |
+
+
+#### MCPGroupSpec
+
+
+
+MCPGroupSpec defines the desired state of MCPGroup
+
+
+
+_Appears in:_
+- [MCPGroup](#mcpgroup)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `description` _string_ | Description provides human-readable context | | |
+
+
+#### MCPGroupStatus
+
+
+
+MCPGroupStatus defines observed state
+
+
+
+_Appears in:_
+- [MCPGroup](#mcpgroup)
+
+| Field | Description | Default | Validation |
+| --- | --- | --- | --- |
+| `phase` _[MCPGroupPhase](#mcpgroupphase)_ | Phase indicates current state | Pending | Enum: [Ready Pending Failed] |
+| `servers` _string array_ | Servers lists server names in this group | | |
+| `serverCount` _integer_ | ServerCount is the number of servers | | |
+| `conditions` _[Condition](https://kubernetes.io/docs/reference/generated/kubernetes-api/v1.27/#condition-v1-meta) array_ | Conditions represent observations | | |
+
+
#### MCPRegistry
@@ -685,6 +783,7 @@ _Appears in:_
| `externalAuthConfigRef` _[ExternalAuthConfigRef](#externalauthconfigref)_ | ExternalAuthConfigRef references a MCPExternalAuthConfig resource for external authentication. The referenced MCPExternalAuthConfig must exist in the same namespace as this MCPServer. | | |
| `telemetry` _[TelemetryConfig](#telemetryconfig)_ | Telemetry defines observability configuration for the MCP server | | |
| `trustProxyHeaders` _boolean_ | TrustProxyHeaders indicates whether to trust X-Forwarded-* headers from reverse proxies. When enabled, the proxy will use X-Forwarded-Proto, X-Forwarded-Host, X-Forwarded-Port, and X-Forwarded-Prefix headers to construct endpoint URLs | false | |
+| `groupRef` _string_ | GroupRef is the name of the MCPGroup this server belongs to. Must reference an existing MCPGroup in the same namespace | | |
#### MCPServerStatus
diff --git a/pkg/container/docker/sdk/client_windows.go b/pkg/container/docker/sdk/client_windows.go
index 24bac7abc..63d271076 100644
--- a/pkg/container/docker/sdk/client_windows.go
+++ b/pkg/container/docker/sdk/client_windows.go
@@ -13,6 +13,7 @@ import (
"github.com/Microsoft/go-winio"
"github.com/docker/docker/client"
+
"github.com/stacklok/toolhive/pkg/container/runtime"
"github.com/stacklok/toolhive/pkg/logger"
)
diff --git a/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml b/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml
index 5685e52d1..3cdbae899 100644
--- a/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml
+++ b/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml
@@ -101,6 +101,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs
+ - mcpgroups
- mcpregistries
- mcpservers
- mcptoolconfigs
@@ -116,6 +117,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs/finalizers
+ - mcpgroups/finalizers
- mcpregistries/finalizers
- mcpservers/finalizers
- mcptoolconfigs/finalizers
@@ -125,6 +127,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs/status
+ - mcpgroups/status
- mcpregistries/status
- mcpservers/status
- mcptoolconfigs/status
diff --git a/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml b/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml
index 5685e52d1..3cdbae899 100644
--- a/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml
+++ b/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml
@@ -101,6 +101,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs
+ - mcpgroups
- mcpregistries
- mcpservers
- mcptoolconfigs
@@ -116,6 +117,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs/finalizers
+ - mcpgroups/finalizers
- mcpregistries/finalizers
- mcpservers/finalizers
- mcptoolconfigs/finalizers
@@ -125,6 +127,7 @@ rules:
- toolhive.stacklok.dev
resources:
- mcpexternalauthconfigs/status
+ - mcpgroups/status
- mcpregistries/status
- mcpservers/status
- mcptoolconfigs/status
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/assert-mcpserver-groupref-not-validated.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/assert-mcpserver-groupref-not-validated.yaml
new file mode 100644
index 000000000..c86cd4532
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/assert-mcpserver-groupref-not-validated.yaml
@@ -0,0 +1,8 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+ name: test-server
+ namespace: toolhive-system
+status:
+ (conditions[?type == 'GroupRefValidated'] | [0].status): "False"
+ (conditions[?type == 'GroupRefValidated'] | [0].reason): "GroupRefNotFound"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/chainsaw-test.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/chainsaw-test.yaml
new file mode 100644
index 000000000..b18bdb645
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/chainsaw-test.yaml
@@ -0,0 +1,28 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+ name: invalid-groupref-test
+spec:
+ description: Test MCPServer with GroupRef pointing to non-existent group
+ timeouts:
+ apply: 30s
+ assert: 30s
+ steps:
+ - name: verify-operator
+ description: Ensure operator is ready before testing
+ try:
+ - assert:
+ file: ../../../setup/assert-operator-ready.yaml
+ - name: Create MCPServer with non-existent GroupRef
+ try:
+ - apply:
+ file: mcpserver.yaml
+ - assert:
+ file: assert-mcpserver-groupref-not-validated.yaml
+ - name: Cleanup
+ try:
+ - delete:
+ ref:
+ apiVersion: toolhive.stacklok.dev/v1alpha1
+ kind: MCPServer
+ name: test-server
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/mcpserver.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/mcpserver.yaml
new file mode 100644
index 000000000..a012ccaf5
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/mcpserver.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+ name: test-server
+ namespace: toolhive-system
+spec:
+ image: ghcr.io/modelcontextprotocol/servers/filesystem:0.6.2
+ transport: stdio
+ groupRef: non-existent-group
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-empty.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-empty.yaml
new file mode 100644
index 000000000..2496539c8
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-empty.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+ name: mcpgroup-test-group-1
+ namespace: toolhive-system
+status:
+ phase: Ready
+ serverCount: 0
+ servers: []
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-with-server.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-with-server.yaml
new file mode 100644
index 000000000..53c224afc
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-with-server.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+ name: mcpgroup-test-group-1
+ namespace: toolhive-system
+status:
+ phase: Ready
+ serverCount: 1
+ servers: ["test-server-with-groupref-1"]
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/chainsaw-test.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/chainsaw-test.yaml
new file mode 100644
index 000000000..cd82ff43f
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/chainsaw-test.yaml
@@ -0,0 +1,35 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+ name: mcpgroup-lifecycle-test
+spec:
+ description: Test basic MCPGroup creation, update, and status
+ timeouts:
+ apply: 30s
+ assert: 30s
+ cleanup: 30s
+ exec: 300s
+ steps:
+ - name: verify-operator
+ description: Ensure operator is ready before testing
+ try:
+ - assert:
+ file: ../../../setup/assert-operator-ready.yaml
+ - name: create-mcpgroup
+ description: Create an MCPGroup and verify it's created
+ try:
+ - apply:
+ file: mcpgroup.yaml
+ - assert:
+ file: mcpgroup.yaml
+ - assert:
+ file: assert-mcpgroup-empty.yaml
+ - name: add-mcpserver
+ description: Create an MCPServer that references the MCPGroup and verify status update
+ try:
+ - apply:
+ file: mcpserver-1.yaml
+ - assert:
+ file: mcpserver-1.yaml
+ - assert:
+ file: assert-mcpgroup-with-server.yaml
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpgroup.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpgroup.yaml
new file mode 100644
index 000000000..0b1ed39bb
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpgroup.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+ name: mcpgroup-test-group-1
+ namespace: toolhive-system
+spec:
+ description: "Test group for basic lifecycle testing"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpserver-1.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpserver-1.yaml
new file mode 100644
index 000000000..4f611d2b9
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpserver-1.yaml
@@ -0,0 +1,20 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+ name: test-server-with-groupref-1
+ namespace: toolhive-system
+spec:
+ image: ghcr.io/stackloklabs/yardstick/yardstick-server:0.0.2
+ transport: stdio
+ groupRef: mcpgroup-test-group-1
+ env:
+ - name: TRANSPORT
+ value: stdio
+ port: 8080
+ resources:
+ limits:
+ cpu: "100m"
+ memory: "128Mi"
+ requests:
+ cpu: "50m"
+ memory: "64Mi"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpgroup-ready.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpgroup-ready.yaml
new file mode 100644
index 000000000..dba76268d
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpgroup-ready.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+ name: test-group
+ namespace: toolhive-system
+status:
+ phase: Ready
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpserver-groupref-validated.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpserver-groupref-validated.yaml
new file mode 100644
index 000000000..6a0d2ffe1
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpserver-groupref-validated.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+ name: test-server
+ namespace: toolhive-system
+status:
+ (conditions[?type == 'GroupRefValidated'] | [0].status): "True"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/chainsaw-test.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/chainsaw-test.yaml
new file mode 100644
index 000000000..a19aa1631
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/chainsaw-test.yaml
@@ -0,0 +1,39 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+ name: valid-groupref-test
+spec:
+ description: Test MCPServer with valid GroupRef
+ timeouts:
+ apply: 30s
+ assert: 30s
+ steps:
+ - name: verify-operator
+ description: Ensure operator is ready before testing
+ try:
+ - assert:
+ file: ../../../setup/assert-operator-ready.yaml
+ - name: Create MCPGroup
+ try:
+ - apply:
+ file: mcpgroup.yaml
+ - assert:
+ file: assert-mcpgroup-ready.yaml
+ - name: Create MCPServer with GroupRef
+ try:
+ - apply:
+ file: mcpserver.yaml
+ - assert:
+ file: assert-mcpserver-groupref-validated.yaml
+ - name: Cleanup
+ try:
+ - delete:
+ ref:
+ apiVersion: toolhive.stacklok.dev/v1alpha1
+ kind: MCPServer
+ name: test-server
+ - delete:
+ ref:
+ apiVersion: toolhive.stacklok.dev/v1alpha1
+ kind: MCPGroup
+ name: test-group
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpgroup.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpgroup.yaml
new file mode 100644
index 000000000..10a282cff
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpgroup.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+ name: test-group
+ namespace: toolhive-system
+spec:
+ description: "Test group for GroupRef validation"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpserver.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpserver.yaml
new file mode 100644
index 000000000..9cf38e36f
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpserver.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+ name: test-server
+ namespace: toolhive-system
+spec:
+ image: ghcr.io/modelcontextprotocol/servers/filesystem:0.6.2
+ transport: stdio
+ groupRef: test-group