From 9ad4e9e29ec606791485381c65a84da724e6c79c Mon Sep 17 00:00:00 2001 From: Trey Date: Wed, 15 Oct 2025 09:52:30 -0700 Subject: [PATCH] MCPGroup types and controller Implements the MCPGroup types and Kubernetes controller per [this proposal](https://github.com/stacklok/toolhive/blob/73707af47db039f51b2bda07d1ef8eccb25ba819/docs/proposals/kubernetes-mcpgroup-crd.md). The MCP group controller reconciles MCPGroup resources and watches for changes to MCPServer resources. When an MCPServer changes, the controller requests reconciliation of the server's referenced MCPGroup. The MCP server controller checks the groupRef during reconciliation and sets status conditions accordingly (an invalid groupRef is reported via status conditions rather than treated as a fatal error). Generated code and manifests. ```console toolhive % task operator-generate toolhive % task operator-manifests ``` Added unit, integration, and e2e (chainsaw) tests. ```console toolhive % task operator-test toolhive % task operator-test-integration toolhive % task operator-e2e-test ``` --- cmd/thv-operator/Taskfile.yml | 2 +- .../api/v1alpha1/mcpgroup_types.go | 86 +++ .../api/v1alpha1/mcpserver_types.go | 19 + .../api/v1alpha1/zz_generated.deepcopy.go | 101 +++ .../controllers/mcpgroup_controller.go | 145 ++++ .../controllers/mcpgroup_controller_test.go | 625 +++++++++++++++ .../controllers/mcpserver_controller.go | 38 + .../controllers/mcpserver_groupref_test.go | 346 +++++++++ cmd/thv-operator/main.go | 8 + .../mcpgroup_controller_integration_test.go | 718 ++++++++++++++++++ .../test-integration/mcp-group/suite_test.go | 120 +++ .../crds/toolhive.stacklok.dev_mcpgroups.yaml | 127 ++++ .../toolhive.stacklok.dev_mcpservers.yaml | 5 + .../operator/templates/clusterrole/role.yaml | 3 + .../setup/assert-rbac-clusterrole.yaml | 3 + .../setup/assert-rbac-clusterrole.yaml | 3 + ...sert-mcpserver-groupref-not-validated.yaml | 8 + .../invalid-groupref/chainsaw-test.yaml | 28 + .../mcpgroup/invalid-groupref/mcpserver.yaml | 9 + .../lifecycle/assert-mcpgroup-empty.yaml | 9 + .../assert-mcpgroup-with-server.yaml | 9 + .../mcpgroup/lifecycle/chainsaw-test.yaml | 35 + .../mcpgroup/lifecycle/mcpgroup.yaml | 7 + .../mcpgroup/lifecycle/mcpserver-1.yaml | 20 + .../valid-groupref/assert-mcpgroup-ready.yaml | 7 + .../assert-mcpserver-groupref-validated.yaml | 7 + .../valid-groupref/chainsaw-test.yaml | 39 + .../mcpgroup/valid-groupref/mcpgroup.yaml | 7 + .../mcpgroup/valid-groupref/mcpserver.yaml | 9 + 29 files changed, 2542 insertions(+), 1 deletion(-) create mode 100644 cmd/thv-operator/api/v1alpha1/mcpgroup_types.go create mode 100644 cmd/thv-operator/controllers/mcpgroup_controller.go create mode 100644 cmd/thv-operator/controllers/mcpgroup_controller_test.go create mode 100644 cmd/thv-operator/controllers/mcpserver_groupref_test.go create mode 100644 cmd/thv-operator/test-integration/mcp-group/mcpgroup_controller_integration_test.go create mode 100644 cmd/thv-operator/test-integration/mcp-group/suite_test.go create mode 100644 deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/assert-mcpserver-groupref-not-validated.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/chainsaw-test.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/mcpserver.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-empty.yaml create mode 
100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-with-server.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/chainsaw-test.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpgroup.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpserver-1.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpgroup-ready.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpserver-groupref-validated.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/chainsaw-test.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpgroup.yaml create mode 100644 test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpserver.yaml diff --git a/cmd/thv-operator/Taskfile.yml b/cmd/thv-operator/Taskfile.yml index a2c6580ed..b5356b06b 100644 --- a/cmd/thv-operator/Taskfile.yml +++ b/cmd/thv-operator/Taskfile.yml @@ -220,7 +220,7 @@ tasks: - chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/setup - chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/test-scenarios - chainsaw test --test-dir test/e2e/chainsaw/operator/single-tenancy/cleanup - + operator-run: desc: Run the operator controller locally cmds: diff --git a/cmd/thv-operator/api/v1alpha1/mcpgroup_types.go b/cmd/thv-operator/api/v1alpha1/mcpgroup_types.go new file mode 100644 index 000000000..193ce29a0 --- /dev/null +++ b/cmd/thv-operator/api/v1alpha1/mcpgroup_types.go @@ -0,0 +1,86 @@ +package v1alpha1 + +import ( + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" +) + +// MCPGroupSpec defines the desired state of MCPGroup +type MCPGroupSpec struct { + // Description provides human-readable context + // +optional + Description string `json:"description,omitempty"` +} + +// MCPGroupStatus defines the observed state of MCPGroup +type MCPGroupStatus struct { + // Phase indicates the current lifecycle state + // +optional + // +kubebuilder:default=Pending + Phase MCPGroupPhase `json:"phase,omitempty"` + + // Servers lists server names in this group + // +optional + Servers []string `json:"servers"` + + // ServerCount is the number of servers + // +optional + ServerCount int `json:"serverCount"` + + // Conditions represent the latest observations of the MCPGroup's state + // +optional + Conditions []metav1.Condition `json:"conditions,omitempty"` +} + +// MCPGroupPhase represents the lifecycle phase of an MCPGroup +// +kubebuilder:validation:Enum=Ready;Pending;Failed +type MCPGroupPhase string + +const ( + // MCPGroupPhaseReady indicates the MCPGroup is ready + MCPGroupPhaseReady MCPGroupPhase = "Ready" + + // MCPGroupPhasePending indicates the MCPGroup is pending + MCPGroupPhasePending MCPGroupPhase = "Pending" + + // MCPGroupPhaseFailed indicates the MCPGroup has failed + MCPGroupPhaseFailed MCPGroupPhase = "Failed" +) + +// Condition types for MCPGroup +const ( + ConditionTypeMCPServersChecked = "MCPServersChecked" +) + +// Condition reasons for MCPGroup conditions +const ( + ConditionReasonListMCPServersFailed = "ListMCPServersFailed" + ConditionReasonListMCPServersSucceeded = "ListMCPServersSucceeded" +) + +//+kubebuilder:object:root=true +//+kubebuilder:subresource:status 
+//+kubebuilder:printcolumn:name="Servers",type="integer",JSONPath=".status.serverCount",description="The number of MCPServers in this group" +//+kubebuilder:printcolumn:name="Phase",type="string",JSONPath=".status.phase",description="The phase of the MCPGroup" +//+kubebuilder:printcolumn:name="Age",type="date",JSONPath=".metadata.creationTimestamp",description="The age of the MCPGroup" + +// MCPGroup is the Schema for the mcpgroups API +type MCPGroup struct { + metav1.TypeMeta `json:",inline"` // nolint:revive + metav1.ObjectMeta `json:"metadata,omitempty"` + + Spec MCPGroupSpec `json:"spec,omitempty"` + Status MCPGroupStatus `json:"status,omitempty"` +} + +//+kubebuilder:object:root=true + +// MCPGroupList contains a list of MCPGroup +type MCPGroupList struct { + metav1.TypeMeta `json:",inline"` // nolint:revive + metav1.ListMeta `json:"metadata,omitempty"` + Items []MCPGroup `json:"items"` +} + +func init() { + SchemeBuilder.Register(&MCPGroup{}, &MCPGroupList{}) +} diff --git a/cmd/thv-operator/api/v1alpha1/mcpserver_types.go b/cmd/thv-operator/api/v1alpha1/mcpserver_types.go index b54fea6af..a1b503b87 100644 --- a/cmd/thv-operator/api/v1alpha1/mcpserver_types.go +++ b/cmd/thv-operator/api/v1alpha1/mcpserver_types.go @@ -9,6 +9,9 @@ import ( const ( // ConditionImageValidated indicates whether this image is fine to be used ConditionImageValidated = "ImageValidated" + + // ConditionGroupRefValidated indicates whether the GroupRef is valid + ConditionGroupRefValidated = "GroupRefValidated" ) const ( @@ -22,6 +25,17 @@ const ( ConditionReasonImageValidationSkipped = "ImageValidationSkipped" ) +const ( + // ConditionReasonGroupRefValidated indicates the GroupRef is valid + ConditionReasonGroupRefValidated = "GroupRefIsValid" + + // ConditionReasonGroupRefNotFound indicates the referenced MCPGroup was not found + ConditionReasonGroupRefNotFound = "GroupRefNotFound" + + // ConditionReasonGroupRefNotReady indicates the referenced MCPGroup is not in the Ready state + ConditionReasonGroupRefNotReady = "GroupRefNotReady" +) + // MCPServerSpec defines the desired state of MCPServer type MCPServerSpec struct { // Image is the container image for the MCP server @@ -126,6 +140,11 @@ type MCPServerSpec struct { // +kubebuilder:default=false // +optional TrustProxyHeaders bool `json:"trustProxyHeaders,omitempty"` + + // GroupRef is the name of the MCPGroup this server belongs to + // Must reference an existing MCPGroup in the same namespace + // +optional + GroupRef string `json:"groupRef,omitempty"` } // ResourceOverrides defines overrides for annotations and labels on created resources diff --git a/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go b/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go index 4f9a24f21..e23d8b93b 100644 --- a/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go +++ b/cmd/thv-operator/api/v1alpha1/zz_generated.deepcopy.go @@ -215,6 +215,107 @@ func (in *KubernetesOIDCConfig) DeepCopy() *KubernetesOIDCConfig { return out } +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MCPGroup) DeepCopyInto(out *MCPGroup) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ObjectMeta.DeepCopyInto(&out.ObjectMeta) + out.Spec = in.Spec + in.Status.DeepCopyInto(&out.Status) +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroup. 
+func (in *MCPGroup) DeepCopy() *MCPGroup { + if in == nil { + return nil + } + out := new(MCPGroup) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MCPGroup) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MCPGroupList) DeepCopyInto(out *MCPGroupList) { + *out = *in + out.TypeMeta = in.TypeMeta + in.ListMeta.DeepCopyInto(&out.ListMeta) + if in.Items != nil { + in, out := &in.Items, &out.Items + *out = make([]MCPGroup, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroupList. +func (in *MCPGroupList) DeepCopy() *MCPGroupList { + if in == nil { + return nil + } + out := new(MCPGroupList) + in.DeepCopyInto(out) + return out +} + +// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object. +func (in *MCPGroupList) DeepCopyObject() runtime.Object { + if c := in.DeepCopy(); c != nil { + return c + } + return nil +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MCPGroupSpec) DeepCopyInto(out *MCPGroupSpec) { + *out = *in +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroupSpec. +func (in *MCPGroupSpec) DeepCopy() *MCPGroupSpec { + if in == nil { + return nil + } + out := new(MCPGroupSpec) + in.DeepCopyInto(out) + return out +} + +// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. +func (in *MCPGroupStatus) DeepCopyInto(out *MCPGroupStatus) { + *out = *in + if in.Servers != nil { + in, out := &in.Servers, &out.Servers + *out = make([]string, len(*in)) + copy(*out, *in) + } + if in.Conditions != nil { + in, out := &in.Conditions, &out.Conditions + *out = make([]v1.Condition, len(*in)) + for i := range *in { + (*in)[i].DeepCopyInto(&(*out)[i]) + } + } +} + +// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new MCPGroupStatus. +func (in *MCPGroupStatus) DeepCopy() *MCPGroupStatus { + if in == nil { + return nil + } + out := new(MCPGroupStatus) + in.DeepCopyInto(out) + return out +} + // DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil. 
func (in *MCPRegistry) DeepCopyInto(out *MCPRegistry) { *out = *in diff --git a/cmd/thv-operator/controllers/mcpgroup_controller.go b/cmd/thv-operator/controllers/mcpgroup_controller.go new file mode 100644 index 000000000..e088dac10 --- /dev/null +++ b/cmd/thv-operator/controllers/mcpgroup_controller.go @@ -0,0 +1,145 @@ +package controllers + +import ( + "context" + + mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1" + "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/handler" + "sigs.k8s.io/controller-runtime/pkg/log" +) + +// MCPGroupReconciler reconciles MCPGroup objects, keeping each group's status in sync with the MCPServers that reference it. +type MCPGroupReconciler struct { + client.Client +} + +// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpgroups,verbs=get;list;watch;create;update;patch;delete +// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpgroups/status,verbs=get;update;patch +// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpgroups/finalizers,verbs=update +// +kubebuilder:rbac:groups=toolhive.stacklok.dev,resources=mcpservers,verbs=get;list;watch + +// Reconcile is part of the main kubernetes reconciliation loop +// which aims to move the current state of the cluster closer to the desired state. +func (r *MCPGroupReconciler) Reconcile(ctx context.Context, req ctrl.Request) (ctrl.Result, error) { + ctxLogger := log.FromContext(ctx) + ctxLogger.Info("Reconciling MCPGroup", "mcpgroup", req.NamespacedName) + + // Fetch the MCPGroup instance + mcpGroup := &mcpv1alpha1.MCPGroup{} + err := r.Get(ctx, req.NamespacedName, mcpGroup) + if err != nil { + if errors.IsNotFound(err) { + // Request object not found, could have been deleted after reconcile request. + // Return and don't requeue + ctxLogger.Info("MCPGroup resource not found. Ignoring since object must be deleted.") + return ctrl.Result{}, nil + } + // Error reading the object - requeue the request. 
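+ // Returning a non-nil error below causes controller-runtime to requeue this request with exponential backoff.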
+ ctxLogger.Error(err, "Failed to get MCPGroup", "mcpgroup", req.NamespacedName) + return ctrl.Result{}, err + } + + // List all MCPServers in the same namespace + mcpServerList := &mcpv1alpha1.MCPServerList{} + if err := r.List(ctx, mcpServerList, client.InNamespace(req.Namespace)); err != nil { + ctxLogger.Error(err, "Failed to list MCPServers") + mcpGroup.Status.Phase = mcpv1alpha1.MCPGroupPhaseFailed + meta.SetStatusCondition(&mcpGroup.Status.Conditions, metav1.Condition{ + Type: mcpv1alpha1.ConditionTypeMCPServersChecked, + Status: metav1.ConditionFalse, + Reason: mcpv1alpha1.ConditionReasonListMCPServersFailed, + Message: "Failed to list MCPServers in namespace", + }) + mcpGroup.Status.ServerCount = 0 + mcpGroup.Status.Servers = nil + // Update the MCPGroup status to reflect the failure + if updateErr := r.Status().Update(ctx, mcpGroup); updateErr != nil { + ctxLogger.Error(updateErr, "Failed to update MCPGroup status after list failure") + } + return ctrl.Result{}, nil + } else { + meta.SetStatusCondition(&mcpGroup.Status.Conditions, metav1.Condition{ + Type: mcpv1alpha1.ConditionTypeMCPServersChecked, + Status: metav1.ConditionTrue, + Reason: mcpv1alpha1.ConditionReasonListMCPServersSucceeded, + Message: "Successfully listed MCPServers in namespace", + }) + } + + // Filter servers that belong to this group + filteredServers := []mcpv1alpha1.MCPServer{} + for _, server := range mcpServerList.Items { + if server.Spec.GroupRef == mcpGroup.Name { + filteredServers = append(filteredServers, server) + } + } + + // Set server count and names + mcpGroup.Status.ServerCount = len(filteredServers) + if len(filteredServers) == 0 { + // Ensure servers is an empty slice, not nil + mcpGroup.Status.Servers = []string{} + } else { + mcpGroup.Status.Servers = make([]string, len(filteredServers)) + for i, server := range filteredServers { + mcpGroup.Status.Servers[i] = server.Name + } + } + + // Set status conditions + mcpGroup.Status.Phase = mcpv1alpha1.MCPGroupPhaseReady + + // Update the MCPGroup status + if err := r.Status().Update(ctx, mcpGroup); err != nil { + ctxLogger.Error(err, "Failed to update MCPGroup status") + return ctrl.Result{}, err + } + + ctxLogger.Info("Successfully reconciled MCPGroup", "serverCount", mcpGroup.Status.ServerCount) + return ctrl.Result{}, nil +} + +func (r *MCPGroupReconciler) findMCPGroupForMCPServer(ctx context.Context, obj client.Object) []ctrl.Request { + ctxLogger := log.FromContext(ctx) + + // Get the MCPServer object + mcpServer, ok := obj.(*mcpv1alpha1.MCPServer) + if !ok { + ctxLogger.Error(nil, "Object is not an MCPServer", "object", obj.GetName()) + return []ctrl.Request{} + } + if mcpServer.Spec.GroupRef == "" { + // No MCPGroup reference, nothing to do + return []ctrl.Request{} + } + + // Find which MCPGroup this MCPServer belongs to + ctxLogger.Info("Finding MCPGroup for MCPServer", "namespace", obj.GetNamespace(), "mcpserver", obj.GetName(), "groupRef", mcpServer.Spec.GroupRef) + group := &mcpv1alpha1.MCPGroup{} + if err := r.Get(ctx, types.NamespacedName{Namespace: obj.GetNamespace(), Name: mcpServer.Spec.GroupRef}, group); err != nil { + ctxLogger.Error(err, "Failed to get MCPGroup for MCPServer", "namespace", obj.GetNamespace(), "name", mcpServer.Spec.GroupRef) + return []ctrl.Request{} + } + return []ctrl.Request{ + { + NamespacedName: types.NamespacedName{ + Namespace: obj.GetNamespace(), + Name: group.Name, + }, + }, + } +} + +func (r *MCPGroupReconciler) SetupWithManager(mgr ctrl.Manager) error { + return ctrl.NewControllerManagedBy(mgr). 
+ For(&mcpv1alpha1.MCPGroup{}). + Watches( + &mcpv1alpha1.MCPServer{}, handler.EnqueueRequestsFromMapFunc(r.findMCPGroupForMCPServer), + ). + Complete(r) +} diff --git a/cmd/thv-operator/controllers/mcpgroup_controller_test.go b/cmd/thv-operator/controllers/mcpgroup_controller_test.go new file mode 100644 index 000000000..aedfd75bc --- /dev/null +++ b/cmd/thv-operator/controllers/mcpgroup_controller_test.go @@ -0,0 +1,625 @@ +package controllers + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "k8s.io/apimachinery/pkg/types" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + "sigs.k8s.io/controller-runtime/pkg/reconcile" + + mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1" +) + +// TestMCPGroupReconciler_Reconcile_BasicLogic tests the core reconciliation logic +// using a fake client to avoid needing a real Kubernetes cluster +func TestMCPGroupReconciler_Reconcile_BasicLogic(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mcpGroup *mcpv1alpha1.MCPGroup + mcpServers []*mcpv1alpha1.MCPServer + expectedServerCount int + expectedServerNames []string + expectedPhase mcpv1alpha1.MCPGroupPhase + }{ + { + name: "group with two running servers should be ready", + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + mcpServers: []*mcpv1alpha1.MCPServer{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "server1", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + Status: mcpv1alpha1.MCPServerStatus{ + Phase: mcpv1alpha1.MCPServerPhaseRunning, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "server2", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + Status: mcpv1alpha1.MCPServerStatus{ + Phase: mcpv1alpha1.MCPServerPhaseRunning, + }, + }, + }, + expectedServerCount: 2, + expectedServerNames: []string{"server1", "server2"}, + expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, + }, + { + name: "group with servers regardless of status should be ready", + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + mcpServers: []*mcpv1alpha1.MCPServer{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "server1", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + Status: mcpv1alpha1.MCPServerStatus{ + Phase: mcpv1alpha1.MCPServerPhaseRunning, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "server2", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + Status: mcpv1alpha1.MCPServerStatus{ + Phase: mcpv1alpha1.MCPServerPhaseFailed, + }, + }, + }, + expectedServerCount: 2, + expectedServerNames: []string{"server1", "server2"}, + expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, // Controller doesn't check individual server phases + }, + { + name: "group with mixed server phases should be ready", + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + mcpServers: []*mcpv1alpha1.MCPServer{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "server1", + Namespace: "default", + }, + 
Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + Status: mcpv1alpha1.MCPServerStatus{ + Phase: mcpv1alpha1.MCPServerPhaseRunning, + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "server2", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + Status: mcpv1alpha1.MCPServerStatus{ + Phase: mcpv1alpha1.MCPServerPhasePending, + }, + }, + }, + expectedServerCount: 2, + expectedServerNames: []string{"server1", "server2"}, + expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, // Controller doesn't check individual server phases + }, + { + name: "group with no servers should be ready", + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + mcpServers: []*mcpv1alpha1.MCPServer{}, + expectedServerCount: 0, + expectedServerNames: []string{}, + expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := t.Context() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + // Create fake client with objects + objs := []client.Object{tt.mcpGroup} + for _, server := range tt.mcpServers { + objs = append(objs, server) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objs...). + WithStatusSubresource(&mcpv1alpha1.MCPGroup{}). + Build() + + r := &MCPGroupReconciler{ + Client: fakeClient, + } + + // Reconcile + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tt.mcpGroup.Name, + Namespace: tt.mcpGroup.Namespace, + }, + } + + result, err := r.Reconcile(ctx, req) + require.NoError(t, err) + assert.False(t, result.Requeue) + + // Check the updated MCPGroup + var updatedGroup mcpv1alpha1.MCPGroup + err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup) + require.NoError(t, err) + + assert.Equal(t, tt.expectedServerCount, updatedGroup.Status.ServerCount) + assert.Equal(t, tt.expectedPhase, updatedGroup.Status.Phase) + assert.ElementsMatch(t, tt.expectedServerNames, updatedGroup.Status.Servers) + }) + } +} + +// TestMCPGroupReconciler_ServerFiltering tests the logic for filtering servers by groupRef +func TestMCPGroupReconciler_ServerFiltering(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + groupName string + namespace string + mcpServers []*mcpv1alpha1.MCPServer + expectedServerNames []string + expectedCount int + }{ + { + name: "filters servers by exact groupRef match", + groupName: "test-group", + namespace: "default", + mcpServers: []*mcpv1alpha1.MCPServer{ + { + ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "default"}, + Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "default"}, + Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "other-group"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "server3", Namespace: "default"}, + Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"}, + }, + }, + expectedServerNames: []string{"server1", "server3"}, + expectedCount: 2, + }, + { + name: "excludes servers without groupRef", + groupName: "test-group", + namespace: "default", + mcpServers: []*mcpv1alpha1.MCPServer{ + { + ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "default"}, + Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: 
"test-group"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "default"}, + Spec: mcpv1alpha1.MCPServerSpec{Image: "test"}, + }, + }, + expectedServerNames: []string{"server1"}, + expectedCount: 1, + }, + { + name: "excludes servers from different namespaces", + groupName: "test-group", + namespace: "namespace-a", + mcpServers: []*mcpv1alpha1.MCPServer{ + { + ObjectMeta: metav1.ObjectMeta{Name: "server1", Namespace: "namespace-a"}, + Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"}, + }, + { + ObjectMeta: metav1.ObjectMeta{Name: "server2", Namespace: "namespace-b"}, + Spec: mcpv1alpha1.MCPServerSpec{Image: "test", GroupRef: "test-group"}, + }, + }, + expectedServerNames: []string{"server1"}, + expectedCount: 1, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := t.Context() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + mcpGroup := &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: tt.groupName, + Namespace: tt.namespace, + }, + } + + objs := []client.Object{mcpGroup} + for _, server := range tt.mcpServers { + objs = append(objs, server) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objs...). + WithStatusSubresource(&mcpv1alpha1.MCPGroup{}). + Build() + + r := &MCPGroupReconciler{ + Client: fakeClient, + } + + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tt.groupName, + Namespace: tt.namespace, + }, + } + + result, err := r.Reconcile(ctx, req) + require.NoError(t, err) + assert.False(t, result.Requeue) + + var updatedGroup mcpv1alpha1.MCPGroup + err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup) + require.NoError(t, err) + + assert.Equal(t, tt.expectedCount, updatedGroup.Status.ServerCount) + assert.ElementsMatch(t, tt.expectedServerNames, updatedGroup.Status.Servers) + }) + } +} + +// TestMCPGroupReconciler_findMCPGroupForMCPServer tests the watch mapping function +func TestMCPGroupReconciler_findMCPGroupForMCPServer(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mcpServer *mcpv1alpha1.MCPServer + mcpGroups []*mcpv1alpha1.MCPGroup + expectedRequests int + expectedGroupName string + }{ + { + name: "server with groupRef finds matching group", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + }, + expectedRequests: 1, + expectedGroupName: "test-group", + }, + { + name: "server without groupRef returns empty", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + // No GroupRef + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + }, + expectedRequests: 0, + }, + { + name: "server with non-existent groupRef returns empty", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "non-existent-group", + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + }, + expectedRequests: 0, + }, + { + name: "server finds correct group among multiple groups", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "group-b", + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "group-a", + Namespace: "default", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "group-b", + Namespace: "default", + }, + }, + { + ObjectMeta: metav1.ObjectMeta{ + Name: "group-c", + Namespace: "default", + }, + }, + }, + expectedRequests: 1, + expectedGroupName: "group-b", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := t.Context() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + // Create fake client with objects + objs := []client.Object{} + for _, group := range tt.mcpGroups { + objs = append(objs, group) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objs...). + Build() + + r := &MCPGroupReconciler{ + Client: fakeClient, + } + + requests := r.findMCPGroupForMCPServer(ctx, tt.mcpServer) + + assert.Len(t, requests, tt.expectedRequests) + if tt.expectedRequests > 0 { + assert.Equal(t, tt.expectedGroupName, requests[0].Name) + assert.Equal(t, tt.mcpServer.Namespace, requests[0].Namespace) + } + }) + } +} + +// TestMCPGroupReconciler_GroupNotFound tests handling of non-existent groups +func TestMCPGroupReconciler_GroupNotFound(t *testing.T) { + t.Parallel() + + ctx := t.Context() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). 
+ Build() + + r := &MCPGroupReconciler{ + Client: fakeClient, + } + + // Reconcile a non-existent group + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: "non-existent-group", + Namespace: "default", + }, + } + + result, err := r.Reconcile(ctx, req) + require.NoError(t, err) + assert.False(t, result.Requeue) +} + +// TestMCPGroupReconciler_Conditions tests the MCPServersChecked condition +func TestMCPGroupReconciler_Conditions(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mcpGroup *mcpv1alpha1.MCPGroup + mcpServers []*mcpv1alpha1.MCPServer + expectedConditionStatus metav1.ConditionStatus + expectedConditionReason string + expectedPhase mcpv1alpha1.MCPGroupPhase + }{ + { + name: "MCPServersChecked condition is True when listing succeeds", + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + mcpServers: []*mcpv1alpha1.MCPServer{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "server1", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + }, + }, + expectedConditionStatus: metav1.ConditionTrue, + expectedConditionReason: mcpv1alpha1.ConditionReasonListMCPServersSucceeded, + expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, + }, + { + name: "MCPServersChecked condition is True even with no servers", + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + }, + mcpServers: []*mcpv1alpha1.MCPServer{}, + expectedConditionStatus: metav1.ConditionTrue, + expectedConditionReason: mcpv1alpha1.ConditionReasonListMCPServersSucceeded, + expectedPhase: mcpv1alpha1.MCPGroupPhaseReady, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := t.Context() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + objs := []client.Object{tt.mcpGroup} + for _, server := range tt.mcpServers { + objs = append(objs, server) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objs...). + WithStatusSubresource(&mcpv1alpha1.MCPGroup{}). 
+ Build() + + r := &MCPGroupReconciler{ + Client: fakeClient, + } + + req := reconcile.Request{ + NamespacedName: types.NamespacedName{ + Name: tt.mcpGroup.Name, + Namespace: tt.mcpGroup.Namespace, + }, + } + + result, err := r.Reconcile(ctx, req) + require.NoError(t, err) + assert.False(t, result.Requeue) + + var updatedGroup mcpv1alpha1.MCPGroup + err = fakeClient.Get(ctx, req.NamespacedName, &updatedGroup) + require.NoError(t, err) + + assert.Equal(t, tt.expectedPhase, updatedGroup.Status.Phase) + + // Check the MCPServersChecked condition + var condition *metav1.Condition + for i := range updatedGroup.Status.Conditions { + if updatedGroup.Status.Conditions[i].Type == mcpv1alpha1.ConditionTypeMCPServersChecked { + condition = &updatedGroup.Status.Conditions[i] + break + } + } + + require.NotNil(t, condition, "MCPServersChecked condition should be present") + assert.Equal(t, tt.expectedConditionStatus, condition.Status) + if tt.expectedConditionReason != "" { + assert.Equal(t, tt.expectedConditionReason, condition.Reason) + } + }) + } +} diff --git a/cmd/thv-operator/controllers/mcpserver_controller.go b/cmd/thv-operator/controllers/mcpserver_controller.go index f3ac65c05..6ba94c909 100644 --- a/cmd/thv-operator/controllers/mcpserver_controller.go +++ b/cmd/thv-operator/controllers/mcpserver_controller.go @@ -183,6 +183,9 @@ func (r *MCPServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{Requeue: true}, nil } + // Validate the GroupRef, if one is specified + r.validateGroupRef(ctx, mcpServer) + // Check if MCPToolConfig is referenced and handle it if err := r.handleToolConfig(ctx, mcpServer); err != nil { ctxLogger.Error(err, "Failed to handle MCPToolConfig") @@ -392,6 +395,41 @@ func (r *MCPServerReconciler) Reconcile(ctx context.Context, req ctrl.Request) ( return ctrl.Result{}, nil } +// validateGroupRef sets the GroupRefValidated condition based on whether the referenced MCPGroup exists and is Ready. +func (r *MCPServerReconciler) validateGroupRef(ctx context.Context, mcpServer *mcpv1alpha1.MCPServer) { + if mcpServer.Spec.GroupRef == "" { + // No group reference, nothing to validate + return + } + + ctxLogger := log.FromContext(ctx) + + // Find the referenced MCPGroup + group := &mcpv1alpha1.MCPGroup{} + if err := r.Get(ctx, types.NamespacedName{Namespace: mcpServer.Namespace, Name: mcpServer.Spec.GroupRef}, group); err != nil { + ctxLogger.Error(err, "Failed to validate GroupRef") + meta.SetStatusCondition(&mcpServer.Status.Conditions, metav1.Condition{ + Type: mcpv1alpha1.ConditionGroupRefValidated, + Status: metav1.ConditionFalse, + Reason: mcpv1alpha1.ConditionReasonGroupRefNotFound, + Message: err.Error(), + }) + } else if group.Status.Phase != mcpv1alpha1.MCPGroupPhaseReady { + meta.SetStatusCondition(&mcpServer.Status.Conditions, metav1.Condition{ + Type: mcpv1alpha1.ConditionGroupRefValidated, + Status: metav1.ConditionFalse, + Reason: mcpv1alpha1.ConditionReasonGroupRefNotReady, + Message: "GroupRef is not in Ready state", + }) + } else { + meta.SetStatusCondition(&mcpServer.Status.Conditions, metav1.Condition{ + Type: mcpv1alpha1.ConditionGroupRefValidated, + Status: metav1.ConditionTrue, + Reason: mcpv1alpha1.ConditionReasonGroupRefValidated, + Message: "GroupRef is valid and in Ready state", + }) + } +} + // setImageValidationCondition is a helper function to set the image validation status condition // This reduces code duplication in the image validation logic func setImageValidationCondition(mcpServer *mcpv1alpha1.MCPServer, status metav1.ConditionStatus, reason, message string) { diff --git a/cmd/thv-operator/controllers/mcpserver_groupref_test.go 
b/cmd/thv-operator/controllers/mcpserver_groupref_test.go new file mode 100644 index 000000000..e3e186139 --- /dev/null +++ b/cmd/thv-operator/controllers/mcpserver_groupref_test.go @@ -0,0 +1,346 @@ +package controllers + +import ( + "context" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + corev1 "k8s.io/api/core/v1" + "k8s.io/apimachinery/pkg/api/meta" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/client/fake" + + mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1" +) + +// TestMCPServerReconciler_ValidateGroupRef tests the validateGroupRef function +func TestMCPServerReconciler_ValidateGroupRef(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mcpServer *mcpv1alpha1.MCPServer + mcpGroups []*mcpv1alpha1.MCPGroup + expectedConditionStatus metav1.ConditionStatus + expectedConditionReason string + expectedConditionMsg string + }{ + { + name: "GroupRef validated when group exists and is Ready", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + Status: mcpv1alpha1.MCPGroupStatus{ + Phase: mcpv1alpha1.MCPGroupPhaseReady, + }, + }, + }, + expectedConditionStatus: metav1.ConditionTrue, + expectedConditionReason: "", + }, + { + name: "GroupRef not validated when group does not exist", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "non-existent-group", + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{}, + expectedConditionStatus: metav1.ConditionFalse, + expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotFound, + }, + { + name: "GroupRef not validated when group is Pending", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + Status: mcpv1alpha1.MCPGroupStatus{ + Phase: mcpv1alpha1.MCPGroupPhasePending, + }, + }, + }, + expectedConditionStatus: metav1.ConditionFalse, + expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotReady, + expectedConditionMsg: "GroupRef is not in Ready state", + }, + { + name: "GroupRef not validated when group is Failed", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + Status: mcpv1alpha1.MCPGroupStatus{ + Phase: mcpv1alpha1.MCPGroupPhaseFailed, + }, + }, + }, + expectedConditionStatus: metav1.ConditionFalse, + expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotReady, + expectedConditionMsg: "GroupRef is not in Ready state", + }, + { + name: "No validation when GroupRef is empty", + mcpServer: &mcpv1alpha1.MCPServer{ + 
ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + // No GroupRef + }, + }, + mcpGroups: []*mcpv1alpha1.MCPGroup{ + { + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + Status: mcpv1alpha1.MCPGroupStatus{ + Phase: mcpv1alpha1.MCPGroupPhaseReady, + }, + }, + }, + expectedConditionStatus: "", // No condition should be set + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + objs := []client.Object{} + for _, group := range tt.mcpGroups { + objs = append(objs, group) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objs...). + WithStatusSubresource(&mcpv1alpha1.MCPGroup{}). + Build() + + r := &MCPServerReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + r.validateGroupRef(ctx, tt.mcpServer) + + // Check the condition if we expected one + if tt.expectedConditionStatus != "" { + condition := meta.FindStatusCondition(tt.mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated) + require.NotNil(t, condition, "GroupRefValidated condition should be present") + assert.Equal(t, tt.expectedConditionStatus, condition.Status) + if tt.expectedConditionReason != "" { + assert.Equal(t, tt.expectedConditionReason, condition.Reason) + } + if tt.expectedConditionMsg != "" { + assert.Equal(t, tt.expectedConditionMsg, condition.Message) + } + } else { + // No condition should be set when GroupRef is empty + condition := meta.FindStatusCondition(tt.mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated) + assert.Nil(t, condition, "GroupRefValidated condition should not be present when GroupRef is empty") + } + }) + } +} + +// TestMCPServerReconciler_GroupRefValidation_Integration tests GroupRef validation in the context of reconciliation +func TestMCPServerReconciler_GroupRefValidation_Integration(t *testing.T) { + t.Parallel() + + tests := []struct { + name string + mcpServer *mcpv1alpha1.MCPServer + mcpGroup *mcpv1alpha1.MCPGroup + expectedConditionStatus metav1.ConditionStatus + expectedConditionReason string + }{ + { + name: "Server with valid GroupRef gets validated condition", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + }, + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + Status: mcpv1alpha1.MCPGroupStatus{ + Phase: mcpv1alpha1.MCPGroupPhaseReady, + }, + }, + expectedConditionStatus: metav1.ConditionTrue, + }, + { + name: "Server with GroupRef to non-Ready group gets failed condition", + mcpServer: &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "default", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + }, + mcpGroup: &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "default", + }, + Status: mcpv1alpha1.MCPGroupStatus{ + Phase: mcpv1alpha1.MCPGroupPhasePending, + }, + }, + expectedConditionStatus: metav1.ConditionFalse, + expectedConditionReason: mcpv1alpha1.ConditionReasonGroupRefNotReady, + }, + } + + for _, tt := range tests { + 
t.Run(tt.name, func(t *testing.T) { + t.Parallel() + + ctx := context.Background() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + objs := []client.Object{tt.mcpServer} + if tt.mcpGroup != nil { + objs = append(objs, tt.mcpGroup) + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(objs...). + WithStatusSubresource(&mcpv1alpha1.MCPServer{}, &mcpv1alpha1.MCPGroup{}). + Build() + + r := &MCPServerReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + r.validateGroupRef(ctx, tt.mcpServer) + + condition := meta.FindStatusCondition(tt.mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated) + require.NotNil(t, condition, "GroupRefValidated condition should be present") + assert.Equal(t, tt.expectedConditionStatus, condition.Status) + if tt.expectedConditionReason != "" { + assert.Equal(t, tt.expectedConditionReason, condition.Reason) + } + }) + } +} + +// TestMCPServerReconciler_GroupRefCrossNamespace tests that GroupRef only works within same namespace +func TestMCPServerReconciler_GroupRefCrossNamespace(t *testing.T) { + t.Parallel() + + ctx := context.Background() + scheme := runtime.NewScheme() + require.NoError(t, mcpv1alpha1.AddToScheme(scheme)) + require.NoError(t, corev1.AddToScheme(scheme)) + + mcpServer := &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-server", + Namespace: "namespace-a", + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "test-image", + GroupRef: "test-group", + }, + } + + mcpGroup := &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: "test-group", + Namespace: "namespace-b", // Different namespace + }, + Status: mcpv1alpha1.MCPGroupStatus{ + Phase: mcpv1alpha1.MCPGroupPhaseReady, + }, + } + + fakeClient := fake.NewClientBuilder(). + WithScheme(scheme). + WithObjects(mcpServer, mcpGroup). + WithStatusSubresource(&mcpv1alpha1.MCPServer{}, &mcpv1alpha1.MCPGroup{}). 
+ Build() + + r := &MCPServerReconciler{ + Client: fakeClient, + Scheme: scheme, + } + + r.validateGroupRef(ctx, mcpServer) + + // Should fail to find the group because it's in a different namespace + condition := meta.FindStatusCondition(mcpServer.Status.Conditions, mcpv1alpha1.ConditionGroupRefValidated) + require.NotNil(t, condition, "GroupRefValidated condition should be present") + assert.Equal(t, metav1.ConditionFalse, condition.Status) + assert.Equal(t, mcpv1alpha1.ConditionReasonGroupRefNotFound, condition.Reason) +} diff --git a/cmd/thv-operator/main.go b/cmd/thv-operator/main.go index 7a15e63ae..7db167464 100644 --- a/cmd/thv-operator/main.go +++ b/cmd/thv-operator/main.go @@ -93,6 +93,14 @@ func main() { setupLog.Error(err, "unable to create controller", "controller", "MCPRegistry") os.Exit(1) } + + // Set up MCPGroup controller + if err = (&controllers.MCPGroupReconciler{ + Client: mgr.GetClient(), + }).SetupWithManager(mgr); err != nil { + setupLog.Error(err, "unable to create controller", "controller", "MCPGroup") + os.Exit(1) + } //+kubebuilder:scaffold:builder if err := mgr.AddHealthzCheck("healthz", healthz.Ping); err != nil { diff --git a/cmd/thv-operator/test-integration/mcp-group/mcpgroup_controller_integration_test.go b/cmd/thv-operator/test-integration/mcp-group/mcpgroup_controller_integration_test.go new file mode 100644 index 000000000..bef345415 --- /dev/null +++ b/cmd/thv-operator/test-integration/mcp-group/mcpgroup_controller_integration_test.go @@ -0,0 +1,718 @@ +package operator_test + +import ( + "fmt" + "time" + + . "github.com/onsi/ginkgo/v2" + . "github.com/onsi/gomega" + corev1 "k8s.io/api/core/v1" + metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/types" + + mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1" +) + +var _ = Describe("MCPGroup Controller Integration Tests", func() { + const ( + timeout = time.Second * 30 + interval = time.Millisecond * 250 + ) + + Context("When creating an MCPGroup with existing MCPServers", Ordered, func() { + var ( + namespace string + mcpGroupName string + mcpGroup *mcpv1alpha1.MCPGroup + server1 *mcpv1alpha1.MCPServer + server2 *mcpv1alpha1.MCPServer + serverNoGroup *mcpv1alpha1.MCPServer + ) + + BeforeAll(func() { + namespace = fmt.Sprintf("test-mcpgroup-%d", time.Now().Unix()) + mcpGroupName = "test-group" + + // Create namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + Expect(k8sClient.Create(ctx, ns)).Should(Succeed()) + + // Create MCPServers first + server1 = &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "server1", + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "example/mcp-server:latest", + GroupRef: mcpGroupName, + }, + } + Expect(k8sClient.Create(ctx, server1)).Should(Succeed()) + + server2 = &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "server2", + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "example/mcp-server:latest", + GroupRef: mcpGroupName, + }, + } + Expect(k8sClient.Create(ctx, server2)).Should(Succeed()) + + serverNoGroup = &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "server-no-group", + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "example/mcp-server:latest", + // No GroupRef + }, + } + Expect(k8sClient.Create(ctx, serverNoGroup)).Should(Succeed()) + + // Update server statuses to Running + Eventually(func() error { + freshServer := &mcpv1alpha1.MCPServer{} + if err := 
k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil { + return err + } + freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning + return k8sClient.Status().Update(ctx, freshServer) + }, timeout, interval).Should(Succeed()) + + Eventually(func() error { + freshServer := &mcpv1alpha1.MCPServer{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil { + return err + } + freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning + return k8sClient.Status().Update(ctx, freshServer) + }, timeout, interval).Should(Succeed()) + + // Verify the statuses were updated + Eventually(func() bool { + freshServer := &mcpv1alpha1.MCPServer{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil { + return false + } + return freshServer.Status.Phase == mcpv1alpha1.MCPServerPhaseRunning + }, timeout, interval).Should(BeTrue()) + + Eventually(func() bool { + freshServer := &mcpv1alpha1.MCPServer{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil { + return false + } + return freshServer.Status.Phase == mcpv1alpha1.MCPServerPhaseRunning + }, timeout, interval).Should(BeTrue()) + + // Now create the MCPGroup + mcpGroup = &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcpGroupName, + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPGroupSpec{ + Description: "Test group for integration tests", + }, + } + Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed()) + }) + + AfterAll(func() { + // Clean up + Expect(k8sClient.Delete(ctx, server1)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, server2)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, serverNoGroup)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed()) + + // Delete namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + Expect(k8sClient.Delete(ctx, ns)).Should(Succeed()) + }) + + It("Should find existing MCPServers and update status", func() { + // Check that the group found both servers + Eventually(func() int { + updatedGroup := &mcpv1alpha1.MCPGroup{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: mcpGroupName, + Namespace: namespace, + }, updatedGroup); err != nil { + return -1 + } + return updatedGroup.Status.ServerCount + }, timeout, interval).Should(Equal(2)) + + // The group should be Ready after successful reconciliation + Eventually(func() mcpv1alpha1.MCPGroupPhase { + updatedGroup := &mcpv1alpha1.MCPGroup{} + if err := k8sClient.Get(ctx, types.NamespacedName{ + Name: mcpGroupName, + Namespace: namespace, + }, updatedGroup); err != nil { + return "" + } + return updatedGroup.Status.Phase + }, timeout, interval).Should(Equal(mcpv1alpha1.MCPGroupPhaseReady)) + + // Verify the servers are in the group + updatedGroup := &mcpv1alpha1.MCPGroup{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: mcpGroupName, + Namespace: namespace, + }, updatedGroup)).Should(Succeed()) + + Expect(updatedGroup.Status.Servers).To(ContainElements("server1", "server2")) + Expect(updatedGroup.Status.Servers).NotTo(ContainElement("server-no-group")) + }) + }) + + Context("When creating a new MCPServer with groupRef", Ordered, func() { + var ( + namespace string + mcpGroupName string + mcpGroup *mcpv1alpha1.MCPGroup + newServer *mcpv1alpha1.MCPServer + ) + + BeforeAll(func() { + namespace = 
fmt.Sprintf("test-new-server-%d", time.Now().Unix()) + mcpGroupName = "test-group-new-server" + + // Create namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + Expect(k8sClient.Create(ctx, ns)).Should(Succeed()) + + // Create MCPGroup first + mcpGroup = &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcpGroupName, + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPGroupSpec{ + Description: "Test group for new server", + }, + } + Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed()) + + // Wait for initial reconciliation + Eventually(func() bool { + updatedGroup := &mcpv1alpha1.MCPGroup{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: mcpGroupName, + Namespace: namespace, + }, updatedGroup) + return err == nil && updatedGroup.Status.Phase == mcpv1alpha1.MCPGroupPhaseReady + }, timeout, interval).Should(BeTrue()) + }) + + AfterAll(func() { + // Clean up + if newServer != nil { + Expect(k8sClient.Delete(ctx, newServer)).Should(Succeed()) + } + Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed()) + + // Delete namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + Expect(k8sClient.Delete(ctx, ns)).Should(Succeed()) + }) + + It("Should trigger MCPGroup reconciliation when server is created", func() { + // Create new server with groupRef + newServer = &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "new-server", + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "example/mcp-server:latest", + GroupRef: mcpGroupName, + }, + } + Expect(k8sClient.Create(ctx, newServer)).Should(Succeed()) + + // Update server status to Running + Eventually(func() error { + if err := k8sClient.Get(ctx, types.NamespacedName{Name: newServer.Name, Namespace: namespace}, newServer); err != nil { + return err + } + newServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning + return k8sClient.Status().Update(ctx, newServer) + }, timeout, interval).Should(Succeed()) + + // Wait for MCPGroup to be updated + Eventually(func() bool { + updatedGroup := &mcpv1alpha1.MCPGroup{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: mcpGroupName, + Namespace: namespace, + }, updatedGroup) + if err != nil { + return false + } + + return updatedGroup.Status.ServerCount == 1 && + updatedGroup.Status.Phase == mcpv1alpha1.MCPGroupPhaseReady + }, timeout, interval).Should(BeTrue()) + + // Verify the server is in the group + updatedGroup := &mcpv1alpha1.MCPGroup{} + Expect(k8sClient.Get(ctx, types.NamespacedName{ + Name: mcpGroupName, + Namespace: namespace, + }, updatedGroup)).Should(Succeed()) + + Expect(updatedGroup.Status.Servers).To(ContainElement("new-server")) + }) + }) + + Context("When deleting an MCPServer from a group", Ordered, func() { + var ( + namespace string + mcpGroupName string + mcpGroup *mcpv1alpha1.MCPGroup + server1 *mcpv1alpha1.MCPServer + server2 *mcpv1alpha1.MCPServer + ) + + BeforeAll(func() { + namespace = fmt.Sprintf("test-delete-server-%d", time.Now().Unix()) + mcpGroupName = "test-group-delete" + + // Create namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + Expect(k8sClient.Create(ctx, ns)).Should(Succeed()) + + // Create MCPServers + server1 = &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "server1", + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "example/mcp-server:latest", + GroupRef: mcpGroupName, + }, + } + Expect(k8sClient.Create(ctx, 
server1)).Should(Succeed()) + + server2 = &mcpv1alpha1.MCPServer{ + ObjectMeta: metav1.ObjectMeta{ + Name: "server2", + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPServerSpec{ + Image: "example/mcp-server:latest", + GroupRef: mcpGroupName, + }, + } + Expect(k8sClient.Create(ctx, server2)).Should(Succeed()) + + // Update server statuses to Running + Eventually(func() error { + freshServer := &mcpv1alpha1.MCPServer{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil { + return err + } + freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning + return k8sClient.Status().Update(ctx, freshServer) + }, timeout, interval).Should(Succeed()) + + Eventually(func() error { + freshServer := &mcpv1alpha1.MCPServer{} + if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil { + return err + } + freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning + return k8sClient.Status().Update(ctx, freshServer) + }, timeout, interval).Should(Succeed()) + + // Create MCPGroup + mcpGroup = &mcpv1alpha1.MCPGroup{ + ObjectMeta: metav1.ObjectMeta{ + Name: mcpGroupName, + Namespace: namespace, + }, + Spec: mcpv1alpha1.MCPGroupSpec{ + Description: "Test group for server deletion", + }, + } + Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed()) + + // Wait for initial reconciliation with both servers + Eventually(func() bool { + updatedGroup := &mcpv1alpha1.MCPGroup{} + err := k8sClient.Get(ctx, types.NamespacedName{ + Name: mcpGroupName, + Namespace: namespace, + }, updatedGroup) + return err == nil && updatedGroup.Status.ServerCount == 2 + }, timeout, interval).Should(BeTrue()) + }) + + AfterAll(func() { + // Clean up remaining resources + // server1 is deleted in the test, so only check if it still exists + if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, server1); err == nil { + Expect(k8sClient.Delete(ctx, server1)).Should(Succeed()) + } + Expect(k8sClient.Delete(ctx, server2)).Should(Succeed()) + Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed()) + + // Delete namespace + ns := &corev1.Namespace{ + ObjectMeta: metav1.ObjectMeta{ + Name: namespace, + }, + } + Expect(k8sClient.Delete(ctx, ns)).Should(Succeed()) + }) + + It("Should remain Ready after checking servers in namespace", func() { + // The MCPGroup should remain Ready because it can successfully list servers + // in the namespace. The MCPGroup phase is based on the ability to query + // servers, not on the state or count of servers. 
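+ // The steps below verify this: read the group, confirm it is Ready with both servers, then force a fresh reconcile by updating the spec and assert the phase stays Ready.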
+			updatedGroup := &mcpv1alpha1.MCPGroup{}
+			Expect(k8sClient.Get(ctx, types.NamespacedName{
+				Name:      mcpGroupName,
+				Namespace: namespace,
+			}, updatedGroup)).Should(Succeed())
+
+			// The MCPGroup should be Ready with 2 servers
+			Expect(updatedGroup.Status.Phase).To(Equal(mcpv1alpha1.MCPGroupPhaseReady))
+			Expect(updatedGroup.Status.ServerCount).To(Equal(2))
+
+			// Trigger a reconciliation by updating the MCPGroup spec
+			Eventually(func() error {
+				freshGroup := &mcpv1alpha1.MCPGroup{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{Name: mcpGroupName, Namespace: namespace}, freshGroup); err != nil {
+					return err
+				}
+				freshGroup.Spec.Description = "Test group for server deletion - updated"
+				return k8sClient.Update(ctx, freshGroup)
+			}, timeout, interval).Should(Succeed())
+
+			// After reconciliation, the MCPGroup should still be Ready
+			Eventually(func() mcpv1alpha1.MCPGroupPhase {
+				updatedGroup := &mcpv1alpha1.MCPGroup{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{
+					Name:      mcpGroupName,
+					Namespace: namespace,
+				}, updatedGroup); err != nil {
+					return ""
+				}
+				return updatedGroup.Status.Phase
+			}, timeout, interval).Should(Equal(mcpv1alpha1.MCPGroupPhaseReady))
+		})
+	})
+
+	Context("When an MCPServer changes state", Ordered, func() {
+		var (
+			namespace    string
+			mcpGroupName string
+			mcpGroup     *mcpv1alpha1.MCPGroup
+			server1      *mcpv1alpha1.MCPServer
+			server2      *mcpv1alpha1.MCPServer
+		)
+
+		BeforeAll(func() {
+			namespace = fmt.Sprintf("test-server-state-%d", time.Now().Unix())
+			mcpGroupName = "test-group-state"
+
+			// Create namespace
+			ns := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespace,
+				},
+			}
+			Expect(k8sClient.Create(ctx, ns)).Should(Succeed())
+
+			// Create MCPServers
+			server1 = &mcpv1alpha1.MCPServer{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "server1",
+					Namespace: namespace,
+				},
+				Spec: mcpv1alpha1.MCPServerSpec{
+					Image:    "example/mcp-server:latest",
+					GroupRef: mcpGroupName,
+				},
+			}
+			Expect(k8sClient.Create(ctx, server1)).Should(Succeed())
+
+			server2 = &mcpv1alpha1.MCPServer{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "server2",
+					Namespace: namespace,
+				},
+				Spec: mcpv1alpha1.MCPServerSpec{
+					Image:    "example/mcp-server:latest",
+					GroupRef: mcpGroupName,
+				},
+			}
+			Expect(k8sClient.Create(ctx, server2)).Should(Succeed())
+
+			// Update server statuses to Running
+			Eventually(func() error {
+				freshServer := &mcpv1alpha1.MCPServer{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil {
+					return err
+				}
+				freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+				return k8sClient.Status().Update(ctx, freshServer)
+			}, timeout, interval).Should(Succeed())
+
+			Eventually(func() error {
+				freshServer := &mcpv1alpha1.MCPServer{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{Name: server2.Name, Namespace: namespace}, freshServer); err != nil {
+					return err
+				}
+				freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+				return k8sClient.Status().Update(ctx, freshServer)
+			}, timeout, interval).Should(Succeed())
+
+			// Create MCPGroup
+			mcpGroup = &mcpv1alpha1.MCPGroup{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      mcpGroupName,
+					Namespace: namespace,
+				},
+				Spec: mcpv1alpha1.MCPGroupSpec{
+					Description: "Test group for state changes",
+				},
+			}
+			Expect(k8sClient.Create(ctx, mcpGroup)).Should(Succeed())
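+
+			// The controller watches MCPServers and maps each one back to its
+			// groupRef, so a group created after its servers still converges
+			// to the right membership on its first reconcile.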
+
+			// Wait for initial reconciliation - the group should find the servers
+			Eventually(func() int {
+				updatedGroup := &mcpv1alpha1.MCPGroup{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{
+					Name:      mcpGroupName,
+					Namespace: namespace,
+				}, updatedGroup); err != nil {
+					return -1
+				}
+				return updatedGroup.Status.ServerCount
+			}, timeout, interval).Should(Equal(2))
+		})
+
+		AfterAll(func() {
+			// Clean up
+			Expect(k8sClient.Delete(ctx, server1)).Should(Succeed())
+			Expect(k8sClient.Delete(ctx, server2)).Should(Succeed())
+			Expect(k8sClient.Delete(ctx, mcpGroup)).Should(Succeed())
+
+			// Delete namespace
+			ns := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespace,
+				},
+			}
+			Expect(k8sClient.Delete(ctx, ns)).Should(Succeed())
+		})
+
+		It("Should remain Ready when reconciled after server status changes", func() {
+			// Update server1 status to Failed
+			Eventually(func() error {
+				freshServer := &mcpv1alpha1.MCPServer{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{Name: server1.Name, Namespace: namespace}, freshServer); err != nil {
+					return err
+				}
+				freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseFailed
+				return k8sClient.Status().Update(ctx, freshServer)
+			}, timeout, interval).Should(Succeed())
+
+			// Status changes don't trigger MCPGroup reconciliation, so we need
+			// to trigger it by updating the MCPGroup spec (e.g., adding/updating
+			// the description)
+			Eventually(func() error {
+				freshGroup := &mcpv1alpha1.MCPGroup{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{Name: mcpGroupName, Namespace: namespace}, freshGroup); err != nil {
+					return err
+				}
+				freshGroup.Spec.Description = "Test group for state changes - updated"
+				return k8sClient.Update(ctx, freshGroup)
+			}, timeout, interval).Should(Succeed())
+
+			// The MCPGroup should still be Ready because it doesn't check
+			// individual server phases (it only checks if servers exist). This
+			// reflects the simplified controller logic.
+			Eventually(func() mcpv1alpha1.MCPGroupPhase {
+				updatedGroup := &mcpv1alpha1.MCPGroup{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{
+					Name:      mcpGroupName,
+					Namespace: namespace,
+				}, updatedGroup); err != nil {
+					return ""
+				}
+				return updatedGroup.Status.Phase
+			}, timeout, interval).Should(Equal(mcpv1alpha1.MCPGroupPhaseReady))
+
+			// Verify both servers are still counted
+			updatedGroup := &mcpv1alpha1.MCPGroup{}
+			Expect(k8sClient.Get(ctx, types.NamespacedName{
+				Name:      mcpGroupName,
+				Namespace: namespace,
+			}, updatedGroup)).Should(Succeed())
+			Expect(updatedGroup.Status.ServerCount).To(Equal(2))
+		})
+	})
+
+	Context("When testing namespace isolation", Ordered, func() {
+		var (
+			namespaceA   string
+			namespaceB   string
+			mcpGroupName string
+			mcpGroupA    *mcpv1alpha1.MCPGroup
+			serverA      *mcpv1alpha1.MCPServer
+			serverB      *mcpv1alpha1.MCPServer
+		)
+
+		BeforeAll(func() {
+			namespaceA = fmt.Sprintf("test-ns-a-%d", time.Now().Unix())
+			namespaceB = fmt.Sprintf("test-ns-b-%d", time.Now().Unix())
+			mcpGroupName = "test-group"
+
+			// Create namespaces
+			nsA := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespaceA,
+				},
+			}
+			Expect(k8sClient.Create(ctx, nsA)).Should(Succeed())
+
+			nsB := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespaceB,
+				},
+			}
+			Expect(k8sClient.Create(ctx, nsB)).Should(Succeed())
+
+			// Create server in namespace A
+			serverA = &mcpv1alpha1.MCPServer{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "server-a",
+					Namespace: namespaceA,
+				},
+				Spec: mcpv1alpha1.MCPServerSpec{
+					Image:    "example/mcp-server:latest",
+					GroupRef: mcpGroupName,
+				},
+			}
+			Expect(k8sClient.Create(ctx, serverA)).Should(Succeed())
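+
+			// groupRef resolves within the server's own namespace, so the
+			// reference below names a group that never exists in namespace B.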
+
+			// Create server in namespace B with same group name
+			serverB = &mcpv1alpha1.MCPServer{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      "server-b",
+					Namespace: namespaceB,
+				},
+				Spec: mcpv1alpha1.MCPServerSpec{
+					Image:    "example/mcp-server:latest",
+					GroupRef: mcpGroupName, // Same group name, different namespace
+				},
+			}
+			Expect(k8sClient.Create(ctx, serverB)).Should(Succeed())
+
+			// Update server statuses
+			Eventually(func() error {
+				freshServer := &mcpv1alpha1.MCPServer{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{Name: serverA.Name, Namespace: namespaceA}, freshServer); err != nil {
+					return err
+				}
+				freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+				return k8sClient.Status().Update(ctx, freshServer)
+			}, timeout, interval).Should(Succeed())
+
+			Eventually(func() error {
+				freshServer := &mcpv1alpha1.MCPServer{}
+				if err := k8sClient.Get(ctx, types.NamespacedName{Name: serverB.Name, Namespace: namespaceB}, freshServer); err != nil {
+					return err
+				}
+				freshServer.Status.Phase = mcpv1alpha1.MCPServerPhaseRunning
+				return k8sClient.Status().Update(ctx, freshServer)
+			}, timeout, interval).Should(Succeed())
+
+			// Create MCPGroup in namespace A
+			mcpGroupA = &mcpv1alpha1.MCPGroup{
+				ObjectMeta: metav1.ObjectMeta{
+					Name:      mcpGroupName,
+					Namespace: namespaceA,
+				},
+				Spec: mcpv1alpha1.MCPGroupSpec{
+					Description: "Test group in namespace A",
+				},
+			}
+			Expect(k8sClient.Create(ctx, mcpGroupA)).Should(Succeed())
+		})
+
+		AfterAll(func() {
+			// Clean up
+			Expect(k8sClient.Delete(ctx, serverA)).Should(Succeed())
+			Expect(k8sClient.Delete(ctx, serverB)).Should(Succeed())
+			Expect(k8sClient.Delete(ctx, mcpGroupA)).Should(Succeed())
+
+			// Delete namespaces
+			nsA := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespaceA,
+				},
+			}
+			Expect(k8sClient.Delete(ctx, nsA)).Should(Succeed())
+
+			nsB := &corev1.Namespace{
+				ObjectMeta: metav1.ObjectMeta{
+					Name: namespaceB,
+				},
+			}
+			Expect(k8sClient.Delete(ctx, nsB)).Should(Succeed())
+		})
+
+		It("Should only include servers from the same namespace", func() {
+			// Wait for reconciliation
+			Eventually(func() bool {
+				updatedGroup := &mcpv1alpha1.MCPGroup{}
+				err := k8sClient.Get(ctx, types.NamespacedName{
+					Name:      mcpGroupName,
+					Namespace: namespaceA,
+				}, updatedGroup)
+				return err == nil && updatedGroup.Status.ServerCount > 0
+			}, timeout, interval).Should(BeTrue())
+
+			// Verify only server-a is in the group
+			updatedGroup := &mcpv1alpha1.MCPGroup{}
+			Expect(k8sClient.Get(ctx, types.NamespacedName{
+				Name:      mcpGroupName,
+				Namespace: namespaceA,
+			}, updatedGroup)).Should(Succeed())
+
+			Expect(updatedGroup.Status.ServerCount).To(Equal(1))
+			Expect(updatedGroup.Status.Servers).To(ContainElement("server-a"))
+			Expect(updatedGroup.Status.Servers).NotTo(ContainElement("server-b"))
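+
+			// (Presumably the reconciler lists with client.InNamespace, per
+			// the proposal, which is what keeps server-b out of this group's
+			// status.)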
"github.com/onsi/gomega" + "go.uber.org/zap/zapcore" + appsv1 "k8s.io/api/apps/v1" + corev1 "k8s.io/api/core/v1" + rbacv1 "k8s.io/api/rbac/v1" + "k8s.io/client-go/kubernetes/scheme" + "k8s.io/client-go/rest" + ctrl "sigs.k8s.io/controller-runtime" + "sigs.k8s.io/controller-runtime/pkg/client" + "sigs.k8s.io/controller-runtime/pkg/envtest" + logf "sigs.k8s.io/controller-runtime/pkg/log" + "sigs.k8s.io/controller-runtime/pkg/log/zap" + metricsserver "sigs.k8s.io/controller-runtime/pkg/metrics/server" + + mcpv1alpha1 "github.com/stacklok/toolhive/cmd/thv-operator/api/v1alpha1" + "github.com/stacklok/toolhive/cmd/thv-operator/controllers" +) + +// These tests use Ginkgo (BDD-style Go testing framework). Refer to +// http://onsi.github.io/ginkgo/ to learn more about Ginkgo. + +var ( + cfg *rest.Config + k8sClient client.Client + testEnv *envtest.Environment + ctx context.Context + cancel context.CancelFunc +) + +func TestControllers(t *testing.T) { + t.Parallel() + RegisterFailHandler(Fail) + RunSpecs(t, "MCPGroup Controller Integration Suite") +} + +var _ = BeforeSuite(func() { + logf.SetLogger(zap.New(zap.WriteTo(GinkgoWriter), zap.UseDevMode(true), zap.Level(zapcore.DebugLevel))) + + ctx, cancel = context.WithCancel(context.TODO()) + + By("bootstrapping test environment") + testEnv = &envtest.Environment{ + CRDDirectoryPaths: []string{filepath.Join("..", "..", "..", "..", "deploy", "charts", "operator-crds", "crds")}, + ErrorIfCRDPathMissing: true, + } + + var err error + // cfg is defined in this file globally. + cfg, err = testEnv.Start() + Expect(err).NotTo(HaveOccurred()) + Expect(cfg).NotTo(BeNil()) + + err = mcpv1alpha1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + // Add other schemes that the controllers use + err = appsv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = corev1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + err = rbacv1.AddToScheme(scheme.Scheme) + Expect(err).NotTo(HaveOccurred()) + + //+kubebuilder:scaffold:scheme + + k8sClient, err = client.New(cfg, client.Options{Scheme: scheme.Scheme}) + Expect(err).NotTo(HaveOccurred()) + Expect(k8sClient).NotTo(BeNil()) + + // Start the controller manager + k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{ + Scheme: scheme.Scheme, + Metrics: metricsserver.Options{ + BindAddress: "0", // Disable metrics server for tests to avoid port conflicts + }, + HealthProbeBindAddress: "0", // Disable health probe for tests + }) + Expect(err).ToNot(HaveOccurred()) + + // Register the MCPGroup controller + err = (&controllers.MCPGroupReconciler{ + Client: k8sManager.GetClient(), + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + // Register the MCPServer controller (needed for watch tests) + err = (&controllers.MCPServerReconciler{ + Client: k8sManager.GetClient(), + Scheme: k8sManager.GetScheme(), + }).SetupWithManager(k8sManager) + Expect(err).ToNot(HaveOccurred()) + + // Start the manager in a goroutine + go func() { + defer GinkgoRecover() + err = k8sManager.Start(ctx) + Expect(err).ToNot(HaveOccurred(), "failed to run manager") + }() + +}) + +var _ = AfterSuite(func() { + By("tearing down the test environment") + cancel() + // Give it some time to shut down gracefully + time.Sleep(100 * time.Millisecond) + err := testEnv.Stop() + Expect(err).NotTo(HaveOccurred()) +}) diff --git a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml new file mode 100644 index 
+
+	// Start the controller manager
+	k8sManager, err := ctrl.NewManager(cfg, ctrl.Options{
+		Scheme: scheme.Scheme,
+		Metrics: metricsserver.Options{
+			BindAddress: "0", // Disable metrics server for tests to avoid port conflicts
+		},
+		HealthProbeBindAddress: "0", // Disable health probe for tests
+	})
+	Expect(err).ToNot(HaveOccurred())
+
+	// Register the MCPGroup controller
+	err = (&controllers.MCPGroupReconciler{
+		Client: k8sManager.GetClient(),
+	}).SetupWithManager(k8sManager)
+	Expect(err).ToNot(HaveOccurred())
+
+	// Register the MCPServer controller (needed for watch tests)
+	err = (&controllers.MCPServerReconciler{
+		Client: k8sManager.GetClient(),
+		Scheme: k8sManager.GetScheme(),
+	}).SetupWithManager(k8sManager)
+	Expect(err).ToNot(HaveOccurred())
+
+	// Start the manager in a goroutine; use a goroutine-local err so we
+	// don't race on the err variable shared with the BeforeSuite body
+	go func() {
+		defer GinkgoRecover()
+		err := k8sManager.Start(ctx)
+		Expect(err).ToNot(HaveOccurred(), "failed to run manager")
+	}()
+})
+
+var _ = AfterSuite(func() {
+	By("tearing down the test environment")
+	cancel()
+	// Give it some time to shut down gracefully
+	time.Sleep(100 * time.Millisecond)
+	err := testEnv.Stop()
+	Expect(err).NotTo(HaveOccurred())
+})
diff --git a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml
new file mode 100644
index 000000000..cc84d4eeb
--- /dev/null
+++ b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpgroups.yaml
@@ -0,0 +1,127 @@
+---
+apiVersion: apiextensions.k8s.io/v1
+kind: CustomResourceDefinition
+metadata:
+  annotations:
+    controller-gen.kubebuilder.io/version: v0.17.3
+  name: mcpgroups.toolhive.stacklok.dev
+spec:
+  group: toolhive.stacklok.dev
+  names:
+    kind: MCPGroup
+    listKind: MCPGroupList
+    plural: mcpgroups
+    singular: mcpgroup
+  scope: Namespaced
+  versions:
+  - name: v1alpha1
+    schema:
+      openAPIV3Schema:
+        description: MCPGroup is the Schema for the mcpgroups API
+        properties:
+          apiVersion:
+            description: |-
+              APIVersion defines the versioned schema of this representation of an object.
+              Servers should convert recognized schemas to the latest internal value, and
+              may reject unrecognized values.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#resources
+            type: string
+          kind:
+            description: |-
+              Kind is a string value representing the REST resource this object represents.
+              Servers may infer this from the endpoint the client submits requests to.
+              Cannot be updated.
+              In CamelCase.
+              More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
+            type: string
+          metadata:
+            type: object
+          spec:
+            description: MCPGroupSpec defines the desired state of MCPGroup
+            properties:
+              description:
+                description: Description provides human-readable context
+                type: string
+            type: object
+          status:
+            description: MCPGroupStatus defines observed state
+            properties:
+              conditions:
+                description: Conditions represent observations
+                items:
+                  description: Condition contains details for one aspect of the current
+                    state of this API Resource.
+                  properties:
+                    lastTransitionTime:
+                      description: |-
+                        lastTransitionTime is the last time the condition transitioned from one status to another.
+                        This should be when the underlying condition changed. If that is not known, then using the time when the API field changed is acceptable.
+                      format: date-time
+                      type: string
+                    message:
+                      description: |-
+                        message is a human readable message indicating details about the transition.
+                        This may be an empty string.
+                      maxLength: 32768
+                      type: string
+                    observedGeneration:
+                      description: |-
+                        observedGeneration represents the .metadata.generation that the condition was set based upon.
+                        For instance, if .metadata.generation is currently 12, but the .status.conditions[x].observedGeneration is 9, the condition is out of date
+                        with respect to the current state of the instance.
+                      format: int64
+                      minimum: 0
+                      type: integer
+                    reason:
+                      description: |-
+                        reason contains a programmatic identifier indicating the reason for the condition's last transition.
+                        Producers of specific condition types may define expected values and meanings for this field,
+                        and whether the values are considered a guaranteed API.
+                        The value should be a CamelCase string.
+                        This field may not be empty.
+                      maxLength: 1024
+                      minLength: 1
+                      pattern: ^[A-Za-z]([A-Za-z0-9_,:]*[A-Za-z0-9_])?$
+                      type: string
+                    status:
+                      description: status of the condition, one of True, False, Unknown.
+                      enum:
+                      - "True"
+                      - "False"
+                      - Unknown
+                      type: string
+                    type:
+                      description: type of condition in CamelCase or in foo.example.com/CamelCase.
+                      maxLength: 316
+                      pattern: ^([a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*/)?(([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9])$
+                      type: string
+                  required:
+                  - lastTransitionTime
+                  - message
+                  - reason
+                  - status
+                  - type
+                  type: object
+                type: array
+              phase:
+                default: Pending
+                description: Phase indicates current state
+                enum:
+                - Ready
+                - Pending
+                - Failed
+                type: string
+              serverCount:
+                description: ServerCount is the number of servers
+                type: integer
+              servers:
+                description: Servers lists server names in this group
+                items:
+                  type: string
+                type: array
+            type: object
+        type: object
+    served: true
+    storage: true
+    subresources:
+      status: {}
diff --git a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml
index b4d7824d8..31dc2e5ac 100644
--- a/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml
+++ b/deploy/charts/operator-crds/crds/toolhive.stacklok.dev_mcpservers.yaml
@@ -131,6 +131,11 @@ spec:
                 - value
                 type: object
               type: array
+            groupRef:
+              description: |-
+                GroupRef is the name of the MCPGroup this server belongs to
+                Must reference an existing MCPGroup in the same namespace
+              type: string
             image:
               description: Image is the container image for the MCP server
               type: string
diff --git a/deploy/charts/operator/templates/clusterrole/role.yaml b/deploy/charts/operator/templates/clusterrole/role.yaml
index 75c2e08e5..bb25b9cea 100644
--- a/deploy/charts/operator/templates/clusterrole/role.yaml
+++ b/deploy/charts/operator/templates/clusterrole/role.yaml
@@ -100,6 +100,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups
   - mcpregistries
   - mcpservers
   - mcptoolconfigs
@@ -114,6 +115,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups/finalizers
   - mcpregistries/finalizers
   - mcpservers/finalizers
   - mcptoolconfigs/finalizers
@@ -122,6 +124,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups/status
   - mcpregistries/status
   - mcpservers/status
   - mcptoolconfigs/status
diff --git a/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml b/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml
index 75c2e08e5..bb25b9cea 100644
--- a/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml
+++ b/test/e2e/chainsaw/operator/multi-tenancy/setup/assert-rbac-clusterrole.yaml
@@ -100,6 +100,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups
   - mcpregistries
   - mcpservers
   - mcptoolconfigs
@@ -114,6 +115,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups/finalizers
   - mcpregistries/finalizers
   - mcpservers/finalizers
   - mcptoolconfigs/finalizers
@@ -122,6 +124,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups/status
   - mcpregistries/status
   - mcpservers/status
   - mcptoolconfigs/status
diff --git a/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml b/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml
index 75c2e08e5..bb25b9cea 100644
--- a/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml
+++ b/test/e2e/chainsaw/operator/single-tenancy/setup/assert-rbac-clusterrole.yaml
@@ -100,6 +100,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups
   - mcpregistries
   - mcpservers
   - mcptoolconfigs
@@ -114,6 +115,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups/finalizers
   - mcpregistries/finalizers
   - mcpservers/finalizers
   - mcptoolconfigs/finalizers
@@ -122,6 +124,7 @@ rules:
 - apiGroups:
   - toolhive.stacklok.dev
   resources:
+  - mcpgroups/status
   - mcpregistries/status
   - mcpservers/status
   - mcptoolconfigs/status
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/assert-mcpserver-groupref-not-validated.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/assert-mcpserver-groupref-not-validated.yaml
new file mode 100644
index 000000000..c86cd4532
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/assert-mcpserver-groupref-not-validated.yaml
@@ -0,0 +1,8 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+  name: test-server
+  namespace: toolhive-system
+status:
+  (conditions[?type == 'GroupRefValidated'] | [0].status): "False"
+  (conditions[?type == 'GroupRefValidated'] | [0].reason): "GroupRefNotFound"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/chainsaw-test.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/chainsaw-test.yaml
new file mode 100644
index 000000000..b18bdb645
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/chainsaw-test.yaml
@@ -0,0 +1,28 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: invalid-groupref-test
+spec:
+  description: Test MCPServer with GroupRef pointing to non-existent group
+  timeouts:
+    apply: 30s
+    assert: 30s
+  steps:
+    - name: verify-operator
+      description: Ensure operator is ready before testing
+      try:
+        - assert:
+            file: ../../../setup/assert-operator-ready.yaml
+    - name: Create MCPServer with non-existent GroupRef
+      try:
+        - apply:
+            file: mcpserver.yaml
+        - assert:
+            file: assert-mcpserver-groupref-not-validated.yaml
+    - name: Cleanup
+      try:
+        - delete:
+            ref:
+              apiVersion: toolhive.stacklok.dev/v1alpha1
+              kind: MCPServer
+              name: test-server
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/mcpserver.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/mcpserver.yaml
new file mode 100644
index 000000000..a012ccaf5
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/invalid-groupref/mcpserver.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+  name: test-server
+  namespace: toolhive-system
+spec:
+  image: ghcr.io/modelcontextprotocol/servers/filesystem:0.6.2
+  transport: stdio
+  groupRef: non-existent-group
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-empty.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-empty.yaml
new file mode 100644
index 000000000..2496539c8
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-empty.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+  name: mcpgroup-test-group-1
+  namespace: toolhive-system
+status:
+  phase: Ready
+  serverCount: 0
+  servers: []
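+# Note: chainsaw assertions match partially (unlisted fields are ignored),
+# but servers: [] requires the reported list to be exactly empty.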
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-with-server.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-with-server.yaml
new file mode 100644
index 000000000..53c224afc
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/assert-mcpgroup-with-server.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+  name: mcpgroup-test-group-1
+  namespace: toolhive-system
+status:
+  phase: Ready
+  serverCount: 1
+  servers: ["test-server-with-groupref-1"]
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/chainsaw-test.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/chainsaw-test.yaml
new file mode 100644
index 000000000..cd82ff43f
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/chainsaw-test.yaml
@@ -0,0 +1,35 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: mcpgroup-lifecycle-test
+spec:
+  description: Test basic MCPGroup creation, update, and status
+  timeouts:
+    apply: 30s
+    assert: 30s
+    cleanup: 30s
+    exec: 300s
+  steps:
+    - name: verify-operator
+      description: Ensure operator is ready before testing
+      try:
+        - assert:
+            file: ../../../setup/assert-operator-ready.yaml
+    - name: create-mcpgroup
+      description: Create an MCPGroup and verify it's created
+      try:
+        - apply:
+            file: mcpgroup.yaml
+        - assert:
+            file: mcpgroup.yaml
+        - assert:
+            file: assert-mcpgroup-empty.yaml
+    - name: add-mcpserver
+      description: Create an MCPServer that references the MCPGroup and verify status update
+      try:
+        - apply:
+            file: mcpserver-1.yaml
+        - assert:
+            file: mcpserver-1.yaml
+        - assert:
+            file: assert-mcpgroup-with-server.yaml
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpgroup.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpgroup.yaml
new file mode 100644
index 000000000..0b1ed39bb
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpgroup.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+  name: mcpgroup-test-group-1
+  namespace: toolhive-system
+spec:
+  description: "Test group for basic lifecycle testing"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpserver-1.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpserver-1.yaml
new file mode 100644
index 000000000..4f611d2b9
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/lifecycle/mcpserver-1.yaml
@@ -0,0 +1,20 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+  name: test-server-with-groupref-1
+  namespace: toolhive-system
+spec:
+  image: ghcr.io/stackloklabs/yardstick/yardstick-server:0.0.2
+  transport: stdio
+  groupRef: mcpgroup-test-group-1
+  env:
+    - name: TRANSPORT
+      value: stdio
+  port: 8080
+  resources:
+    limits:
+      cpu: "100m"
+      memory: "128Mi"
+    requests:
+      cpu: "50m"
+      memory: "64Mi"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpgroup-ready.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpgroup-ready.yaml
new file mode 100644
index 000000000..dba76268d
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpgroup-ready.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+  name: test-group
+  namespace: toolhive-system
+status:
+  phase: Ready
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpserver-groupref-validated.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpserver-groupref-validated.yaml
new file mode 100644
index 000000000..6a0d2ffe1
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/assert-mcpserver-groupref-validated.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+  name: test-server
+  namespace: toolhive-system
+status:
+  (conditions[?type == 'GroupRefValidated'] | [0].status): "True"
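+# The parenthesized key is a chainsaw JMESPath projection: it selects the
+# GroupRefValidated condition from status.conditions and asserts on it.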
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/chainsaw-test.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/chainsaw-test.yaml
new file mode 100644
index 000000000..a19aa1631
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/chainsaw-test.yaml
@@ -0,0 +1,39 @@
+apiVersion: chainsaw.kyverno.io/v1alpha1
+kind: Test
+metadata:
+  name: valid-groupref-test
+spec:
+  description: Test MCPServer with valid GroupRef
+  timeouts:
+    apply: 30s
+    assert: 30s
+  steps:
+    - name: verify-operator
+      description: Ensure operator is ready before testing
+      try:
+        - assert:
+            file: ../../../setup/assert-operator-ready.yaml
+    - name: Create MCPGroup
+      try:
+        - apply:
+            file: mcpgroup.yaml
+        - assert:
+            file: assert-mcpgroup-ready.yaml
+    - name: Create MCPServer with GroupRef
+      try:
+        - apply:
+            file: mcpserver.yaml
+        - assert:
+            file: assert-mcpserver-groupref-validated.yaml
+    - name: Cleanup
+      try:
+        - delete:
+            ref:
+              apiVersion: toolhive.stacklok.dev/v1alpha1
+              kind: MCPServer
+              name: test-server
+        - delete:
+            ref:
+              apiVersion: toolhive.stacklok.dev/v1alpha1
+              kind: MCPGroup
+              name: test-group
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpgroup.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpgroup.yaml
new file mode 100644
index 000000000..10a282cff
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpgroup.yaml
@@ -0,0 +1,7 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPGroup
+metadata:
+  name: test-group
+  namespace: toolhive-system
+spec:
+  description: "Test group for GroupRef validation"
diff --git a/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpserver.yaml b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpserver.yaml
new file mode 100644
index 000000000..9cf38e36f
--- /dev/null
+++ b/test/e2e/chainsaw/operator/single-tenancy/test-scenarios/mcpgroup/valid-groupref/mcpserver.yaml
@@ -0,0 +1,9 @@
+apiVersion: toolhive.stacklok.dev/v1alpha1
+kind: MCPServer
+metadata:
+  name: test-server
+  namespace: toolhive-system
+spec:
+  image: ghcr.io/modelcontextprotocol/servers/filesystem:0.6.2
+  transport: stdio
+  groupRef: test-group