From e44dc48e80f851aaa7c3eda3ab253a26321c3291 Mon Sep 17 00:00:00 2001
From: yao <63141491+yaonyan@users.noreply.github.com>
Date: Tue, 7 Oct 2025 01:02:09 +0800
Subject: [PATCH 1/4] refactor: enhance MCPSamplingLanguageModel to support
tool calls and response formatting
---
deno.lock | 44 +--
.../examples/generate_object_example.ts | 75 ++++
.../examples/generate_text_example.ts | 85 ++++-
.../examples/stream_text_example.ts | 179 +++++++++
.../src/language-model.ts | 357 ++++++++++++++----
packages/cli/deno.json | 2 +-
packages/cli/tests/bugfix_test.ts | 34 --
packages/core/deno.json | 2 +-
8 files changed, 612 insertions(+), 166 deletions(-)
create mode 100644 packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts
create mode 100644 packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts
delete mode 100644 packages/cli/tests/bugfix_test.ts
diff --git a/deno.lock b/deno.lock
index 02f6fc0..11dd7e4 100644
--- a/deno.lock
+++ b/deno.lock
@@ -2,8 +2,6 @@
"version": "5",
"specifiers": {
"jsr:@es-toolkit/es-toolkit@^1.37.2": "1.39.10",
- "jsr:@mcpc/utils@0.1": "0.1.0",
- "jsr:@std/assert@*": "1.0.12",
"jsr:@std/assert@1": "1.0.14",
"jsr:@std/assert@^1.0.14": "1.0.14",
"jsr:@std/cli@^1.0.21": "1.0.21",
@@ -21,7 +19,6 @@
"npm:@ai-sdk/provider@2": "2.0.0",
"npm:@hono/zod-openapi@~0.19.2": "0.19.10_hono@4.9.10_zod@3.25.76",
"npm:@mcpc-tech/ripgrep-napi@^0.0.4": "0.0.4",
- "npm:@modelcontextprotocol/sdk@1.0.4": "1.0.4",
"npm:@modelcontextprotocol/sdk@^1.8.0": "1.19.1_express@5.1.0_zod@3.25.76",
"npm:@opentelemetry/api@^1.9.0": "1.9.0",
"npm:@opentelemetry/exporter-trace-otlp-http@0.56": "0.56.0_@opentelemetry+api@1.9.0",
@@ -47,27 +44,6 @@
"@es-toolkit/es-toolkit@1.39.10": {
"integrity": "8757072a13aa64b3b349ba2b9d7d22fbe7ea6f138506c6cd2222d767cd79918f"
},
- "@mcpc/utils@0.1.0": {
- "integrity": "ab83d2fe79813cb603967dfed39a567322d6817ac326aebb85b8ae14f69bf637",
- "dependencies": [
- "jsr:@std/http",
- "npm:@modelcontextprotocol/sdk",
- "npm:cheerio",
- "npm:jsonrepair"
- ]
- },
- "@std/assert@1.0.10": {
- "integrity": "59b5cbac5bd55459a19045d95cc7c2ff787b4f8527c0dd195078ff6f9481fbb3",
- "dependencies": [
- "jsr:@std/internal@^1.0.5"
- ]
- },
- "@std/assert@1.0.12": {
- "integrity": "08009f0926dda9cbd8bef3a35d3b6a4b964b0ab5c3e140a4e0351fbf34af5b9a",
- "dependencies": [
- "jsr:@std/internal@^1.0.6"
- ]
- },
"@std/assert@1.0.14": {
"integrity": "68d0d4a43b365abc927f45a9b85c639ea18a9fab96ad92281e493e4ed84abaa4",
"dependencies": [
@@ -308,14 +284,6 @@
"@mcpc-tech/ripgrep-napi-win32-x64-msvc"
]
},
- "@modelcontextprotocol/sdk@1.0.4": {
- "integrity": "sha512-C+jw1lF6HSGzs7EZpzHbXfzz9rj9him4BaoumlTciW/IDDgIpweF/qiCWKlP02QKg5PPcgY6xY2WCt5y2tpYow==",
- "dependencies": [
- "content-type",
- "raw-body",
- "zod"
- ]
- },
"@modelcontextprotocol/sdk@1.19.1_express@5.1.0_zod@3.25.76": {
"integrity": "sha512-3Y2h3MZKjec1eAqSTBclATlX+AbC6n1LgfVzRMJLt3v6w0RCYgwLrjbxPDbhsYHt6Wdqc/aCceNJYgj448ELQQ==",
"dependencies": [
@@ -333,8 +301,8 @@
"zod-to-json-schema"
]
},
- "@napi-rs/wasm-runtime@1.0.3": {
- "integrity": "sha512-rZxtMsLwjdXkMUGC3WwsPwLNVqVqnTJT6MNIB6e+5fhMcSCPP0AOsNWuMQ5mdCq6HNjs/ZeWAEchpqeprqBD2Q==",
+ "@napi-rs/wasm-runtime@1.0.6": {
+ "integrity": "sha512-DXj75ewm11LIWUk198QSKUTxjyRjsBwk09MuMk5DGK+GDUtyPhhEHOGP/Xwwj3DjQXXkivoBirmOnKrLfc0+9g==",
"dependencies": [
"@emnapi/core",
"@emnapi/runtime",
@@ -528,8 +496,8 @@
"@standard-schema/spec@1.0.0": {
"integrity": "sha512-m2bOd0f2RT9k8QJx1JN85cZYyH1RqFBdlwtkSlf4tBDYLCiiZnv1fIIwacK6cqwXavOydf0NPToMQgpKq+dVlA=="
},
- "@tybys/wasm-util@0.10.0": {
- "integrity": "sha512-VyyPYFlOMNylG45GoAe0xDoLwWuowvf92F9kySqzYh8vmYm7D2u4iUJKa1tOUpS70Ku13ASrOkS4ScXFsTaCNQ==",
+ "@tybys/wasm-util@0.10.1": {
+ "integrity": "sha512-9tTaPJLSiejZKx+Bmog4uSubteqTvFrVrURwkmHixBo0G4seD0zUxp98E1DzUBJxLQ3NPwXrGKDiVjwx/DpPsg==",
"dependencies": [
"tslib"
]
@@ -1346,7 +1314,7 @@
"packages/cli": {
"dependencies": [
"jsr:@mcpc/core@~0.2.0-beta.1",
- "jsr:@mcpc/utils@0.1",
+ "jsr:@mcpc/utils@0.2",
"jsr:@std/assert@^1.0.14",
"jsr:@std/http@^1.0.14",
"npm:@hono/zod-openapi@~0.19.2",
@@ -1359,7 +1327,7 @@
},
"packages/core": {
"dependencies": [
- "jsr:@mcpc/utils@0.1",
+ "jsr:@mcpc/utils@0.2",
"jsr:@std/assert@1",
"npm:@mcpc-tech/ripgrep-napi@^0.0.4",
"npm:@modelcontextprotocol/sdk@^1.8.0",
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts b/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts
new file mode 100644
index 0000000..d73fa9e
--- /dev/null
+++ b/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts
@@ -0,0 +1,75 @@
+/**
+ * Example: Using AI SDK generateObject with MCP Sampling Provider
+ *
+ * This example demonstrates how to use the MCP Sampling Provider
+ * with AI SDK's generateObject function for structured data generation.
+ *
+ * Run with:
+ * deno run --allow-all examples/generate_object_example.ts
+ */
+
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import { createMCPSamplingProvider } from "../mod.ts";
+import { generateObject } from "ai";
+import { z } from "zod";
+import { mcpc } from "../../core/mod.ts";
+import type { ComposableMCPServer } from "../../core/mod.ts";
+
+// Create a simple MCPC server with sampling capability
+const server = await mcpc(
+ [
+ { name: "ai-sdk-example", version: "1.0.0" },
+ { capabilities: { sampling: {}, tools: {} } },
+ ],
+ [],
+ (server: ComposableMCPServer) => {
+ // Register a tool that generates structured data
+ server.tool(
+ "generate-recipe",
+ "Generate a structured recipe object using AI SDK",
+ { type: "object", properties: {} },
+ async () => {
+        console.log("Generating structured object with AI SDK...\n");
+
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
+
+ // Define the schema for the recipe object
+ const recipeSchema = z.object({
+ recipe: z.object({
+ name: z.string(),
+ cuisine: z.string(),
+ difficulty: z.enum(["easy", "medium", "hard"]),
+ prepTime: z.string(),
+ cookTime: z.string(),
+ servings: z.number(),
+ ingredients: z.array(z.string()),
+ steps: z.array(z.string()),
+ tips: z.array(z.string()).optional(),
+ }),
+ });
+
+ // Use generateObject with the provider
+ const result = await generateObject({
+ mode: "json",
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ schema: recipeSchema,
+ prompt: "Generate a delicious lasagna recipe.",
+ });
+
+ // Display the results
+      console.log("Generated object:");
+      console.log(JSON.stringify(result.object, null, 2));
+      console.log("\nFinish reason:", result.finishReason);
+      console.log("Token usage:", result.usage);
+
+ return {
+ content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
+ };
+ },
+ );
+ },
+);
+
+const transport = new StdioServerTransport();
+await server.connect(transport);
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts b/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts
index aa0cc32..516cbe5 100644
--- a/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts
+++ b/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts
@@ -10,7 +10,8 @@
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import { createMCPSamplingProvider } from "../mod.ts";
-import { generateText } from "ai";
+import { generateText, stepCountIs } from "ai";
+import { z } from "zod";
import { mcpc } from "../../core/mod.ts";
import type { ComposableMCPServer } from "../../core/mod.ts";
@@ -20,19 +21,7 @@ const server = await mcpc(
{ name: "ai-sdk-example", version: "1.0.0" },
{ capabilities: { sampling: {}, tools: {} } },
],
- [
- {
- name: "ai-sdk-example",
- description: `I demonstrate AI SDK integration with MCP Sampling Provider.
-
-Available tools:
-
-
-I can generate greetings using AI SDK's generateText function.`,
- deps: { mcpServers: {} },
- options: { sampling: true },
- },
- ],
+ [],
(server: ComposableMCPServer) => {
// Register a simple tool that the agent can use
server.tool(
@@ -61,6 +50,74 @@ I can generate greetings using AI SDK's generateText function.`,
};
},
);
+
+ // Register a tool that tests tool calling with AI SDK
+ server.tool(
+ "test-tool-calls",
+ "Test tool calling functionality with AI SDK",
+ { type: "object", properties: {} },
+ async () => {
+        console.log("Testing tool calls with AI SDK...\n");
+
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
+
+ // Simple prompt to test tool injection
+ // Define tools that will be injected into the system prompt
+ // According to AI SDK docs, tools should have description and parameters/inputSchema
+ const result = await generateText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ stopWhen: stepCountIs(5),
+ prompt:
+ "Calculate 25 + 17 using the calculator tool, then explain the result.",
+ tools: {
+ calculator: {
+ description: "Perform mathematical calculations",
+ inputSchema: z.object({
+ operation: z
+ .enum(["add", "subtract", "multiply", "divide"])
+ .describe("The math operation to perform"),
+ a: z.number().describe("First number"),
+ b: z.number().describe("Second number"),
+ }),
+ execute: (params: {
+ operation: string;
+ a: number;
+ b: number;
+ }) => {
+ switch (params.operation) {
+ case "add":
+ return { result: params.a + params.b };
+ case "subtract":
+ return { result: params.a - params.b };
+ case "multiply":
+ return { result: params.a * params.b };
+ case "divide":
+ return { result: params.a / params.b };
+ default:
+ throw new Error("Unsupported operation");
+ }
+ },
+ },
+ },
+ });
+
+ // Display the results
+      console.log("\nGenerated response:");
+      console.log(result.text);
+      console.log("\nFinish reason:", result.finishReason);
+      console.log("Token usage:", result.usage);
+
+ return {
+ content: [
+ {
+ type: "text",
+ text: `Result: ${result.text}`,
+ },
+ ],
+ };
+ },
+ );
},
);
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts b/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts
new file mode 100644
index 0000000..3a7e21d
--- /dev/null
+++ b/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts
@@ -0,0 +1,179 @@
+/**
+ * Example: Using AI SDK streamText with MCP Sampling Provider
+ *
+ * This example demonstrates how to use the MCP Sampling Provider
+ * with AI SDK's streamText function for streaming text generation.
+ *
+ * Run with:
+ * deno run --allow-all examples/stream_text_example.ts
+ */
+
+import process from "node:process";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import { createMCPSamplingProvider } from "../mod.ts";
+import { stepCountIs, streamText } from "ai";
+import { z } from "zod";
+import { mcpc } from "../../core/mod.ts";
+import type { ComposableMCPServer } from "../../core/mod.ts";
+
+// Create a simple MCPC server with sampling capability
+const server = await mcpc(
+ [
+ { name: "ai-sdk-example", version: "1.0.0" },
+ { capabilities: { sampling: {}, tools: {} } },
+ ],
+ [],
+ (server: ComposableMCPServer) => {
+ // Register a simple tool that streams text
+ server.tool(
+ "stream-greeting",
+ "Stream a greeting message using AI SDK",
+ { type: "object", properties: {} },
+ async () => {
+        console.log("Streaming text with AI SDK...\n");
+
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
+
+ // Use streamText with the provider
+ const result = streamText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ prompt: "Write a short poem about coding.",
+ });
+
+ // Stream the text chunks
+      console.log("Streaming response:");
+ for await (const chunk of result.textStream) {
+ process.stdout.write(chunk);
+ }
+ console.log("\n");
+
+ // Display final results after streaming completes
+      console.log("\nFinish reason:", await result.finishReason);
+      console.log("Token usage:", await result.usage);
+
+ return {
+ content: [{ type: "text", text: await result.text }],
+ };
+ },
+ );
+
+ // Register a tool that tests tool calling with streaming
+ server.tool(
+ "stream-with-tools",
+ "Stream text generation with tool calls",
+ { type: "object", properties: {} },
+ async () => {
+        console.log("Testing streaming with tool calls...\n");
+
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
+
+ // Stream with tools
+ const result = streamText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ stopWhen: stepCountIs(5),
+ prompt:
+ "Calculate 25 + 17 using the calculator tool, then explain the result.",
+ tools: {
+ calculator: {
+ description: "Perform mathematical calculations",
+ inputSchema: z.object({
+ operation: z
+ .enum(["add", "subtract", "multiply", "divide"])
+ .describe("The math operation to perform"),
+ a: z.number().describe("First number"),
+ b: z.number().describe("Second number"),
+ }),
+ execute: (params: {
+ operation: string;
+ a: number;
+ b: number;
+ }) => {
+ console.log(
+              `\nExecuting calculator: ${params.operation}(${params.a}, ${params.b})`,
+ );
+ switch (params.operation) {
+ case "add":
+ return { result: params.a + params.b };
+ case "subtract":
+ return { result: params.a - params.b };
+ case "multiply":
+ return { result: params.a * params.b };
+ case "divide":
+ return { result: params.a / params.b };
+ default:
+ throw new Error("Unsupported operation");
+ }
+ },
+ },
+ },
+ });
+
+ // Stream the text chunks and collect tool info
+      console.log("\nStreaming response:");
+ let chunkCount = 0;
+ const collectedToolCalls: any[] = [];
+ const collectedToolResults: any[] = [];
+
+ for await (const chunk of result.fullStream) {
+ chunkCount++;
+ console.log(`Chunk ${chunkCount}:`, JSON.stringify(chunk));
+
+ // Collect tool calls
+ if (chunk.type === "tool-call") {
+ collectedToolCalls.push(chunk);
+ }
+
+ // Collect tool results
+ if (chunk.type === "tool-result") {
+ collectedToolResults.push(chunk);
+ }
+ }
+
+      console.log(`\nTotal chunks received: ${chunkCount}`);
+
+ // Display collected tool calls
+ if (collectedToolCalls.length > 0) {
+        console.log("\nTool Calls:");
+ for (const call of collectedToolCalls) {
+ console.log(` - ${call.toolName}:`, JSON.stringify(call.input));
+ }
+ }
+
+ // Display collected tool results
+ if (collectedToolResults.length > 0) {
+        console.log("\nTool Results:");
+ for (const toolResult of collectedToolResults) {
+ console.log(
+ ` - ${toolResult.toolName}:`,
+ JSON.stringify(toolResult.output),
+ );
+ }
+ }
+
+ // Display final results
+      console.log("\nFinish reason:", await result.finishReason);
+      console.log("Token usage:", await result.usage);
+
+ const finalText = await result.text;
+
+ return {
+ content: [
+ {
+ type: "text",
+ text: `Results:\n` +
+ `- Text: ${finalText || "(no text generated)"}\n` +
+ `- Tool calls: ${collectedToolCalls.length}\n` +
+ `- Tool results: ${collectedToolResults.length}\n` +
+ `- Finish reason: ${await result.finishReason}`,
+ },
+ ],
+ };
+ },
+ );
+ },
+);
+
+const transport = new StdioServerTransport();
+await server.connect(transport);
diff --git a/packages/ai-sdk-mcp-sampling-provider/src/language-model.ts b/packages/ai-sdk-mcp-sampling-provider/src/language-model.ts
index 224e3e9..848ae19 100644
--- a/packages/ai-sdk-mcp-sampling-provider/src/language-model.ts
+++ b/packages/ai-sdk-mcp-sampling-provider/src/language-model.ts
@@ -11,7 +11,6 @@ import type {
LanguageModelV2Prompt,
LanguageModelV2StreamPart,
LanguageModelV2Text,
- LanguageModelV2Usage,
} from "@ai-sdk/provider";
import type { Server } from "@modelcontextprotocol/sdk/server/index.js";
import type { SamplingMessage } from "@modelcontextprotocol/sdk/types.js";
@@ -29,8 +28,8 @@ export interface MCPSamplingLanguageModelConfig {
/**
* MCP Language Model implementation of AI SDK's LanguageModelV2 interface
*
- * This allows MCPC server's createMessage capability to be used through AI SDK's standard interface.
- * The model uses MCPC server's createMessage method under the hood.
+ * This allows MCP server's createMessage capability to be used through AI SDK's standard interface.
+ * The model uses MCP server's createMessage method under the hood.
*/
export class MCPSamplingLanguageModel implements LanguageModelV2 {
readonly specificationVersion = "v2" as const;
@@ -56,19 +55,21 @@ export class MCPSamplingLanguageModel implements LanguageModelV2 {
async doGenerate(options: LanguageModelV2CallOptions): Promise<{
content: LanguageModelV2Content[];
finishReason: LanguageModelV2FinishReason;
- usage: LanguageModelV2Usage;
+ usage: {
+ inputTokens: number | undefined;
+ outputTokens: number | undefined;
+ totalTokens: number;
+ };
request?: {
body?: string;
};
response?: {
- id?: string;
- timestamp?: Date;
modelId?: string;
      headers?: Record<string, string>;
};
warnings: LanguageModelV2CallWarning[];
}> {
- // Convert AI SDK messages to MCPC format
+ // Convert AI SDK messages to MCP Sampling format
const messages = this.convertMessages(options.prompt);
// Extract system prompt from AI SDK messages
@@ -82,22 +83,45 @@ export class MCPSamplingLanguageModel implements LanguageModelV2 {
}
}
- // Call MCPC server's createMessage method directly (like base-sampling-executor)
+ // Inject response format instructions into system prompt
+ // TODO: Remove this workaround when MCP natively supports responseFormat
+ systemPrompt = this.injectResponseFormatInstructions(
+ systemPrompt,
+ options.responseFormat,
+ );
+
+ // Inject tool definitions into system prompt
+ // TODO: Remove this workaround when MCP natively supports tools parameter
+ systemPrompt = this.injectToolInstructions(systemPrompt, options.tools);
+
+ // Call MCP server's createMessage method directly (like base-sampling-executor)
const result = await this.server.createMessage({
systemPrompt,
messages,
maxTokens: options.maxOutputTokens ?? 55_000,
});
- // Extract text from result and build content array
+ // Extract text and tool calls from result
const content: LanguageModelV2Content[] = [];
if (result.content.type === "text" && result.content.text) {
- const textContent: LanguageModelV2Text = {
- type: "text",
- text: result.content.text,
- };
- content.push(textContent);
+ // Parse the response text to extract tool calls
+ const { text, toolCalls } = this.extractToolCalls(
+ result.content.text,
+ options.tools,
+ );
+
+ // Add text content if present
+ if (text.trim()) {
+ const textContent: LanguageModelV2Text = {
+ type: "text",
+ text: text,
+ };
+ content.push(textContent);
+ }
+
+ // Add tool call content
+ content.push(...toolCalls);
}
const finishReason = this.mapStopReason(result.stopReason);
@@ -123,51 +147,49 @@ export class MCPSamplingLanguageModel implements LanguageModelV2 {
/**
* Stream a response using MCP's createMessage capability
- * Note: MCP createMessage doesn't natively support streaming, so this
- * implementation returns the full result as chunks.
+ *
+ * Since MCP doesn't support native streaming, we generate the full response
+ * and emit it as stream events following AI SDK's protocol.
*/
async doStream(options: LanguageModelV2CallOptions): Promise<{
    stream: ReadableStream<LanguageModelV2StreamPart>;
- request?: {
- body?: string;
- };
+ request?: { body?: string };
warnings: LanguageModelV2CallWarning[];
}> {
- // MCP createMessage doesn't support native streaming, so we generate
- // the full response and stream it as chunks
const result = await this.doGenerate(options);
const stream = new ReadableStream({
start(controller) {
- // Send stream start
- controller.enqueue({
- type: "stream-start",
- warnings: result.warnings,
- });
+ // 1. Send response metadata
+ if (result.response?.modelId) {
+ controller.enqueue({
+ type: "response-metadata",
+ modelId: result.response.modelId,
+ ...(result.response.headers &&
+ { headers: result.response.headers }),
+ });
+ }
- // Send content
+ // 2. Send content parts
+ let textIndex = 0;
for (const part of result.content) {
if (part.type === "text") {
+ const id = `text-${++textIndex}`;
+ // AI SDK requires: text-start ā text-delta ā text-end
+ controller.enqueue({ type: "text-start", id });
+ controller.enqueue({ type: "text-delta", id, delta: part.text });
+ controller.enqueue({ type: "text-end", id });
+ } else if (part.type === "tool-call") {
controller.enqueue({
- type: "text-delta",
- id: "text-1",
- delta: part.text,
+ type: "tool-call",
+ toolCallId: part.toolCallId,
+ toolName: part.toolName,
+ input: JSON.stringify(part.input),
});
}
}
- // Send response metadata
- if (result.response?.modelId) {
- controller.enqueue({
- type: "response-metadata",
- modelId: result.response.modelId,
- ...(result.response.headers
- ? { headers: result.response.headers }
- : {}),
- });
- }
-
- // Send finish message
+ // 3. Send finish event
controller.enqueue({
type: "finish",
finishReason: result.finishReason,
@@ -192,37 +214,46 @@ export class MCPSamplingLanguageModel implements LanguageModelV2 {
const messages: SamplingMessage[] = [];
for (const msg of prompt) {
- // Skip system messages - they're handled separately
- if (msg.role === "system") {
- continue;
- }
+ if (msg.role === "system") continue; // System handled separately
- // Convert role
- const role = msg.role === "user"
- ? ("user" as const)
- : msg.role === "assistant"
- ? ("assistant" as const)
- : ("user" as const); // fallback
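+      // MCP sampling defines only "user" and "assistant" roles, so tool messages fall back to "user"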
+ const role = msg.role === "assistant" ? "assistant" : "user";
- // Convert content - extract text from text parts only
+ // Extract different content types
const textParts = msg.content.filter((c) => c.type === "text");
- const textContent = textParts
- .map((c) => {
- if (c.type === "text") {
- return c.text;
- }
- return "";
- })
- .join("\n");
-
- if (textContent) {
- messages.push({
- role: role,
- content: {
- type: "text",
- text: textContent,
- },
+ const toolCalls = msg.content.filter((c) => c.type === "tool-call");
+ const toolResults = msg.content.filter((c) => c.type === "tool-result");
+
+ // Format each type as plain text
+ const parts: string[] = [];
+
+ if (textParts.length > 0) {
+ parts.push(textParts.map((c) => (c as any).text).join("\n"));
+ }
+
+ if (toolCalls.length > 0) {
+ const calls = toolCalls.map((c) => {
+ const call = c as any;
+        return `<use_tool name="${call.toolName}">\n${
+          JSON.stringify(call.input || {})
+        }\n</use_tool>`;
});
+ parts.push(calls.join("\n"));
+ }
+
+ if (toolResults.length > 0) {
+ const results = toolResults.map((c) => {
+ const result = c as any;
+ const output = JSON.stringify(
+ result.output || result.result || "undefined",
+ );
+ return `Tool "${result.toolName}" result:\n${output}`;
+ });
+ parts.push(results.join("\n\n"));
+ }
+
+ const text = parts.join("\n\n");
+ if (text) {
+ messages.push({ role, content: { type: "text", text } });
}
}
@@ -233,15 +264,185 @@ export class MCPSamplingLanguageModel implements LanguageModelV2 {
* Map MCP stop reason to AI SDK finish reason
*/
private mapStopReason(stopReason?: string): LanguageModelV2FinishReason {
- switch (stopReason) {
- case "endTurn":
- return "stop";
- case "maxTokens":
- return "length";
- case "stopSequence":
- return "stop";
- default:
- return "unknown";
+ if (stopReason === "endTurn" || stopReason === "stopSequence") {
+ return "stop";
+ }
+ if (stopReason === "maxTokens") return "length";
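+    // Pass any other provider-specific reason straight through; absent means "unknown"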
+ return (stopReason as LanguageModelV2FinishReason) ?? "unknown";
+ }
+
+ /**
+ * Inject response format instructions into system prompt
+ *
+ * WORKAROUND: MCP sampling currently doesn't support native responseFormat parameter.
+ * This method injects formatting instructions directly into the system prompt.
+ *
+ * TODO: Remove this workaround when MCP protocol adds native support for:
+ * - responseFormat parameter in createMessage
+ * - JSON schema validation
+ * - Structured output modes
+ */
+ private injectResponseFormatInstructions(
+ systemPrompt: string | undefined,
+ responseFormat?: LanguageModelV2CallOptions["responseFormat"],
+ ): string | undefined {
+ // If no response format specified, return original prompt
+ if (!responseFormat) {
+ return systemPrompt;
}
+
+ let enhanced = systemPrompt || "";
+
+ // Handle JSON response format
+ if (responseFormat.type === "json") {
+ const jsonPrompt = `
+
+IMPORTANT: You MUST respond with valid JSON only. Do not include any text before or after the JSON.
+- Your response must be a valid JSON object
+- Do not wrap the JSON in markdown code blocks
+- Do not include explanations or comments
+- Ensure all JSON is properly formatted and parseable`;
+
+ enhanced = enhanced ? `${enhanced}${jsonPrompt}` : jsonPrompt.trim();
+
+ // If schema is provided, add schema information
+ if (responseFormat.schema) {
+ const schemaInfo = `
+- Follow this JSON schema structure: ${JSON.stringify(responseFormat.schema)}`;
+ enhanced += schemaInfo;
+ }
+ }
+
+ return enhanced || undefined;
+ }
+
+ /**
+ * Inject tool definitions into system prompt
+ *
+ * WORKAROUND: MCP sampling currently doesn't support native tools parameter.
+ * This method injects tool descriptions and usage instructions into the system prompt.
+ *
+ * TODO: Remove this workaround when MCP protocol adds native support for:
+ * - tools parameter in createMessage
+ * - Tool calling and function execution
+ * - Structured tool responses
+ */
+ private injectToolInstructions(
+ systemPrompt: string | undefined,
+ tools?: LanguageModelV2CallOptions["tools"],
+ ): string | undefined {
+ // If no tools specified, return original prompt
+ if (!tools || tools.length === 0) {
+ return systemPrompt;
+ }
+
+ let enhanced = systemPrompt || "";
+
+ // Build tool instructions using XML format
+ const toolsPrompt = `
+
+AVAILABLE TOOLS:
+You have access to the following tools. To use a tool, respond with this XML format:
+
+{"param1": "value1", "param2": "value2"}
+
+
+Follow the JSON schema definition for each tool's parameters.
+You can use multiple tools in one response. You can include text before tool calls, but do NOT include text after tool calls - wait for the tool results first.
+
+Tools:`;
+
+ // Add each tool's description
+ const toolDescriptions = tools
+ .map((tool) => {
+ // Handle different tool types
+ if (tool.type === "function") {
+ const toolAny = tool as any;
+ const description = toolAny.description || "No description provided";
+ // Try both inputSchema and parameters for compatibility
+ const schema = toolAny.inputSchema || toolAny.parameters;
+ const params = schema
+ ? `\n JSON Schema: ${JSON.stringify(schema, null, 2)}`
+ : "";
+ return `
+- ${tool.name}: ${description}${params}`;
+ } else if (tool.type === "provider-defined") {
+ return `
+- ${tool.name}: ${tool.id || "No description provided"}`;
+ }
+ return "";
+ })
+ .filter(Boolean)
+ .join("");
+
+ enhanced = enhanced
+ ? `${enhanced}${toolsPrompt}${toolDescriptions}`
+ : `${toolsPrompt}${toolDescriptions}`.trim();
+
+ return enhanced || undefined;
+ }
+
+ /**
+ * Extract tool calls from LLM response text
+ *
+ * Parses XML-style tool call tags from the response:
+   * <use_tool name="toolName">{"arg": "value"}</use_tool>
+ */
+ private extractToolCalls(
+ responseText: string,
+ tools?: LanguageModelV2CallOptions["tools"],
+ ): {
+ text: string;
+ toolCalls: LanguageModelV2Content[];
+ } {
+ // If no tools available, return plain text
+ if (!tools || tools.length === 0) {
+ return { text: responseText, toolCalls: [] };
+ }
+
+ const toolCalls: LanguageModelV2Content[] = [];
+
+    // Regular expression to match <use_tool name="...">...</use_tool> blocks
+    const toolCallRegex = /<use_tool name="([^"]+)">([\s\S]*?)<\/use_tool>/g;
+
+ let match;
+ let lastIndex = 0;
+ const textParts: string[] = [];
+ let callIndex = 0;
+
+ while ((match = toolCallRegex.exec(responseText)) !== null) {
+ // Add text before this tool call
+ textParts.push(responseText.slice(lastIndex, match.index));
+
+ const toolName = match[1];
+ const argsText = match[2].trim();
+
+ // Parse args to get input object
+ let argsObject;
+ try {
+ argsObject = JSON.parse(argsText);
+ } catch {
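+        // Malformed JSON in a tool call degrades to an empty input rather than failing the whole response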
+ argsObject = {};
+ }
+
+ // Create tool call in AI SDK format
+ // Based on: https://sdk.vercel.ai/docs/ai-sdk-core/tools-and-tool-calling
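+      // MCP responses carry no tool-call ids, so synthesize a unique one per extracted call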
+ toolCalls.push({
+ type: "tool-call",
+ toolCallId: `call_${Date.now()}_${callIndex++}`,
+ toolName: toolName,
+ args: argsText,
+ input: argsObject,
+ } as LanguageModelV2Content);
+
+ lastIndex = match.index + match[0].length;
+ }
+
+ // Add remaining text after last tool call
+ textParts.push(responseText.slice(lastIndex));
+
+ const text = textParts.join("").trim();
+
+ return { text, toolCalls };
}
}
diff --git a/packages/cli/deno.json b/packages/cli/deno.json
index ea73006..22fe22b 100644
--- a/packages/cli/deno.json
+++ b/packages/cli/deno.json
@@ -9,7 +9,7 @@
},
"imports": {
"@mcpc/core": "jsr:@mcpc/core@^0.2.0-beta.1",
- "@mcpc/utils": "jsr:@mcpc/utils@^0.1.0",
+ "@mcpc/utils": "jsr:@mcpc/utils@^0.2.0",
"@modelcontextprotocol/sdk": "npm:@modelcontextprotocol/sdk@^1.8.0",
"@mcpc-tech/ripgrep-napi": "npm:@mcpc-tech/ripgrep-napi@^0.0.4",
"@hono/zod-openapi": "npm:@hono/zod-openapi@^0.19.2",
diff --git a/packages/cli/tests/bugfix_test.ts b/packages/cli/tests/bugfix_test.ts
deleted file mode 100644
index 711eff0..0000000
--- a/packages/cli/tests/bugfix_test.ts
+++ /dev/null
@@ -1,34 +0,0 @@
-import { assertEquals, assertExists } from "@std/assert";
-import { loadConfig } from "../src/config/loader.ts";
-import { createServer } from "../src/app.ts";
-import process from "node:process";
-
-Deno.test("Bug fix - empty deps object should work", async () => {
- // Setup - this was causing the error
- process.env.API_KEY = "secret123";
- process.env.MCPC_CONFIG = JSON.stringify([
- {
- name: "agent",
- description: "Key: $API_KEY",
- deps: {}, // Empty deps object
- },
- ]);
-
- // Test config loading
- const config = await loadConfig();
- assertExists(config);
- assertEquals(config.agents[0].description, "Key: secret123");
-
- // Verify deps.mcpServers was added
- assertExists(config.agents[0].deps);
- assertExists(config.agents[0].deps.mcpServers);
- assertEquals(typeof config.agents[0].deps.mcpServers, "object");
-
- // Test server creation should not throw
- const server = await createServer(config);
- assertExists(server);
-
- // Cleanup
- delete process.env.API_KEY;
- delete process.env.MCPC_CONFIG;
-});
diff --git a/packages/core/deno.json b/packages/core/deno.json
index de4201c..0e18d1d 100644
--- a/packages/core/deno.json
+++ b/packages/core/deno.json
@@ -13,7 +13,7 @@
"test:watch": "deno test --allow-all --watch tests/"
},
"imports": {
- "@mcpc/utils": "jsr:@mcpc/utils@^0.1.0",
+ "@mcpc/utils": "jsr:@mcpc/utils@^0.2.0",
"@mcpc-tech/ripgrep-napi": "npm:@mcpc-tech/ripgrep-napi@^0.0.4",
"@modelcontextprotocol/sdk": "npm:@modelcontextprotocol/sdk@^1.8.0",
"@opentelemetry/api": "npm:@opentelemetry/api@^1.9.0",
From 4033d37b4af01852f7464fb3fd377e95c55dc273 Mon Sep 17 00:00:00 2001
From: yao <63141491+yaonyan@users.noreply.github.com>
Date: Tue, 7 Oct 2025 01:15:04 +0800
Subject: [PATCH 2/4] feat: enhance examples to support tool registration and
streaming functionality
---
.../ai-sdk-mcp-sampling-provider/deno.json | 2 +-
.../examples/generate_object_example.ts | 111 +++---
.../examples/generate_text_example.ts | 200 +++++------
.../examples/stream_text_example.ts | 323 +++++++++---------
4 files changed, 329 insertions(+), 307 deletions(-)
diff --git a/packages/ai-sdk-mcp-sampling-provider/deno.json b/packages/ai-sdk-mcp-sampling-provider/deno.json
index 562328c..5e3c584 100644
--- a/packages/ai-sdk-mcp-sampling-provider/deno.json
+++ b/packages/ai-sdk-mcp-sampling-provider/deno.json
@@ -1,6 +1,6 @@
{
"name": "@mcpc/mcp-sampling-provider",
- "version": "0.1.0",
+ "version": "0.1.1",
"exports": {
".": "./mod.ts"
},
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts b/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts
index d73fa9e..8ca95b7 100644
--- a/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts
+++ b/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts
@@ -8,68 +8,75 @@
* deno run --allow-all examples/generate_object_example.ts
*/
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import {
+ CallToolRequestSchema,
+ ListToolsRequestSchema,
+} from "@modelcontextprotocol/sdk/types.js";
import { createMCPSamplingProvider } from "../mod.ts";
import { generateObject } from "ai";
import { z } from "zod";
-import { mcpc } from "../../core/mod.ts";
-import type { ComposableMCPServer } from "../../core/mod.ts";
-// Create a simple MCPC server with sampling capability
-const server = await mcpc(
- [
- { name: "ai-sdk-example", version: "1.0.0" },
- { capabilities: { sampling: {}, tools: {} } },
- ],
- [],
- (server: ComposableMCPServer) => {
- // Register a tool that generates structured data
- server.tool(
- "generate-recipe",
- "Generate a structured recipe object using AI SDK",
- { type: "object", properties: {} },
- async () => {
-        console.log("Generating structured object with AI SDK...\n");
+// Create a simple MCP server with sampling capability
+const server = new Server(
+ { name: "ai-sdk-example", version: "1.0.0" },
+ { capabilities: { sampling: {}, tools: {} } },
+);
- // Create MCP sampling provider
- const provider = createMCPSamplingProvider({ server });
+// Register a tool that generates structured data
+server.setRequestHandler(ListToolsRequestSchema, () => {
+ return {
+ tools: [{
+ name: "generate-recipe",
+ description: "Generate a structured recipe object using AI SDK",
+ inputSchema: { type: "object", properties: {} },
+ }],
+ };
+});
- // Define the schema for the recipe object
- const recipeSchema = z.object({
- recipe: z.object({
- name: z.string(),
- cuisine: z.string(),
- difficulty: z.enum(["easy", "medium", "hard"]),
- prepTime: z.string(),
- cookTime: z.string(),
- servings: z.number(),
- ingredients: z.array(z.string()),
- steps: z.array(z.string()),
- tips: z.array(z.string()).optional(),
- }),
- });
+server.setRequestHandler(CallToolRequestSchema, async (request) => {
+ if (request.params.name === "generate-recipe") {
+    console.log("Generating structured object with AI SDK...\n");
- // Use generateObject with the provider
- const result = await generateObject({
- mode: "json",
- model: provider.languageModel("copilot/gpt-5-mini"),
- schema: recipeSchema,
- prompt: "Generate a delicious lasagna recipe.",
- });
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
- // Display the results
-      console.log("Generated object:");
-      console.log(JSON.stringify(result.object, null, 2));
-      console.log("\nFinish reason:", result.finishReason);
-      console.log("Token usage:", result.usage);
+ // Define the schema for the recipe object
+ const recipeSchema = z.object({
+ recipe: z.object({
+ name: z.string(),
+ cuisine: z.string(),
+ difficulty: z.enum(["easy", "medium", "hard"]),
+ prepTime: z.string(),
+ cookTime: z.string(),
+ servings: z.number(),
+ ingredients: z.array(z.string()),
+ steps: z.array(z.string()),
+ tips: z.array(z.string()).optional(),
+ }),
+ });
- return {
- content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
- };
- },
- );
- },
-);
+ // Use generateObject with the provider
+ const result = await generateObject({
+ mode: "json",
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ schema: recipeSchema,
+ prompt: "Generate a delicious lasagna recipe.",
+ });
+
+ // Display the results
+    console.log("Generated object:");
+    console.log(JSON.stringify(result.object, null, 2));
+    console.log("\nFinish reason:", result.finishReason);
+    console.log("Token usage:", result.usage);
+
+ return {
+ content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
+ };
+ }
+ throw new Error(`Unknown tool: ${request.params.name}`);
+});
const transport = new StdioServerTransport();
await server.connect(transport);
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts b/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts
index 516cbe5..2eff6c5 100644
--- a/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts
+++ b/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts
@@ -8,118 +8,124 @@
* deno run --allow-all examples/generate_text_example.ts
*/
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import {
+ CallToolRequestSchema,
+ ListToolsRequestSchema,
+} from "@modelcontextprotocol/sdk/types.js";
import { createMCPSamplingProvider } from "../mod.ts";
import { generateText, stepCountIs } from "ai";
import { z } from "zod";
-import { mcpc } from "../../core/mod.ts";
-import type { ComposableMCPServer } from "../../core/mod.ts";
-// Create a simple MCPC server with sampling capability
-const server = await mcpc(
- [
- { name: "ai-sdk-example", version: "1.0.0" },
- { capabilities: { sampling: {}, tools: {} } },
- ],
- [],
- (server: ComposableMCPServer) => {
- // Register a simple tool that the agent can use
- server.tool(
- "generate-greeting",
- "Generate a greeting message using AI SDK",
- { type: "object", properties: {} },
- async () => {
-        console.log("Generating text with AI SDK...\n");
+// Create a simple MCP server with sampling capability
+const server = new Server(
+ { name: "ai-sdk-example", version: "1.0.0" },
+ { capabilities: { sampling: {}, tools: {} } },
+);
- // Create MCP sampling provider
- const provider = createMCPSamplingProvider({ server });
+// Register tools
+server.setRequestHandler(ListToolsRequestSchema, () => {
+ return {
+ tools: [
+ {
+ name: "generate-greeting",
+ description: "Generate a greeting message using AI SDK",
+ inputSchema: { type: "object", properties: {} },
+ },
+ {
+ name: "test-tool-calls",
+ description: "Test tool calling functionality with AI SDK",
+ inputSchema: { type: "object", properties: {} },
+ },
+ ],
+ };
+});
- // Use generateText with the provider
- const result = await generateText({
- model: provider.languageModel("copilot/gpt-5-mini"),
- prompt: "Say hello!",
- });
+server.setRequestHandler(CallToolRequestSchema, async (request) => {
+ if (request.params.name === "generate-greeting") {
+    console.log("Generating text with AI SDK...\n");
- // Display the results
-      console.log("Generated text:", result.text);
-      console.log("Finish reason:", result.finishReason);
-      console.log("Token usage:", result.usage);
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
- return {
- content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
- };
- },
- );
+ // Use generateText with the provider
+ const result = await generateText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ prompt: "Say hello!",
+ });
- // Register a tool that tests tool calling with AI SDK
- server.tool(
- "test-tool-calls",
- "Test tool calling functionality with AI SDK",
- { type: "object", properties: {} },
- async () => {
-        console.log("Testing tool calls with AI SDK...\n");
+ // Display the results
+    console.log("Generated text:", result.text);
+    console.log("Finish reason:", result.finishReason);
+    console.log("Token usage:", result.usage);
- // Create MCP sampling provider
- const provider = createMCPSamplingProvider({ server });
+ return {
+ content: [{ type: "text", text: JSON.stringify(result, null, 2) }],
+ };
+ }
- // Simple prompt to test tool injection
- // Define tools that will be injected into the system prompt
- // According to AI SDK docs, tools should have description and parameters/inputSchema
- const result = await generateText({
- model: provider.languageModel("copilot/gpt-5-mini"),
- stopWhen: stepCountIs(5),
- prompt:
- "Calculate 25 + 17 using the calculator tool, then explain the result.",
- tools: {
- calculator: {
- description: "Perform mathematical calculations",
- inputSchema: z.object({
- operation: z
- .enum(["add", "subtract", "multiply", "divide"])
- .describe("The math operation to perform"),
- a: z.number().describe("First number"),
- b: z.number().describe("Second number"),
- }),
- execute: (params: {
- operation: string;
- a: number;
- b: number;
- }) => {
- switch (params.operation) {
- case "add":
- return { result: params.a + params.b };
- case "subtract":
- return { result: params.a - params.b };
- case "multiply":
- return { result: params.a * params.b };
- case "divide":
- return { result: params.a / params.b };
- default:
- throw new Error("Unsupported operation");
- }
- },
- },
- },
- });
+ if (request.params.name === "test-tool-calls") {
+    console.log("Testing tool calls with AI SDK...\n");
- // Display the results
-      console.log("\nGenerated response:");
-      console.log(result.text);
-      console.log("\nFinish reason:", result.finishReason);
-      console.log("Token usage:", result.usage);
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
- return {
- content: [
- {
- type: "text",
- text: `Result: ${result.text}`,
- },
- ],
- };
+ const result = await generateText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ stopWhen: stepCountIs(5),
+ prompt:
+ "Calculate 25 + 17 using the calculator tool, then explain the result.",
+ tools: {
+ calculator: {
+ description: "Perform mathematical calculations",
+ inputSchema: z.object({
+ operation: z
+ .enum(["add", "subtract", "multiply", "divide"])
+ .describe("The math operation to perform"),
+ a: z.number().describe("First number"),
+ b: z.number().describe("Second number"),
+ }),
+ execute: (params: {
+ operation: string;
+ a: number;
+ b: number;
+ }) => {
+ switch (params.operation) {
+ case "add":
+ return { result: params.a + params.b };
+ case "subtract":
+ return { result: params.a - params.b };
+ case "multiply":
+ return { result: params.a * params.b };
+ case "divide":
+ return { result: params.a / params.b };
+ default:
+ throw new Error("Unsupported operation");
+ }
+ },
+ },
},
- );
- },
-);
+ });
+
+ // Display the results
+    console.log("\nGenerated response:");
+    console.log(result.text);
+    console.log("\nFinish reason:", result.finishReason);
+    console.log("Token usage:", result.usage);
+
+ return {
+ content: [
+ {
+ type: "text",
+ text: `Result: ${result.text}`,
+ },
+ ],
+ };
+ }
+
+ throw new Error(`Unknown tool: ${request.params.name}`);
+});
const transport = new StdioServerTransport();
await server.connect(transport);
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts b/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts
index 3a7e21d..4c52ad9 100644
--- a/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts
+++ b/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts
@@ -9,171 +9,180 @@
*/
import process from "node:process";
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
+import {
+ CallToolRequestSchema,
+ ListToolsRequestSchema,
+} from "@modelcontextprotocol/sdk/types.js";
import { createMCPSamplingProvider } from "../mod.ts";
import { stepCountIs, streamText } from "ai";
import { z } from "zod";
-import { mcpc } from "../../core/mod.ts";
-import type { ComposableMCPServer } from "../../core/mod.ts";
-
-// Create a simple MCPC server with sampling capability
-const server = await mcpc(
- [
- { name: "ai-sdk-example", version: "1.0.0" },
- { capabilities: { sampling: {}, tools: {} } },
- ],
- [],
- (server: ComposableMCPServer) => {
- // Register a simple tool that streams text
- server.tool(
- "stream-greeting",
- "Stream a greeting message using AI SDK",
- { type: "object", properties: {} },
- async () => {
-        console.log("Streaming text with AI SDK...\n");
-
- // Create MCP sampling provider
- const provider = createMCPSamplingProvider({ server });
-
- // Use streamText with the provider
- const result = streamText({
- model: provider.languageModel("copilot/gpt-5-mini"),
- prompt: "Write a short poem about coding.",
- });
-
- // Stream the text chunks
-      console.log("Streaming response:");
- for await (const chunk of result.textStream) {
- process.stdout.write(chunk);
- }
- console.log("\n");
-
- // Display final results after streaming completes
-      console.log("\nFinish reason:", await result.finishReason);
-      console.log("Token usage:", await result.usage);
-
- return {
- content: [{ type: "text", text: await result.text }],
- };
+
+// Create a simple MCP server with sampling capability
+const server = new Server(
+ { name: "ai-sdk-example", version: "1.0.0" },
+ { capabilities: { sampling: {}, tools: {} } },
+);
+
+// Register tools
+server.setRequestHandler(ListToolsRequestSchema, () => {
+ return {
+ tools: [
+ {
+ name: "stream-greeting",
+ description: "Stream a greeting message using AI SDK",
+ inputSchema: { type: "object", properties: {} },
},
- );
-
- // Register a tool that tests tool calling with streaming
- server.tool(
- "stream-with-tools",
- "Stream text generation with tool calls",
- { type: "object", properties: {} },
- async () => {
-        console.log("Testing streaming with tool calls...\n");
-
- // Create MCP sampling provider
- const provider = createMCPSamplingProvider({ server });
-
- // Stream with tools
- const result = streamText({
- model: provider.languageModel("copilot/gpt-5-mini"),
- stopWhen: stepCountIs(5),
- prompt:
- "Calculate 25 + 17 using the calculator tool, then explain the result.",
- tools: {
- calculator: {
- description: "Perform mathematical calculations",
- inputSchema: z.object({
- operation: z
- .enum(["add", "subtract", "multiply", "divide"])
- .describe("The math operation to perform"),
- a: z.number().describe("First number"),
- b: z.number().describe("Second number"),
- }),
- execute: (params: {
- operation: string;
- a: number;
- b: number;
- }) => {
- console.log(
-              `\nExecuting calculator: ${params.operation}(${params.a}, ${params.b})`,
- );
- switch (params.operation) {
- case "add":
- return { result: params.a + params.b };
- case "subtract":
- return { result: params.a - params.b };
- case "multiply":
- return { result: params.a * params.b };
- case "divide":
- return { result: params.a / params.b };
- default:
- throw new Error("Unsupported operation");
- }
- },
- },
- },
- });
-
- // Stream the text chunks and collect tool info
-      console.log("\nStreaming response:");
- let chunkCount = 0;
- const collectedToolCalls: any[] = [];
- const collectedToolResults: any[] = [];
-
- for await (const chunk of result.fullStream) {
- chunkCount++;
- console.log(`Chunk ${chunkCount}:`, JSON.stringify(chunk));
-
- // Collect tool calls
- if (chunk.type === "tool-call") {
- collectedToolCalls.push(chunk);
- }
-
- // Collect tool results
- if (chunk.type === "tool-result") {
- collectedToolResults.push(chunk);
- }
- }
-
-      console.log(`\nTotal chunks received: ${chunkCount}`);
-
- // Display collected tool calls
- if (collectedToolCalls.length > 0) {
-        console.log("\nTool Calls:");
- for (const call of collectedToolCalls) {
- console.log(` - ${call.toolName}:`, JSON.stringify(call.input));
- }
- }
-
- // Display collected tool results
- if (collectedToolResults.length > 0) {
-        console.log("\nTool Results:");
- for (const toolResult of collectedToolResults) {
+ {
+ name: "stream-with-tools",
+ description: "Stream text generation with tool calls",
+ inputSchema: { type: "object", properties: {} },
+ },
+ ],
+ };
+});
+
+server.setRequestHandler(CallToolRequestSchema, async (request) => {
+ if (request.params.name === "stream-greeting") {
+    console.log("Streaming text with AI SDK...\n");
+
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
+
+ // Use streamText with the provider
+ const result = streamText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ prompt: "Write a short poem about coding.",
+ });
+
+ // Stream the text chunks
+    console.log("Streaming response:");
+ for await (const chunk of result.textStream) {
+ process.stdout.write(chunk);
+ }
+ console.log("\n");
+
+ // Display final results after streaming completes
+    console.log("\nFinish reason:", await result.finishReason);
+    console.log("Token usage:", await result.usage);
+
+ return {
+ content: [{ type: "text", text: await result.text }],
+ };
+ }
+
+ if (request.params.name === "stream-with-tools") {
+    console.log("Testing streaming with tool calls...\n");
+
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
+
+ // Stream with tools
+ const result = streamText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ stopWhen: stepCountIs(5),
+ prompt:
+ "Calculate 25 + 17 using the calculator tool, then explain the result.",
+ tools: {
+ calculator: {
+ description: "Perform mathematical calculations",
+ inputSchema: z.object({
+ operation: z
+ .enum(["add", "subtract", "multiply", "divide"])
+ .describe("The math operation to perform"),
+ a: z.number().describe("First number"),
+ b: z.number().describe("Second number"),
+ }),
+ execute: (params: {
+ operation: string;
+ a: number;
+ b: number;
+ }) => {
console.log(
- ` - ${toolResult.toolName}:`,
- JSON.stringify(toolResult.output),
+              `\nExecuting calculator: ${params.operation}(${params.a}, ${params.b})`,
);
- }
- }
-
- // Display final results
- console.log("\nā
Finish reason:", await result.finishReason);
- console.log("ā
Token usage:", await result.usage);
-
- const finalText = await result.text;
-
- return {
- content: [
- {
- type: "text",
- text: `Results:\n` +
- `- Text: ${finalText || "(no text generated)"}\n` +
- `- Tool calls: ${collectedToolCalls.length}\n` +
- `- Tool results: ${collectedToolResults.length}\n` +
- `- Finish reason: ${await result.finishReason}`,
- },
- ],
- };
+ switch (params.operation) {
+ case "add":
+ return { result: params.a + params.b };
+ case "subtract":
+ return { result: params.a - params.b };
+ case "multiply":
+ return { result: params.a * params.b };
+ case "divide":
+ return { result: params.a / params.b };
+ default:
+ throw new Error("Unsupported operation");
+ }
+ },
+ },
},
- );
- },
-);
+ });
+
+ // Stream the text chunks and collect tool info
+    console.log("\nStreaming response:");
+ let chunkCount = 0;
+ const collectedToolCalls: any[] = [];
+ const collectedToolResults: any[] = [];
+
+ for await (const chunk of result.fullStream) {
+ chunkCount++;
+ console.log(`Chunk ${chunkCount}:`, JSON.stringify(chunk));
+
+ // Collect tool calls
+ if (chunk.type === "tool-call") {
+ collectedToolCalls.push(chunk);
+ }
+
+ // Collect tool results
+ if (chunk.type === "tool-result") {
+ collectedToolResults.push(chunk);
+ }
+ }
+
+    console.log(`\nTotal chunks received: ${chunkCount}`);
+
+ // Display collected tool calls
+ if (collectedToolCalls.length > 0) {
+      console.log("\nTool Calls:");
+ for (const call of collectedToolCalls) {
+ console.log(` - ${call.toolName}:`, JSON.stringify(call.input));
+ }
+ }
+
+ // Display collected tool results
+ if (collectedToolResults.length > 0) {
+      console.log("\nTool Results:");
+ for (const toolResult of collectedToolResults) {
+ console.log(
+ ` - ${toolResult.toolName}:`,
+ JSON.stringify(toolResult.output),
+ );
+ }
+ }
+
+ // Display final results
+    console.log("\nFinish reason:", await result.finishReason);
+    console.log("Token usage:", await result.usage);
+
+ const finalText = await result.text;
+
+ return {
+ content: [
+ {
+ type: "text",
+ text: `Results:\n` +
+ `- Text: ${finalText || "(no text generated)"}\n` +
+ `- Tool calls: ${collectedToolCalls.length}\n` +
+ `- Tool results: ${collectedToolResults.length}\n` +
+ `- Finish reason: ${await result.finishReason}`,
+ },
+ ],
+ };
+ }
+
+ throw new Error(`Unknown tool: ${request.params.name}`);
+});
const transport = new StdioServerTransport();
await server.connect(transport);
From c5324b727fdd3b0d468fc843b24510932a0b2f2a Mon Sep 17 00:00:00 2001
From: yao <63141491+yaonyan@users.noreply.github.com>
Date: Tue, 7 Oct 2025 01:24:27 +0800
Subject: [PATCH 3/4] feat: migrate MCP sampling provider to new package
structure
- Removed the old `@mcpc/ai-sdk-mcp-sampling-provider` package and its README.
- Introduced `@mcpc/mcp-sampling-ai-provider` package with a new README and examples.
- Implemented core functionality for the MCP sampling provider, including language model integration.
- Added support for structured data generation and streaming text generation examples.
- Created tests for the MCP provider to ensure functionality and reliability.
- Updated Deno configuration for the new package.
---
deno.json | 2 +-
deno.lock | 22 +-
.../ai-sdk-mcp-sampling-provider/README.md | 222 ------------------
packages/mcp-sampling-ai-provider/README.md | 155 ++++++++++++
.../deno.json | 2 +-
.../examples/generate_object_example.ts | 0
.../examples/generate_text_example.ts | 0
.../examples/stream_text_example.ts | 0
.../mod.ts | 17 +-
.../src/language-model.ts | 0
.../src/provider.ts | 16 +-
.../tests/provider.test.ts | 0
12 files changed, 181 insertions(+), 255 deletions(-)
delete mode 100644 packages/ai-sdk-mcp-sampling-provider/README.md
create mode 100644 packages/mcp-sampling-ai-provider/README.md
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/deno.json (91%)
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/examples/generate_object_example.ts (100%)
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/examples/generate_text_example.ts (100%)
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/examples/stream_text_example.ts (100%)
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/mod.ts (66%)
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/src/language-model.ts (100%)
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/src/provider.ts (84%)
rename packages/{ai-sdk-mcp-sampling-provider => mcp-sampling-ai-provider}/tests/provider.test.ts (100%)
diff --git a/deno.json b/deno.json
index b0cc006..63f26be 100644
--- a/deno.json
+++ b/deno.json
@@ -8,7 +8,7 @@
"./packages/utils",
"./packages/core",
"./packages/cli",
- "./packages/ai-sdk-mcp-sampling-provider"
+ "./packages/mcp-sampling-ai-provider"
],
"imports": {
"@es-toolkit/es-toolkit": "jsr:@es-toolkit/es-toolkit@^1.37.2",
diff --git a/deno.lock b/deno.lock
index 11dd7e4..1396734 100644
--- a/deno.lock
+++ b/deno.lock
@@ -1300,17 +1300,6 @@
"npm:json-schema-traverse@1"
],
"members": {
- "packages/ai-sdk-mcp-sampling-provider": {
- "dependencies": [
- "jsr:@mcpc/core@0.2",
- "jsr:@std/assert@1",
- "npm:@ai-sdk/provider-utils@^2.2.8",
- "npm:@ai-sdk/provider@2",
- "npm:@modelcontextprotocol/sdk@^1.8.0",
- "npm:ai@^5.0.60",
- "npm:zod@^3.24.2"
- ]
- },
"packages/cli": {
"dependencies": [
"jsr:@mcpc/core@~0.2.0-beta.1",
@@ -1346,6 +1335,17 @@
"npm:zod@^3.24.2"
]
},
+ "packages/mcp-sampling-ai-provider": {
+ "dependencies": [
+ "jsr:@mcpc/core@0.2",
+ "jsr:@std/assert@1",
+ "npm:@ai-sdk/provider-utils@^2.2.8",
+ "npm:@ai-sdk/provider@2",
+ "npm:@modelcontextprotocol/sdk@^1.8.0",
+ "npm:ai@^5.0.60",
+ "npm:zod@^3.24.2"
+ ]
+ },
"packages/utils": {
"dependencies": [
"jsr:@std/http@^1.0.14",
diff --git a/packages/ai-sdk-mcp-sampling-provider/README.md b/packages/ai-sdk-mcp-sampling-provider/README.md
deleted file mode 100644
index 2754515..0000000
--- a/packages/ai-sdk-mcp-sampling-provider/README.md
+++ /dev/null
@@ -1,222 +0,0 @@
-# @mcpc/ai-sdk-mcp-sampling-provider
-
-AI SDK LanguageModelV2 provider for MCP (Model Context Protocol) servers with
-sampling capabilities.
-
-## Overview
-
-This package provides an AI SDK LanguageModelV2 provider that allows you to use
-MCP servers with sampling capabilities through the
-[AI SDK](https://ai-sdk.dev/)'s standard provider interface. This enables you to
-leverage AI SDK's agent capabilities with MCP servers.
-
-## Benefits
-
-- **LanguageModelV2 Support**: Uses the latest AI SDK v2 specification
-- **Direct Server Integration**: Works directly with MCP Server instances
-- **Standardized interface**: Work with MCP through the familiar AI SDK provider
- pattern
-- **MCP sampling integration**: Leverage MCP's createMessage capabilities for
- agentic workflows
-- **Easy migration**: Switch between different LLM providers and MCP servers
- seamlessly
-
-## Installation
-
-```bash
-# Using Deno
-deno add @mcpc/ai-sdk-mcp-sampling-provider
-
-# Using npm
-npm install @mcpc/ai-sdk-mcp-sampling-provider
-```
-
-## Usage
-
-### Basic Example with MCPC
-
-```typescript
-import { createMCPSamplingProvider } from "@mcpc/ai-sdk-mcp-sampling-provider";
-import { generateText } from "ai";
-import { mcpc } from "@mcpc/core";
-import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
-
-// Create MCPC server with sampling capability
-const server = await mcpc(
- [
- { name: "my-agent", version: "1.0.0" },
- { capabilities: { tools: {}, sampling: {} } },
- ],
- [
- {
- name: "my-agent",
- description: "An agent that uses tools",
- options: { sampling: true },
- },
- ],
-);
-
-// Create provider from server
-const provider = createMCPSamplingProvider({
- server: server,
-});
-
-// Use with AI SDK
-const result = await generateText({
- model: provider.languageModel("my-agent"),
- prompt: "What can you help me with?",
-});
-
-console.log(result.text);
-```
-
-### Agentic Example with Tool Dependencies
-
-```typescript
-import { createMCPSamplingProvider } from "@mcpc/ai-sdk-mcp-sampling-provider";
-import { generateText } from "ai";
-import { mcpc } from "@mcpc/core";
-
-// Create MCPC server with sampling capability and agentic tools
-const server = await mcpc(
- [
- { name: "file-processor", version: "1.0.0" },
- {
- capabilities: {
- tools: {},
- sampling: {},
- },
- },
- ],
- [
- {
- name: "file-processor",
-      description: "Process files using available tools.",
- deps: {
- mcpServers: {
- filesystem: {
- command: "npx",
- args: ["-y", "@modelcontextprotocol/server-filesystem"],
- transportType: "stdio",
- },
- },
- },
- options: {
- mode: "agentic",
- sampling: true,
- },
- },
- ],
-);
-
-// Create provider from server
-const provider = createMCPSamplingProvider({
- server: server,
-});
-
-// Use with AI SDK
-const result = await generateText({
- model: provider.languageModel("file-processor"),
- prompt: "Read the contents of package.json",
-});
-```
-
-### Streaming
-
-```typescript
-import { streamText } from "ai";
-
-const result = await streamText({
- model: provider.languageModel("my-agent"),
- prompt: "Tell me a story",
-});
-
-for await (const chunk of result.textStream) {
- process.stdout.write(chunk);
-}
-```
-
-**Note**: MCP createMessage doesn't natively support streaming, so the
-implementation returns the full response as a single chunk. True streaming
-support would require server-side implementation.
-
-## API Reference
-
-### `createMCPSamplingProvider(config: MCPProviderConfig): MCPProvider`
-
-Creates an MCP sampling provider instance.
-
-**Parameters:**
-
-- `config.server` - MCP Server instance with sampling capability (via
- `createMessage`)
-- `config.modelId` - Optional default model ID
-- `config.headers` - Optional headers for requests
-- `config.baseUrl` - Optional base URL for display purposes
-
-**Returns:** An `MCPProvider` instance with `languageModel()` and `call()`
-methods.
-
-### `createSamplingProvider(config: MCPProviderConfig): (modelId: string) => LanguageModelV2`
-
-Creates a function that directly returns language models (convenient shorthand).
-
-**Parameters:** Same as `createMCPSamplingProvider`
-
-**Returns:** A function that takes a `modelId` and returns a `LanguageModelV2`
-instance.
-
-### `MCPProvider.languageModel(modelId: string, options?: MCPProviderOptions): LanguageModelV2`
-
-Creates a language model instance for a specific MCP tool/agent.
-
-**Parameters:**
-
-- `modelId` - The MCP tool name to use as the language model
-- `options.headers` - Optional headers override
-
-**Returns:** LanguageModelV2 instance compatible with AI SDK
-
-## How It Works
-
-The provider implements AI SDK's `LanguageModelV2` interface by:
-
-1. Converting AI SDK messages to MCP sampling format
-2. Calling the MCP server's `sampling/createMessage` method
-3. Converting MCP responses back to AI SDK format
-4. Mapping MCP stop reasons to AI SDK finish reasons
-
-The `modelId` you provide to the provider corresponds to an MCP tool name that
-supports sampling (typically an agentic or workflow tool created with MCPC).
-
-## Limitations
-
-- **Token counting**: MCP doesn't provide token counts, so usage reports will be
- 0
-- **Streaming**: MCP sampling doesn't natively support streaming; the stream
- implementation returns the complete response as a single chunk
-- **Tool calls**: Currently focuses on text generation; tool call support would
- require additional MCP protocol extensions
-
-## Related
-
-- [AI SDK Documentation](https://ai-sdk.dev/)
-- [AI SDK Providers](https://ai-sdk.dev/providers/ai-sdk-providers)
-- [MCP Specification](https://modelcontextprotocol.io/)
-- [MCPC Framework](https://github.com/mcpc-tech/mcpc)
-
-## License
-
-MIT
diff --git a/packages/mcp-sampling-ai-provider/README.md b/packages/mcp-sampling-ai-provider/README.md
new file mode 100644
index 0000000..256f35d
--- /dev/null
+++ b/packages/mcp-sampling-ai-provider/README.md
@@ -0,0 +1,155 @@
+# @mcpc/mcp-sampling-ai-provider
+
+AI SDK provider that enables MCP servers to act as language models through the
+[AI SDK](https://ai-sdk.dev/) interface.
+
+## Overview
+
+This package bridges MCP servers with AI SDK by implementing the LanguageModelV2
+interface. It allows any MCP server with sampling capabilities to be used as a
+language model in AI SDK applications.
+
+## Installation
+
+```bash
+# deno
+deno add jsr:@mcpc/mcp-sampling-ai-provider
+
+# npm (from jsr)
+npx jsr add @mcpc/mcp-sampling-ai-provider
+
+# pnpm (from jsr)
+pnpm add jsr:@mcpc/mcp-sampling-ai-provider
+```
+
+## Usage
+
+### Basic Example
+
+```typescript
+import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+import {
+ CallToolRequestSchema,
+ ListToolsRequestSchema,
+} from "@modelcontextprotocol/sdk/types.js";
+import { createMCPSamplingProvider } from "@mcpc/mcp-sampling-ai-provider";
+import { generateText } from "ai";
+
+// Create MCP server with sampling capability
+const server = new Server(
+ { name: "my-agent", version: "1.0.0" },
+ { capabilities: { sampling: {}, tools: {} } },
+);
+
+// Register tools
+server.setRequestHandler(ListToolsRequestSchema, () => ({
+ tools: [
+ {
+ name: "greet",
+ description: "Generate a greeting",
+ inputSchema: { type: "object", properties: {} },
+ },
+ ],
+}));
+
+server.setRequestHandler(CallToolRequestSchema, async (request) => {
+ if (request.params.name === "greet") {
+ return {
+ content: [{ type: "text", text: "Hello from MCP!" }],
+ };
+ }
+ throw new Error(`Unknown tool: ${request.params.name}`);
+});
+
+// Create AI SDK provider from MCP server
+const provider = createMCPSamplingProvider({ server });
+
+// Use with AI SDK
+const result = await generateText({
+ model: provider.languageModel("copilot/gpt-4"),
+ prompt: "Say hello",
+});
+
+console.log(result.text);
+```
+
+### Streaming Example
+
+```typescript
+import { streamText } from "ai";
+
+const result = streamText({
+ model: provider.languageModel("copilot/gpt-4"),
+ prompt: "Write a short story",
+});
+
+for await (const chunk of result.textStream) {
+ process.stdout.write(chunk);
+}
+```
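+
+Note: MCP sampling returns complete responses (see Limitations below), so the
+stream currently yields the full text as a single chunk.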
+
+### Object Generation Example
+
+```typescript
+import { generateObject } from "ai";
+import { z } from "zod";
+
+const result = await generateObject({
+ model: provider.languageModel("copilot/gpt-4"),
+ schema: z.object({
+ name: z.string(),
+ age: z.number(),
+ }),
+ prompt: "Generate a person's information",
+});
+
+console.log(result.object);
+```
+
+## API
+
+### `createMCPSamplingProvider(config)`
+
+Creates an MCP sampling provider for use with AI SDK.
+
+**Parameters:**
+
+- `config.server` - MCP Server instance with sampling capability
+- `config.modelId` - (Optional) Default model ID
+- `config.headers` - (Optional) Request headers
+- `config.baseUrl` - (Optional) Base URL for display
+
+**Returns:** Provider with `languageModel(modelId)` method
+
+### `provider.languageModel(modelId)`
+
+Returns a LanguageModelV2 instance for the specified model.
+
+**Parameters:**
+
+- `modelId` - Model identifier (e.g., "copilot/gpt-4")
+
+**Returns:** LanguageModelV2 compatible with AI SDK
+
+## How It Works
+
+1. Converts AI SDK messages to MCP `sampling/createMessage` format (see the
+   sketch below)
+2. Calls MCP server's sampling endpoint
+3. Converts MCP response back to AI SDK format
+4. Maps stop reasons between protocols
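+
+A minimal sketch of steps 1 and 4, assuming simplified message shapes (the
+actual conversion lives in `src/language-model.ts` and handles richer content
+types):
+
+```typescript
+// Simplified, assumed shapes -- the real AI SDK prompt type is richer.
+type AiSdkMessage = {
+  role: "system" | "user" | "assistant";
+  content: Array<{ type: "text"; text: string }>;
+};
+
+type McpSamplingMessage = {
+  role: "user" | "assistant";
+  content: { type: "text"; text: string };
+};
+
+// Step 1: AI SDK prompt -> MCP sampling/createMessage params.
+function toSamplingRequest(prompt: AiSdkMessage[]): {
+  systemPrompt?: string;
+  messages: McpSamplingMessage[];
+} {
+  // MCP carries the system prompt separately from the message list.
+  const systemPrompt = prompt
+    .filter((m) => m.role === "system")
+    .map((m) => m.content.map((p) => p.text).join(""))
+    .join("\n") || undefined;
+
+  const messages = prompt
+    .filter((m) => m.role !== "system")
+    .map((m) => ({
+      role: m.role as "user" | "assistant",
+      // Flatten text parts into the single text content MCP expects.
+      content: {
+        type: "text" as const,
+        text: m.content.map((p) => p.text).join(""),
+      },
+    }));
+
+  return { systemPrompt, messages };
+}
+
+// Step 4: MCP stop reasons ("endTurn" | "stopSequence" | "maxTokens")
+// -> AI SDK finish reasons.
+function mapStopReason(stopReason?: string): "stop" | "length" | "unknown" {
+  switch (stopReason) {
+    case "endTurn":
+    case "stopSequence":
+      return "stop";
+    case "maxTokens":
+      return "length";
+    default:
+      return "unknown";
+  }
+}
+```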
+
+## Limitations
+
+- **Token counting**: MCP doesn't provide token counts (returns 0)
+- **Native streaming**: MCP sampling returns complete responses
+- **Tool calls**: Experimental support, under development
+
+## Related
+
+- [AI SDK](https://ai-sdk.dev/)
+- [MCP Specification](https://modelcontextprotocol.io/)
+- [MCPC Framework](https://github.com/mcpc-tech/mcpc)
+
+## License
+
+MIT
diff --git a/packages/ai-sdk-mcp-sampling-provider/deno.json b/packages/mcp-sampling-ai-provider/deno.json
similarity index 91%
rename from packages/ai-sdk-mcp-sampling-provider/deno.json
rename to packages/mcp-sampling-ai-provider/deno.json
index 5e3c584..5f4b2df 100644
--- a/packages/ai-sdk-mcp-sampling-provider/deno.json
+++ b/packages/mcp-sampling-ai-provider/deno.json
@@ -1,5 +1,5 @@
{
- "name": "@mcpc/mcp-sampling-provider",
+ "name": "@mcpc/mcp-sampling-ai-provider",
"version": "0.1.1",
"exports": {
".": "./mod.ts"
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts b/packages/mcp-sampling-ai-provider/examples/generate_object_example.ts
similarity index 100%
rename from packages/ai-sdk-mcp-sampling-provider/examples/generate_object_example.ts
rename to packages/mcp-sampling-ai-provider/examples/generate_object_example.ts
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts b/packages/mcp-sampling-ai-provider/examples/generate_text_example.ts
similarity index 100%
rename from packages/ai-sdk-mcp-sampling-provider/examples/generate_text_example.ts
rename to packages/mcp-sampling-ai-provider/examples/generate_text_example.ts
diff --git a/packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts b/packages/mcp-sampling-ai-provider/examples/stream_text_example.ts
similarity index 100%
rename from packages/ai-sdk-mcp-sampling-provider/examples/stream_text_example.ts
rename to packages/mcp-sampling-ai-provider/examples/stream_text_example.ts
diff --git a/packages/ai-sdk-mcp-sampling-provider/mod.ts b/packages/mcp-sampling-ai-provider/mod.ts
similarity index 66%
rename from packages/ai-sdk-mcp-sampling-provider/mod.ts
rename to packages/mcp-sampling-ai-provider/mod.ts
index 1d51335..777e101 100644
--- a/packages/ai-sdk-mcp-sampling-provider/mod.ts
+++ b/packages/mcp-sampling-ai-provider/mod.ts
@@ -5,26 +5,21 @@
* server's createMessage capability. It allows you to use MCP servers with sampling through
* the AI SDK's standard provider interface.
*
- * Benefits:
- * - Use MCP servers directly with AI SDK
- * - LanguageModelV2 specification support
- * - Compatible with AI SDK tools and workflows
- *
* @example
* ```typescript
- * import { createMCPSamplingProvider } from "@mcpc/ai-sdk-mcp-sampling-provider";
+ * import { createMCPSamplingProvider } from "@mcpc/mcp-sampling-ai-provider";
* import { generateText } from "ai";
- * import { mcpc } from "@mcpc/core";
+ * import { Server } from "@modelcontextprotocol/sdk/server/index.js";
*
- * const server = await mcpc(
- * [{ name: "my-agent", version: "1.0.0" }, { capabilities: { sampling: {} } }],
- * [{ name: "my-agent", description: "...", options: { sampling: true } }]
+ * const server = new Server(
+ * { name: "my-agent", version: "1.0.0" },
+ * { capabilities: { sampling: {}, tools: {} } }
* );
*
* const provider = createMCPSamplingProvider({ server });
*
* const result = await generateText({
- * model: provider.languageModel("my-agent"),
+ * model: provider.languageModel("copilot/gpt-4"),
* prompt: "Hello, world!"
* });
* ```
diff --git a/packages/ai-sdk-mcp-sampling-provider/src/language-model.ts b/packages/mcp-sampling-ai-provider/src/language-model.ts
similarity index 100%
rename from packages/ai-sdk-mcp-sampling-provider/src/language-model.ts
rename to packages/mcp-sampling-ai-provider/src/language-model.ts
diff --git a/packages/ai-sdk-mcp-sampling-provider/src/provider.ts b/packages/mcp-sampling-ai-provider/src/provider.ts
similarity index 84%
rename from packages/ai-sdk-mcp-sampling-provider/src/provider.ts
rename to packages/mcp-sampling-ai-provider/src/provider.ts
index 0e3c215..2e29dc0 100644
--- a/packages/ai-sdk-mcp-sampling-provider/src/provider.ts
+++ b/packages/mcp-sampling-ai-provider/src/provider.ts
@@ -90,20 +90,18 @@ export class MCPSamplingProvider {
*
* @example
* ```typescript
- * import { createMCPSamplingProvider } from "@mcpc/ai-sdk-mcp-sampling-provider";
- * import { mcpc } from "@mcpc/core";
+ * import { createMCPSamplingProvider } from "@mcpc/mcp-sampling-ai-provider";
+ * import { Server } from "@modelcontextprotocol/sdk/server/index.js";
*
- * const server = await mcpc(
- * [{ name: "my-agent", version: "1.0.0" }, { capabilities: { sampling: {} } }],
- * [{ name: "my-agent", description: "...", options: { sampling: true } }]
+ * const server = new Server(
+ * { name: "my-agent", version: "1.0.0" },
+ * { capabilities: { sampling: {}, tools: {} } }
* );
*
- * const provider = createMCPSamplingProvider({
- * server: server
- * });
+ * const provider = createMCPSamplingProvider({ server });
*
* // Use with AI SDK
- * const model = provider.languageModel("my-agent");
+ * const model = provider.languageModel("copilot/gpt-4");
* ```
*/
export function createMCPSamplingProvider(
diff --git a/packages/ai-sdk-mcp-sampling-provider/tests/provider.test.ts b/packages/mcp-sampling-ai-provider/tests/provider.test.ts
similarity index 100%
rename from packages/ai-sdk-mcp-sampling-provider/tests/provider.test.ts
rename to packages/mcp-sampling-ai-provider/tests/provider.test.ts
From 3f33fe0a663be8f5a69aa9521fe14931cb5a2a2f Mon Sep 17 00:00:00 2001
From: yao <63141491+yaonyan@users.noreply.github.com>
Date: Tue, 7 Oct 2025 01:30:18 +0800
Subject: [PATCH 4/4] feat: update README examples for MCP server and tool
integration
---
packages/mcp-sampling-ai-provider/README.md | 86 ++++++++++++++-------
packages/mcp-sampling-ai-provider/deno.json | 2 +-
2 files changed, 57 insertions(+), 31 deletions(-)
diff --git a/packages/mcp-sampling-ai-provider/README.md b/packages/mcp-sampling-ai-provider/README.md
index 256f35d..639634f 100644
--- a/packages/mcp-sampling-ai-provider/README.md
+++ b/packages/mcp-sampling-ai-provider/README.md
@@ -28,6 +28,7 @@ pnpm add jsr:@mcpc/mcp-sampling-ai-provider
```typescript
import { Server } from "@modelcontextprotocol/sdk/server/index.js";
+import { StdioServerTransport } from "@modelcontextprotocol/sdk/server/stdio.js";
import {
CallToolRequestSchema,
ListToolsRequestSchema,
@@ -35,54 +36,62 @@ import {
import { createMCPSamplingProvider } from "@mcpc/mcp-sampling-ai-provider";
import { generateText } from "ai";
-// Create MCP server with sampling capability
+// Create a simple MCP server with sampling capability
const server = new Server(
- { name: "my-agent", version: "1.0.0" },
+ { name: "ai-sdk-example", version: "1.0.0" },
{ capabilities: { sampling: {}, tools: {} } },
);
// Register tools
-server.setRequestHandler(ListToolsRequestSchema, () => ({
- tools: [
- {
- name: "greet",
- description: "Generate a greeting",
- inputSchema: { type: "object", properties: {} },
- },
- ],
-}));
+server.setRequestHandler(ListToolsRequestSchema, () => {
+ return {
+ tools: [
+ {
+ name: "generate-greeting",
+ description: "Generate a greeting message using AI SDK",
+ inputSchema: { type: "object", properties: {} },
+ },
+ ],
+ };
+});
server.setRequestHandler(CallToolRequestSchema, async (request) => {
- if (request.params.name === "greet") {
+ if (request.params.name === "generate-greeting") {
+ // Create MCP sampling provider
+ const provider = createMCPSamplingProvider({ server });
+
+ // Use generateText with the provider
+ const result = await generateText({
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ prompt: "Say hello!",
+ });
+
return {
- content: [{ type: "text", text: "Hello from MCP!" }],
+ content: [{ type: "text", text: result.text }],
};
}
throw new Error(`Unknown tool: ${request.params.name}`);
});
-// Create AI SDK provider from MCP server
-const provider = createMCPSamplingProvider({ server });
-
-// Use with AI SDK
-const result = await generateText({
- model: provider.languageModel("copilot/gpt-4"),
- prompt: "Say hello",
-});
-
-console.log(result.text);
+const transport = new StdioServerTransport();
+await server.connect(transport);
```
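+
+Note: `sampling/createMessage` requests are fulfilled by the MCP client
+connected over the transport, so that client must support sampling (model IDs
+such as `copilot/gpt-5-mini` name models the client exposes).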
### Streaming Example
```typescript
+import process from "node:process";
import { streamText } from "ai";
+// Inside your tool handler
+const provider = createMCPSamplingProvider({ server });
+
const result = streamText({
- model: provider.languageModel("copilot/gpt-4"),
- prompt: "Write a short story",
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ prompt: "Write a short poem about coding.",
});
+// Stream the text chunks
for await (const chunk of result.textStream) {
process.stdout.write(chunk);
}
@@ -94,13 +103,30 @@ for await (const chunk of result.textStream) {
import { generateObject } from "ai";
import { z } from "zod";
-const result = await generateObject({
- model: provider.languageModel("copilot/gpt-4"),
- schema: z.object({
+// Inside your tool handler
+const provider = createMCPSamplingProvider({ server });
+
+// Define the schema
+const recipeSchema = z.object({
+ recipe: z.object({
name: z.string(),
- age: z.number(),
+ cuisine: z.string(),
+ difficulty: z.enum(["easy", "medium", "hard"]),
+ prepTime: z.string(),
+ cookTime: z.string(),
+ servings: z.number(),
+ ingredients: z.array(z.string()),
+ steps: z.array(z.string()),
+ tips: z.array(z.string()).optional(),
}),
- prompt: "Generate a person's information",
+});
+
+// Use generateObject with the provider
+const result = await generateObject({
+ mode: "json",
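+  // mode: "json" has the model emit raw JSON matching the schema; MCP
+  // sampling exposes no native structured-output channel (see Limitations),
+  // so the schema is enforced via prompting.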
+ model: provider.languageModel("copilot/gpt-5-mini"),
+ schema: recipeSchema,
+ prompt: "Generate a delicious lasagna recipe.",
});
console.log(result.object);
diff --git a/packages/mcp-sampling-ai-provider/deno.json b/packages/mcp-sampling-ai-provider/deno.json
index 5f4b2df..3e30bc3 100644
--- a/packages/mcp-sampling-ai-provider/deno.json
+++ b/packages/mcp-sampling-ai-provider/deno.json
@@ -1,6 +1,6 @@
{
"name": "@mcpc/mcp-sampling-ai-provider",
- "version": "0.1.1",
+ "version": "0.1.2",
"exports": {
".": "./mod.ts"
},