71 changes: 71 additions & 0 deletions packages/chatbot-server-mongodb-public/src/responsesApi.test.ts
@@ -7,11 +7,14 @@ import {
createOpenAI,
streamText,
generateText,
stepCountIs,
tool,
} from "mongodb-rag-core/aiSdk";
import { CREATE_RESPONSE_ERR_MSG } from "mongodb-chatbot-server";
import { OpenAI } from "mongodb-rag-core/openai";
import { makeTestApp } from "./test/testHelpers";
import { Logger, makeBraintrustLogger } from "mongodb-rag-core/braintrust";
import { z } from "zod";

jest.setTimeout(100 * 1000); // 100 seconds

@@ -414,6 +417,19 @@ describe("Responses API with OpenAI Client", () => {
});

describe("AI SDK integration", () => {
const sampleToolName = "execute-code";
const sampleToolResult = {
result: `[{id: 1, name: "Foo"}, {id: 2, name: "Bar"}]`,
};
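// Stub tool for the multi-step test below: it accepts a `code` string but
// returns the canned sampleToolResult instead of executing anything.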
const sampleTool = tool({
name: sampleToolName,
inputSchema: z.object({
code: z.string(),
}),
execute: async () => {
return sampleToolResult;
},
});
it("Should handle basic text streaming", async () => {
const result = await streamText({
model: aiSDKClient.responses(MONGO_CHAT_MODEL),
@@ -446,6 +462,61 @@ describe("Responses API with OpenAI Client", () => {

expect(resultText.toLowerCase()).toContain("mongodb");
});
it("should support stopWhen with multiple steps", async () => {
const result = streamText({
model: aiSDKClient.responses(MONGO_CHAT_MODEL),
system: `Call the ${sampleToolName} when the user gives you code to execute in the subsequent message.`,
messages: [
{
role: "user",
content: "Code to execute: db.users.find({}).limit(2).toArray()",
},
],

tools: {
[sampleToolName]: sampleTool,
},
stopWhen: [stepCountIs(2)],
toolChoice: {
type: "tool",
toolName: sampleToolName,
},
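// Force the tool call on step 0, then let the model answer freely on step 1.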
prepareStep: ({ stepNumber }) => {
if (stepNumber > 0) {
return {
toolChoice: "auto",
};
}
},
});

const steps = await result.steps;
expect(steps.length).toBe(2);
const toolCallStepContent = steps[0].content;
expect(toolCallStepContent).toHaveLength(2);
const toolCall = toolCallStepContent[0];
expect(toolCall).toMatchObject({
type: "tool-call",
toolName: sampleToolName,
toolCallId: expect.any(String),
input: {
code: expect.any(String),
},
});
const toolResult = toolCallStepContent[1];
expect(toolResult).toMatchObject({
type: "tool-result",
toolCallId: expect.any(String),
output: sampleToolResult,
});
const textStepContent = steps[1].content;
expect(textStepContent).toHaveLength(1);
const text = textStepContent[0];
expect(text).toMatchObject({
type: "text",
text: expect.any(String),
});
});

it("Should throw an error when generating text since we don't support non-streaming generation", async () => {
try {
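For reference, a standalone sketch of the multi-step flow the new stopWhen test exercises: force a tool call on the first step, then let the model answer in text on the second. The base URL, API key, and model name below are illustrative assumptions, not values taken from this PR.

import { createOpenAI, stepCountIs, streamText, tool } from "mongodb-rag-core/aiSdk";
import { z } from "zod";

// Assumed local endpoint and credentials; the test derives these from makeTestApp.
const client = createOpenAI({
  baseURL: "http://localhost:3000/api/v1",
  apiKey: "test-api-key",
});

const executeCode = tool({
  inputSchema: z.object({ code: z.string() }),
  // Hypothetical executor; the test returns a canned result instead.
  execute: async ({ code }) => ({ result: `executed: ${code}` }),
});

const result = streamText({
  model: client.responses("mongodb-chat-latest"), // model name is an assumption
  system: "Call execute-code when the user gives you code to execute.",
  messages: [
    { role: "user", content: "Code to execute: db.users.find({}).limit(2).toArray()" },
  ],
  tools: { "execute-code": executeCode },
  stopWhen: [stepCountIs(2)], // step 1: tool call, step 2: text answer
  toolChoice: { type: "tool", toolName: "execute-code" },
  // Only force the tool on the first step; afterwards, let the model choose.
  prepareStep: ({ stepNumber }) => (stepNumber > 0 ? { toolChoice: "auto" } : undefined),
});

for await (const chunk of result.textStream) {
  process.stdout.write(chunk);
}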
@@ -134,7 +134,7 @@ const FunctionCallSchema = z.object({
arguments: z
.string()
.describe("JSON string of arguments passed to the function tool call"),
- status: z.enum(["in_progress", "completed", "incomplete"]),
+ status: z.enum(["in_progress", "completed", "incomplete"]).optional(),
});

const FunctionCallOutputSchema = z.object({
@@ -147,7 +147,7 @@ const FunctionCallOutputSchema = z.object({
.string()
.describe("Unique ID of the function tool call generated by the model"),
output: z.string().describe("JSON string of the function tool call"),
- status: z.enum(["in_progress", "completed", "incomplete"]),
+ status: z.enum(["in_progress", "completed", "incomplete"]).optional(),
});

const CreateResponseRequestBodySchema = z.object({
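With status now optional on both schemas, a function_call input item that omits it validates, which matches what AI SDK clients send when replaying tool calls. A minimal sketch of the effect; field names other than arguments and status are assumed from the OpenAI Responses API shape, not taken from this diff.

import { z } from "zod";

// Assumed shape mirroring the FunctionCallSchema fields relevant here.
const FunctionCallSchema = z.object({
  type: z.literal("function_call"),
  call_id: z.string(),
  name: z.string(),
  arguments: z.string(),
  status: z.enum(["in_progress", "completed", "incomplete"]).optional(),
});

// Previously this failed because `status` was required; now it parses.
const parsed = FunctionCallSchema.safeParse({
  type: "function_call",
  call_id: "call_abc123",
  name: "execute-code",
  arguments: JSON.stringify({ code: "db.users.find({}).limit(2).toArray()" }),
});
console.log(parsed.success); // true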