Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
81 commits
Select commit Hold shift + click to select a range
66af47d
Enhance documentation tools integration
mantrakp04 Mar 23, 2026
30d53e8
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Mar 23, 2026
5078747
Enhance error handling and API response for documentation tools
mantrakp04 Mar 24, 2026
aaf49db
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Mar 24, 2026
844e916
Refactor askStackAuth key to ask_stack_auth in API documentation
mantrakp04 Mar 24, 2026
274c742
fix: register private submodule gitlink in the index
mantrakp04 Mar 25, 2026
c7a3cca
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Mar 25, 2026
ef2289f
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Apr 3, 2026
d8065c4
Update environment configurations and remove internal secret validati…
mantrakp04 Apr 4, 2026
3b27eee
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Apr 6, 2026
b82efa4
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Apr 6, 2026
158498b
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Apr 8, 2026
b22d4b0
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Apr 9, 2026
95ca0a2
initial commit
aadesh18 Apr 10, 2026
fbab066
Merge remote-tracking branch 'origin/dario-likes-mcps' into llm-mcp-flow
aadesh18 Apr 10, 2026
73152a1
pnpm lock
aadesh18 Apr 10, 2026
e16040c
changed port
aadesh18 Apr 10, 2026
a07dbab
spacetime db ci change
aadesh18 Apr 10, 2026
ef77edc
ci fix
aadesh18 Apr 10, 2026
84dffa2
security fix
aadesh18 Apr 10, 2026
a0486e9
security fixes
aadesh18 Apr 11, 2026
8c596ec
Merge branch 'dev' into dario-likes-mcps
mantrakp04 Apr 12, 2026
ef6963d
Merge branch 'dev' into dario-likes-mcps
N2D4 Apr 12, 2026
1c69185
Merge branch 'dario-likes-mcps' into llm-mcp-flow
aadesh18 Apr 12, 2026
f794bd6
Merge remote-tracking branch 'origin/dev' into llm-mcp-flow
aadesh18 Apr 12, 2026
59a060a
merge error
aadesh18 Apr 13, 2026
0485c73
pr comment changes
aadesh18 Apr 13, 2026
97ee052
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 13, 2026
411f775
bug fix
aadesh18 Apr 13, 2026
c514efd
Merge branch 'llm-mcp-flow' of https://github.com/stack-auth/stack-au…
aadesh18 Apr 13, 2026
516c424
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 13, 2026
b0e3341
pr comments
aadesh18 Apr 13, 2026
a630be1
Merge branch 'llm-mcp-flow' of https://github.com/stack-auth/stack-au…
aadesh18 Apr 13, 2026
8c7bc54
tests failing
aadesh18 Apr 13, 2026
7a54be9
comment changes
aadesh18 Apr 13, 2026
bd3925d
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 13, 2026
ca461d4
tests fix
aadesh18 Apr 13, 2026
224468c
Merge branch 'llm-mcp-flow' of https://github.com/stack-auth/stack-au…
aadesh18 Apr 13, 2026
042e616
tests fix
aadesh18 Apr 13, 2026
149d6d7
fixed the order
aadesh18 Apr 13, 2026
574cc4a
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 13, 2026
3293845
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 13, 2026
d8e99d6
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 14, 2026
fa4c814
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 14, 2026
a4c3306
pr changes
aadesh18 Apr 14, 2026
35739af
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 14, 2026
15e5879
Merge remote-tracking branch 'origin/dev' into llm-mcp-flow
aadesh18 Apr 15, 2026
140ee7e
Merge branch 'dev' into llm-mcp-flow
aadesh18 Apr 15, 2026
afd84bc
minor fix
aadesh18 Apr 15, 2026
b0a329f
initial commit
aadesh18 Apr 15, 2026
c819537
proxy logging implemented
aadesh18 Apr 15, 2026
7a2332f
Merge remote-tracking branch 'origin/dev' into ai-analytics
aadesh18 Apr 15, 2026
83a37d1
pr message fixes
aadesh18 Apr 17, 2026
4fb5154
internal tool security update
aadesh18 Apr 19, 2026
30e3e5c
Merge branch 'dev' into ai-analytics
aadesh18 Apr 19, 2026
edd33b1
added e2e tests
aadesh18 Apr 20, 2026
a43eb11
bot comment
aadesh18 Apr 20, 2026
1ccef9c
Update seed function to preserve existing user metadata when updating…
aadesh18 Apr 20, 2026
4965534
refactor: replace callReducer with callReducerStrict for improved err…
aadesh18 Apr 20, 2026
9ba7b5e
clean up
aadesh18 Apr 20, 2026
ddde9c6
feat: implement timeout for SpacetimeDB HTTP calls to prevent hanging…
aadesh18 Apr 20, 2026
c329a46
fix: improve error handling for missing SpacetimeDB service token in …
aadesh18 Apr 20, 2026
26ce83f
fix: encode URI components in fetch requests to prevent errors with s…
aadesh18 Apr 20, 2026
f9386a8
bot fixes
aadesh18 Apr 20, 2026
dc5ab66
fix: add log token retrieval in getServiceToken function
aadesh18 Apr 20, 2026
2532632
fix: enhance error handling in isSpacetimedbReachable and update priv…
aadesh18 Apr 20, 2026
53a9f2c
fix: update footer separator in ConversationReplay component for impr…
aadesh18 Apr 20, 2026
0eff6b2
bug fix
aadesh18 Apr 20, 2026
170b4fe
fix: refactor MCP review authorization and improve logging mechanisms
aadesh18 Apr 21, 2026
a0bab5d
tests clean up
aadesh18 Apr 21, 2026
3654af5
Merge remote-tracking branch 'origin/dev' into ai-analytics
aadesh18 Apr 21, 2026
dbc7988
bug fix
aadesh18 Apr 21, 2026
d8b7499
Custom Dashboard Improvements (#1359)
aadesh18 Apr 24, 2026
331d208
Update backend environment variables and refactor AI query route imports
aadesh18 Apr 28, 2026
49d2c04
Enhance image attachment validation in AI query route
aadesh18 Apr 28, 2026
60c538b
Add context to system prompt in AI query route
aadesh18 Apr 28, 2026
0aea8ef
edited comment
aadesh18 Apr 28, 2026
39facf4
added comment
aadesh18 Apr 28, 2026
45ff5f2
aman comment changes
aadesh18 May 4, 2026
89d43b1
Merge remote-tracking branch 'origin/dev' into ai-analytics
aadesh18 May 4, 2026
971bad9
merge changes
aadesh18 May 4, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 2 additions & 1 deletion apps/backend/.env
Original file line number Diff line number Diff line change
Expand Up @@ -120,6 +120,7 @@ STACK_TELEGRAM_CHAT_ID=# enter your telegram chat id
STACK_MINTLIFY_MCP_URL=# override the Mintlify MCP server used by the backend's AI docs tool bundle. Defaults to https://stackauth-e0affa27.mintlify.app/mcp

# MCP review tool (SpacetimeDB)
STACK_SPACETIMEDB_URI=# SpacetimeDB host URI; default empty (logging disabled)
STACK_SPACETIMEDB_URL=# SpacetimeDB host URL; default empty (logging disabled)
STACK_SPACETIMEDB_DB_NAME=# SpacetimeDB database name
STACK_MCP_LOG_TOKEN=# shared secret gating the log_mcp_call reducer; must match EXPECTED_LOG_TOKEN in apps/internal-tool/spacetimedb/src/index.ts
STACK_SPACETIMEDB_SERVICE_TOKEN=# backend's SpacetimeDB-minted identity token
6 changes: 5 additions & 1 deletion apps/backend/.env.development
Original file line number Diff line number Diff line change
Expand Up @@ -114,10 +114,14 @@ STACK_QSTASH_CURRENT_SIGNING_KEY=sig_7kYjw48mhY7kAjqNGcy6cr29RJ6r
STACK_QSTASH_NEXT_SIGNING_KEY=sig_5ZB6DVzB1wjE8S6rZ7eenA8Pdnhs

# MCP review tool (SpacetimeDB)
STACK_SPACETIMEDB_URI=ws://localhost:${NEXT_PUBLIC_STACK_PORT_PREFIX:-81}39
STACK_SPACETIMEDB_URL=http://localhost:${NEXT_PUBLIC_STACK_PORT_PREFIX:-81}39
STACK_SPACETIMEDB_DB_NAME=stack-auth-llm
STACK_MCP_LOG_TOKEN=change-me

# To provision locally: `curl -X POST http://127.0.0.1:${NEXT_PUBLIC_STACK_PORT_PREFIX:-81}39/v1/identity`
# then copy the `token` field from the response.
STACK_SPACETIMEDB_SERVICE_TOKEN=
Comment thread
aadesh18 marked this conversation as resolved.

# Clickhouse
STACK_CLICKHOUSE_URL=http://localhost:${NEXT_PUBLIC_STACK_PORT_PREFIX:-81}36
STACK_CLICKHOUSE_ADMIN_USER=stackframe
Expand Down
13 changes: 13 additions & 0 deletions apps/backend/prisma/seed.ts
Original file line number Diff line number Diff line change
Expand Up @@ -425,6 +425,19 @@ export async function seed() {
}
}

const existingDefaultUser = await usersCrudHandlers.adminRead({
tenancy: internalTenancy,
user_id: defaultUserId,
});
const existingMetadata = (existingDefaultUser.client_read_only_metadata ?? {}) as Record<string, unknown>;
await usersCrudHandlers.adminUpdate({
tenancy: internalTenancy,
user_id: defaultUserId,
data: {
client_read_only_metadata: { ...existingMetadata, isAiChatReviewer: true },
},
});
Comment thread
coderabbitai[bot] marked this conversation as resolved.

// Create or ensure TeamMember exists before granting permissions.
// Using upsert here (instead of create inside the else block above) ensures
// idempotency when adminInternalAccess changes between seed runs.
Expand Down
8 changes: 6 additions & 2 deletions apps/backend/src/app/api/internal/[transport]/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -81,10 +81,14 @@ const handler = createMcpHandler(
.join("\n\n") ??
"";

const responseConversationId = body.conversationId ?? conversationId ?? "";
const responseConversationId = body.conversationId ?? conversationId;
const bodyText = text.length > 0 ? text : "(empty response)";
const fullText = responseConversationId
? `${bodyText}\n\n[conversationId: ${responseConversationId} — pass this value as the conversationId parameter in your next ask_stack_auth call to continue this conversation]`
: bodyText;

return {
content: [{ type: "text", text: `${text.length > 0 ? text : "(empty response)"}\n\n[conversationId: ${responseConversationId} - pass this value as the conversationId parameter in your next ask_stack_auth call to continue this conversation]` }],
content: [{ type: "text", text: fullText }],
};
},
);
Expand Down
218 changes: 89 additions & 129 deletions apps/backend/src/app/api/latest/ai/query/[mode]/route.ts
Comment thread
nams1570 marked this conversation as resolved.
Original file line number Diff line number Diff line change
@@ -1,21 +1,44 @@
import { logMcpCall } from "@/lib/ai/mcp-logger";
import {
assertProjectAccess,
handleGenerateMode,
handleStreamMode,
} from "@/lib/ai/ai-query-handlers";
import type { CommonLogFields, ModeContext } from "@/lib/ai/types";
import { selectModel } from "@/lib/ai/models";
import { getFullSystemPrompt } from "@/lib/ai/prompts";
import { reviewMcpCall } from "@/lib/ai/qa-reviewer";
import { getFullSystemPrompt, type SystemPromptId } from "@/lib/ai/prompts";
import { requestBodySchema } from "@/lib/ai/schema";
import { getTools } from "@/lib/ai/tools";
import { getVerifiedQaContext } from "@/lib/ai/verified-qa";
import { listManagedProjectIds } from "@/lib/projects";
import { getVerifiedQaContext } from "@/lib/ai/qa/verified-qa";
import { SmartResponse } from "@/route-handlers/smart-response";
import { createSmartRouteHandler } from "@/route-handlers/smart-route-handler";
import { runAsynchronouslyAndWaitUntil } from "@/utils/background-tasks";
import { validateImageAttachments } from "@stackframe/stack-shared/dist/ai/image-limits";
import { ChatContent } from "@stackframe/stack-shared/dist/interface/admin-interface";
import { KnownErrors } from "@stackframe/stack-shared/dist/known-errors";
import { yupMixed, yupObject, yupString } from "@stackframe/stack-shared/dist/schema-fields";
import { getEnvVariable } from "@stackframe/stack-shared/dist/utils/env";
import { StatusError } from "@stackframe/stack-shared/dist/utils/errors";
import { Json } from "@stackframe/stack-shared/dist/utils/json";
import { generateText, stepCountIs, streamText, type ModelMessage } from "ai";
import type { ModelMessage } from "ai";

/**
 * Returns the maximum number of agent steps permitted for one AI query.
 *
 * @param systemPromptId - Which system prompt the query runs under.
 * @param hasTools - Whether any tools were resolved for this request.
 * @returns The step cap to pass to the model loop.
 */
function getStepLimit(systemPromptId: SystemPromptId, hasTools: boolean): number {
  // With no tools there is nothing to iterate over, so one step is enough.
  if (!hasTools) return 1;
  switch (systemPromptId) {
    case "docs-ask-ai":
    case "command-center-ask-ai":
      // Docs / command-center flows may need many tool round-trips.
      return 50;
    case "create-dashboard":
      return 12;
    default:
      return 5;
  }
}

/**
 * Assembles the complete system prompt for the given prompt id.
 *
 * For the docs and command-center "ask AI" prompts, the verified QA corpus is
 * appended so the model can see every known-good answer.
 *
 * @param systemPromptId - Identifier of the base system prompt to load.
 * @returns The base prompt, plus the verified QA context for docs/search flows.
 */
async function buildSystemPrompt(systemPromptId: SystemPromptId): Promise<string> {
  const basePrompt = getFullSystemPrompt(systemPromptId);
  const needsQaCorpus =
    systemPromptId === "docs-ask-ai" || systemPromptId === "command-center-ask-ai";
  if (!needsQaCorpus) {
    return basePrompt;
  }
  // Appending the entire verified QA corpus on every request is deliberately
  // naive: the corpus grows monotonically and unchanged content is re-fetched
  // and re-sent each time. Once it is large enough to matter, switch to a
  // retrieval-based approach (e.g. an embedding retriever returning the top-k
  // entries by query similarity) and/or cache the assembled context. At the
  // current corpus size this is fine and lets the model see everything.
  return basePrompt + await getVerifiedQaContext();
}

export const POST = createSmartRouteHandler({
metadata: {
Expand All @@ -34,144 +57,81 @@ export const POST = createSmartRouteHandler({
const { quality, speed, systemPrompt: systemPromptId, tools: toolNames, messages, projectId } = body;

if (projectId != null) {
if (fullReq.auth?.project.id !== "internal") {
throw new StatusError(StatusError.Forbidden, "You do not have access to this project");
}
const user = fullReq.auth.user;
if (user == null) {
throw new StatusError(StatusError.Forbidden, "You do not have access to this project");
}
const managedProjectIds = await listManagedProjectIds(user);
if (!managedProjectIds.includes(projectId)) {
throw new StatusError(StatusError.Forbidden, "You do not have access to this project");
}
await assertProjectAccess(projectId, fullReq.auth);
}

const imageValidationResult = validateImageAttachments(messages);
if (!imageValidationResult.ok) {
throw new StatusError(StatusError.BadRequest, imageValidationResult.reason);
const { failure } = imageValidationResult;
switch (failure.code) {
case "too_many": {
throw new KnownErrors.TooManyImageAttachments(failure.maxImages);
}
case "too_large": {
throw new KnownErrors.ImageAttachmentTooLarge(failure.maxBytes, failure.actualBytes);
}
}
}

const authenticatedApiKey = isAuthenticated
? getEnvVariable("STACK_OPENROUTER_AUTHENTICATED_API_KEY", "")
: "";
const model = selectModel(quality, speed, isAuthenticated, authenticatedApiKey || undefined);
const isDocsOrSearch = systemPromptId === "docs-ask-ai" || systemPromptId === "command-center-ask-ai";
let systemPrompt = getFullSystemPrompt(systemPromptId);
if (isDocsOrSearch) {
systemPrompt += await getVerifiedQaContext();
}
const systemPrompt = await buildSystemPrompt(systemPromptId);
const tools = await getTools(toolNames, { auth: fullReq.auth, targetProjectId: projectId });
const toolsArg = Object.keys(tools).length > 0 ? tools : undefined;
const isCreateDashboard = systemPromptId === "create-dashboard";
const isBuildAnalyticsQuery = systemPromptId === "build-analytics-query";
const stepLimit = toolsArg == null
? 1
: isDocsOrSearch
? 50
: isCreateDashboard
? 12
: isBuildAnalyticsQuery
? 5
: 5;
const stepLimit = getStepLimit(systemPromptId, toolsArg != null);

const correlationId = crypto.randomUUID();
const conversationIdForLog = body.mcpCallMetadata
? body.mcpCallMetadata.conversationId ?? crypto.randomUUID()
: undefined;
const common: CommonLogFields = {
correlationId,
mode,
Comment on lines +89 to +90
Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

P2 mcpCorrelationId is always equal to correlationId

correlationId is the UUID freshly minted for this AI query, and mcpCorrelationId is set to that same value when mcpCallMetadata is present. In logIfMcpToolCall, the MCP call log row is also written with the same correlationId. So ai_query_log.mcpCorrelationId and ai_query_log.correlationId carry identical values, making the field redundant. The field name implies a different ID (the MCP log's own correlation key), which will confuse future maintainers. Either give the MCP call log its own distinct UUID and store that here, or remove the mcpCorrelationId column and rely on the shared correlationId.

Prompt To Fix With AI
This is a comment left during a code review.
Path: apps/backend/src/app/api/latest/ai/query/[mode]/route.ts
Line: 89-90

Comment:
**`mcpCorrelationId` is always equal to `correlationId`**

`correlationId` is the UUID freshly minted for this AI query, and `mcpCorrelationId` is set to that same value when `mcpCallMetadata` is present. In `logIfMcpToolCall`, the MCP call log row is also written with the same `correlationId`. So `ai_query_log.mcpCorrelationId` and `ai_query_log.correlationId` carry identical values, making the field redundant. The field name implies a _different_ ID (the MCP log's own correlation key), which will confuse future maintainers. Either give the MCP call log its own distinct UUID and store that here, or remove the `mcpCorrelationId` column and rely on the shared `correlationId`.

How can I resolve this? If you propose a fix, please make it concise.

systemPromptId,
quality,
speed,
modelId: String(model.modelId),
isAuthenticated,
projectId: projectId ?? undefined,
userId: fullReq.auth?.user?.id,
requestedToolsJson: JSON.stringify(toolNames),
messagesJson: JSON.stringify(messages),
mcpCorrelationId: body.mcpCallMetadata ? correlationId : undefined,
conversationId: conversationIdForLog,
Comment thread
aadesh18 marked this conversation as resolved.
};
const startedAt = performance.now();

const isAnthropic = model.modelId.startsWith("anthropic/");
// Can be optimized: only opt into prompt caching for routes that are hit
// frequently enough to amortize the write.
const systemMessage: ModelMessage = {
role: "system",
content: systemPrompt,
...(isAnthropic && {
providerOptions: {
openrouter: { cacheControl: { type: "ephemeral" } },
},
}),
};
Comment on lines +110 to +116
Copy link
Copy Markdown
Collaborator

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

I want to point out a few things. Here, we're caching for only 5 min (https://openrouter.ai/docs/guides/best-practices/prompt-caching#cache-ttl-options)
If you have a route that isn't hit too often, you're wasting writes to the cache since the cache would be invalidated.

Copy link
Copy Markdown
Collaborator Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

As we discussed, most of the routes are being hit at least once in 5 mins. however in future we should think about not having the cache for the routes that are not being hit too often. added a comment explain the same

// Cast: the schema narrows role and leaves content as unknown, but the
// AI SDK accepts a superset (role: "system" etc.). We've intentionally
// excluded `system` at the schema layer to prevent prompt-injection via
// client-supplied system messages — see schema.ts.
const modelMessages = messages as unknown as ModelMessage[];
const cachedMessages: ModelMessage[] = [systemMessage, ...modelMessages];

if (mode === "stream") {
const result = streamText({
model,
system: systemPrompt,
messages: modelMessages,
tools: toolsArg,
stopWhen: stepCountIs(stepLimit),
});
return {
statusCode: 200,
bodyType: "response" as const,
body: result.toUIMessageStreamResponse(),
};
} else {
const startedAt = Date.now();
const controller = new AbortController();
const timeoutId = setTimeout(() => controller.abort(), 120_000);
const result = await generateText({
model,
system: systemPrompt,
messages: modelMessages,
tools: toolsArg,
abortSignal: controller.signal,
stopWhen: stepCountIs(stepLimit),
}).finally(() => clearTimeout(timeoutId));

const content: ChatContent = result.steps.flatMap((step) => {
const blocks: ChatContent = [];
if (step.text) {
blocks.push({ type: "text", text: step.text });
}
const outById = new Map(step.toolResults.map((r) => [r.toolCallId, r.output as Json]));
for (const call of step.toolCalls) {
blocks.push({
type: "tool-call",
toolName: call.toolName,
toolCallId: call.toolCallId,
args: call.input as Json,
argsText: JSON.stringify(call.input),
result: outById.get(call.toolCallId) ?? null,
});
}
return blocks;
});

let responseConversationId: string | undefined;
if (body.mcpCallMetadata != null) {
const correlationId = crypto.randomUUID();
const conversationId = body.mcpCallMetadata.conversationId ?? crypto.randomUUID();
responseConversationId = conversationId;
const firstUserMessage = messages.find(m => m.role === "user");
const question = typeof firstUserMessage?.content === "string"
? firstUserMessage.content
: JSON.stringify(firstUserMessage?.content ?? "");

const innerToolCallsJson = JSON.stringify(content.filter(b => b.type === "tool-call"));
const ctx: ModeContext = { model, cachedMessages, toolsArg, stepLimit, common, startedAt };
const extras = {
messages,
mcpCallMetadata: body.mcpCallMetadata ?? undefined,
correlationId,
conversationIdForLog,
};

const logPromise = logMcpCall({
correlationId,
toolName: body.mcpCallMetadata.toolName,
reason: body.mcpCallMetadata.reason,
userPrompt: body.mcpCallMetadata.userPrompt,
conversationId,
question,
response: result.text,
stepCount: result.steps.length,
innerToolCallsJson,
durationMs: BigInt(Date.now() - startedAt),
modelId: String(model.modelId),
errorMessage: undefined,
});
runAsynchronouslyAndWaitUntil(logPromise);

runAsynchronouslyAndWaitUntil(reviewMcpCall({
logPromise,
correlationId,
question,
reason: body.mcpCallMetadata.reason,
response: result.text,
}));
}

return {
statusCode: 200,
bodyType: "json" as const,
body: {
content,
finalText: result.text,
conversationId: responseConversationId ?? null,
},
};
if (mode === "stream") {
return handleStreamMode({ ...ctx, ...extras });
}
return await handleGenerateMode({ ...ctx, ...extras });
},
});
Loading
Loading