Skip to content
4 changes: 4 additions & 0 deletions app/api/mcps/github/[transport]/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,10 @@ async function getGitHubMcpHandler() {
const connections = await oauthService.listConnections({ organizationId: orgId, platform: "github" });
const active = connections.find((c) => c.status === "active");
if (!active) {
const expired = connections.find((c) => c.status === "expired");
if (expired) {
return jsonResult({ connected: false, status: "expired", message: "GitHub connection expired. Please reconnect in Settings > Connections." });
}
return jsonResult({ connected: false, message: "GitHub not connected. Connect in Settings > Connections." });
}
return jsonResult({ connected: true, email: active.email, scopes: active.scopes, linkedAt: active.linkedAt });
Expand Down
9 changes: 8 additions & 1 deletion app/api/mcps/google/[transport]/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,14 @@ async function getGoogleMcpHandler() {
const orgId = getOrgId();
const connections = await oauthService.listConnections({ organizationId: orgId, platform: "google" });
const active = connections.find((c) => c.status === "active");
return jsonResult(active ? { connected: true, email: active.email, scopes: active.scopes } : { connected: false });
if (!active) {
const expired = connections.find((c) => c.status === "expired");
if (expired) {
return jsonResult({ connected: false, status: "expired", message: "Google connection expired. Please reconnect in Settings > Connections." });
}
return jsonResult({ connected: false });
}
return jsonResult({ connected: true, email: active.email, scopes: active.scopes });
} catch (e) { return errorResult(e instanceof Error ? e.message : "Failed"); }
});

Expand Down
9 changes: 8 additions & 1 deletion app/api/mcps/linear/[transport]/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -92,7 +92,14 @@ async function getLinearMcpHandler() {
const orgId = getOrgId();
const connections = await oauthService.listConnections({ organizationId: orgId, platform: "linear" });
const active = connections.find((c) => c.status === "active");
return jsonResult(active ? { connected: true, email: active.email, scopes: active.scopes } : { connected: false });
if (!active) {
const expired = connections.find((c) => c.status === "expired");
if (expired) {
return jsonResult({ connected: false, status: "expired", message: "Linear connection expired. Please reconnect in Settings > Connections." });
}
return jsonResult({ connected: false });
}
return jsonResult({ connected: true, email: active.email, scopes: active.scopes });
} catch (e) {
return errorResult(e instanceof Error ? e.message : "Failed");
}
Expand Down
8 changes: 7 additions & 1 deletion app/api/mcps/notion/[transport]/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -81,7 +81,13 @@ async function getNotionMcpHandler() {
const orgId = getOrgId();
const connections = await oauthService.listConnections({ organizationId: orgId, platform: "notion" });
const active = connections.find((c) => c.status === "active");
if (!active) return jsonResult({ connected: false });
if (!active) {
const expired = connections.find((c) => c.status === "expired");
if (expired) {
return jsonResult({ connected: false, status: "expired", message: "Notion connection expired. Please reconnect in Settings > Connections." });
}
return jsonResult({ connected: false });
}
return jsonResult({ connected: true, email: active.email, scopes: active.scopes });
} catch (e) {
return errorResult(e instanceof Error ? e.message : "Failed");
Expand Down
25 changes: 17 additions & 8 deletions app/api/v1/chat/completions/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -32,6 +32,7 @@ import {
calculateCost,
getProviderFromModel,
normalizeModelName,
getSafeModelParams,
} from "@/lib/pricing";
import { logger } from "@/lib/utils/logger";
import { withRateLimit, RateLimitPresets } from "@/lib/middleware/rate-limit";
Expand Down Expand Up @@ -405,10 +406,7 @@ async function handleStreamingRequest(
) {
const provider = getProviderFromModel(model);

const result = streamText({
model: gateway.languageModel(model),
system: systemPrompt,
messages: await convertToModelMessages(messages),
const safeParams = getSafeModelParams(model, {
temperature: request.temperature,
topP: request.top_p,
frequencyPenalty: request.frequency_penalty,
Expand All @@ -418,6 +416,13 @@ async function handleStreamingRequest(
? request.stop
: [request.stop]
: undefined,
});

const result = streamText({
model: gateway.languageModel(model),
system: systemPrompt,
messages: await convertToModelMessages(messages),
...safeParams,
...(request.max_tokens && { maxOutputTokens: request.max_tokens }),
onFinish: async ({ text, usage }) => {
try {
Expand Down Expand Up @@ -563,10 +568,7 @@ async function handleNonStreamingRequest(
) {
const provider = getProviderFromModel(model);

const result = await generateText({
model: gateway.languageModel(model),
system: systemPrompt,
messages: await convertToModelMessages(messages),
const safeParamsNonStream = getSafeModelParams(model, {
temperature: request.temperature,
topP: request.top_p,
frequencyPenalty: request.frequency_penalty,
Expand All @@ -576,6 +578,13 @@ async function handleNonStreamingRequest(
? request.stop
: [request.stop]
: undefined,
});

const result = await generateText({
model: gateway.languageModel(model),
system: systemPrompt,
messages: await convertToModelMessages(messages),
...safeParamsNonStream,
...(request.max_tokens && { maxOutputTokens: request.max_tokens }),
});

Expand Down
14 changes: 13 additions & 1 deletion app/api/v1/responses/route.ts
Original file line number Diff line number Diff line change
Expand Up @@ -31,6 +31,7 @@ import {
normalizeModelName,
estimateRequestCost,
estimateTokens,
isReasoningModel,
} from "@/lib/pricing";
import { logger } from "@/lib/utils/logger";
import { withRateLimit, RateLimitPresets } from "@/lib/middleware/rate-limit";
Expand Down Expand Up @@ -671,9 +672,20 @@ async function handlePOST(req: NextRequest) {
}

// 6. Forward to Vercel AI Gateway with Groq as preferred provider
// Strip unsupported params for Anthropic models to avoid gateway warnings
const safeRequest = { ...request };
const modelProvider = getProviderFromModel(model);
if (modelProvider === "anthropic") {
delete safeRequest.frequency_penalty;
delete safeRequest.presence_penalty;
}
if (isReasoningModel(model)) {
delete safeRequest.temperature;
}

const providerInstance = getProvider();
const requestWithProvider = {
...request,
...safeRequest,
providerOptions: {
gateway: {
order: ["groq"], // Use Groq as preferred provider
Expand Down
13 changes: 5 additions & 8 deletions lib/eliza/agent-loader.ts
Original file line number Diff line number Diff line change
Expand Up @@ -82,10 +82,10 @@ async function resolveEffectiveMode(
}

// Query document count once - needed for multiple checks and plugin resolution
// Note: no roomId filter — we want agent-level document count across all rooms
const documentCount = await memoriesRepository.countByType(
characterId,
"documents",
characterId,
);

// Already ASSISTANT mode - no upgrade needed
Expand Down Expand Up @@ -287,14 +287,11 @@ export class AgentLoader {
...conditionalPlugins,
];

// Load knowledge plugin for ASSISTANT mode to enable both:
// - Knowledge queries (if documents exist)
// - Uploading new documents (even if none exist yet)
if (options?.hasKnowledge || agentMode === AgentMode.ASSISTANT) {
// Only load knowledge plugin when documents actually exist
// Upload capability is handled separately — no need to init the full plugin
if (options?.hasKnowledge) {
allPluginNames.push("@elizaos/plugin-knowledge");
logger.info(
`[AgentLoader] Loading knowledge plugin - ${options?.hasKnowledge ? "documents found" : "ASSISTANT mode (enables uploads)"}`
);
logger.info("[AgentLoader] Loading knowledge plugin - documents found");
}

for (const pluginName of allPluginNames) {
Expand Down
36 changes: 24 additions & 12 deletions lib/eliza/runtime-factory.ts
Original file line number Diff line number Diff line change
Expand Up @@ -540,8 +540,7 @@ export class RuntimeFactory {
const baseSettings = this.buildSettings(character, context);
const filteredPlugins = this.filterPlugins(plugins);

// Build MCP settings separately - these will be passed via opts.settings
// to avoid being persisted to the database via character.settings
// Build MCP settings from user's OAuth connections
// Pass character.settings to preserve any pre-configured MCP servers
const mcpSettings = this.buildMcpSettings(character.settings || {}, context);

Expand All @@ -552,6 +551,13 @@ export class RuntimeFactory {
elizaLogger.info("[RuntimeFactory] Added MCP plugin for OAuth-connected user");
}

// MCP settings go into character.settings so plugin-mcp can find them
// via runtime.character.settings.mcp (getSetting() drops object types).
// Runtime cache is in-memory only — these won't be persisted to DB.
const settingsWithMcp = mcpSettings.mcp
? { ...baseSettings, mcp: mcpSettings.mcp }
: baseSettings;

// User-specific settings that should NOT be persisted to the database
// These are passed via opts.settings so they're ephemeral per-request
const ephemeralSettings: Record<string, string | boolean | number | Record<string, unknown>> = {
Expand All @@ -563,17 +569,15 @@ export class RuntimeFactory {
ENTITY_ID: context.entityId,
ORGANIZATION_ID: context.organizationId,
IS_ANONYMOUS: context.isAnonymous,
// MCP settings - based on user's OAuth connections
...mcpSettings,
};

// Create runtime with user-specific settings in opts.settings (NOT character.settings)
// runtime.getSetting() checks opts.settings as fallback, and these won't be persisted to DB
// Create runtime with MCP in character.settings (for plugin-mcp)
// and user-specific keys in opts.settings (ephemeral per-request)
const runtime = new AgentRuntime({
character: {
...character,
id: agentId,
settings: baseSettings,
settings: settingsWithMcp,
},
plugins: filteredPlugins,
agentId,
Expand Down Expand Up @@ -638,11 +642,19 @@ export class RuntimeFactory {
charSettings.appPromptConfig = context.appPromptConfig;
}

// MCP settings - injected into character.settings because getSetting() drops objects.
// Must be refreshed per-user since API key headers differ per org.
const mcpSettings = this.buildMcpSettings(runtime.character.settings || {}, context);
if (mcpSettings.mcp) {
charSettings.mcp = mcpSettings.mcp;
} else {
delete charSettings.mcp;
}

// NOTE: The following are NO LONGER mutated here because they're resolved
// dynamically via getSetting() which checks request context first:
// - ELIZAOS_API_KEY / ELIZAOS_CLOUD_API_KEY
// - USER_ID / ENTITY_ID / ORGANIZATION_ID / IS_ANONYMOUS
// - MCP settings (mcp.servers with X-API-Key headers)
//
// See: packages/core/src/runtime.ts getSetting() and
// lib/services/entity-settings/service.ts prefetch()
Expand Down Expand Up @@ -759,8 +771,8 @@ export class RuntimeFactory {
(charSettings.ELIZAOS_CLOUD_EMBEDDING_MODEL as string);
const embeddingDimension = getStaticEmbeddingDimension(embeddingModel);

// Return only character-level settings that are safe to persist
// User-specific settings (API keys, user context, MCP) are passed via opts.settings
// Return character-level settings with stale DB values stripped.
// MCP is stripped here and re-injected fresh by createRuntimeForUser/applyUserContext.
return {
...charSettings,
POSTGRES_URL: process.env.DATABASE_URL!,
Expand All @@ -780,8 +792,8 @@ export class RuntimeFactory {
DEFAULT_IMAGE_MODEL.modelId,
),
...buildElevenLabsSettings(charSettings),
// NOTE: User-specific settings (API keys, user context, MCP) are NOT included here
// They're passed via opts.settings to avoid being persisted to the database
// NOTE: User-specific API keys and context are passed via opts.settings
// MCP is stripped here and re-injected via settingsWithMcp in createRuntimeForUser
...(context.appPromptConfig
? { appPromptConfig: context.appPromptConfig }
: {}),
Expand Down
47 changes: 47 additions & 0 deletions lib/pricing.ts
Original file line number Diff line number Diff line change
Expand Up @@ -163,6 +163,53 @@ export function getProviderFromModel(model: string): string {
return "openai";
}

/**
 * Checks if a model is a reasoning model that doesn't support temperature.
 *
 * Covers Anthropic's claude-opus family and OpenAI's o-series
 * (o1, o3, o4 and suffixed variants such as o1-mini / o4-mini).
 */
export function isReasoningModel(model: string): boolean {
  // normalizeModelName removes any provider prefix (e.g. "openai/o3-mini"
  // -> "o3-mini") so the checks below see the bare model name.
  const name = normalizeModelName(model);
  return (
    name.startsWith("claude-opus") ||
    // Anchored so names like "omni-*" don't match: the o-series model
    // must be exactly "oN" or "oN-<variant>".
    /^o[134](-|$)/.test(name)
  );
}

/**
 * Returns provider-safe model parameters by stripping unsupported settings.
 * Anthropic doesn't support frequencyPenalty or presencePenalty.
 * Reasoning models (claude-opus, o1, o3) don't support temperature.
 */
export function getSafeModelParams(
  model: string,
  params: {
    temperature?: number;
    topP?: number;
    frequencyPenalty?: number;
    presencePenalty?: number;
    stopSequences?: string[];
  },
): {
  temperature?: number;
  topP?: number;
  frequencyPenalty?: number;
  presencePenalty?: number;
  stopSequences?: string[];
} {
  // Collect the keys this model/provider combination cannot accept,
  // then drop them from a shallow copy of the caller's params.
  const blocked = new Set<keyof typeof params>();

  if (getProviderFromModel(model) === "anthropic") {
    blocked.add("frequencyPenalty");
    blocked.add("presencePenalty");
  }
  if (isReasoningModel(model)) {
    blocked.add("temperature");
  }

  const sanitized: typeof params = { ...params };
  for (const key of blocked) {
    delete sanitized[key];
  }
  return sanitized;
}

/**
* Normalizes a model name by removing the provider prefix if present.
*
Expand Down
Loading