Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion apps/landing/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -17,7 +17,7 @@
"framer-motion": "^12.34.3",
"geist": "^1.7.0",
"lucide-react": "^0.469.0",
"next": "^15.5.10",
"next": "^15.5.15",
"react": "^19.2.3",
"react-dom": "^19.2.3",
"tailwind-merge": "^2.1.0"
Expand Down
2 changes: 1 addition & 1 deletion apps/web/e2e/global-teardown.ts
Original file line number Diff line number Diff line change
Expand Up @@ -46,7 +46,7 @@ async function globalTeardown() {
args: {
secret: adminSecret,
name: "testing/helpers:cleanupE2ETestData",
mutationArgs: {},
mutationArgsJson: JSON.stringify({}),
},
format: "json",
}),
Expand Down
6 changes: 5 additions & 1 deletion apps/web/e2e/helpers/test-data.ts
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,11 @@ async function callInternalMutation<T>(path: string, args: Record<string, unknow
},
body: JSON.stringify({
path: "testAdmin:runTestMutation",
args: { secret: getAdminSecret(), name: path, mutationArgs: args },
args: {
secret: getAdminSecret(),
name: path,
mutationArgsJson: JSON.stringify(args),
},
format: "json",
}),
});
Expand Down
2 changes: 2 additions & 0 deletions apps/web/e2e/helpers/widget-helpers.ts
Original file line number Diff line number Diff line change
Expand Up @@ -315,6 +315,8 @@ export async function waitForSurveyVisible(page: Page, timeout = 10000): Promise
export async function submitNPSRating(page: Page, rating: number): Promise<void> {
const frame = getWidgetContainer(page);

await dismissTour(page);

// Click the rating button (0-10)
await frame
.locator(
Expand Down
12 changes: 0 additions & 12 deletions apps/web/next.config.js
Original file line number Diff line number Diff line change
Expand Up @@ -46,18 +46,6 @@ const nextConfig = {
// Reduce memory usage during webpack compilation
webpackMemoryOptimizations: true,
},
webpack: (config, { dev }) => {
if (dev) {
// Use filesystem cache to reduce in-memory pressure during dev
config.cache = {
type: "filesystem",
buildDependencies: {
config: [__filename],
},
};
}
return config;
},
async headers() {
return [
{
Expand Down
2 changes: 1 addition & 1 deletion apps/web/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,7 @@
"fflate": "^0.8.2",
"lucide-react": "^0.469.0",
"markdown-it": "^14.1.1",
"next": "^15.5.10",
"next": "^15.5.15",
"react": "^19.2.3",
"react-dom": "^19.2.3"
},
Expand Down
46 changes: 46 additions & 0 deletions apps/web/src/app/inbox/InboxAiReviewPanel.test.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,46 @@
import { render, screen } from "@testing-library/react";
import { describe, expect, it, vi } from "vitest";
import type { Id } from "@opencom/convex/dataModel";
import { InboxAiReviewPanel } from "./InboxAiReviewPanel";
import type { InboxAiResponse } from "./inboxRenderTypes";

// Brand a plain string as a Convex `messages` document id for test fixtures.
function messageId(value: string): Id<"messages"> {
  const branded = value as Id<"messages">;
  return branded;
}

// Brand a plain string as a Convex `aiResponses` document id for test fixtures.
function responseId(value: string): Id<"aiResponses"> {
  const branded = value as Id<"aiResponses">;
  return branded;
}

// Unit tests for <InboxAiReviewPanel />: verifies that the model and provider
// metadata persisted on an AI response record are rendered by the review panel.
describe("InboxAiReviewPanel", () => {
  it("renders persisted model and provider metadata for AI responses", () => {
    // Minimal InboxAiResponse fixture; `model` and `provider` are the fields
    // under test, the rest satisfies the type's required shape.
    const response: InboxAiResponse = {
      _id: responseId("response_1"),
      createdAt: Date.now(),
      query: "How do I reset my password?",
      response: "Go to Settings > Security > Reset Password.",
      confidence: 0.82,
      model: "openai/gpt-5-nano",
      provider: "openai",
      handedOff: false,
      messageId: messageId("message_1"),
      sources: [],
      deliveredResponseContext: null,
      generatedResponseContext: null,
    };

    // Callbacks are inert spies; no conversation is selected for this case.
    render(
      <InboxAiReviewPanel
        aiResponses={[response]}
        orderedAiResponses={[response]}
        selectedConversation={null}
        onOpenArticle={vi.fn()}
        onJumpToMessage={vi.fn()}
        getHandoffReasonLabel={(reason) => reason ?? "No reason"}
      />
    );

    // The panel renders "Model <id>" and "Provider <name>" badges; assert
    // both appear with the values persisted on the response record.
    expect(screen.getByText("Model openai/gpt-5-nano")).toBeInTheDocument();
    expect(screen.getByText("Provider openai")).toBeInTheDocument();
  });
});
6 changes: 6 additions & 0 deletions apps/web/src/app/inbox/InboxAiReviewPanel.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -136,6 +136,12 @@ export function InboxAiReviewPanel({
<span className="rounded bg-muted px-2 py-0.5">
{confidenceLabel} {Math.round(confidenceValue * 100)}%
</span>
<span className="rounded bg-muted px-2 py-0.5" data-testid={`inbox-ai-review-model-${response._id}`}>
Model {response.model}
</span>
<span className="rounded bg-muted px-2 py-0.5" data-testid={`inbox-ai-review-provider-${response._id}`}>
Provider {response.provider}
</span>
{response.feedback && (
<span className="rounded bg-muted px-2 py-0.5">
Feedback {response.feedback === "helpful" ? "helpful" : "not helpful"}
Expand Down
2 changes: 2 additions & 0 deletions apps/web/src/app/inbox/inboxRenderTypes.ts
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,8 @@ export interface InboxAiResponse {
query: string;
response: string;
confidence: number;
model: string;
provider: string;
handedOff: boolean;
handoffReason?: string | null;
messageId: Id<"messages">;
Expand Down
87 changes: 87 additions & 0 deletions apps/web/src/app/settings/AIAgentSection.test.tsx
Original file line number Diff line number Diff line change
@@ -0,0 +1,87 @@
import { act, render, screen, waitFor } from "@testing-library/react";
import { beforeEach, describe, expect, it, vi } from "vitest";
import type { Id } from "@opencom/convex/dataModel";
import { AIAgentSection } from "./AIAgentSection";
import { useWebAction, useWebMutation, useWebQuery } from "@/lib/convex/hooks";

// Replace the Convex hook layer with controllable mocks. The *Ref helpers are
// identity functions so the component can still build function references;
// the use* hooks are bare vi.fn()s configured per-test in beforeEach below.
vi.mock("@/lib/convex/hooks", () => ({
  useWebAction: vi.fn(),
  useWebMutation: vi.fn(),
  useWebQuery: vi.fn(),
  webActionRef: vi.fn((functionName: string) => functionName),
  webMutationRef: vi.fn((functionName: string) => functionName),
  webQueryRef: vi.fn((functionName: string) => functionName),
}));

describe("AIAgentSection model discovery fallbacks", () => {
  // 32-char string shaped like a Convex document id; cast because workspace
  // ids cannot be constructed outside Convex.
  const workspaceId = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa" as unknown as Id<"workspaces">;
  // Saved AI settings returned by the mocked query; `model` is the value the
  // discovery call is expected to receive (asserted in the test below).
  const aiSettingsFixture = {
    enabled: true,
    model: "openai/gpt-5-nano",
    confidenceThreshold: 0.6,
    knowledgeSources: ["articles"],
    personality: "",
    handoffMessage: "",
    suggestionsEnabled: false,
    embeddingModel: "text-embedding-3-small",
    lastConfigError: null,
  } as const;

  let listAvailableModelsMock: ReturnType<typeof vi.fn>;
  // Captured reject callback so the test can fail model discovery on demand.
  let rejectDiscovery: ((reason?: unknown) => void) | undefined;

  beforeEach(() => {
    vi.clearAllMocks();
    // Silence the component's error logging on the failed-discovery path.
    vi.spyOn(console, "error").mockImplementation(() => {});

    const mockedUseWebQuery = useWebQuery as unknown as ReturnType<typeof vi.fn>;
    mockedUseWebQuery.mockImplementation((_: unknown, args: unknown) => {
      // "skip" mirrors Convex's sentinel for a query that should not run.
      if (args === "skip") {
        return undefined;
      }

      return aiSettingsFixture;
    });

    // The discovery action returns a promise that never settles on its own;
    // the test resolves the race by calling rejectDiscovery explicitly.
    listAvailableModelsMock = vi.fn(
      () =>
        new Promise((_, reject) => {
          rejectDiscovery = reject;
        })
    );

    const mockedUseWebAction = useWebAction as unknown as ReturnType<typeof vi.fn>;
    mockedUseWebAction.mockReturnValue(listAvailableModelsMock);

    const mockedUseWebMutation = useWebMutation as unknown as ReturnType<typeof vi.fn>;
    mockedUseWebMutation.mockReturnValue(vi.fn().mockResolvedValue(undefined));
  });

  it("stops showing the loading placeholder when model discovery fails", async () => {
    render(<AIAgentSection workspaceId={workspaceId} />);

    // The component should kick off discovery with the currently saved model.
    await waitFor(() => {
      expect(listAvailableModelsMock).toHaveBeenCalledWith({
        workspaceId,
        selectedModel: aiSettingsFixture.model,
      });
    });

    // While the discovery promise is pending, the select shows the loading
    // placeholder option.
    expect(screen.getByRole("option", { name: /loading discovered models/i })).toBeInTheDocument();

    // Fail the pending discovery promise inside act() so React processes the
    // resulting state update before the assertions below.
    await act(async () => {
      rejectDiscovery?.(new Error("Discovery failed"));
    });

    // The placeholder must switch to the error state...
    await waitFor(() => {
      expect(screen.getByRole("option", { name: /model discovery unavailable/i })).toBeInTheDocument();
    });

    // ...with manual-entry guidance rendered, and the loading option gone.
    expect(
      screen.getByText(/model discovery is currently unavailable\. enter a model id manually/i)
    ).toBeInTheDocument();
    expect(
      screen.queryByRole("option", { name: /loading discovered models/i })
    ).not.toBeInTheDocument();
  });
});
69 changes: 45 additions & 24 deletions apps/web/src/app/settings/AIAgentSection.tsx
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,17 @@ import { AlertTriangle, Bot } from "lucide-react";
import type { Id } from "@opencom/convex/dataModel";
import { useAIAgentSectionConvex } from "./hooks/useSettingsSectionsConvex";

/**
 * Canonicalize a user-entered model identifier by stripping surrounding
 * whitespace. Used before comparing the input against discovered model ids.
 */
function normalizeModelValue(value: string): string {
  const trimmed = value.trim();
  return trimmed;
}

export function AIAgentSection({
workspaceId,
}: {
workspaceId?: Id<"workspaces">;
}): React.JSX.Element | null {
const { aiSettings, availableModels, updateSettings } = useAIAgentSectionConvex(workspaceId);
const { aiSettings, availableModels, availableModelsStatus, isSaving, saveSettings } =
useAIAgentSectionConvex(workspaceId);

const [enabled, setEnabled] = useState(false);
const [model, setModel] = useState("openai/gpt-5-nano");
Expand All @@ -21,7 +26,17 @@ export function AIAgentSection({
const [handoffMessage, setHandoffMessage] = useState("");
const [suggestionsEnabled, setSuggestionsEnabled] = useState(false);
const [embeddingModel, setEmbeddingModel] = useState("text-embedding-3-small");
const [isSaving, setIsSaving] = useState(false);
const normalizedModel = normalizeModelValue(model);
const selectedDiscoveredModel =
availableModels?.some((availableModel) => availableModel.id === normalizedModel) ?? false
? normalizedModel
: "";
const discoveredModelsPlaceholder =
availableModelsStatus === "loading"
? "Loading discovered models..."
: availableModelsStatus === "error"
? "Model discovery unavailable"
: "Choose a discovered model";

useEffect(() => {
if (aiSettings) {
Expand All @@ -38,23 +53,19 @@ export function AIAgentSection({

const handleSave = async () => {
if (!workspaceId) return;
setIsSaving(true);
try {
await updateSettings({
workspaceId,
enabled,
model,
confidenceThreshold,
knowledgeSources: knowledgeSources as ("articles" | "internalArticles" | "snippets")[],
personality: personality || undefined,
handoffMessage: handoffMessage || undefined,
suggestionsEnabled,
embeddingModel,
});
} catch (error) {
console.error("Failed to save AI settings:", error);
} finally {
setIsSaving(false);
const nextModel = await saveSettings({
workspaceId,
enabled,
model,
confidenceThreshold,
knowledgeSources: knowledgeSources as ("articles" | "internalArticles" | "snippets")[],
personality,
handoffMessage,
suggestionsEnabled,
embeddingModel,
});
if (nextModel) {
setModel(nextModel);
}
};

Expand Down Expand Up @@ -131,18 +142,31 @@ export function AIAgentSection({
<div className="space-y-2">
<label className="text-sm font-medium">AI Model</label>
<select
value={model}
value={selectedDiscoveredModel}
onChange={(e) => setModel(e.target.value)}
className="w-full px-3 py-2 border rounded-md text-sm bg-background"
>
<option value="">{discoveredModelsPlaceholder}</option>
{availableModels?.map((m: NonNullable<typeof availableModels>[number]) => (
<option key={m.id} value={m.id}>
{m.name} ({m.provider})
</option>
))}
</select>
<Input
value={model}
onChange={(e) => setModel(e.target.value)}
placeholder="openai/gpt-5-nano"
/>
{availableModelsStatus === "error" && (
<p className="text-xs text-amber-700">
Model discovery is currently unavailable. Enter a model ID manually or try again
later.
</p>
)}
<p className="text-xs text-muted-foreground">
Choose the AI model for generating responses.
Choose a discovered model or enter one manually. Raw model IDs are interpreted
against the currently configured AI gateway runtime.
</p>
</div>

Expand Down Expand Up @@ -252,9 +276,6 @@ export function AIAgentSection({
<option value="text-embedding-3-small">
text-embedding-3-small (Recommended)
</option>
<option value="text-embedding-3-large">
text-embedding-3-large (Higher quality)
</option>
<option value="text-embedding-ada-002">text-embedding-ada-002 (Legacy)</option>
</select>
<p className="text-xs text-muted-foreground">
Expand Down
Loading
Loading