diff --git a/apps/docs/content/docs/en/tools/image_generator.mdx b/apps/docs/content/docs/en/tools/image_generator.mdx index 36979e8dd3e..7e1f25bc642 100644 --- a/apps/docs/content/docs/en/tools/image_generator.mdx +++ b/apps/docs/content/docs/en/tools/image_generator.mdx @@ -29,7 +29,7 @@ In Sim, the DALL-E integration enables your agents to generate images programmat ## Usage Instructions -Integrate Image Generator into the workflow. Can generate images using DALL-E 3 or GPT Image. +Integrate Image Generator into the workflow. Can generate images using DALL-E 3, GPT Image 1, or GPT Image 2. @@ -43,12 +43,14 @@ Generate images using OpenAI | Parameter | Type | Required | Description | | --------- | ---- | -------- | ----------- | -| `model` | string | Yes | The model to use \(gpt-image-1 or dall-e-3\) | +| `model` | string | Yes | The model to use \(dall-e-3, gpt-image-1, or gpt-image-2\) | | `prompt` | string | Yes | A text description of the desired image | -| `size` | string | Yes | The size of the generated images \(1024x1024, 1024x1792, or 1792x1024\) | -| `quality` | string | No | The quality of the image \(standard or hd\) | -| `style` | string | No | The style of the image \(vivid or natural\) | -| `background` | string | No | The background color, only for gpt-image-1 | +| `size` | string | Yes | Image size. dall-e-3: 1024x1024, 1024x1792, or 1792x1024. gpt-image-1: auto, 1024x1024, 1536x1024, or 1024x1536. gpt-image-2: auto or any size with edges ≤3840px and multiples of 16 \(e.g. 1024x1024, 1536x1024, 1024x1536, 2560x1440, 3840x2160\). | +| `quality` | string | No | Quality. dall-e-3: standard\|hd. gpt-image-1/gpt-image-2: auto\|low\|medium\|high | +| `style` | string | No | The style of the image \(vivid or natural\), only for dall-e-3 | +| `background` | string | No | Background. gpt-image-1: auto\|transparent\|opaque. gpt-image-2: auto\|opaque \(transparent not supported\) | +| `outputFormat` | string | No | Output image format \(png, jpeg, webp\), only for gpt-image-1 and gpt-image-2 | +| `moderation` | string | No | Moderation level \(auto or low\), only for gpt-image-1 and gpt-image-2 | | `n` | number | No | The number of images to generate \(1-10\) | | `apiKey` | string | Yes | Your OpenAI API key | diff --git a/apps/docs/content/docs/en/tools/knowledge.mdx b/apps/docs/content/docs/en/tools/knowledge.mdx index 83cbe9b8fb3..b0e1338d9e0 100644 --- a/apps/docs/content/docs/en/tools/knowledge.mdx +++ b/apps/docs/content/docs/en/tools/knowledge.mdx @@ -49,6 +49,8 @@ Search for similar content in a knowledge base using vector similarity | `tagValue` | string | No | No description | | `rerankerEnabled` | boolean | No | Whether to apply Cohere reranking to vector search results | | `rerankerModel` | string | No | Cohere rerank model to use \(one of: rerank-v4.0-pro, rerank-v4.0-fast, rerank-v3.5\) | +| `rerankerInputCount` | number | No | Number of vector results sent to the Cohere reranker \(1–100\). Defaults to topK × 4 capped at 100. 
| +| `apiKey` | string | No | Cohere API key for reranker \(self-hosted deployments only\) | | `tagFilters` | string | No | No description | #### Output diff --git a/apps/docs/content/docs/en/tools/mem0.mdx b/apps/docs/content/docs/en/tools/mem0.mdx index 5360e00c5bf..2e2ef80dce8 100644 --- a/apps/docs/content/docs/en/tools/mem0.mdx +++ b/apps/docs/content/docs/en/tools/mem0.mdx @@ -50,12 +50,9 @@ Add memories to Mem0 for persistent storage and retrieval | Parameter | Type | Description | | --------- | ---- | ----------- | -| `ids` | array | Array of memory IDs that were created | -| `memories` | array | Array of memory objects that were created | -| ↳ `id` | string | Unique identifier for the memory | -| ↳ `memory` | string | The content of the memory | -| ↳ `event` | string | Event type indicating operation performed \(ADD, UPDATE, DELETE, NOOP\) | -| ↳ `metadata` | json | Custom metadata associated with the memory | +| `message` | string | Status message for the queued memory processing job | +| `status` | string | Processing status returned by Mem0 | +| `event_id` | string | Event ID for polling memory processing status | ### `mem0_search_memories` @@ -102,6 +99,7 @@ Retrieve memories from Mem0 by ID or filter criteria | `startDate` | string | No | Start date for filtering by created_at \(e.g., "2024-01-15"\) | | `endDate` | string | No | End date for filtering by created_at \(e.g., "2024-12-31"\) | | `limit` | number | No | Maximum number of results to return \(e.g., 10, 50, 100\) | +| `page` | number | No | Page number to retrieve for paginated list results | | `apiKey` | string | Yes | Your Mem0 API key | #### Output @@ -120,10 +118,9 @@ Retrieve memories from Mem0 by ID or filter criteria | ↳ `categories` | json | Auto-assigned categories for the memory | | ↳ `created_at` | string | ISO 8601 timestamp when the memory was created | | ↳ `updated_at` | string | ISO 8601 timestamp when the memory was last updated | -| ↳ `owner` | string | Owner of the memory | -| ↳ `organization` | string | Organization associated with the memory | -| ↳ `immutable` | boolean | Whether the memory can be modified | -| ↳ `expiration_date` | string | Expiration date after which memory is not retrieved | | `ids` | array | Array of memory IDs that were retrieved | +| `count` | number | Total number of memories matching the filters | +| `next` | string | URL for the next page of results | +| `previous` | string | URL for the previous page of results | diff --git a/apps/docs/content/docs/en/triggers/meta.json b/apps/docs/content/docs/en/triggers/meta.json index ae727f30f3e..7eff814aad7 100644 --- a/apps/docs/content/docs/en/triggers/meta.json +++ b/apps/docs/content/docs/en/triggers/meta.json @@ -39,6 +39,7 @@ "servicenow", "slack", "stripe", + "table", "telegram", "twilio_voice", "typeform", diff --git a/apps/docs/content/docs/en/triggers/table.mdx b/apps/docs/content/docs/en/triggers/table.mdx new file mode 100644 index 00000000000..1a4a7139987 --- /dev/null +++ b/apps/docs/content/docs/en/triggers/table.mdx @@ -0,0 +1,45 @@ +--- +title: Table +description: Available Table triggers for automating workflows +--- + +import { BlockInfoCard } from "@/components/ui/block-info-card" + + + +Table provides 1 trigger for automating workflows based on events. 
+ +## Triggers + +### Table Trigger + +Triggers when rows are inserted or updated in a table + +#### Configuration + +| Parameter | Type | Required | Description | +| --------- | ---- | -------- | ----------- | +| `tableSelector` | table-selector | Yes | The table to monitor. | +| `manualTableId` | string | Yes | The table to monitor. | +| `eventType` | string | Yes | The type of event to trigger on. | +| `watchColumns` | string | No | Only fire when these columns change. Leave empty to fire on any update. | +| `includeHeaders` | boolean | No | When enabled, each row is returned as a key-value object mapped to column names. | + +#### Output + +| Parameter | Type | Description | +| --------- | ---- | ----------- | +| `row` | json | Row data mapped to column names \(when header mapping is enabled\) | +| `rawRow` | json | Raw row data object | +| `previousRow` | json | Previous row data before the update \(null for inserts\) | +| `changedColumns` | json | List of column names that changed \(empty for inserts\) | +| `rowId` | string | The unique row ID | +| `headers` | json | Column names from the table schema | +| `rowNumber` | number | The position of the row in the table | +| `tableId` | string | The table ID | +| `tableName` | string | The table name | +| `timestamp` | string | Event timestamp in ISO format | + diff --git a/apps/sim/.env.example b/apps/sim/.env.example index 6f7aa473666..f554797ea1e 100644 --- a/apps/sim/.env.example +++ b/apps/sim/.env.example @@ -35,6 +35,8 @@ API_ENCRYPTION_KEY=your_api_encryption_key # Use `openssl rand -hex 32` to gener # AZURE_ANTHROPIC_API_KEY= # Azure Anthropic API key # AZURE_ANTHROPIC_API_VERSION= # Azure Anthropic API version (e.g., 2023-06-01) # NEXT_PUBLIC_AZURE_CONFIGURED=true # Set when Azure credentials are pre-configured above. Hides endpoint/key/version fields in Agent block UI. +# COHERE_API_KEY= # Cohere API key for the Knowledge block reranker (rerank-v4.0-pro/-fast, rerank-v3.5). Alternatively set COHERE_API_KEY_1/2/3 for rotation. +# NEXT_PUBLIC_COHERE_CONFIGURED=true # Set when COHERE_API_KEY (or rotation keys) are pre-configured above. Hides the Cohere API Key field on the Knowledge block UI. # Admin API (Optional - for self-hosted GitOps) # ADMIN_API_KEY= # Use `openssl rand -hex 32` to generate. Enables admin API for workflow export/import. diff --git a/apps/sim/app/(landing)/integrations/data/integrations.json b/apps/sim/app/(landing)/integrations/data/integrations.json index 6cdced211a2..346cb98feeb 100644 --- a/apps/sim/app/(landing)/integrations/data/integrations.json +++ b/apps/sim/app/(landing)/integrations/data/integrations.json @@ -6496,7 +6496,7 @@ "slug": "image-generator", "name": "Image Generator", "description": "Generate images", - "longDescription": "Integrate Image Generator into the workflow. Can generate images using DALL-E 3 or GPT Image.", + "longDescription": "Integrate Image Generator into the workflow. 
Can generate images using DALL-E 3, GPT Image 1, or GPT Image 2.", "bgColor": "#4D5FFF", "iconName": "ImageIcon", "docsUrl": "https://docs.sim.ai/tools/image_generator", @@ -7540,7 +7540,7 @@ "operationCount": 14, "triggers": [], "triggerCount": 0, - "authType": "none", + "authType": "api-key", "category": "blocks" }, { diff --git a/apps/sim/app/api/auth/[...all]/route.test.ts b/apps/sim/app/api/auth/[...all]/route.test.ts index d9aa74cab91..f87f1a01673 100644 --- a/apps/sim/app/api/auth/[...all]/route.test.ts +++ b/apps/sim/app/api/auth/[...all]/route.test.ts @@ -8,11 +8,9 @@ const handlerMocks = vi.hoisted(() => ({ betterAuthGET: vi.fn(), betterAuthPOST: vi.fn(), ensureAnonymousUserExists: vi.fn(), - createAnonymousGetSessionResponse: vi.fn(() => ({ - data: { - user: { id: 'anon' }, - session: { id: 'anon-session' }, - }, + createAnonymousSession: vi.fn(() => ({ + user: { id: 'anon' }, + session: { id: 'anon-session' }, })), isAuthDisabled: false, })) @@ -30,7 +28,7 @@ vi.mock('@/lib/auth', () => ({ vi.mock('@/lib/auth/anonymous', () => ({ ensureAnonymousUserExists: handlerMocks.ensureAnonymousUserExists, - createAnonymousGetSessionResponse: handlerMocks.createAnonymousGetSessionResponse, + createAnonymousSession: handlerMocks.createAnonymousSession, })) vi.mock('@/lib/core/config/feature-flags', () => ({ @@ -63,10 +61,8 @@ describe('auth catch-all route (DISABLE_AUTH get-session)', () => { expect(handlerMocks.ensureAnonymousUserExists).toHaveBeenCalledTimes(1) expect(handlerMocks.betterAuthGET).not.toHaveBeenCalled() expect(json).toEqual({ - data: { - user: { id: 'anon' }, - session: { id: 'anon-session' }, - }, + user: { id: 'anon' }, + session: { id: 'anon-session' }, }) }) diff --git a/apps/sim/app/api/auth/[...all]/route.ts b/apps/sim/app/api/auth/[...all]/route.ts index b09ce7e7e67..6ff9bfd6db2 100644 --- a/apps/sim/app/api/auth/[...all]/route.ts +++ b/apps/sim/app/api/auth/[...all]/route.ts @@ -1,7 +1,7 @@ import { toNextJsHandler } from 'better-auth/next-js' import { type NextRequest, NextResponse } from 'next/server' import { auth } from '@/lib/auth' -import { createAnonymousGetSessionResponse, ensureAnonymousUserExists } from '@/lib/auth/anonymous' +import { createAnonymousSession, ensureAnonymousUserExists } from '@/lib/auth/anonymous' import { isAuthDisabled } from '@/lib/core/config/feature-flags' import { withRouteHandler } from '@/lib/core/utils/with-route-handler' @@ -24,7 +24,7 @@ export const GET = withRouteHandler(async (request: NextRequest) => { if (path === 'get-session' && isAuthDisabled) { await ensureAnonymousUserExists() - return NextResponse.json(createAnonymousGetSessionResponse()) + return NextResponse.json(createAnonymousSession()) } return betterAuthGET(request) diff --git a/apps/sim/app/api/copilot/chat/stop/route.test.ts b/apps/sim/app/api/copilot/chat/stop/route.test.ts index 0ac05257bf6..a87f35a2987 100644 --- a/apps/sim/app/api/copilot/chat/stop/route.test.ts +++ b/apps/sim/app/api/copilot/chat/stop/route.test.ts @@ -29,6 +29,16 @@ const { mockSql: vi.fn((strings: TemplateStringsArray, ...values: unknown[]) => ({ strings, values })), })) +vi.mock('@sim/db/schema', () => ({ + copilotChats: { + id: 'copilotChats.id', + userId: 'copilotChats.userId', + workspaceId: 'copilotChats.workspaceId', + messages: 'copilotChats.messages', + conversationId: 'copilotChats.conversationId', + }, +})) + vi.mock('@sim/db', () => ({ db: { select: mockSelect, @@ -140,6 +150,7 @@ describe('copilot chat stop route', () => { workspaceId: 'ws-1', chatId: 'chat-1', type: 
'completed', + streamId: 'stream-1', }) }) }) diff --git a/apps/sim/app/api/copilot/chat/stop/route.ts b/apps/sim/app/api/copilot/chat/stop/route.ts index ad7da7386d1..36d3b8ae43d 100644 --- a/apps/sim/app/api/copilot/chat/stop/route.ts +++ b/apps/sim/app/api/copilot/chat/stop/route.ts @@ -111,6 +111,7 @@ export const POST = withRouteHandler((req: NextRequest) => workspaceId: updated.workspaceId, chatId, type: 'completed', + streamId, }) } diff --git a/apps/sim/app/api/copilot/chat/stream/route.ts b/apps/sim/app/api/copilot/chat/stream/route.ts index 04ba6109a56..7cc61cb6447 100644 --- a/apps/sim/app/api/copilot/chat/stream/route.ts +++ b/apps/sim/app/api/copilot/chat/stream/route.ts @@ -248,6 +248,7 @@ async function handleResumeRequestBody({ events: batchEvents, previewSessions, status: run.status, + ...(run.chatId ? { chatId: run.chatId } : {}), }) } diff --git a/apps/sim/app/api/knowledge/search/route.ts b/apps/sim/app/api/knowledge/search/route.ts index 13f4625a2cb..94c09f6c138 100644 --- a/apps/sim/app/api/knowledge/search/route.ts +++ b/apps/sim/app/api/knowledge/search/route.ts @@ -247,9 +247,21 @@ export const POST = withRouteHandler(async (request: NextRequest) => { const hasFilters = structuredFilters && structuredFilters.length > 0 - /** Oversample candidates when reranking so the reranker has more to choose from. - * Cap at 100 to bound Cohere request cost (1 search unit = ≤100 docs). */ - const candidateTopK = useReranker ? Math.min(100, validatedData.topK * 4) : validatedData.topK + /** Oversample vector results when reranking so the reranker has more to choose from. + * Cap at 100 to bound Cohere request cost (1 search unit = ≤100 docs). When the caller + * supplies `rerankerInputCount`, honor it but never let it drop below `topK` + * (which would defeat the purpose) or exceed 100 (which would split into >1 search units). */ + const rawInputCount = validatedData.rerankerInputCount + if (useReranker && rawInputCount !== undefined && rawInputCount < validatedData.topK) { + logger.warn( + `[${requestId}] rerankerInputCount (${rawInputCount}) is below topK (${validatedData.topK}); raising to topK` + ) + } + const candidateTopK = useReranker + ? rawInputCount !== undefined + ? 
Math.min(100, Math.max(validatedData.topK, rawInputCount)) + : Math.min(100, validatedData.topK * 4) + : validatedData.topK if (!hasQuery && hasFilters) { results = await handleTagOnlySearch({ @@ -300,7 +312,12 @@ export const POST = withRouteHandler(async (request: NextRequest) => { const { results: ranked, isBYOK } = await rerank( validatedData.query!, results.map((r) => ({ id: r.id, text: r.content })), - { model: rerankerModel, topN: validatedData.topK, workspaceId } + { + model: rerankerModel, + topN: validatedData.topK, + workspaceId, + apiKey: validatedData.rerankerApiKey, + } ) rerankBilled = true rerankIsBYOK = isBYOK diff --git a/apps/sim/app/api/logs/[id]/route.ts b/apps/sim/app/api/logs/[id]/route.ts index 575b0867b1a..5c0acd33e08 100644 --- a/apps/sim/app/api/logs/[id]/route.ts +++ b/apps/sim/app/api/logs/[id]/route.ts @@ -1,183 +1,39 @@ -import { db } from '@sim/db' -import { - jobExecutionLogs, - permissions, - workflow, - workflowDeploymentVersion, - workflowExecutionLogs, -} from '@sim/db/schema' import { createLogger } from '@sim/logger' -import { and, eq } from 'drizzle-orm' import { type NextRequest, NextResponse } from 'next/server' -import { logIdParamsSchema } from '@/lib/api/contracts/logs' -import { getSession } from '@/lib/auth' -import { generateRequestId } from '@/lib/core/utils/request' +import { getLogDetailContract } from '@/lib/api/contracts/logs' +import { parseRequest } from '@/lib/api/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { withRouteHandler } from '@/lib/core/utils/with-route-handler' +import { fetchLogDetail } from '@/lib/logs/fetch-log-detail' const logger = createLogger('LogDetailsByIdAPI') -export const revalidate = 0 - export const GET = withRouteHandler( - async (_request: NextRequest, { params }: { params: Promise<{ id: string }> }) => { - const requestId = generateRequestId() - - try { - const session = await getSession() - if (!session?.user?.id) { - logger.warn(`[${requestId}] Unauthorized log details access attempt`) - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) - } - - const userId = session.user.id - const { id } = logIdParamsSchema.parse(await params) - - const rows = await db - .select({ - id: workflowExecutionLogs.id, - workflowId: workflowExecutionLogs.workflowId, - executionId: workflowExecutionLogs.executionId, - stateSnapshotId: workflowExecutionLogs.stateSnapshotId, - deploymentVersionId: workflowExecutionLogs.deploymentVersionId, - level: workflowExecutionLogs.level, - status: workflowExecutionLogs.status, - trigger: workflowExecutionLogs.trigger, - startedAt: workflowExecutionLogs.startedAt, - endedAt: workflowExecutionLogs.endedAt, - totalDurationMs: workflowExecutionLogs.totalDurationMs, - executionData: workflowExecutionLogs.executionData, - cost: workflowExecutionLogs.cost, - files: workflowExecutionLogs.files, - createdAt: workflowExecutionLogs.createdAt, - workflowName: workflow.name, - workflowDescription: workflow.description, - workflowColor: workflow.color, - workflowFolderId: workflow.folderId, - workflowUserId: workflow.userId, - workflowWorkspaceId: workflow.workspaceId, - workflowCreatedAt: workflow.createdAt, - workflowUpdatedAt: workflow.updatedAt, - deploymentVersion: workflowDeploymentVersion.version, - deploymentVersionName: workflowDeploymentVersion.name, - }) - .from(workflowExecutionLogs) - .leftJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id)) - .leftJoin( - workflowDeploymentVersion, - eq(workflowDeploymentVersion.id, 
workflowExecutionLogs.deploymentVersionId) - ) - .innerJoin( - permissions, - and( - eq(permissions.entityType, 'workspace'), - eq(permissions.entityId, workflowExecutionLogs.workspaceId), - eq(permissions.userId, userId) - ) - ) - .where(eq(workflowExecutionLogs.id, id)) - .limit(1) - - const log = rows[0] - - // Fallback: check job_execution_logs - if (!log) { - const jobRows = await db - .select({ - id: jobExecutionLogs.id, - executionId: jobExecutionLogs.executionId, - level: jobExecutionLogs.level, - status: jobExecutionLogs.status, - trigger: jobExecutionLogs.trigger, - startedAt: jobExecutionLogs.startedAt, - endedAt: jobExecutionLogs.endedAt, - totalDurationMs: jobExecutionLogs.totalDurationMs, - executionData: jobExecutionLogs.executionData, - cost: jobExecutionLogs.cost, - createdAt: jobExecutionLogs.createdAt, - }) - .from(jobExecutionLogs) - .innerJoin( - permissions, - and( - eq(permissions.entityType, 'workspace'), - eq(permissions.entityId, jobExecutionLogs.workspaceId), - eq(permissions.userId, userId) - ) - ) - .where(eq(jobExecutionLogs.id, id)) - .limit(1) - - const jobLog = jobRows[0] - if (!jobLog) { - return NextResponse.json({ error: 'Not found' }, { status: 404 }) - } + async (request: NextRequest, context: { params: Promise<{ id: string }> }) => { + const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) + if (!authResult.success || !authResult.userId) { + return NextResponse.json( + { error: authResult.error || 'Authentication required' }, + { status: 401 } + ) + } - const execData = jobLog.executionData as Record | null - const response = { - id: jobLog.id, - workflowId: null, - executionId: jobLog.executionId, - deploymentVersionId: null, - deploymentVersion: null, - deploymentVersionName: null, - level: jobLog.level, - status: jobLog.status, - duration: jobLog.totalDurationMs ? `${jobLog.totalDurationMs}ms` : null, - trigger: jobLog.trigger, - createdAt: jobLog.startedAt.toISOString(), - workflow: null, - jobTitle: (execData?.trigger?.source as string) || null, - executionData: { - totalDuration: jobLog.totalDurationMs, - ...execData, - enhanced: true, - }, - cost: jobLog.cost as any, - } + const parsed = await parseRequest(getLogDetailContract, request, context) + if (!parsed.success) return parsed.response - return NextResponse.json({ data: response }) - } + const { id } = parsed.data.params + const { workspaceId } = parsed.data.query - const workflowSummary = log.workflowId - ? { - id: log.workflowId, - name: log.workflowName, - description: log.workflowDescription, - color: log.workflowColor, - folderId: log.workflowFolderId, - userId: log.workflowUserId, - workspaceId: log.workflowWorkspaceId, - createdAt: log.workflowCreatedAt, - updatedAt: log.workflowUpdatedAt, - } - : null + const data = await fetchLogDetail({ + userId: authResult.userId, + workspaceId, + lookupColumn: 'id', + lookupValue: id, + }) - const response = { - id: log.id, - workflowId: log.workflowId, - executionId: log.executionId, - deploymentVersionId: log.deploymentVersionId, - deploymentVersion: log.deploymentVersion ?? null, - deploymentVersionName: log.deploymentVersionName ?? null, - level: log.level, - status: log.status, - duration: log.totalDurationMs ? 
`${log.totalDurationMs}ms` : null, - trigger: log.trigger, - createdAt: log.startedAt.toISOString(), - files: log.files || undefined, - workflow: workflowSummary, - executionData: { - totalDuration: log.totalDurationMs, - ...(log.executionData as any), - enhanced: true, - }, - cost: log.cost as any, - } + if (!data) return NextResponse.json({ error: 'Not found' }, { status: 404 }) - return NextResponse.json({ data: response }) - } catch (error: any) { - logger.error(`[${requestId}] log details fetch error`, error) - return NextResponse.json({ error: error.message }, { status: 500 }) - } + logger.debug('Fetched log detail', { id, workspaceId }) + return NextResponse.json({ data }) } ) diff --git a/apps/sim/app/api/logs/by-execution/[executionId]/route.ts b/apps/sim/app/api/logs/by-execution/[executionId]/route.ts new file mode 100644 index 00000000000..172a77506cc --- /dev/null +++ b/apps/sim/app/api/logs/by-execution/[executionId]/route.ts @@ -0,0 +1,36 @@ +import { createLogger } from '@sim/logger' +import { type NextRequest, NextResponse } from 'next/server' +import { getLogByExecutionIdContract } from '@/lib/api/contracts/logs' +import { parseRequest } from '@/lib/api/server' +import { getSession } from '@/lib/auth' +import { withRouteHandler } from '@/lib/core/utils/with-route-handler' +import { fetchLogDetail } from '@/lib/logs/fetch-log-detail' + +const logger = createLogger('LogDetailsByExecutionAPI') + +export const GET = withRouteHandler( + async (request: NextRequest, context: { params: Promise<{ executionId: string }> }) => { + const session = await getSession() + if (!session?.user?.id) { + return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + } + + const parsed = await parseRequest(getLogByExecutionIdContract, request, context) + if (!parsed.success) return parsed.response + + const { executionId } = parsed.data.params + const { workspaceId } = parsed.data.query + + const data = await fetchLogDetail({ + userId: session.user.id, + workspaceId, + lookupColumn: 'executionId', + lookupValue: executionId, + }) + + if (!data) return NextResponse.json({ error: 'Not found' }, { status: 404 }) + + logger.debug('Fetched log by execution id', { executionId, workspaceId }) + return NextResponse.json({ data }) + } +) diff --git a/apps/sim/app/api/logs/route.ts b/apps/sim/app/api/logs/route.ts index 27b071be0f3..cb3690441d2 100644 --- a/apps/sim/app/api/logs/route.ts +++ b/apps/sim/app/api/logs/route.ts @@ -10,6 +10,7 @@ import { import { createLogger } from '@sim/logger' import { and, + asc, desc, eq, gt, @@ -24,582 +25,443 @@ import { type SQL, sql, } from 'drizzle-orm' -import { type NextRequest, NextResponse } from 'next/server' -import { listLogsQuerySchema } from '@/lib/api/contracts/logs' -import { isZodError } from '@/lib/api/server' -import { getSession } from '@/lib/auth' -import { generateRequestId } from '@/lib/core/utils/request' +import type { NextRequest } from 'next/server' +import { NextResponse } from 'next/server' +import { listLogsContract, type WorkflowLogSummary } from '@/lib/api/contracts/logs' +import { parseRequest } from '@/lib/api/server' +import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid' import { withRouteHandler } from '@/lib/core/utils/with-route-handler' import { buildFilterConditions } from '@/lib/logs/filters' const logger = createLogger('LogsAPI') -export const revalidate = 0 +type SortBy = 'date' | 'duration' | 'cost' | 'status' +type SortOrder = 'asc' | 'desc' -export const GET = withRouteHandler(async (request: NextRequest) 
=> { - const requestId = generateRequestId() +interface CursorData { + v: string | number | null + id: string +} + +function encodeCursor(data: CursorData): string { + return Buffer.from(JSON.stringify(data)).toString('base64') +} +function decodeCursor(cursor: string): CursorData | null { try { - const session = await getSession() - if (!session?.user?.id) { - logger.warn(`[${requestId}] Unauthorized logs access attempt`) - return NextResponse.json({ error: 'Unauthorized' }, { status: 401 }) + const parsed = JSON.parse(Buffer.from(cursor, 'base64').toString()) + if (typeof parsed?.id !== 'string') return null + return parsed as CursorData + } catch { + return null + } +} + +export const GET = withRouteHandler(async (request: NextRequest) => { + const authResult = await checkSessionOrInternalAuth(request, { requireWorkflowId: false }) + if (!authResult.success || !authResult.userId) { + return NextResponse.json( + { error: authResult.error || 'Authentication required' }, + { status: 401 } + ) + } + const userId = authResult.userId + + const parsed = await parseRequest(listLogsContract, request, {}) + if (!parsed.success) return parsed.response + + const params = parsed.data.query + const sortBy = params.sortBy as SortBy + const sortOrder = params.sortOrder as SortOrder + const cursor = params.cursor ? decodeCursor(params.cursor) : null + + const workflowSortExpr: SQL = (() => { + switch (sortBy) { + case 'duration': + return sql`${workflowExecutionLogs.totalDurationMs}` + case 'cost': + return sql`(${workflowExecutionLogs.cost}->>'total')::numeric` + case 'status': + return sql`${workflowExecutionLogs.status}` + default: + return sql`${workflowExecutionLogs.startedAt}` } + })() + + const jobSortExpr: SQL = (() => { + switch (sortBy) { + case 'duration': + return sql`${jobExecutionLogs.totalDurationMs}` + case 'cost': + return sql`(${jobExecutionLogs.cost}->>'total')::numeric` + case 'status': + return sql`${jobExecutionLogs.status}` + default: + return sql`${jobExecutionLogs.startedAt}` + } + })() + + const dir = sortOrder === 'asc' ? asc : desc + const nullsLast = sql`NULLS LAST` + const orderByClause = (expr: SQL): SQL => sql`${dir(expr)} ${nullsLast}` + + const buildCursorCondition = (sortExpr: unknown, idCol: unknown): SQL | undefined => { + if (!cursor) return undefined + const v = cursor.v + const id = cursor.id + const cmp = sortOrder === 'asc' ? sql`>` : sql`<` + if (v === null) { + return sql`(${sortExpr} IS NULL AND ${idCol} ${cmp} ${id})` + } + return sql`((${sortExpr} IS NOT NULL AND ${sortExpr} ${cmp} ${v}) OR (${sortExpr} = ${v} AND ${idCol} ${cmp} ${id}) OR ${sortExpr} IS NULL)` + } - const userId = session.user.id - - try { - const { searchParams } = new URL(request.url) - const params = listLogsQuerySchema.parse(Object.fromEntries(searchParams.entries())) - - const selectColumns = - params.details === 'full' - ? 
{ - id: workflowExecutionLogs.id, - workflowId: workflowExecutionLogs.workflowId, - executionId: workflowExecutionLogs.executionId, - stateSnapshotId: workflowExecutionLogs.stateSnapshotId, - deploymentVersionId: workflowExecutionLogs.deploymentVersionId, - level: workflowExecutionLogs.level, - status: workflowExecutionLogs.status, - trigger: workflowExecutionLogs.trigger, - startedAt: workflowExecutionLogs.startedAt, - endedAt: workflowExecutionLogs.endedAt, - totalDurationMs: workflowExecutionLogs.totalDurationMs, - executionData: workflowExecutionLogs.executionData, - cost: workflowExecutionLogs.cost, - files: workflowExecutionLogs.files, - createdAt: workflowExecutionLogs.createdAt, - workflowName: workflow.name, - workflowDescription: workflow.description, - workflowColor: workflow.color, - workflowFolderId: workflow.folderId, - workflowUserId: workflow.userId, - workflowWorkspaceId: workflow.workspaceId, - workflowCreatedAt: workflow.createdAt, - workflowUpdatedAt: workflow.updatedAt, - pausedStatus: pausedExecutions.status, - pausedTotalPauseCount: pausedExecutions.totalPauseCount, - pausedResumedCount: pausedExecutions.resumedCount, - deploymentVersion: workflowDeploymentVersion.version, - deploymentVersionName: workflowDeploymentVersion.name, - } - : { - id: workflowExecutionLogs.id, - workflowId: workflowExecutionLogs.workflowId, - executionId: workflowExecutionLogs.executionId, - stateSnapshotId: workflowExecutionLogs.stateSnapshotId, - deploymentVersionId: workflowExecutionLogs.deploymentVersionId, - level: workflowExecutionLogs.level, - status: workflowExecutionLogs.status, - trigger: workflowExecutionLogs.trigger, - startedAt: workflowExecutionLogs.startedAt, - endedAt: workflowExecutionLogs.endedAt, - totalDurationMs: workflowExecutionLogs.totalDurationMs, - executionData: sql`NULL`, - cost: workflowExecutionLogs.cost, - files: sql`NULL`, - createdAt: workflowExecutionLogs.createdAt, - workflowName: workflow.name, - workflowDescription: workflow.description, - workflowColor: workflow.color, - workflowFolderId: workflow.folderId, - workflowUserId: workflow.userId, - workflowWorkspaceId: workflow.workspaceId, - workflowCreatedAt: workflow.createdAt, - workflowUpdatedAt: workflow.updatedAt, - pausedStatus: pausedExecutions.status, - pausedTotalPauseCount: pausedExecutions.totalPauseCount, - pausedResumedCount: pausedExecutions.resumedCount, - deploymentVersion: workflowDeploymentVersion.version, - deploymentVersionName: sql`NULL`, - } - - const workspaceFilter = eq(workflowExecutionLogs.workspaceId, params.workspaceId) - - const baseQuery = db - .select(selectColumns) - .from(workflowExecutionLogs) - .leftJoin( - pausedExecutions, - eq(pausedExecutions.executionId, workflowExecutionLogs.executionId) - ) - .leftJoin( - workflowDeploymentVersion, - eq(workflowDeploymentVersion.id, workflowExecutionLogs.deploymentVersionId) - ) - .leftJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id)) - .innerJoin( - permissions, - and( - eq(permissions.entityType, 'workspace'), - eq(permissions.entityId, workflowExecutionLogs.workspaceId), - eq(permissions.userId, userId) - ) - ) + const fetchSize = params.limit + 1 - let conditions: SQL | undefined + // Build workflow log conditions + const workflowConditions: SQL[] = [eq(workflowExecutionLogs.workspaceId, params.workspaceId)] - if (params.level && params.level !== 'all') { - const levels = params.level.split(',').filter(Boolean) - const levelConditions: SQL[] = [] + if (params.level && params.level !== 'all') { + const levels = 
params.level.split(',').filter(Boolean) + const levelConditions: SQL[] = [] - for (const level of levels) { - if (level === 'error') { - levelConditions.push(eq(workflowExecutionLogs.level, 'error')) - } else if (level === 'info') { - const condition = and( - eq(workflowExecutionLogs.level, 'info'), - isNotNull(workflowExecutionLogs.endedAt) - ) - if (condition) levelConditions.push(condition) - } else if (level === 'running') { - const condition = and( - eq(workflowExecutionLogs.level, 'info'), - isNull(workflowExecutionLogs.endedAt) - ) - if (condition) levelConditions.push(condition) - } else if (level === 'pending') { - const condition = and( - eq(workflowExecutionLogs.level, 'info'), - or( - sql`(${pausedExecutions.totalPauseCount} > 0 AND ${pausedExecutions.resumedCount} < ${pausedExecutions.totalPauseCount})`, - and( - isNotNull(pausedExecutions.status), - sql`${pausedExecutions.status} != 'fully_resumed'` - ) - ) + for (const level of levels) { + if (level === 'error') { + levelConditions.push(eq(workflowExecutionLogs.level, 'error')) + } else if (level === 'info') { + const c = and( + eq(workflowExecutionLogs.level, 'info'), + isNotNull(workflowExecutionLogs.endedAt) + ) + if (c) levelConditions.push(c) + } else if (level === 'running') { + const c = and( + eq(workflowExecutionLogs.level, 'info'), + isNull(workflowExecutionLogs.endedAt) + ) + if (c) levelConditions.push(c) + } else if (level === 'pending') { + const c = and( + eq(workflowExecutionLogs.level, 'info'), + or( + sql`(${pausedExecutions.totalPauseCount} > 0 AND ${pausedExecutions.resumedCount} < ${pausedExecutions.totalPauseCount})`, + and( + isNotNull(pausedExecutions.status), + sql`${pausedExecutions.status} != 'fully_resumed'` ) - if (condition) levelConditions.push(condition) - } - } - - if (levelConditions.length > 0) { - conditions = and( - conditions, - levelConditions.length === 1 ? levelConditions[0] : or(...levelConditions) ) - } - } - - // Apply common filters (workflowIds, folderIds, triggers, dates, search, cost, duration) - // Level filtering is handled above with advanced running/pending state logic - const commonFilters = buildFilterConditions(params, { useSimpleLevelFilter: false }) - if (commonFilters) { - conditions = and(conditions, commonFilters) + ) + if (c) levelConditions.push(c) } + } - // Workflow-specific filters exclude job logs entirely - const hasWorkflowSpecificFilters = !!( - params.workflowIds || - params.folderIds || - params.workflowName || - params.folderName + if (levelConditions.length > 0) { + workflowConditions.push( + levelConditions.length === 1 ? levelConditions[0] : or(...levelConditions)! 
) - // If triggers filter is set and doesn't include 'mothership', skip job logs - const triggersList = params.triggers?.split(',').filter(Boolean) || [] - const triggersExcludeJobs = - triggersList.length > 0 && - !triggersList.includes('all') && - !triggersList.includes('mothership') - const includeJobLogs = !hasWorkflowSpecificFilters && !triggersExcludeJobs - - const fetchSize = params.limit + params.offset - - const workflowLogs = await baseQuery - .where(and(workspaceFilter, conditions)) - .orderBy(desc(workflowExecutionLogs.startedAt)) - .limit(fetchSize) + } + } - const workflowCountQuery = db - .select({ count: sql`count(*)` }) - .from(workflowExecutionLogs) - .leftJoin( - pausedExecutions, - eq(pausedExecutions.executionId, workflowExecutionLogs.executionId) - ) - .leftJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id)) - .innerJoin( - permissions, - and( - eq(permissions.entityType, 'workspace'), - eq(permissions.entityId, workflowExecutionLogs.workspaceId), - eq(permissions.userId, userId) - ) - ) - .where(and(eq(workflowExecutionLogs.workspaceId, params.workspaceId), conditions)) - - // Build job log filters (subset of filters that apply to job logs) - let jobLogs: Array<{ - id: string - executionId: string - level: string - status: string - trigger: string - startedAt: Date - endedAt: Date | null - totalDurationMs: number | null - executionData: unknown - cost: unknown - createdAt: Date - jobTitle: string | null - }> = [] - let jobCount = 0 - - if (includeJobLogs) { - const jobConditions: SQL[] = [eq(jobExecutionLogs.workspaceId, params.workspaceId)] - - // Permission check + const commonFilters = buildFilterConditions(params, { useSimpleLevelFilter: false }) + if (commonFilters) workflowConditions.push(commonFilters) + + const workflowCursorCond = buildCursorCondition(workflowSortExpr, workflowExecutionLogs.id) + if (workflowCursorCond) workflowConditions.push(workflowCursorCond) + + // Decide whether to include job logs + const hasWorkflowSpecificFilters = !!( + params.workflowIds || + params.folderIds || + params.workflowName || + params.folderName + ) + const triggersList = params.triggers?.split(',').filter(Boolean) || [] + const triggersExcludeJobs = + triggersList.length > 0 && !triggersList.includes('all') && !triggersList.includes('mothership') + const levelList = + params.level && params.level !== 'all' ? 
params.level.split(',').filter(Boolean) : [] + const levelExcludesJobs = + levelList.length > 0 && !levelList.some((l) => l === 'error' || l === 'info') + const includeJobLogs = !hasWorkflowSpecificFilters && !triggersExcludeJobs && !levelExcludesJobs + + const workflowQuery = db + .select({ + id: workflowExecutionLogs.id, + workflowId: workflowExecutionLogs.workflowId, + executionId: workflowExecutionLogs.executionId, + deploymentVersionId: workflowExecutionLogs.deploymentVersionId, + level: workflowExecutionLogs.level, + status: workflowExecutionLogs.status, + trigger: workflowExecutionLogs.trigger, + startedAt: workflowExecutionLogs.startedAt, + endedAt: workflowExecutionLogs.endedAt, + totalDurationMs: workflowExecutionLogs.totalDurationMs, + cost: workflowExecutionLogs.cost, + createdAt: workflowExecutionLogs.createdAt, + workflowName: workflow.name, + workflowDescription: workflow.description, + workflowColor: workflow.color, + workflowFolderId: workflow.folderId, + workflowUserId: workflow.userId, + workflowWorkspaceId: workflow.workspaceId, + workflowCreatedAt: workflow.createdAt, + workflowUpdatedAt: workflow.updatedAt, + pausedStatus: pausedExecutions.status, + pausedTotalPauseCount: pausedExecutions.totalPauseCount, + pausedResumedCount: pausedExecutions.resumedCount, + deploymentVersion: workflowDeploymentVersion.version, + deploymentVersionName: workflowDeploymentVersion.name, + sortValue: sql`${workflowSortExpr}`.as('sort_value'), + }) + .from(workflowExecutionLogs) + .leftJoin(pausedExecutions, eq(pausedExecutions.executionId, workflowExecutionLogs.executionId)) + .leftJoin( + workflowDeploymentVersion, + eq(workflowDeploymentVersion.id, workflowExecutionLogs.deploymentVersionId) + ) + .leftJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id)) + .innerJoin( + permissions, + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workflowExecutionLogs.workspaceId), + eq(permissions.userId, userId) + ) + ) + .where(and(...workflowConditions)) + .orderBy(orderByClause(workflowSortExpr), dir(workflowExecutionLogs.id)) + .limit(fetchSize) + + const jobConditions: SQL[] = [eq(jobExecutionLogs.workspaceId, params.workspaceId)] + + if (includeJobLogs) { + jobConditions.push( + sql`EXISTS (SELECT 1 FROM ${permissions} WHERE ${permissions.entityType} = 'workspace' AND ${permissions.entityId} = ${jobExecutionLogs.workspaceId} AND ${permissions.userId} = ${userId})` + ) + + if (params.level && params.level !== 'all') { + const levels = params.level.split(',').filter(Boolean) + const jobLevelConditions: SQL[] = [] + for (const level of levels) { + if (level === 'error') { + jobLevelConditions.push(eq(jobExecutionLogs.level, 'error')) + } else if (level === 'info') { + const c = and(eq(jobExecutionLogs.level, 'info'), isNotNull(jobExecutionLogs.endedAt)) + if (c) jobLevelConditions.push(c) + } + } + if (jobLevelConditions.length > 0) { jobConditions.push( - sql`EXISTS (SELECT 1 FROM ${permissions} WHERE ${permissions.entityType} = 'workspace' AND ${permissions.entityId} = ${jobExecutionLogs.workspaceId} AND ${permissions.userId} = ${userId})` + jobLevelConditions.length === 1 ? jobLevelConditions[0] : or(...jobLevelConditions)! 
) + } + } - // Level filter - if (params.level && params.level !== 'all') { - const levels = params.level.split(',').filter(Boolean) - const jobLevelConditions: SQL[] = [] - for (const level of levels) { - if (level === 'error') { - jobLevelConditions.push(eq(jobExecutionLogs.level, 'error')) - } else if (level === 'info') { - const c = and(eq(jobExecutionLogs.level, 'info'), isNotNull(jobExecutionLogs.endedAt)) - if (c) jobLevelConditions.push(c) - } - // 'running' and 'pending' don't apply to job logs (they complete synchronously) - } - if (jobLevelConditions.length > 0) { - jobConditions.push( - jobLevelConditions.length === 1 ? jobLevelConditions[0] : or(...jobLevelConditions)! - ) - } - } - - // Trigger filter - if (triggersList.length > 0 && !triggersList.includes('all')) { - jobConditions.push(inArray(jobExecutionLogs.trigger, triggersList)) - } - - // Date filters - if (params.startDate) { - jobConditions.push(gte(jobExecutionLogs.startedAt, new Date(params.startDate))) - } - if (params.endDate) { - jobConditions.push(lte(jobExecutionLogs.startedAt, new Date(params.endDate))) - } + if (triggersList.length > 0 && !triggersList.includes('all')) { + jobConditions.push(inArray(jobExecutionLogs.trigger, triggersList)) + } - // Search by executionId - if (params.search) { - jobConditions.push(sql`${jobExecutionLogs.executionId} ILIKE ${`%${params.search}%`}`) - } - if (params.executionId) { - jobConditions.push(eq(jobExecutionLogs.executionId, params.executionId)) - } + if (params.startDate) { + jobConditions.push(gte(jobExecutionLogs.startedAt, new Date(params.startDate))) + } + if (params.endDate) { + jobConditions.push(lte(jobExecutionLogs.startedAt, new Date(params.endDate))) + } - // Cost filter - if (params.costOperator && params.costValue !== undefined) { - const costField = sql`(${jobExecutionLogs.cost}->>'total')::numeric` - const ops = { - '=': sql`=`, - '>': sql`>`, - '<': sql`<`, - '>=': sql`>=`, - '<=': sql`<=`, - '!=': sql`!=`, - } as const - jobConditions.push(sql`${costField} ${ops[params.costOperator]} ${params.costValue}`) - } + if (params.search) { + jobConditions.push(sql`${jobExecutionLogs.executionId} ILIKE ${`%${params.search}%`}`) + } + if (params.executionId) { + jobConditions.push(eq(jobExecutionLogs.executionId, params.executionId)) + } - // Duration filter - if (params.durationOperator && params.durationValue !== undefined) { - const durationOps: Record< - string, - (field: typeof jobExecutionLogs.totalDurationMs, val: number) => SQL | undefined - > = { - '=': (f, v) => eq(f, v), - '>': (f, v) => gt(f, v), - '<': (f, v) => lt(f, v), - '>=': (f, v) => gte(f, v), - '<=': (f, v) => lte(f, v), - '!=': (f, v) => ne(f, v), - } - const durationCond = durationOps[params.durationOperator]?.( - jobExecutionLogs.totalDurationMs, - params.durationValue - ) - if (durationCond) jobConditions.push(durationCond) - } + if (params.costOperator && params.costValue !== undefined) { + const costField = sql`(${jobExecutionLogs.cost}->>'total')::numeric` + const ops = { + '=': sql`=`, + '>': sql`>`, + '<': sql`<`, + '>=': sql`>=`, + '<=': sql`<=`, + '!=': sql`!=`, + } as const + jobConditions.push(sql`${costField} ${ops[params.costOperator]} ${params.costValue}`) + } - const jobWhere = and(...jobConditions) - - const [jobLogResults, jobCountResult] = await Promise.all([ - db - .select({ - id: jobExecutionLogs.id, - executionId: jobExecutionLogs.executionId, - level: jobExecutionLogs.level, - status: jobExecutionLogs.status, - trigger: jobExecutionLogs.trigger, - startedAt: 
jobExecutionLogs.startedAt, - endedAt: jobExecutionLogs.endedAt, - totalDurationMs: jobExecutionLogs.totalDurationMs, - executionData: - params.details === 'full' ? jobExecutionLogs.executionData : sql`NULL`, - cost: jobExecutionLogs.cost, - createdAt: jobExecutionLogs.createdAt, - jobTitle: sql`${jobExecutionLogs.executionData}->'trigger'->>'source'`, - }) - .from(jobExecutionLogs) - .where(jobWhere) - .orderBy(desc(jobExecutionLogs.startedAt)) - .limit(fetchSize), - db.select({ count: sql`count(*)` }).from(jobExecutionLogs).where(jobWhere), - ]) - - jobLogs = jobLogResults as typeof jobLogs - jobCount = Number(jobCountResult[0]?.count || 0) + if (params.durationOperator && params.durationValue !== undefined) { + const durationOps: Record< + string, + (field: typeof jobExecutionLogs.totalDurationMs, val: number) => SQL | undefined + > = { + '=': (f, v) => eq(f, v), + '>': (f, v) => gt(f, v), + '<': (f, v) => lt(f, v), + '>=': (f, v) => gte(f, v), + '<=': (f, v) => lte(f, v), + '!=': (f, v) => ne(f, v), } + const durationCond = durationOps[params.durationOperator]?.( + jobExecutionLogs.totalDurationMs, + params.durationValue + ) + if (durationCond) jobConditions.push(durationCond) + } - const workflowCountResult = await workflowCountQuery - const workflowCount = Number(workflowCountResult[0]?.count || 0) - const totalCount = workflowCount + jobCount - - // Transform workflow logs to the unified shape - const blockExecutionsByExecution: Record = {} - - const createTraceSpans = (blockExecutions: any[]) => { - return blockExecutions.map((block, index) => { - let output = block.outputData - if (block.status === 'error' && block.errorMessage) { - output = { - ...output, - error: block.errorMessage, - stackTrace: block.errorStackTrace, - } - } + const jobCursorCond = buildCursorCondition(jobSortExpr, jobExecutionLogs.id) + if (jobCursorCond) jobConditions.push(jobCursorCond) + } - return { - id: block.id, - name: `Block ${block.blockName || block.blockType} (${block.blockType})`, - type: block.blockType, - duration: block.durationMs, - startTime: block.startedAt, - endTime: block.endedAt, - status: block.status === 'success' ? 'success' : 'error', - blockId: block.blockId, - input: block.inputData, - output, - tokens: block.cost?.tokens?.total || 0, - relativeStartMs: index * 100, - children: [], - toolCalls: [], - } + const jobQuery = includeJobLogs + ? 
db + .select({ + id: jobExecutionLogs.id, + executionId: jobExecutionLogs.executionId, + level: jobExecutionLogs.level, + status: jobExecutionLogs.status, + trigger: jobExecutionLogs.trigger, + startedAt: jobExecutionLogs.startedAt, + endedAt: jobExecutionLogs.endedAt, + totalDurationMs: jobExecutionLogs.totalDurationMs, + cost: jobExecutionLogs.cost, + createdAt: jobExecutionLogs.createdAt, + jobTitle: sql`${jobExecutionLogs.executionData}->'trigger'->>'source'`, + sortValue: sql`${jobSortExpr}`.as('sort_value'), }) - } + .from(jobExecutionLogs) + .where(and(...jobConditions)) + .orderBy(orderByClause(jobSortExpr), dir(jobExecutionLogs.id)) + .limit(fetchSize) + : Promise.resolve([]) - const extractCostSummary = (blockExecutions: any[]) => { - let totalCost = 0 - let totalInputCost = 0 - let totalOutputCost = 0 - let totalTokens = 0 - let totalPromptTokens = 0 - let totalCompletionTokens = 0 - const models = new Map() - - blockExecutions.forEach((block) => { - if (block.cost) { - totalCost += Number(block.cost.total) || 0 - totalInputCost += Number(block.cost.input) || 0 - totalOutputCost += Number(block.cost.output) || 0 - totalTokens += block.cost.tokens?.total || 0 - totalPromptTokens += block.cost.tokens?.prompt || 0 - totalCompletionTokens += block.cost.tokens?.completion || 0 - - if (block.cost.model) { - if (!models.has(block.cost.model)) { - models.set(block.cost.model, { - input: 0, - output: 0, - total: 0, - tokens: { input: 0, output: 0, total: 0 }, - }) - } - const modelCost = models.get(block.cost.model) - modelCost.input += Number(block.cost.input) || 0 - modelCost.output += Number(block.cost.output) || 0 - modelCost.total += Number(block.cost.total) || 0 - modelCost.tokens.input += block.cost.tokens?.input || block.cost.tokens?.prompt || 0 - modelCost.tokens.output += - block.cost.tokens?.output || block.cost.tokens?.completion || 0 - modelCost.tokens.total += block.cost.tokens?.total || 0 - } - } - }) + const [workflowRows, jobRows] = await Promise.all([workflowQuery, jobQuery]) - return { - total: totalCost, - input: totalInputCost, - output: totalOutputCost, - tokens: { - total: totalTokens, - input: totalPromptTokens, - output: totalCompletionTokens, - }, - models: Object.fromEntries(models), - } - } + type RowWithSort = { + id: string + sortValue: unknown + summary: WorkflowLogSummary + } - const transformedWorkflowLogs = workflowLogs.map((log) => { - const blockExecutions = blockExecutionsByExecution[log.executionId] || [] - - let traceSpans = [] - let finalOutput: any - let costSummary = (log.cost as any) || { total: 0 } - - if (params.details === 'full' && log.executionData) { - const storedTraceSpans = (log.executionData as any)?.traceSpans - traceSpans = - storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0 - ? storedTraceSpans - : createTraceSpans(blockExecutions) - - costSummary = - log.cost && Object.keys(log.cost as any).length > 0 - ? (log.cost as any) - : extractCostSummary(blockExecutions) - - try { - const fo = (log.executionData as any)?.finalOutput - if (fo !== undefined) finalOutput = fo - } catch {} - } + const workflowMapped: RowWithSort[] = workflowRows.map((log) => { + const totalPauseCount = Number(log.pausedTotalPauseCount ?? 0) + const resumedCount = Number(log.pausedResumedCount ?? 
0) + const hasPendingPause = + (totalPauseCount > 0 && resumedCount < totalPauseCount) || + (log.pausedStatus !== null && log.pausedStatus !== 'fully_resumed') + + const summary: WorkflowLogSummary = { + id: log.id, + workflowId: log.workflowId, + executionId: log.executionId, + deploymentVersionId: log.deploymentVersionId, + deploymentVersion: log.deploymentVersion ?? null, + deploymentVersionName: log.deploymentVersionName ?? null, + level: log.level, + status: log.status, + duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null, + trigger: log.trigger, + createdAt: log.startedAt.toISOString(), + workflow: log.workflowId + ? { + id: log.workflowId, + name: log.workflowName, + description: log.workflowDescription, + color: log.workflowColor, + folderId: log.workflowFolderId, + userId: log.workflowUserId, + workspaceId: log.workflowWorkspaceId, + createdAt: log.workflowCreatedAt?.toISOString() ?? null, + updatedAt: log.workflowUpdatedAt?.toISOString() ?? null, + } + : null, + jobTitle: null, + cost: (log.cost as WorkflowLogSummary['cost']) ?? null, + pauseSummary: { + status: log.pausedStatus ?? null, + total: totalPauseCount, + resumed: resumedCount, + }, + hasPendingPause, + } + return { id: log.id, sortValue: log.sortValue, summary } + }) + + const jobMapped: RowWithSort[] = (jobRows as Awaited).map((log) => { + const summary: WorkflowLogSummary = { + id: log.id, + workflowId: null, + executionId: log.executionId, + deploymentVersionId: null, + deploymentVersion: null, + deploymentVersionName: null, + level: log.level, + status: log.status, + duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null, + trigger: log.trigger, + createdAt: log.startedAt.toISOString(), + workflow: null, + jobTitle: log.jobTitle ?? null, + cost: (log.cost as WorkflowLogSummary['cost']) ?? null, + pauseSummary: { status: null, total: 0, resumed: 0 }, + hasPendingPause: false, + } + return { id: log.id, sortValue: log.sortValue, summary } + }) + + const compareSortValues = (a: unknown, b: unknown): number => { + if (a instanceof Date && b instanceof Date) return a.getTime() - b.getTime() + if (typeof a === 'number' && typeof b === 'number') return a - b + const aStr = String(a) + const bStr = String(b) + if (sortBy === 'date') { + return new Date(aStr).getTime() - new Date(bStr).getTime() + } + const aNum = Number(aStr) + const bNum = Number(bStr) + if (!Number.isNaN(aNum) && !Number.isNaN(bNum)) return aNum - bNum + return aStr.localeCompare(bStr) + } - const workflowSummary = log.workflowId - ? { - id: log.workflowId, - name: log.workflowName, - description: log.workflowDescription, - color: log.workflowColor, - folderId: log.workflowFolderId, - userId: log.workflowUserId, - workspaceId: log.workflowWorkspaceId, - createdAt: log.workflowCreatedAt, - updatedAt: log.workflowUpdatedAt, - } - : null - - return { - id: log.id, - workflowId: log.workflowId, - executionId: log.executionId, - deploymentVersionId: log.deploymentVersionId, - deploymentVersion: log.deploymentVersion ?? null, - deploymentVersionName: log.deploymentVersionName ?? null, - level: log.level, - status: log.status, - duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null, - trigger: log.trigger, - createdAt: log.startedAt.toISOString(), - files: params.details === 'full' ? log.files || undefined : undefined, - workflow: workflowSummary, - pauseSummary: { - status: log.pausedStatus ?? null, - total: log.pausedTotalPauseCount ?? 0, - resumed: log.pausedResumedCount ?? 
0, - }, - executionData: - params.details === 'full' - ? { - totalDuration: log.totalDurationMs, - traceSpans, - blockExecutions, - finalOutput, - enhanced: true, - } - : undefined, - cost: - params.details === 'full' - ? (costSummary as any) - : { total: (costSummary as any)?.total || 0 }, - hasPendingPause: - (Number(log.pausedTotalPauseCount ?? 0) > 0 && - Number(log.pausedResumedCount ?? 0) < Number(log.pausedTotalPauseCount ?? 0)) || - (log.pausedStatus && log.pausedStatus !== 'fully_resumed'), - } - }) - - // Transform job logs to the same shape - const transformedJobLogs = jobLogs.map((log) => { - const execData = log.executionData as any - const costSummary = (log.cost as any) || { total: 0 } - - return { - id: log.id, - workflowId: null as string | null, - executionId: log.executionId, - deploymentVersionId: null as string | null, - deploymentVersion: null as number | null, - deploymentVersionName: null as string | null, - level: log.level, - status: log.status, - duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null, - trigger: log.trigger, - createdAt: log.startedAt.toISOString(), - files: undefined as any, - workflow: null as any, - jobTitle: log.jobTitle, - pauseSummary: { - status: null as string | null, - total: 0, - resumed: 0, - }, - executionData: - params.details === 'full' && execData - ? { - totalDuration: log.totalDurationMs, - traceSpans: execData.traceSpans || [], - blockExecutions: [], - finalOutput: execData.finalOutput, - enhanced: true, - trigger: execData.trigger, - } - : undefined, - cost: params.details === 'full' ? costSummary : { total: costSummary?.total || 0 }, - hasPendingPause: false, - } - }) - - // Merge, sort by createdAt (which is startedAt ISO string) desc, paginate - const allLogs = [...transformedWorkflowLogs, ...transformedJobLogs] - .sort((a, b) => new Date(b.createdAt).getTime() - new Date(a.createdAt).getTime()) - .slice(params.offset, params.offset + params.limit) - - return NextResponse.json( - { - data: allLogs, - total: totalCount, - page: Math.floor(params.offset / params.limit) + 1, - pageSize: params.limit, - totalPages: Math.ceil(totalCount / params.limit), - }, - { status: 200 } - ) - } catch (validationError) { - if (isZodError(validationError)) { - logger.warn(`[${requestId}] Invalid logs request parameters`, { - errors: validationError.issues, - }) - return NextResponse.json( - { - error: 'Invalid request parameters', - details: validationError.issues, - }, - { status: 400 } - ) - } - throw validationError + const merged = [...workflowMapped, ...jobMapped].sort((a, b) => { + const aNull = a.sortValue === null || a.sortValue === undefined + const bNull = b.sortValue === null || b.sortValue === undefined + // Mirror SQL's NULLS LAST for both ASC and DESC so the cursor stays consistent. + if (aNull && !bNull) return 1 + if (!aNull && bNull) return -1 + if (!aNull && !bNull) { + const cmp = compareSortValues(a.sortValue, b.sortValue) + if (cmp !== 0) return sortOrder === 'asc' ? cmp : -cmp } - } catch (error: any) { - logger.error(`[${requestId}] logs fetch error`, error) - return NextResponse.json({ error: error.message }, { status: 500 }) + const idCmp = a.id.localeCompare(b.id) + return sortOrder === 'asc' ? idCmp : -idCmp + }) + + const page = merged.slice(0, params.limit) + const hasMore = merged.length > params.limit + let nextCursor: string | null = null + if (hasMore && page.length > 0) { + const last = page[page.length - 1] + const v = last.sortValue + const cursorV = + v instanceof Date + ? 
v.toISOString() + : typeof v === 'number' || typeof v === 'string' + ? v + : v == null + ? null + : String(v) + nextCursor = encodeCursor({ v: cursorV, id: last.id }) } + + logger.debug('Listed logs', { + workspaceId: params.workspaceId, + count: page.length, + hasMore, + sortBy, + sortOrder, + }) + + return NextResponse.json({ + data: page.map((row) => row.summary), + nextCursor, + }) }) diff --git a/apps/sim/app/api/mothership/events/route.ts b/apps/sim/app/api/mothership/events/route.ts index 420f2be3fdb..bb3e1f278c8 100644 --- a/apps/sim/app/api/mothership/events/route.ts +++ b/apps/sim/app/api/mothership/events/route.ts @@ -27,6 +27,7 @@ const mothershipEventsHandler = createWorkspaceSSE({ send('task_status', { chatId: event.chatId, type: event.type, + ...(event.streamId ? { streamId: event.streamId } : {}), timestamp: Date.now(), }) }) diff --git a/apps/sim/app/api/table/[tableId]/rows/route.ts b/apps/sim/app/api/table/[tableId]/rows/route.ts index 8c69ef55a38..9b0076a127d 100644 --- a/apps/sim/app/api/table/[tableId]/rows/route.ts +++ b/apps/sim/app/api/table/[tableId]/rows/route.ts @@ -30,7 +30,7 @@ import { validateRowData, validateRowSize, } from '@/lib/table' -import { buildFilterClause, buildSortClause } from '@/lib/table/sql' +import { buildFilterClause, buildSortClause, TableQueryValidationError } from '@/lib/table/sql' import { accessError, checkAccess } from '@/app/api/table/utils' const logger = createLogger('TableRowsAPI') @@ -336,6 +336,10 @@ export const GET = withRouteHandler( return validationErrorResponse(error) } + if (error instanceof TableQueryValidationError) { + return NextResponse.json({ error: error.message }, { status: 400 }) + } + logger.error(`[${requestId}] Error querying rows:`, error) return NextResponse.json({ error: 'Failed to query rows' }, { status: 500 }) } @@ -421,6 +425,10 @@ export const PUT = withRouteHandler( return validationErrorResponse(error) } + if (error instanceof TableQueryValidationError) { + return NextResponse.json({ error: error.message }, { status: 400 }) + } + const errorMessage = toError(error).message if ( @@ -520,6 +528,10 @@ export const DELETE = withRouteHandler( return validationErrorResponse(error) } + if (error instanceof TableQueryValidationError) { + return NextResponse.json({ error: error.message }, { status: 400 }) + } + const errorMessage = toError(error).message if (errorMessage.includes('Filter is required')) { diff --git a/apps/sim/app/api/v1/tables/[tableId]/rows/route.ts b/apps/sim/app/api/v1/tables/[tableId]/rows/route.ts index a6bb5613cad..d4d9c448837 100644 --- a/apps/sim/app/api/v1/tables/[tableId]/rows/route.ts +++ b/apps/sim/app/api/v1/tables/[tableId]/rows/route.ts @@ -30,7 +30,7 @@ import { validateRowData, validateRowSize, } from '@/lib/table' -import { buildFilterClause, buildSortClause } from '@/lib/table/sql' +import { buildFilterClause, buildSortClause, TableQueryValidationError } from '@/lib/table/sql' import { accessError, checkAccess } from '@/app/api/table/utils' import { checkRateLimit, @@ -240,6 +240,10 @@ export const GET = withRouteHandler(async (request: NextRequest, context: TableR const validationResponse = validationErrorResponseFromError(error) if (validationResponse) return validationResponse + if (error instanceof TableQueryValidationError) { + return NextResponse.json({ error: error.message }, { status: 400 }) + } + logger.error(`[${requestId}] Error querying rows:`, error) return NextResponse.json({ error: 'Failed to query rows' }, { status: 500 }) } @@ -407,6 +411,10 @@ export const 
PUT = withRouteHandler(async (request: NextRequest, context: TableR const validationResponse = validationErrorResponseFromError(error) if (validationResponse) return validationResponse + if (error instanceof TableQueryValidationError) { + return NextResponse.json({ error: error.message }, { status: 400 }) + } + const errorMessage = toError(error).message if ( @@ -500,6 +508,10 @@ export const DELETE = withRouteHandler( const validationResponse = validationErrorResponseFromError(error) if (validationResponse) return validationResponse + if (error instanceof TableQueryValidationError) { + return NextResponse.json({ error: error.message }, { status: 400 }) + } + const errorMessage = toError(error).message if (errorMessage.includes('Filter is required')) { diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/chat-message-attachments.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/chat-message-attachments.tsx index e39d3a0dd37..d430fb14e61 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/chat-message-attachments.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/chat-message-attachments.tsx @@ -30,13 +30,36 @@ export function ChatMessageAttachments(props: { )} > {attachments.map((att) => { - const isImage = att.media_type.startsWith('image/') - return isImage && att.previewUrl ? ( + if (!att.previewUrl) { + return ( + + ) + } + const isVideo = att.media_type.startsWith('video/') + if (isVideo) { + const Icon = getDocumentIcon(att.media_type, att.filename) + return ( +
+
+ +
+
+ ) + } + return (
{att.filename}
- ) : ( - ) })} diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx index bfa032ee2bb..89a93999b61 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/message-content/components/special-tags/special-tags.tsx @@ -2,7 +2,13 @@ import { createElement, useMemo, useState } from 'react' import { useParams } from 'next/navigation' -import { ArrowRight, ChevronDown, Expandable, ExpandableContent } from '@/components/emcn' +import { + ArrowRight, + ChevronDown, + Expandable, + ExpandableContent, + SecretReveal, +} from '@/components/emcn' import { cn } from '@/lib/core/utils/cn' import { OAUTH_PROVIDERS } from '@/lib/oauth/oauth' import { ContextMentionIcon } from '@/app/workspace/[workspaceId]/home/components/context-mention-icon' @@ -47,9 +53,10 @@ export const CREDENTIAL_TAG_TYPES = [ export type CredentialTagType = (typeof CREDENTIAL_TAG_TYPES)[number] export interface CredentialTagData { - value: string + value?: string type: CredentialTagType provider?: string + redacted?: boolean } export interface MothershipErrorTagData { @@ -140,12 +147,15 @@ function isUsageUpgradeTagData(value: unknown): value is UsageUpgradeTagData { function isCredentialTagData(value: unknown): value is CredentialTagData { if (!isRecord(value)) return false - return ( - typeof value.value === 'string' && - typeof value.type === 'string' && - (CREDENTIAL_TAG_TYPES as readonly string[]).includes(value.type) && - (value.provider === undefined || typeof value.provider === 'string') - ) + if ( + typeof value.type !== 'string' || + !(CREDENTIAL_TAG_TYPES as readonly string[]).includes(value.type) + ) { + return false + } + if (value.provider !== undefined && typeof value.provider !== 'string') return false + if (value.redacted === true) return value.value === undefined || typeof value.value === 'string' + return typeof value.value === 'string' } function isMothershipErrorTagData(value: unknown): value is MothershipErrorTagData { @@ -595,24 +605,30 @@ const LockIcon = (props: { className?: string }) => ( ) function CredentialDisplay({ data }: { data: CredentialTagData }) { - if (data.type !== 'link' || !data.provider) return null + if (data.type === 'link') { + if (!data.provider) return null + const Icon = getCredentialIcon(data.provider) ?? LockIcon + return ( + + {createElement(Icon, { className: 'h-[16px] w-[16px] shrink-0' })} + + Connect {data.provider} + + + + ) + } - const Icon = getCredentialIcon(data.provider) ?? 
LockIcon + if (data.type === 'sim_key') { + return + } - return ( - - {createElement(Icon, { className: 'h-[16px] w-[16px] shrink-0' })} - - Connect {data.provider} - - - - ) + return null } function MothershipErrorDisplay({ data }: { data: MothershipErrorTagData }) { diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/add-resource-dropdown/add-resource-dropdown.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/add-resource-dropdown/add-resource-dropdown.tsx index 302b3bce4c2..d51078e47af 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/add-resource-dropdown/add-resource-dropdown.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/add-resource-dropdown/add-resource-dropdown.tsx @@ -59,6 +59,8 @@ const LOG_DROPDOWN_FILTERS = { triggers: [] as string[], searchQuery: '', limit: LOG_DROPDOWN_LIMIT, + sortBy: 'date' as const, + sortOrder: 'desc' as const, } export function useAvailableResources( diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/resource-content/resource-content.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/resource-content/resource-content.tsx index 46f78e1f89e..e793991957c 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/resource-content/resource-content.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/components/resource-content/resource-content.tsx @@ -1,6 +1,6 @@ 'use client' -import { lazy, memo, Suspense, useEffect, useMemo } from 'react' +import { lazy, memo, Suspense, useEffect, useMemo, useRef } from 'react' import { createLogger } from '@sim/logger' import { Square } from 'lucide-react' import { useRouter } from 'next/navigation' @@ -13,6 +13,7 @@ import { SquareArrowUpRight, WorkflowX, } from '@/components/emcn/icons' +import { isApiClientError } from '@/lib/api/client/errors' import type { FilePreviewSession } from '@/lib/copilot/request/session' import { cancelRunToolExecution, @@ -70,6 +71,7 @@ interface ResourceContentProps { previewSession?: FilePreviewSession | null genericResourceData?: GenericResourceData previewContextKey?: string + onNotFound?: (resourceId: string) => void } /** @@ -86,6 +88,7 @@ export const ResourceContent = memo(function ResourceContent({ previewSession, genericResourceData, previewContextKey, + onNotFound, }: ResourceContentProps) { const streamFileName = previewSession?.fileName || 'file.md' const syntheticFile = useMemo(() => { @@ -179,7 +182,14 @@ export const ResourceContent = memo(function ResourceContent({ return case 'log': - return + return ( + onNotFound(resource.id) : undefined} + /> + ) case 'generic': return ( @@ -617,11 +627,22 @@ function EmbeddedFolder({ workspaceId, folderId }: EmbeddedFolderProps) { } interface EmbeddedLogProps { + workspaceId: string logId: string + onNotFound?: () => void } -function EmbeddedLog({ logId }: EmbeddedLogProps) { - const { data: log, isLoading } = useLogDetail(logId) +function EmbeddedLog({ workspaceId, logId, onNotFound }: EmbeddedLogProps) { + const { data: log, isLoading, error } = useLogDetail(logId, workspaceId) + + const onNotFoundRef = useRef(onNotFound) + onNotFoundRef.current = onNotFound + + useEffect(() => { + if (isApiClientError(error) && error.status === 404) { + onNotFoundRef.current?.() + } + }, [error]) if (isLoading) return LOADING_SKELETON @@ -653,7 +674,7 @@ 
interface EmbeddedLogActionsProps { export function EmbeddedLogActions({ workspaceId, logId }: EmbeddedLogActionsProps) { const router = useRouter() - const { data: log } = useLogDetail(logId) + const { data: log } = useLogDetail(logId, workspaceId) const handleOpenInLogs = () => { const param = log?.executionId ? `?executionId=${log.executionId}` : '' diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/mothership-view.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/mothership-view.tsx index fcfb08ff948..4eb7227c850 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/mothership-view.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/mothership-view/mothership-view.tsx @@ -128,6 +128,7 @@ export const MothershipView = memo( previewSession={previewForActive} genericResourceData={active.type === 'generic' ? genericResourceData : undefined} previewContextKey={chatId} + onNotFound={(resourceId) => onRemoveResource('log', resourceId)} /> ) : (
diff --git a/apps/sim/app/workspace/[workspaceId]/home/components/user-input/components/attached-files-list.tsx b/apps/sim/app/workspace/[workspaceId]/home/components/user-input/components/attached-files-list.tsx index 6046107e6d6..3dc97d208ac 100644 --- a/apps/sim/app/workspace/[workspaceId]/home/components/user-input/components/attached-files-list.tsx +++ b/apps/sim/app/workspace/[workspaceId]/home/components/user-input/components/attached-files-list.tsx @@ -22,7 +22,8 @@ export const AttachedFilesList = React.memo(function AttachedFilesList({ return (
{attachedFiles.map((file) => { - const isImage = file.type.startsWith('image/') + const isVideo = file.type.startsWith('video/') + const hasPreview = Boolean(file.previewUrl) return ( @@ -30,7 +31,23 @@ export const AttachedFilesList = React.memo(function AttachedFilesList({ className='group relative h-[56px] w-[56px] flex-shrink-0 cursor-pointer overflow-hidden rounded-[8px] border border-[var(--border-1)] bg-[var(--surface-5)] hover:bg-[var(--surface-4)]' onClick={() => onFileClick(file)} > - {isImage && file.previewUrl ? ( + {hasPreview && isVideo ? ( + <> +
+ {(() => { + const Icon = getDocumentIcon(file.type, file.name) + return + })()} +
+
- { - activeTabRef.current = tab - onActiveTabChange?.(tab) - }} - /> + )} diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx index 0ce7aafa9f2..d8435907db8 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/log-row-context-menu/log-row-context-menu.tsx @@ -15,13 +15,13 @@ import { SquareArrowUpRight, X, } from '@/components/emcn' -import type { WorkflowLog } from '@/stores/logs/filters/types' +import type { WorkflowLogSummary } from '@/lib/api/contracts/logs' interface LogRowContextMenuProps { isOpen: boolean position: { x: number; y: number } onClose: () => void - log: WorkflowLog | null + log: WorkflowLogSummary | null onCopyExecutionId: () => void onCopyLink: () => void onOpenWorkflow: () => void diff --git a/apps/sim/app/workspace/[workspaceId]/logs/components/logs-list/logs-list.tsx b/apps/sim/app/workspace/[workspaceId]/logs/components/logs-list/logs-list.tsx index e8dd1d912be..7bf398a99ca 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/components/logs-list/logs-list.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/components/logs-list/logs-list.tsx @@ -6,6 +6,7 @@ import { ArrowUpRight } from 'lucide-react' import Link from 'next/link' import { List, type RowComponentProps, useListRef } from 'react-window' import { Badge, buttonVariants, Loader } from '@/components/emcn' +import type { WorkflowLogSummary } from '@/lib/api/contracts/logs' import { dollarsToCredits } from '@/lib/billing/credits/conversion' import { cn } from '@/lib/core/utils/cn' import { workflowBorderColor } from '@/lib/workspaces/colors' @@ -18,16 +19,15 @@ import { StatusBadge, TriggerBadge, } from '@/app/workspace/[workspaceId]/logs/utils' -import type { WorkflowLog } from '@/stores/logs/filters/types' const LOG_ROW_HEIGHT = 44 as const interface LogRowProps { - log: WorkflowLog + log: WorkflowLogSummary isSelected: boolean - onClick: (log: WorkflowLog) => void - onHover?: (log: WorkflowLog) => void - onContextMenu?: (e: React.MouseEvent, log: WorkflowLog) => void + onClick: (log: WorkflowLogSummary) => void + onHover?: (log: WorkflowLogSummary) => void + onContextMenu?: (e: React.MouseEvent, log: WorkflowLogSummary) => void selectedRowRef: React.RefObject | null } @@ -56,7 +56,7 @@ const LogRow = memo( ? '#ec4899' : isDeletedWorkflow ? DELETED_WORKFLOW_COLOR - : log.workflow?.color + : (log.workflow?.color ?? 
undefined) const handleClick = () => onClick(log) const handleMouseEnter = () => onHover?.(log) @@ -164,11 +164,11 @@ const LogRow = memo( ) interface RowProps { - logs: WorkflowLog[] + logs: WorkflowLogSummary[] selectedLogId: string | null - onLogClick: (log: WorkflowLog) => void - onLogHover?: (log: WorkflowLog) => void - onLogContextMenu?: (e: React.MouseEvent, log: WorkflowLog) => void + onLogClick: (log: WorkflowLogSummary) => void + onLogHover?: (log: WorkflowLogSummary) => void + onLogContextMenu?: (e: React.MouseEvent, log: WorkflowLogSummary) => void selectedRowRef: React.RefObject isFetchingNextPage: boolean loaderRef: React.RefObject @@ -225,11 +225,11 @@ function Row({ } export interface LogsListProps { - logs: WorkflowLog[] + logs: WorkflowLogSummary[] selectedLogId: string | null - onLogClick: (log: WorkflowLog) => void - onLogHover?: (log: WorkflowLog) => void - onLogContextMenu?: (e: React.MouseEvent, log: WorkflowLog) => void + onLogClick: (log: WorkflowLogSummary) => void + onLogHover?: (log: WorkflowLogSummary) => void + onLogContextMenu?: (e: React.MouseEvent, log: WorkflowLogSummary) => void selectedRowRef: React.RefObject hasNextPage: boolean isFetchingNextPage: boolean diff --git a/apps/sim/app/workspace/[workspaceId]/logs/logs.tsx b/apps/sim/app/workspace/[workspaceId]/logs/logs.tsx index de2dce93250..5c3b3b0af66 100644 --- a/apps/sim/app/workspace/[workspaceId]/logs/logs.tsx +++ b/apps/sim/app/workspace/[workspaceId]/logs/logs.tsx @@ -16,6 +16,11 @@ import { RefreshCw, toast, } from '@/components/emcn' +import type { + WorkflowLogDetail, + WorkflowLogRow, + WorkflowLogSummary, +} from '@/lib/api/contracts/logs' import { dollarsToCredits } from '@/lib/billing/credits/conversion' import { cn } from '@/lib/core/utils/cn' import { @@ -50,12 +55,14 @@ import type { Suggestion } from '@/app/workspace/[workspaceId]/logs/types' import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider' import { getBlock } from '@/blocks/registry' import { useFolderMap, useFolders } from '@/hooks/queries/folders' +import type { LogSortBy, LogSortOrder } from '@/hooks/queries/logs' import { fetchLogDetail, logKeys, prefetchLogDetail, useCancelExecution, useDashboardStats, + useLogByExecutionId, useLogDetail, useLogsList, useRetryExecution, @@ -63,7 +70,6 @@ import { import { useWorkflowMap, useWorkflows } from '@/hooks/queries/workflows' import { useDebounce } from '@/hooks/use-debounce' import { useFilterStore } from '@/stores/logs/filters/store' -import type { WorkflowLog } from '@/stores/logs/filters/types' import { CORE_TRIGGER_TYPES } from '@/stores/logs/filters/types' import { Dashboard, @@ -86,6 +92,7 @@ import { } from './utils' const LOGS_PER_PAGE = 50 as const +const SORTABLE_COLUMNS: readonly LogSortBy[] = ['date', 'duration', 'cost', 'status'] as const const REFRESH_SPINNER_DURATION_MS = 1000 as const const LOG_COLUMNS: ResourceColumn[] = [ @@ -214,6 +221,11 @@ export default function Logs() { const params = useParams() const workspaceId = params.workspaceId as string + useState(() => { + useFilterStore.getState().initializeFromURL() + return null + }) + const { setWorkspaceId, initializeFromURL, @@ -268,14 +280,11 @@ export default function Logs() { selectedLogId: null, isSidebarOpen: false, }) - const isInitialized = useRef(false) - const pendingExecutionIdRef = useRef(undefined) - if (pendingExecutionIdRef.current === undefined) { - pendingExecutionIdRef.current = - typeof window !== 'undefined' - ? 
new URLSearchParams(window.location.search).get('executionId') - : null - } + const [pendingExecutionId, setPendingExecutionId] = useState(() => + typeof window !== 'undefined' + ? new URLSearchParams(window.location.search).get('executionId') + : null + ) const [searchQuery, setSearchQuery] = useState(() => { if (typeof window === 'undefined') return '' @@ -287,7 +296,7 @@ export default function Logs() { const [isVisuallyRefreshing, setIsVisuallyRefreshing] = useState(false) const [isExporting, setIsExporting] = useState(false) const refreshTimersRef = useRef(new Set()) - const logsRef = useRef([]) + const logsRef = useRef([]) const selectedLogIndexRef = useRef(-1) const selectedLogIdRef = useRef(null) const shouldScrollIntoViewRef = useRef(false) @@ -304,21 +313,35 @@ export default function Logs() { const [contextMenuOpen, setContextMenuOpen] = useState(false) const [contextMenuPosition, setContextMenuPosition] = useState({ x: 0, y: 0 }) - const [contextMenuLog, setContextMenuLog] = useState(null) + const [contextMenuLog, setContextMenuLog] = useState(null) const [previewLogId, setPreviewLogId] = useState(null) - const activeLogId = previewLogId ?? selectedLogId const queryClient = useQueryClient() - const activeLogQuery = useLogDetail(activeLogId ?? undefined, { - refetchInterval: (query: { state: { data?: WorkflowLog } }) => { + const refetchInterval = useCallback( + (query: { state: { data?: WorkflowLogDetail } }) => { if (!isLive) return false const status = query.state.data?.status return status === 'running' || status === 'pending' ? 3000 : false }, + [isLive] + ) + + const selectedDetailQuery = useLogDetail(selectedLogId ?? undefined, workspaceId, { + refetchInterval, }) + const previewDetailQuery = useLogDetail(previewLogId ?? undefined, workspaceId, { + refetchInterval, + }) + + const sortBy: LogSortBy = + activeSort && SORTABLE_COLUMNS.includes(activeSort.column as LogSortBy) + ? (activeSort.column as LogSortBy) + : 'date' + const sortOrder: LogSortOrder = activeSort?.direction ?? 'desc' + const logFilters = useMemo( () => ({ timeRange, @@ -330,12 +353,24 @@ export default function Logs() { triggers, searchQuery: debouncedSearchQuery, limit: LOGS_PER_PAGE, + sortBy, + sortOrder, }), - [timeRange, startDate, endDate, level, workflowIds, folderIds, triggers, debouncedSearchQuery] + [ + timeRange, + startDate, + endDate, + level, + workflowIds, + folderIds, + triggers, + debouncedSearchQuery, + sortBy, + sortOrder, + ] ) const logsQuery = useLogsList(workspaceId, logFilters, { - enabled: Boolean(workspaceId) && isInitialized.current, refetchInterval: isLive ? 3000 : false, }) @@ -354,7 +389,6 @@ export default function Logs() { ) const dashboardStatsQuery = useDashboardStats(workspaceId, dashboardFilters, { - enabled: Boolean(workspaceId) && isInitialized.current, refetchInterval: isLive ? 3000 : false, }) @@ -362,80 +396,42 @@ export default function Logs() { return logsQuery.data?.pages?.flatMap((page) => page.logs) ?? [] }, [logsQuery.data?.pages]) - const sortedLogs = useMemo(() => { - if (!activeSort) return logs - - const { column, direction } = activeSort - return [...logs].sort((a, b) => { - let cmp = 0 - switch (column) { - case 'date': - cmp = new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime() - break - case 'duration': { - const aDuration = parseDuration({ duration: a.duration ?? undefined }) ?? -1 - const bDuration = parseDuration({ duration: b.duration ?? undefined }) ?? 
-1 - cmp = aDuration - bDuration - break - } - case 'cost': { - const aCost = typeof a.cost?.total === 'number' ? a.cost.total : -1 - const bCost = typeof b.cost?.total === 'number' ? b.cost.total : -1 - cmp = aCost - bCost - break - } - case 'status': - cmp = (a.status ?? '').localeCompare(b.status ?? '') - break - default: - break - } - return direction === 'asc' ? cmp : -cmp - }) - }, [logs, activeSort]) - - const selectedLogIndex = selectedLogId ? sortedLogs.findIndex((l) => l.id === selectedLogId) : -1 - const selectedLogFromList = selectedLogIndex >= 0 ? sortedLogs[selectedLogIndex] : null - - const selectedLog = useMemo(() => { - if (!selectedLogFromList) return null - if (!activeLogQuery.data || previewLogId !== null) return selectedLogFromList - return { ...selectedLogFromList, ...activeLogQuery.data } - }, [selectedLogFromList, activeLogQuery.data, previewLogId]) + const selectedLogIndex = selectedLogId ? logs.findIndex((l) => l.id === selectedLogId) : -1 + const selectedLogFromList = selectedLogIndex >= 0 ? logs[selectedLogIndex] : null + const selectedLog = selectedDetailQuery.data ?? selectedLogFromList ?? null const handleLogHover = useCallback( (rowId: string) => { - prefetchLogDetail(queryClient, rowId) + prefetchLogDetail(queryClient, rowId, workspaceId) }, - [queryClient] + [queryClient, workspaceId] ) useFolders(workspaceId) - logsRef.current = sortedLogs + logsRef.current = logs selectedLogIndexRef.current = selectedLogIndex selectedLogIdRef.current = selectedLogId logsRefetchRef.current = logsQuery.refetch - activeLogRefetchRef.current = activeLogQuery.refetch + activeLogRefetchRef.current = selectedDetailQuery.refetch logsQueryRef.current = { isFetching: logsQuery.isFetching, hasNextPage: logsQuery.hasNextPage ?? false, fetchNextPage: logsQuery.fetchNextPage, } + const deepLinkQuery = useLogByExecutionId(workspaceId, pendingExecutionId) + useEffect(() => { - if (!pendingExecutionIdRef.current) return - const targetExecutionId = pendingExecutionIdRef.current - const found = sortedLogs.find((l) => l.executionId === targetExecutionId) - if (found) { - pendingExecutionIdRef.current = null - dispatch({ type: 'TOGGLE_LOG', logId: found.id }) - } else if (!logsQuery.hasNextPage && logsQuery.status === 'success') { - pendingExecutionIdRef.current = null - } else if (!logsQuery.isFetching && logsQuery.status === 'success') { - logsQueryRef.current.fetchNextPage() + if (!pendingExecutionId) return + const resolvedId = deepLinkQuery.data?.id + if (resolvedId) { + dispatch({ type: 'TOGGLE_LOG', logId: resolvedId }) + setPendingExecutionId(null) + } else if (deepLinkQuery.isError) { + setPendingExecutionId(null) } - }, [sortedLogs, logsQuery.hasNextPage, logsQuery.isFetching, logsQuery.status]) + }, [pendingExecutionId, deepLinkQuery.data, deepLinkQuery.isError]) useEffect(() => { const timers = refreshTimersRef.current @@ -446,9 +442,7 @@ export default function Logs() { }, []) useEffect(() => { - if (isInitialized.current) { - setStoreSearchQuery(debouncedSearchQuery) - } + setStoreSearchQuery(debouncedSearchQuery) }, [debouncedSearchQuery, setStoreSearchQuery]) const handleLogClick = useCallback((rowId: string) => { @@ -458,7 +452,7 @@ export default function Logs() { const handleNavigateNext = useCallback(() => { const idx = selectedLogIndexRef.current const currentLogs = logsRef.current - if (idx < currentLogs.length - 1) { + if (idx >= 0 && idx < currentLogs.length - 1) { shouldScrollIntoViewRef.current = true dispatch({ type: 'SELECT_LOG', logId: currentLogs[idx + 1].id }) 
} @@ -484,12 +478,12 @@ export default function Logs() { const handleLogContextMenu = useCallback( (e: React.MouseEvent, rowId: string) => { e.preventDefault() - const log = sortedLogs.find((l) => l.id === rowId) ?? null + const log = logs.find((l) => l.id === rowId) ?? null setContextMenuPosition({ x: e.clientX, y: e.clientY }) setContextMenuLog(log) setContextMenuOpen(true) }, - [sortedLogs] + [logs] ) const handleCopyExecutionId = useCallback(() => { @@ -547,7 +541,7 @@ export default function Logs() { }, [contextMenuLog]) const retryLog = useCallback( - async (log: WorkflowLog | null) => { + async (log: WorkflowLogRow | null) => { const workflowId = log?.workflow?.id || log?.workflowId const logId = log?.id if (!workflowId || !logId) return @@ -555,7 +549,7 @@ export default function Logs() { try { const detailLog = await queryClient.fetchQuery({ queryKey: logKeys.detail(logId), - queryFn: ({ signal }) => fetchLogDetail(logId, signal), + queryFn: ({ signal }) => fetchLogDetail(logId, workspaceId, signal), staleTime: 30 * 1000, }) const input = extractRetryInput(detailLog) @@ -600,7 +594,8 @@ export default function Logs() { } }, [selectedLogId, selectedLogIndex]) - const effectiveSidebarOpen = isSidebarOpen && selectedLogIndex !== -1 + const effectiveSidebarOpen = + isSidebarOpen && (selectedLogIndex !== -1 || !!selectedDetailQuery.data) const triggerVisualRefresh = useCallback(() => { setIsVisuallyRefreshing(true) @@ -676,13 +671,6 @@ export default function Logs() { debouncedSearchQuery, ]) - useEffect(() => { - if (!isInitialized.current) { - isInitialized.current = true - initializeFromURL() - } - }, [initializeFromURL]) - useEffect(() => { const handlePopState = () => { initializeFromURL() @@ -695,12 +683,11 @@ export default function Logs() { }, [initializeFromURL]) const loadMoreLogs = useCallback(() => { - if (activeSort) return const { isFetching, hasNextPage, fetchNextPage } = logsQueryRef.current if (!isFetching && hasNextPage) { fetchNextPage() } - }, [activeSort]) + }, []) useEffect(() => { const handleKeyDown = (e: KeyboardEvent) => { @@ -753,7 +740,7 @@ export default function Logs() { const rows: ResourceRow[] = useMemo( () => - sortedLogs.map((log) => { + logs.map((log) => { const formattedDate = formatDate(log.createdAt) const displayStatus = getDisplayStatus(log.status) const isMothershipJob = log.trigger === 'mothership' @@ -804,7 +791,7 @@ export default function Logs() { }, } }), - [sortedLogs] + [logs] ) const sidebarOverlay = ( @@ -814,7 +801,7 @@ export default function Logs() { onClose={handleCloseSidebar} onNavigateNext={handleNavigateNext} onNavigatePrev={handleNavigatePrev} - hasNext={selectedLogIndex < sortedLogs.length - 1} + hasNext={selectedLogIndex >= 0 && selectedLogIndex < logs.length - 1} hasPrev={selectedLogIndex > 0} onRetryExecution={handleRetrySidebarExecution} isRetryPending={retryExecution.isPending} @@ -1121,7 +1108,7 @@ export default function Logs() { label: 'Export', icon: Download, onClick: handleExport, - disabled: !userPermissions.canEdit || isExporting || sortedLogs.length === 0, + disabled: !userPermissions.canEdit || isExporting || logs.length === 0, }, { label: 'Notifications', @@ -1154,7 +1141,7 @@ export default function Logs() { handleExport, userPermissions.canEdit, isExporting, - sortedLogs.length, + logs.length, handleOpenNotificationSettings, ] ) @@ -1192,7 +1179,7 @@ export default function Logs() { onRowContextMenu={handleLogContextMenu} isLoading={!logsQuery.data} onLoadMore={loadMoreLogs} - hasMore={!activeSort && 
(logsQuery.hasNextPage ?? false)} + hasMore={logsQuery.hasNextPage ?? false} isLoadingMore={logsQuery.isFetchingNextPage} emptyMessage='No logs found' overlay={sidebarOverlay} @@ -1224,10 +1211,10 @@ export default function Logs() { hasActiveFilters={filtersActive} /> - {previewLogId !== null && activeLogQuery.data?.executionId && ( + {previewLogId !== null && previewDetailQuery.data?.executionId && ( - status?: string - error?: unknown -} - -interface BlockExecution { - outputData?: unknown - errorMessage?: string -} - -interface LogWithExecutionData { - executionData?: { - finalOutput?: unknown - traceSpans?: TraceSpan[] - blockExecutions?: BlockExecution[] - output?: unknown - } - output?: string - message?: string -} - -/** - * Extract output from various sources in execution data. - * Checks multiple locations in priority order: - * 1. executionData.finalOutput - * 2. output (as string) - * 3. executionData.traceSpans (iterates through spans) - * 4. executionData.blockExecutions (last block) - * 5. message (fallback) - * @param log - Log object containing execution data - * @returns Extracted output value or null - */ -export function extractOutput(log: LogWithExecutionData): unknown { - let output: unknown = null - - // Check finalOutput first - if (log.executionData?.finalOutput !== undefined) { - output = log.executionData.finalOutput - } - - // Check direct output field - if (typeof log.output === 'string') { - output = log.output - } else if (log.executionData?.traceSpans && Array.isArray(log.executionData.traceSpans)) { - // Search through trace spans - const spans = log.executionData.traceSpans - for (let i = spans.length - 1; i >= 0; i--) { - const s = spans[i] - if (s?.output && Object.keys(s.output).length > 0) { - output = s.output - break - } - const outputWithError = s?.output as Record | undefined - if (s?.status === 'error' && (outputWithError?.error || s?.error)) { - output = outputWithError?.error || s.error - break - } - } - // Fallback to executionData.output - if (!output && log.executionData?.output) { - output = log.executionData.output - } - } - - // Check block executions - if (!output) { - const blockExecutions = log.executionData?.blockExecutions - if (Array.isArray(blockExecutions) && blockExecutions.length > 0) { - const lastBlock = blockExecutions[blockExecutions.length - 1] - output = lastBlock?.outputData || lastBlock?.errorMessage || null - } - } - - // Final fallback to message - if (!output) { - output = log.message || null - } - - return output -} - -/** Execution log cost breakdown */ -interface ExecutionCost { - input: number - output: number - total: number -} - -/** Mapped execution log format for UI consumption */ -export interface ExecutionLog { - id: string - executionId: string - startedAt: string - level: string - status: string - trigger: string - triggerUserId: string | null - triggerInputs?: unknown - outputs?: unknown - errorMessage: string | null - duration: number | null - cost: ExecutionCost | null - workflowName?: string - workflowColor?: string - hasPendingPause?: boolean -} - -/** Raw API log response structure */ -interface RawLogResponse extends LogWithDuration, LogWithExecutionData { - id: string - executionId: string - startedAt?: string - endedAt?: string - createdAt?: string - level?: string - status?: string - trigger?: string - triggerUserId?: string | null - error?: string - cost?: { - input?: number - output?: number - total?: number - } - workflowName?: string - workflowColor?: string - workflow?: { - name?: string - 
color?: string - } - hasPendingPause?: boolean -} - -/** - * Convert raw API log response to ExecutionLog format. - * @param log - Raw log response from API - * @returns Formatted execution log - */ -export function mapToExecutionLog(log: RawLogResponse): ExecutionLog { - const started = log.startedAt - ? new Date(log.startedAt) - : log.endedAt - ? new Date(log.endedAt) - : null - - const startedAt = - started && !Number.isNaN(started.getTime()) ? started.toISOString() : new Date().toISOString() - - const duration = parseDuration(log) - const output = extractOutput(log) - - return { - id: log.id, - executionId: log.executionId, - startedAt, - level: log.level || 'info', - status: log.status || 'completed', - trigger: log.trigger || 'manual', - triggerUserId: log.triggerUserId || null, - triggerInputs: undefined, - outputs: output || undefined, - errorMessage: log.error || null, - duration, - cost: log.cost - ? { - input: log.cost.input || 0, - output: log.cost.output || 0, - total: log.cost.total || 0, - } - : null, - workflowName: log.workflowName || log.workflow?.name, - workflowColor: log.workflowColor || log.workflow?.color, - hasPendingPause: log.hasPendingPause === true, - } -} - -/** - * Alternative version that uses createdAt as fallback for startedAt. - * Used in some API responses. - * @param log - Raw log response from API - * @returns Formatted execution log - */ -export function mapToExecutionLogAlt(log: RawLogResponse): ExecutionLog { - const duration = parseDuration(log) - const output = extractOutput(log) - - return { - id: log.id, - executionId: log.executionId, - startedAt: log.createdAt || log.startedAt || new Date().toISOString(), - level: log.level || 'info', - status: log.status || 'completed', - trigger: log.trigger || 'manual', - triggerUserId: log.triggerUserId || null, - triggerInputs: undefined, - outputs: output || undefined, - errorMessage: log.error || null, - duration, - cost: log.cost - ? { - input: log.cost.input || 0, - output: log.cost.output || 0, - total: log.cost.total || 0, - } - : null, - workflowName: log.workflow?.name, - workflowColor: log.workflow?.color, - hasPendingPause: log.hasPendingPause === true, - } -} - /** * Format latency value for display in dashboard UI * @param ms - Latency in milliseconds (number) @@ -449,15 +226,15 @@ export const formatDate = (dateString: string) => { * Prefers the persisted `workflowInput` field (new logs), falls back to * reconstructing from `executionState.blockStates` (old logs). 
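 * As a hypothetical illustration of that order: a new-style log shaped like
 * `{ executionData: { workflowInput: { city: "Paris" } } }` resolves to `{ city: "Paris" }` directly,
 * without ever touching `executionState.blockStates`.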
*/ -export function extractRetryInput(log: WorkflowLog): unknown | undefined { - const execData = log.executionData as Record | undefined +export function extractRetryInput(log: WorkflowLogDetail): unknown | undefined { + const execData = log.executionData if (!execData) return undefined if (execData.workflowInput !== undefined) { return execData.workflowInput } - const executionState = execData.executionState as + const executionState = (execData as Record).executionState as | { blockStates?: Record< string, diff --git a/apps/sim/app/workspace/[workspaceId]/settings/components/api-keys/components/create-api-key-modal/create-api-key-modal.tsx b/apps/sim/app/workspace/[workspaceId]/settings/components/api-keys/components/create-api-key-modal/create-api-key-modal.tsx index 269c883ebb0..36731fe7962 100644 --- a/apps/sim/app/workspace/[workspaceId]/settings/components/api-keys/components/create-api-key-modal/create-api-key-modal.tsx +++ b/apps/sim/app/workspace/[workspaceId]/settings/components/api-keys/components/create-api-key-modal/create-api-key-modal.tsx @@ -2,7 +2,6 @@ import { useState } from 'react' import { createLogger } from '@sim/logger' -import { Check, Copy } from 'lucide-react' import { Button, ButtonGroup, @@ -13,6 +12,7 @@ import { ModalContent, ModalFooter, ModalHeader, + SecretReveal, } from '@/components/emcn' import { type ApiKey, useCreateApiKey } from '@/hooks/queries/api-keys' @@ -50,8 +50,6 @@ export function CreateApiKeyModal({ const [createError, setCreateError] = useState(null) const [newKey, setNewKey] = useState(null) const [showNewKeyDialog, setShowNewKeyDialog] = useState(false) - const [copySuccess, setCopySuccess] = useState(false) - const createApiKeyMutation = useCreateApiKey() const handleCreateKey = async () => { @@ -105,12 +103,6 @@ export function CreateApiKeyModal({ setCreateError(null) } - const copyToClipboard = (key: string) => { - navigator.clipboard.writeText(key) - setCopySuccess(true) - setTimeout(() => setCopySuccess(false), 2000) - } - return ( <> {/* Create API Key Dialog */} @@ -209,7 +201,6 @@ export function CreateApiKeyModal({ setShowNewKeyDialog(dialogOpen) if (!dialogOpen) { setNewKey(null) - setCopySuccess(false) } }} > @@ -223,27 +214,7 @@ export function CreateApiKeyModal({

- {newKey && ( -
-
- - {newKey.key} - -
- -
- )} + {newKey && } diff --git a/apps/sim/app/workspace/[workspaceId]/settings/components/copilot/copilot.tsx b/apps/sim/app/workspace/[workspaceId]/settings/components/copilot/copilot.tsx index ac4eda18f41..f867d9beee9 100644 --- a/apps/sim/app/workspace/[workspaceId]/settings/components/copilot/copilot.tsx +++ b/apps/sim/app/workspace/[workspaceId]/settings/components/copilot/copilot.tsx @@ -4,7 +4,7 @@ import { useMemo, useState } from 'react' // import { useParams } from 'next/navigation' import { createLogger } from '@sim/logger' import { formatDate } from '@sim/utils/formatting' -import { Check, Copy, Plus, Search } from 'lucide-react' +import { Plus, Search } from 'lucide-react' import { Button, Input as EmcnInput, @@ -13,6 +13,7 @@ import { ModalContent, ModalFooter, ModalHeader, + SecretReveal, // Switch, } from '@/components/emcn' import { Input } from '@/components/ui' @@ -58,7 +59,6 @@ export function Copilot() { const [newKeyName, setNewKeyName] = useState('') const [newKey, setNewKey] = useState(null) const [showNewKeyDialog, setShowNewKeyDialog] = useState(false) - const [copySuccess, setCopySuccess] = useState(false) const [deleteKey, setDeleteKey] = useState(null) const [showDeleteDialog, setShowDeleteDialog] = useState(false) const [searchTerm, setSearchTerm] = useState('') @@ -115,12 +115,6 @@ export function Copilot() { } } - const copyToClipboard = (key: string) => { - navigator.clipboard.writeText(key) - setCopySuccess(true) - setTimeout(() => setCopySuccess(false), 2000) - } - const handleDeleteKey = async () => { if (!deleteKey) return try { @@ -316,7 +310,6 @@ export function Copilot() { setShowNewKeyDialog(open) if (!open) { setNewKey(null) - setCopySuccess(false) } }} > @@ -330,27 +323,7 @@ export function Copilot() {

- {newKey && ( -
-
- - {newKey} - -
- -
- )} + {newKey && } diff --git a/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/table/table.tsx b/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/table/table.tsx index 6d624c8dd7d..08ba169d1e1 100644 --- a/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/table/table.tsx +++ b/apps/sim/app/workspace/[workspaceId]/tables/[tableId]/components/table/table.tsx @@ -173,6 +173,7 @@ export function Table({ const containerRef = useRef(null) const scrollRef = useRef(null) const isDraggingRef = useRef(false) + const suppressFocusScrollRef = useRef(false) const { tableData, @@ -796,6 +797,7 @@ export function Table({ if (rws.length === 0 || currentCols.length === 0) return setEditingCell(null) setCheckedRows((prev) => (prev.size === 0 ? prev : EMPTY_CHECKED_ROWS)) + suppressFocusScrollRef.current = true setSelectionAnchor({ rowIndex: 0, colIndex: 0 }) setSelectionFocus({ rowIndex: maxPositionRef.current, @@ -1155,6 +1157,10 @@ export function Table({ useEffect(() => { if (isColumnSelection) return + if (suppressFocusScrollRef.current) { + suppressFocusScrollRef.current = false + return + } const target = selectionFocus ?? selectionAnchor if (!target) return const { rowIndex, colIndex } = target @@ -1296,6 +1302,7 @@ export function Table({ const rws = rowsRef.current const currentCols = columnsRef.current if (rws.length > 0 && currentCols.length > 0) { + suppressFocusScrollRef.current = true setEditingCell(null) setCheckedRows((prev) => (prev.size === 0 ? prev : EMPTY_CHECKED_ROWS)) setSelectionAnchor({ rowIndex: 0, colIndex: 0 }) @@ -2685,7 +2692,10 @@ export function Table({ <> {rows.map((row, index) => { const prevPosition = index > 0 ? rows[index - 1].position : -1 - const gapCount = queryOptions.filter ? 0 : row.position - prevPosition - 1 + const gapCount = + queryOptions.filter || queryOptions.sort + ? 0 + : row.position - prevPosition - 1 return ( {gapCount > 0 && ( @@ -2938,7 +2948,7 @@ export function Table({ } const GAP_ROW_LIMIT = 200 -const GAP_CHECKBOX_CLASS = cn(CELL_CHECKBOX, 'group/checkbox cursor-pointer text-center') +const GAP_CHECKBOX_CLASS = cn(CELL_CHECKBOX, 'cursor-pointer') interface PositionGapRowsProps { count: number @@ -2975,28 +2985,32 @@ const PositionGapRows = React.memo( const isGapChecked = checkedRows.has(position) return ( - { - if (e.button !== 0) return - onRowToggle(position, e.shiftKey) - }} - > - - {position + 1} - -
- + +
+
{ + if (e.button !== 0) return + onRowToggle(position, e.shiftKey) + }} + > + + {position + 1} + +
+ +
+
{columns.map((col, colIndex) => { @@ -3238,7 +3252,7 @@ const DataRow = React.memo(function DataRow({ return ( onContextMenu(e, row)}> -
+
{ @@ -3268,7 +3282,7 @@ const DataRow = React.memo(function DataRow({ type='button' aria-label={runningCount > 0 ? `Stop ${runningCount} running` : 'Run row'} title={runningCount > 0 ? `Stop ${runningCount} running` : 'Run row'} - className='flex h-[20px] w-[20px] shrink-0 items-center justify-center rounded text-[var(--text-primary)] transition-colors hover-hover:bg-[var(--surface-2)]' + className='ml-auto flex h-[20px] w-[20px] shrink-0 items-center justify-center rounded text-[var(--text-primary)] transition-colors hover-hover:bg-[var(--surface-2)]' onClick={() => { if (runningCount > 0) { onStopRow(row.id) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts index 73dc76e3792..9c09054d5ba 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts @@ -126,7 +126,10 @@ export function useFileAttachments(props: UseFileAttachmentsProps) { type: resolveFileType(file), path: '', uploading: true, - previewUrl: file.type.startsWith('image/') ? URL.createObjectURL(file) : undefined, + previewUrl: + file.type.startsWith('image/') || file.type.startsWith('video/') + ? URL.createObjectURL(file) + : undefined, })) setAttachedFiles((prev) => [...prev, ...placeholders]) diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts index c4b0e1e5e67..411ab163f47 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts @@ -346,7 +346,7 @@ export function useMentionData(props: UseMentionDataProps): MentionDataReturn { try { setIsLoadingLogs(true) const data = await requestJson(listLogsContract, { - query: { workspaceId, limit: 50, details: 'full' }, + query: { workspaceId, limit: 50 }, }) const items = data.data const mapped = items.map((l) => ({ diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts index e7677a608a7..ee07dba42dd 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts @@ -482,6 +482,312 @@ describe('groupEntriesByExecution', () => { }) }) +describe('duration computation', () => { + /** + * Regression guard for the 18m → 20m → 22m bug. + * + * When a loop iteration contains a parallel block, the iteration's displayed + * duration must be wall-clock (max(endedAt) − min(startedAt)), not the sum of + * child durationMs. Summing over concurrent parallel branches over-counts time + * and causes the displayed iteration duration to climb rapidly as each branch + * resolves. 
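+ *
+ * Illustrative arithmetic, using the same hypothetical numbers as the test below:
+ * five branches of 110s that start together finish after ~110s of wall-clock time,
+ * whereas summing their durationMs would report 5 × 110s = 550s.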
+ */ + it('loop iteration with concurrent parallel branches uses wall-clock duration', () => { + const branches = 5 + const branchDurationMs = 110_000 + const loopIterStartMs = Date.UTC(2025, 0, 1, 0, 0, 0) + const loopIterEndMs = loopIterStartMs + branchDurationMs + + const entries: ConsoleEntry[] = [] + for (let branch = 0; branch < branches; branch++) { + entries.push( + makeEntry({ + blockId: 'function-1', + blockName: 'Function 1', + executionOrder: branch + 1, + startedAt: new Date(loopIterStartMs).toISOString(), + endedAt: new Date(loopIterEndMs).toISOString(), + durationMs: branchDurationMs, + iterationType: 'parallel', + iterationCurrent: branch, + iterationTotal: branches, + iterationContainerId: 'parallel-1', + parentIterations: [ + { + iterationType: 'loop', + iterationCurrent: 0, + iterationTotal: 1, + iterationContainerId: 'loop-1', + }, + ], + }) + ) + } + + const tree = buildEntryTree(entries) + const loopSubflow = tree.find((n) => n.entry.blockType === 'loop') + expect(loopSubflow).toBeDefined() + + const iteration = loopSubflow!.children[0] + expect(iteration.nodeType).toBe('iteration') + expect(iteration.entry.durationMs).toBe(branchDurationMs) + expect(iteration.entry.durationMs).toBeLessThan(branches * branchDurationMs) + }) + + it('subflow container with concurrent children uses wall-clock duration', () => { + const branches = 4 + const branchDurationMs = 60_000 + const startMs = Date.UTC(2025, 0, 1, 0, 0, 0) + const endMs = startMs + branchDurationMs + + const entries: ConsoleEntry[] = [] + for (let branch = 0; branch < branches; branch++) { + entries.push( + makeEntry({ + blockId: 'function-1', + executionOrder: branch + 1, + startedAt: new Date(startMs).toISOString(), + endedAt: new Date(endMs).toISOString(), + durationMs: branchDurationMs, + iterationType: 'parallel', + iterationCurrent: branch, + iterationTotal: branches, + iterationContainerId: 'parallel-1', + }) + ) + } + + const tree = buildEntryTree(entries) + const subflow = tree.find((n) => n.entry.blockType === 'parallel') + expect(subflow).toBeDefined() + expect(subflow!.entry.durationMs).toBe(branchDurationMs) + expect(subflow!.entry.durationMs).toBeLessThan(branches * branchDurationMs) + }) + + it('sequential loop iteration uses wall-clock duration', () => { + const blockStart = Date.UTC(2025, 0, 1, 0, 0, 0) + const blockEnd = blockStart + 5_000 + + const entries: ConsoleEntry[] = [ + makeEntry({ + blockId: 'function-1', + executionOrder: 1, + startedAt: new Date(blockStart).toISOString(), + endedAt: new Date(blockEnd).toISOString(), + durationMs: 5_000, + iterationType: 'loop', + iterationCurrent: 0, + iterationTotal: 1, + iterationContainerId: 'loop-1', + }), + ] + + const tree = buildEntryTree(entries) + const loop = tree.find((n) => n.entry.blockType === 'loop') + expect(loop).toBeDefined() + expect(loop!.children[0].entry.durationMs).toBe(5_000) + }) + + it('parallel iteration uses wall-clock duration', () => { + const start = Date.UTC(2025, 0, 1, 0, 0, 0) + const end = start + 7_500 + + const entries: ConsoleEntry[] = [ + makeEntry({ + blockId: 'function-1', + executionOrder: 1, + startedAt: new Date(start).toISOString(), + endedAt: new Date(end).toISOString(), + durationMs: 7_500, + iterationType: 'parallel', + iterationCurrent: 0, + iterationTotal: 1, + iterationContainerId: 'parallel-1', + }), + ] + + const tree = buildEntryTree(entries) + const parallel = tree.find((n) => n.entry.blockType === 'parallel') + expect(parallel).toBeDefined() + 
expect(parallel!.children[0].entry.durationMs).toBe(7_500) + }) + + it('sequential loop with gaps between iterations: each iteration is wall-clock of its own children', () => { + const entries: ConsoleEntry[] = [] + const iterStarts = [0, 10_000, 30_000] + const blockDuration = 1_000 + const base = Date.UTC(2025, 0, 1, 0, 0, 0) + + for (let i = 0; i < iterStarts.length; i++) { + entries.push( + makeEntry({ + blockId: 'function-1', + executionOrder: i + 1, + startedAt: new Date(base + iterStarts[i]).toISOString(), + endedAt: new Date(base + iterStarts[i] + blockDuration).toISOString(), + durationMs: blockDuration, + iterationType: 'loop', + iterationCurrent: i, + iterationTotal: 3, + iterationContainerId: 'loop-1', + }) + ) + } + + const tree = buildEntryTree(entries) + const loop = tree.find((n) => n.entry.blockType === 'loop')! + for (let i = 0; i < 3; i++) { + expect(loop.children[i].entry.durationMs).toBe(blockDuration) + } + expect(loop.entry.durationMs).toBe(iterStarts[2] + blockDuration - iterStarts[0]) + }) + + it('loop-in-loop: outer iteration duration spans all inner iterations wall-clock', () => { + const entries: ConsoleEntry[] = [] + const base = Date.UTC(2025, 0, 1, 0, 0, 0) + const innerDuration = 2_000 + const innerCount = 3 + + for (let inner = 0; inner < innerCount; inner++) { + const start = base + inner * innerDuration + entries.push( + makeEntry({ + blockId: 'function-1', + executionOrder: inner + 1, + startedAt: new Date(start).toISOString(), + endedAt: new Date(start + innerDuration).toISOString(), + durationMs: innerDuration, + iterationType: 'loop', + iterationCurrent: inner, + iterationTotal: innerCount, + iterationContainerId: 'inner-loop', + parentIterations: [ + { + iterationType: 'loop', + iterationCurrent: 0, + iterationTotal: 1, + iterationContainerId: 'outer-loop', + }, + ], + }) + ) + } + + const tree = buildEntryTree(entries) + const outerLoop = tree.find((n) => n.entry.blockType === 'loop')! + const outerIter = outerLoop.children[0] + expect(outerIter.entry.durationMs).toBe(innerCount * innerDuration) + }) + + it('loop-in-parallel: each branch duration reflects its own loop wall-clock', () => { + const entries: ConsoleEntry[] = [] + const base = Date.UTC(2025, 0, 1, 0, 0, 0) + const innerDuration = 1_500 + const innerCount = 2 + const branches = 3 + + for (let branch = 0; branch < branches; branch++) { + for (let inner = 0; inner < innerCount; inner++) { + const start = base + inner * innerDuration + entries.push( + makeEntry({ + blockId: 'function-1', + executionOrder: branch * innerCount + inner + 1, + startedAt: new Date(start).toISOString(), + endedAt: new Date(start + innerDuration).toISOString(), + durationMs: innerDuration, + iterationType: 'loop', + iterationCurrent: inner, + iterationTotal: innerCount, + iterationContainerId: 'inner-loop', + parentIterations: [ + { + iterationType: 'parallel', + iterationCurrent: branch, + iterationTotal: branches, + iterationContainerId: 'parallel-1', + }, + ], + }) + ) + } + } + + const tree = buildEntryTree(entries) + const parallelSubflow = tree.find((n) => n.entry.blockType === 'parallel')! 
+ expect(parallelSubflow.children).toHaveLength(branches) + for (let branch = 0; branch < branches; branch++) { + const branchNode = parallelSubflow.children[branch] + expect(branchNode.entry.durationMs).toBe(innerCount * innerDuration) + } + expect(parallelSubflow.entry.durationMs).toBe(innerCount * innerDuration) + }) + + it('single-block iteration: duration equals the block durationMs', () => { + const start = Date.UTC(2025, 0, 1, 0, 0, 0) + const blockDuration = 3_141 + + const entries: ConsoleEntry[] = [ + makeEntry({ + blockId: 'function-1', + executionOrder: 1, + startedAt: new Date(start).toISOString(), + endedAt: new Date(start + blockDuration).toISOString(), + durationMs: blockDuration, + iterationType: 'loop', + iterationCurrent: 0, + iterationTotal: 1, + iterationContainerId: 'loop-1', + }), + ] + + const tree = buildEntryTree(entries) + const loop = tree.find((n) => n.entry.blockType === 'loop')! + expect(loop.children[0].entry.durationMs).toBe(blockDuration) + expect(loop.entry.durationMs).toBe(blockDuration) + }) + + it('does not sum concurrent branch durations into iteration duration', () => { + const branches = 20 + const branchDurationMs = 100_000 + const start = Date.UTC(2025, 0, 1, 0, 0, 0) + + const entries: ConsoleEntry[] = [] + for (let branch = 0; branch < branches; branch++) { + const branchStart = start + branch * 5 + entries.push( + makeEntry({ + blockId: 'function-1', + executionOrder: branch + 1, + startedAt: new Date(branchStart).toISOString(), + endedAt: new Date(branchStart + branchDurationMs).toISOString(), + durationMs: branchDurationMs, + iterationType: 'parallel', + iterationCurrent: branch, + iterationTotal: branches, + iterationContainerId: 'parallel-1', + parentIterations: [ + { + iterationType: 'loop', + iterationCurrent: 0, + iterationTotal: 1, + iterationContainerId: 'loop-1', + }, + ], + }) + ) + } + + const tree = buildEntryTree(entries) + const loopSubflow = tree.find((n) => n.entry.blockType === 'loop')! + const iteration = loopSubflow.children[0] + + const wallClock = branchDurationMs + (branches - 1) * 5 + expect(iteration.entry.durationMs).toBe(wallClock) + expect(iteration.entry.durationMs).toBeLessThan(branches * branchDurationMs) + }) +}) + describe('flattenVisibleExecutionRows', () => { it('only includes children for expanded nodes', () => { const childBlock = makeEntry({ diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts index e4c450d9c7d..347a5ffbc2f 100644 --- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts +++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts @@ -393,9 +393,7 @@ export function buildEntryTree(entries: ConsoleEntry[], idPrefix = ''): EntryNod const subflowEndMs = Math.max( ...allRelevantBlocks.map((b) => new Date(b.endedAt || b.timestamp).getTime()) ) - const totalDuration = allRelevantBlocks.reduce((sum, b) => sum + (b.durationMs || 0), 0) - const subflowDuration = - iterationType === 'parallel' ? 
subflowEndMs - subflowStartMs : totalDuration + const subflowDuration = subflowEndMs - subflowStartMs const subflowExecutionOrder = Math.min(...allRelevantBlocks.map((b) => b.executionOrder)) const metadataSource = allRelevantBlocks[0] @@ -449,9 +447,7 @@ export function buildEntryTree(entries: ConsoleEntry[], idPrefix = ''): EntryNod const iterEndMs = Math.max( ...allIterEntries.map((b) => new Date(b.endedAt || b.timestamp).getTime()) ) - const iterDuration = allIterEntries.reduce((sum, b) => sum + (b.durationMs || 0), 0) - const iterDisplayDuration = - iterationType === 'parallel' ? iterEndMs - iterStartMs : iterDuration + const iterDisplayDuration = iterEndMs - iterStartMs const iterExecutionOrder = Math.min(...allIterEntries.map((b) => b.executionOrder)) const iterMetadataSource = allIterEntries[0] diff --git a/apps/sim/blocks/blocks/image_generator.ts b/apps/sim/blocks/blocks/image_generator.ts index 6963cd604fd..69f94cb49e2 100644 --- a/apps/sim/blocks/blocks/image_generator.ts +++ b/apps/sim/blocks/blocks/image_generator.ts @@ -8,7 +8,7 @@ export const ImageGeneratorBlock: BlockConfig = { description: 'Generate images', authMode: AuthMode.ApiKey, longDescription: - 'Integrate Image Generator into the workflow. Can generate images using DALL-E 3 or GPT Image.', + 'Integrate Image Generator into the workflow. Can generate images using DALL-E 3, GPT Image 1, or GPT Image 2.', docsLink: 'https://docs.sim.ai/tools/image_generator', category: 'tools', integrationType: IntegrationType.AI, @@ -22,7 +22,8 @@ export const ImageGeneratorBlock: BlockConfig = { type: 'dropdown', options: [ { label: 'DALL-E 3', id: 'dall-e-3' }, - { label: 'GPT Image', id: 'gpt-image-1' }, + { label: 'GPT Image 1', id: 'gpt-image-1' }, + { label: 'GPT Image 2', id: 'gpt-image-2' }, ], value: () => 'dall-e-3', }, @@ -60,6 +61,22 @@ export const ImageGeneratorBlock: BlockConfig = { condition: { field: 'model', value: 'gpt-image-1' }, dependsOn: ['model'], }, + { + id: 'size', + title: 'Size', + type: 'dropdown', + options: [ + { label: 'Auto', id: 'auto' }, + { label: 'Square (1024x1024)', id: '1024x1024' }, + { label: 'Portrait (1024x1536)', id: '1024x1536' }, + { label: 'Landscape (1536x1024)', id: '1536x1024' }, + { label: '2K (2560x1440)', id: '2560x1440' }, + { label: '4K (3840x2160)', id: '3840x2160' }, + ], + value: () => 'auto', + condition: { field: 'model', value: 'gpt-image-2' }, + dependsOn: ['model'], + }, { id: 'quality', title: 'Quality', @@ -72,6 +89,20 @@ export const ImageGeneratorBlock: BlockConfig = { condition: { field: 'model', value: 'dall-e-3' }, dependsOn: ['model'], }, + { + id: 'quality', + title: 'Quality', + type: 'dropdown', + options: [ + { label: 'Auto', id: 'auto' }, + { label: 'Low', id: 'low' }, + { label: 'Medium', id: 'medium' }, + { label: 'High', id: 'high' }, + ], + value: () => 'auto', + condition: { field: 'model', value: ['gpt-image-1', 'gpt-image-2'] }, + dependsOn: ['model'], + }, { id: 'style', title: 'Style', @@ -97,6 +128,43 @@ export const ImageGeneratorBlock: BlockConfig = { condition: { field: 'model', value: 'gpt-image-1' }, dependsOn: ['model'], }, + { + id: 'background', + title: 'Background', + type: 'dropdown', + options: [ + { label: 'Auto', id: 'auto' }, + { label: 'Opaque', id: 'opaque' }, + ], + value: () => 'auto', + condition: { field: 'model', value: 'gpt-image-2' }, + dependsOn: ['model'], + }, + { + id: 'outputFormat', + title: 'Output Format', + type: 'dropdown', + options: [ + { label: 'PNG', id: 'png' }, + { label: 'JPEG', id: 'jpeg' }, + { label: 
'WebP', id: 'webp' }, + ], + value: () => 'png', + condition: { field: 'model', value: ['gpt-image-1', 'gpt-image-2'] }, + dependsOn: ['model'], + }, + { + id: 'moderation', + title: 'Moderation', + type: 'dropdown', + options: [ + { label: 'Auto', id: 'auto' }, + { label: 'Low', id: 'low' }, + ], + value: () => 'auto', + condition: { field: 'model', value: ['gpt-image-1', 'gpt-image-2'] }, + dependsOn: ['model'], + }, { id: 'apiKey', title: 'API Key', @@ -120,7 +188,25 @@ export const ImageGeneratorBlock: BlockConfig = { } const model = params.model || 'dall-e-3' - const size = params.size || (model === 'gpt-image-1' ? 'auto' : '1024x1024') + + const ALLOWED_SIZES: Record = { + 'dall-e-3': ['1024x1024', '1024x1792', '1792x1024'], + 'gpt-image-1': ['auto', '1024x1024', '1536x1024', '1024x1536'], + 'gpt-image-2': ['auto', '1024x1024', '1536x1024', '1024x1536', '2560x1440', '3840x2160'], + } + const ALLOWED_QUALITIES: Record = { + 'dall-e-3': ['standard', 'hd'], + 'gpt-image-1': ['auto', 'low', 'medium', 'high'], + 'gpt-image-2': ['auto', 'low', 'medium', 'high'], + } + const ALLOWED_BACKGROUNDS: Record = { + 'gpt-image-1': ['auto', 'transparent', 'opaque'], + 'gpt-image-2': ['auto', 'opaque'], + } + + const defaultSize = model === 'dall-e-3' ? '1024x1024' : 'auto' + const size = ALLOWED_SIZES[model]?.includes(params.size) ? params.size : defaultSize + const baseParams = { prompt: params.prompt, model, @@ -129,16 +215,25 @@ export const ImageGeneratorBlock: BlockConfig = { } if (model === 'dall-e-3') { - return { - ...baseParams, - quality: params.quality || 'standard', - style: params.style || 'vivid', - } + const quality = ALLOWED_QUALITIES['dall-e-3'].includes(params.quality) + ? params.quality + : 'standard' + const style = ['vivid', 'natural'].includes(params.style) ? params.style : 'vivid' + return { ...baseParams, quality, style } } - if (model === 'gpt-image-1') { + if (model === 'gpt-image-1' || model === 'gpt-image-2') { + const quality = ALLOWED_QUALITIES[model].includes(params.quality) + ? params.quality + : undefined + const background = ALLOWED_BACKGROUNDS[model].includes(params.background) + ? 
params.background + : undefined return { ...baseParams, - ...(params.background && { background: params.background }), + ...(quality && { quality }), + ...(background && { background }), + ...(params.outputFormat && { outputFormat: params.outputFormat }), + ...(params.moderation && { moderation: params.moderation }), } } @@ -153,6 +248,8 @@ export const ImageGeneratorBlock: BlockConfig = { quality: { type: 'string', description: 'Image quality level' }, style: { type: 'string', description: 'Image style' }, background: { type: 'string', description: 'Background type' }, + outputFormat: { type: 'string', description: 'Output image format (png, jpeg, webp)' }, + moderation: { type: 'string', description: 'Moderation level (auto or low)' }, apiKey: { type: 'string', description: 'OpenAI API key' }, }, outputs: { diff --git a/apps/sim/blocks/blocks/knowledge.ts b/apps/sim/blocks/blocks/knowledge.ts index 3d17e9cb402..f8a92235b2e 100644 --- a/apps/sim/blocks/blocks/knowledge.ts +++ b/apps/sim/blocks/blocks/knowledge.ts @@ -1,6 +1,7 @@ import { PackageSearchIcon } from '@/components/icons' import { DEFAULT_RERANKER_MODEL, SUPPORTED_RERANKER_MODELS } from '@/lib/knowledge/reranker-models' import type { BlockConfig } from '@/blocks/types' +import { getCohereRerankerApiKeyCondition } from '@/blocks/utils' export const KnowledgeBlock: BlockConfig = { type: 'knowledge', @@ -105,6 +106,28 @@ export const KnowledgeBlock: BlockConfig = { and: { field: 'rerankerEnabled', value: true }, }, }, + { + id: 'rerankerInputCount', + title: 'Documents Sent to Reranker', + type: 'short-input', + placeholder: 'Auto (4× results, capped at 100)', + mode: 'advanced', + condition: { + field: 'operation', + value: 'search', + and: { field: 'rerankerEnabled', value: true }, + }, + }, + { + id: 'apiKey', + title: 'Cohere API Key', + type: 'short-input', + placeholder: 'Enter your Cohere API key', + password: true, + connectionDroppable: false, + required: true, + condition: getCohereRerankerApiKeyCondition(), + }, // --- List Documents --- { @@ -419,6 +442,11 @@ export const KnowledgeBlock: BlockConfig = { tagFilters: { type: 'string', description: 'Tag filter criteria' }, rerankerEnabled: { type: 'boolean', description: 'Apply Cohere reranking to search results' }, rerankerModel: { type: 'string', description: 'Cohere rerank model identifier' }, + rerankerInputCount: { + type: 'number', + description: 'Number of vector results sent to the Cohere reranker (1–100)', + }, + apiKey: { type: 'string', description: 'Cohere API key (self-hosted only)' }, documentTags: { type: 'string', description: 'Document tags' }, chunkSearch: { type: 'string', description: 'Search filter for chunks' }, chunkEnabledFilter: { type: 'string', description: 'Filter chunks by enabled status' }, diff --git a/apps/sim/blocks/blocks/logs.ts b/apps/sim/blocks/blocks/logs.ts new file mode 100644 index 00000000000..d7665089f98 --- /dev/null +++ b/apps/sim/blocks/blocks/logs.ts @@ -0,0 +1,253 @@ +import { Library } from '@/components/emcn/icons' +import type { BlockConfig } from '@/blocks/types' + +export const LogsBlock: BlockConfig = { + type: 'logs', + name: 'Logs', + description: 'Query workflow execution logs', + longDescription: + 'Search workflow execution logs in the current workspace, fetch a single log by id, or load full execution details with the per-block state snapshot.', + bgColor: '#EAB308', + bestPractices: ` + - The block always operates on the current workspace; you cannot query other workspaces. + - 'Query Logs' returns summary rows. 
To get a full log entry (executionData, files), use 'Get Log by ID' on a row's id. + - Use 'Get Execution Details' (with an executionId) to inspect per-block state for a single run. + - Pagination is cursor-based: pass the previous response's nextCursor as Cursor to fetch the next page. + `, + icon: Library, + category: 'blocks', + docsLink: 'https://docs.sim.ai/api-reference/logs/getExecutionDetails', + subBlocks: [ + { + id: 'operation', + title: 'Operation', + type: 'dropdown', + options: [ + { label: 'Query Logs', id: 'query' }, + { label: 'Get Log by ID', id: 'get_log' }, + { label: 'Get Execution Details', id: 'get_execution' }, + ], + placeholder: 'Select operation', + value: () => 'query', + }, + { + id: 'workflowIds', + title: 'Workflow IDs', + type: 'short-input', + placeholder: 'Comma-separated workflow IDs', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'executionId', + title: 'Execution ID', + type: 'short-input', + placeholder: 'Filter by a single execution ID', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'level', + title: 'Level', + type: 'dropdown', + options: [ + { label: 'All', id: 'all' }, + { label: 'Info', id: 'info' }, + { label: 'Error', id: 'error' }, + { label: 'Running', id: 'running' }, + { label: 'Pending', id: 'pending' }, + ], + value: () => 'all', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'triggers', + title: 'Triggers', + type: 'short-input', + placeholder: 'api,webhook,schedule,manual,chat,mothership', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'limit', + title: 'Limit', + type: 'short-input', + placeholder: '100 (max 200)', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'startDate', + title: 'Start Date', + type: 'short-input', + placeholder: 'ISO 8601 timestamp', + mode: 'advanced', + wandConfig: { + enabled: true, + prompt: + 'Generate an ISO 8601 timestamp from the user description. Return ONLY the timestamp string.', + generationType: 'timestamp', + }, + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'endDate', + title: 'End Date', + type: 'short-input', + placeholder: 'ISO 8601 timestamp', + mode: 'advanced', + wandConfig: { + enabled: true, + prompt: + 'Generate an ISO 8601 timestamp from the user description. 
Return ONLY the timestamp string.', + generationType: 'timestamp', + }, + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'search', + title: 'Search', + type: 'short-input', + placeholder: 'Free-text search', + mode: 'advanced', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'sortBy', + title: 'Sort By', + type: 'dropdown', + options: [ + { label: 'Date', id: 'date' }, + { label: 'Duration', id: 'duration' }, + { label: 'Cost', id: 'cost' }, + { label: 'Status', id: 'status' }, + ], + value: () => 'date', + mode: 'advanced', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'sortOrder', + title: 'Sort Order', + type: 'dropdown', + options: [ + { label: 'Descending', id: 'desc' }, + { label: 'Ascending', id: 'asc' }, + ], + value: () => 'desc', + mode: 'advanced', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'cursor', + title: 'Cursor', + type: 'short-input', + placeholder: 'nextCursor from a previous response', + mode: 'advanced', + condition: { field: 'operation', value: 'query' }, + }, + { + id: 'logId', + title: 'Log ID', + type: 'short-input', + placeholder: 'Log entry ID', + condition: { field: 'operation', value: 'get_log' }, + required: true, + }, + { + id: 'executionIdLookup', + title: 'Execution ID', + type: 'short-input', + placeholder: 'Execution ID', + condition: { field: 'operation', value: 'get_execution' }, + required: true, + }, + ], + tools: { + access: ['logs_query', 'logs_get', 'logs_get_execution'], + config: { + tool: (params: Record) => { + const operation = params.operation || 'query' + if (operation === 'get_log') return 'logs_get' + if (operation === 'get_execution') return 'logs_get_execution' + return 'logs_query' + }, + params: (params: Record) => { + const operation = params.operation || 'query' + + if (operation === 'get_log') { + if (!params.logId) { + throw new Error('Logs Block Error: Log ID is required for get_log operation') + } + return { id: params.logId } + } + + if (operation === 'get_execution') { + if (!params.executionIdLookup) { + throw new Error( + 'Logs Block Error: Execution ID is required for get_execution operation' + ) + } + return { executionId: params.executionIdLookup } + } + + const rawLimit = + params.limit !== undefined && params.limit !== null && params.limit !== '' + ? Number(params.limit) + : undefined + const limit = Number.isFinite(rawLimit) ? rawLimit : undefined + + return { + workflowIds: params.workflowIds || undefined, + executionId: params.executionId || undefined, + level: params.level && params.level !== 'all' ? 
params.level : undefined, + triggers: params.triggers || undefined, + limit, + startDate: params.startDate || undefined, + endDate: params.endDate || undefined, + search: params.search || undefined, + cursor: params.cursor || undefined, + sortBy: params.sortBy || undefined, + sortOrder: params.sortOrder || undefined, + } + }, + }, + }, + inputs: { + operation: { type: 'string', description: 'Operation to perform' }, + workflowIds: { type: 'string', description: 'Comma-separated workflow IDs' }, + executionId: { type: 'string', description: 'Execution ID filter (query operation)' }, + level: { type: 'string', description: 'Log level filter' }, + triggers: { type: 'string', description: 'Comma-separated triggers' }, + limit: { type: 'number', description: 'Max logs to return (default 100, max 200)' }, + startDate: { type: 'string', description: 'ISO 8601 lower bound' }, + endDate: { type: 'string', description: 'ISO 8601 upper bound' }, + search: { type: 'string', description: 'Free-text search term' }, + sortBy: { type: 'string', description: "'date' | 'duration' | 'cost' | 'status'" }, + sortOrder: { type: 'string', description: "'desc' | 'asc'" }, + cursor: { type: 'string', description: 'Pagination cursor' }, + logId: { type: 'string', description: 'Log entry ID (get_log operation)' }, + executionIdLookup: { + type: 'string', + description: 'Execution ID (get_execution operation)', + }, + }, + outputs: { + logs: { type: 'json', description: 'Array of log summary entries (query operation)' }, + nextCursor: { + type: 'string', + description: 'Cursor for next page; null when no more results (query operation)', + }, + log: { type: 'json', description: 'Full log entry (get_log operation)' }, + executionId: { type: 'string', description: 'Execution ID (get_execution operation)' }, + workflowId: { type: 'string', description: 'Workflow ID (get_execution operation)' }, + workflowState: { + type: 'json', + description: 'Per-block state snapshot (get_execution operation)', + }, + childWorkflowSnapshots: { + type: 'json', + description: 'Snapshots for child workflows (get_execution operation)', + }, + executionMetadata: { + type: 'json', + description: 'Trigger, timestamps, totalDurationMs, cost (get_execution operation)', + }, + }, +} diff --git a/apps/sim/blocks/registry.ts b/apps/sim/blocks/registry.ts index e7ca943af3c..aacf6d49431 100644 --- a/apps/sim/blocks/registry.ts +++ b/apps/sim/blocks/registry.ts @@ -113,6 +113,7 @@ import { LemlistBlock } from '@/blocks/blocks/lemlist' import { LinearBlock, LinearV2Block } from '@/blocks/blocks/linear' import { LinkedInBlock } from '@/blocks/blocks/linkedin' import { LinkupBlock } from '@/blocks/blocks/linkup' +import { LogsBlock } from '@/blocks/blocks/logs' import { LoopsBlock } from '@/blocks/blocks/loops' import { LumaBlock } from '@/blocks/blocks/luma' import { MailchimpBlock } from '@/blocks/blocks/mailchimp' @@ -361,6 +362,7 @@ export const registry: Record = { linear_v2: LinearV2Block, linkedin: LinkedInBlock, linkup: LinkupBlock, + logs: LogsBlock, loops: LoopsBlock, luma: LumaBlock, mailchimp: MailchimpBlock, diff --git a/apps/sim/blocks/utils.ts b/apps/sim/blocks/utils.ts index b70ca7af504..c22596b34cd 100644 --- a/apps/sim/blocks/utils.ts +++ b/apps/sim/blocks/utils.ts @@ -1,5 +1,10 @@ import { toError } from '@sim/utils/errors' -import { isAzureConfigured, isHosted, isOllamaConfigured } from '@/lib/core/config/feature-flags' +import { + isAzureConfigured, + isCohereConfigured, + isHosted, + isOllamaConfigured, +} from 
'@/lib/core/config/feature-flags' import { getScopesForService } from '@/lib/oauth/utils' import { buildCanonicalIndex } from '@/lib/workflows/subblocks/visibility' import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types' @@ -184,6 +189,27 @@ export function getApiKeyCondition() { } } +/** + * Visibility condition for the Cohere reranker API key field on the Knowledge block. + * Hidden on hosted Sim (platform supplies the key via workspace BYOK or rotating env keys) + * and on self-hosted deployments that have set `NEXT_PUBLIC_COHERE_CONFIGURED=true` to + * indicate `COHERE_API_KEY` is pre-configured server-side. Otherwise shown (and required) + * whenever reranking is enabled for a search operation, mirroring the agent block's + * `getApiKeyCondition` pattern. + */ +export function getCohereRerankerApiKeyCondition() { + return () => { + if (isHosted || isCohereConfigured) { + return { field: 'operation', value: '__never_show__' } + } + return { + field: 'operation', + value: 'search', + and: { field: 'rerankerEnabled', value: true }, + } + } +} + /** * Returns the standard provider credential subblocks used by LLM-based blocks. * This includes: Vertex AI OAuth, API Key, Azure (OpenAI + Anthropic), Vertex AI config, and Bedrock config. diff --git a/apps/sim/components/emcn/components/code/copy-code-button.tsx b/apps/sim/components/emcn/components/code/copy-code-button.tsx index 5ef81dfb8a3..93ace78bf57 100644 --- a/apps/sim/components/emcn/components/code/copy-code-button.tsx +++ b/apps/sim/components/emcn/components/code/copy-code-button.tsx @@ -1,8 +1,8 @@ 'use client' -import { useCallback, useEffect, useRef, useState } from 'react' import { Button, Check, Copy } from '@/components/emcn' import { cn } from '@/lib/core/utils/cn' +import { useCopyToClipboard } from '@/hooks/use-copy-to-clipboard' interface CopyCodeButtonProps { code: string @@ -10,33 +10,16 @@ interface CopyCodeButtonProps { } export function CopyCodeButton({ code, className }: CopyCodeButtonProps) { - const [copied, setCopied] = useState(false) - const timerRef = useRef | null>(null) - - const handleCopy = useCallback(async () => { - try { - await navigator.clipboard.writeText(code) - setCopied(true) - if (timerRef.current) clearTimeout(timerRef.current) - timerRef.current = setTimeout(() => setCopied(false), 2000) - } catch {} - }, [code]) - - useEffect( - () => () => { - if (timerRef.current) clearTimeout(timerRef.current) - }, - [] - ) + const { copied, copy } = useCopyToClipboard() return ( ) } diff --git a/apps/sim/components/emcn/components/index.ts b/apps/sim/components/emcn/components/index.ts index 15b6cefd77f..0f30eeb09ac 100644 --- a/apps/sim/components/emcn/components/index.ts +++ b/apps/sim/components/emcn/components/index.ts @@ -126,6 +126,7 @@ export { SModalTrigger, } from './s-modal/s-modal' export { SecretInput, type SecretInputProps } from './secret-input/secret-input' +export { SecretReveal, type SecretRevealProps } from './secret-reveal/secret-reveal' export { Skeleton } from './skeleton/skeleton' export { Slider, type SliderProps } from './slider/slider' export { Switch } from './switch/switch' diff --git a/apps/sim/components/emcn/components/secret-reveal/secret-reveal.tsx b/apps/sim/components/emcn/components/secret-reveal/secret-reveal.tsx new file mode 100644 index 00000000000..1357ebfc3aa --- /dev/null +++ b/apps/sim/components/emcn/components/secret-reveal/secret-reveal.tsx @@ -0,0 +1,77 @@ +/** + * A read-only display for a one-time secret reveal: the value 
renders inside + * a bordered code box with a copy button, or as masked dots when redacted. + * + * @remarks + * Use for surfaces that show a freshly-generated credential (API key, signing + * secret, etc.) once and then need to fall back to a redacted state on + * subsequent renders. Pair with `redacted` (or simply omit `value`) to render + * the masked state without a copy affordance. + * + * @example + * ```tsx + * import { SecretReveal } from '@/components/emcn' + * + * + * + * ``` + */ +'use client' + +import { Button, Check, Copy } from '@/components/emcn' +import { cn } from '@/lib/core/utils/cn' +import { useCopyToClipboard } from '@/hooks/use-copy-to-clipboard' + +const REDACTED_DOTS = '••••••••••••••••••••••••••••••••' + +export interface SecretRevealProps { + /** Secret value to display. When absent or `redacted` is true, renders masked dots. */ + value?: string + /** Force the masked state even when `value` is provided. */ + redacted?: boolean + className?: string +} + +export function SecretReveal({ value, className, redacted = false }: SecretRevealProps) { + const { copied, copy } = useCopyToClipboard() + const isHidden = redacted || !value + + const handleCopy = () => { + if (isHidden || !value) return + copy(value) + } + + return ( +
<div className={cn(className)}>
+      <code>{isHidden ? REDACTED_DOTS : value}</code>
+      {!isHidden && (
+        <Button onClick={handleCopy}>{copied ? <Check /> : <Copy />}</Button>
+      )}
+    </div>
+ ) +} diff --git a/apps/sim/executor/execution/block-executor.ts b/apps/sim/executor/execution/block-executor.ts index 340b2aab01a..9a3c22e8529 100644 --- a/apps/sim/executor/execution/block-executor.ts +++ b/apps/sim/executor/execution/block-executor.ts @@ -187,7 +187,8 @@ export class BlockExecutor { } } - this.state.setBlockOutput(node.id, normalizedOutput, duration) + const { childTraceSpans: _traces, ...outputForState } = normalizedOutput + this.state.setBlockOutput(node.id, outputForState as NormalizedBlockOutput, duration) if (!isSentinel && blockLog) { const childWorkflowInstanceId = @@ -211,7 +212,7 @@ export class BlockExecutor { ) } - return normalizedOutput + return outputForState as NormalizedBlockOutput } catch (error) { return await this.handleBlockError( error, @@ -270,7 +271,6 @@ export class BlockExecutor { } if (ChildWorkflowError.isChildWorkflowError(error)) { - errorOutput.childTraceSpans = error.childTraceSpans errorOutput.childWorkflowName = error.childWorkflowName if (error.childWorkflowSnapshotId) { errorOutput.childWorkflowSnapshotId = error.childWorkflowSnapshotId @@ -287,8 +287,8 @@ export class BlockExecutor { blockLog.input = this.sanitizeInputsForLog(input) blockLog.output = filterOutputForLog(block.metadata?.id || '', errorOutput, { block }) - if (errorOutput.childTraceSpans && Array.isArray(errorOutput.childTraceSpans)) { - blockLog.childTraceSpans = errorOutput.childTraceSpans + if (ChildWorkflowError.isChildWorkflowError(error) && error.childTraceSpans.length > 0) { + blockLog.childTraceSpans = error.childTraceSpans } } diff --git a/apps/sim/hooks/queries/logs.ts b/apps/sim/hooks/queries/logs.ts index bd5b0e5e695..00b1aac4985 100644 --- a/apps/sim/hooks/queries/logs.ts +++ b/apps/sim/hooks/queries/logs.ts @@ -7,6 +7,7 @@ import { useQuery, useQueryClient, } from '@tanstack/react-query' +import { isApiClientError } from '@/lib/api/client/errors' import { requestJson } from '@/lib/api/client/request' import { cancelWorkflowExecutionContract, @@ -14,22 +15,27 @@ import { type ExecutionSnapshotData, getDashboardStatsContract, getExecutionSnapshotContract, + getLogByExecutionIdContract, getLogDetailContract, listLogsContract, type SegmentStats, - type WorkflowLogData, + type WorkflowLogDetail, + type WorkflowLogSummary, type WorkflowStats, } from '@/lib/api/contracts/logs' import { getEndDateFromTimeRange, getStartDateFromTimeRange } from '@/lib/logs/filters' import { parseQuery, queryToApiParams } from '@/lib/logs/query-parser' -import type { TimeRange, WorkflowLog } from '@/stores/logs/filters/types' +import type { TimeRange } from '@/stores/logs/filters/types' export type { DashboardStatsResponse, SegmentStats, WorkflowStats } +export type LogSortBy = 'date' | 'duration' | 'cost' | 'status' +export type LogSortOrder = 'asc' | 'desc' + export const logKeys = { all: ['logs'] as const, lists: () => [...logKeys.all, 'list'] as const, - list: (workspaceId: string | undefined, filters: Omit) => + list: (workspaceId: string | undefined, filters: LogFilters) => [...logKeys.lists(), workspaceId ?? '', filters] as const, details: () => [...logKeys.all, 'detail'] as const, detail: (logId: string | undefined) => [...logKeys.details(), logId ?? ''] as const, @@ -44,7 +50,7 @@ export const logKeys = { [...logKeys.executionSnapshots(), executionId ?? 
''] as const, } -interface LogFilters { +export interface LogFilters { timeRange: TimeRange startDate?: string endDate?: string @@ -54,15 +60,14 @@ interface LogFilters { triggers: string[] searchQuery: string limit: number + sortBy: LogSortBy + sortOrder: LogSortOrder } -const toWorkflowLog = (log: WorkflowLogData): WorkflowLog => log as WorkflowLog - -/** - * Applies common filter parameters to a URLSearchParams object. - * Shared between paginated and non-paginated log fetches. - */ -function applyFilterParams(params: URLSearchParams, filters: Omit): void { +function applyFilterParams( + params: URLSearchParams, + filters: Omit +): void { if (filters.level !== 'all') { params.set('level', filters.level) } @@ -99,61 +104,53 @@ function applyFilterParams(params: URLSearchParams, filters: Omit { +): Promise { const apiData = await requestJson(listLogsContract, { - query: buildQueryParams(workspaceId, filters, page), + query: buildListQuery(workspaceId, filters, cursor), signal, }) - const hasMore = apiData.data.length === filters.limit && apiData.page < apiData.totalPages return { - logs: apiData.data.map(toWorkflowLog), - hasMore, - nextPage: hasMore ? page + 1 : undefined, + logs: apiData.data, + nextCursor: apiData.nextCursor, } } -export async function fetchLogDetail(logId: string, signal?: AbortSignal): Promise { - const { data } = await requestJson(getLogDetailContract, { - params: { id: logId }, - signal, - }) - return toWorkflowLog(data) -} - -async function fetchLogByExecutionId( +export async function fetchLogDetail( + logId: string, workspaceId: string, - executionId: string, signal?: AbortSignal -): Promise { - const apiData = await requestJson(listLogsContract, { - query: { - workspaceId, - executionId, - details: 'full', - limit: 1, - }, +): Promise { + const { data } = await requestJson(getLogDetailContract, { + params: { id: logId }, + query: { workspaceId }, signal, }) - return apiData.data?.[0] ? toWorkflowLog(apiData.data[0]) : null + return data } interface UseLogsListOptions { @@ -172,10 +169,10 @@ export function useLogsList( fetchLogsPage(workspaceId as string, filters, pageParam, signal), enabled: Boolean(workspaceId) && (options?.enabled ?? true), refetchInterval: options?.refetchInterval ?? false, - staleTime: 0, + staleTime: 30 * 1000, placeholderData: keepPreviousData, - initialPageParam: 1, - getNextPageParam: (lastPage) => lastPage.nextPage, + initialPageParam: null as string | null, + getNextPageParam: (lastPage) => lastPage.nextCursor, }) } @@ -184,55 +181,57 @@ interface UseLogDetailOptions { refetchInterval?: | number | false - | ((query: { state: { data?: WorkflowLog } }) => number | false | undefined) + | ((query: { state: { data?: WorkflowLogDetail } }) => number | false | undefined) } -export function useLogDetail(logId: string | undefined, options?: UseLogDetailOptions) { +export function useLogDetail( + logId: string | undefined, + workspaceId: string | undefined, + options?: UseLogDetailOptions +) { return useQuery({ queryKey: logKeys.detail(logId), - queryFn: ({ signal }) => fetchLogDetail(logId as string, signal), - enabled: Boolean(logId) && (options?.enabled ?? true), + queryFn: ({ signal }) => fetchLogDetail(logId as string, workspaceId as string, signal), + enabled: Boolean(logId) && Boolean(workspaceId) && (options?.enabled ?? true), refetchInterval: options?.refetchInterval ?? 
false, staleTime: 30 * 1000, + retry: (failureCount, err) => + !(isApiClientError(err) && err.status === 404) && failureCount < 3, }) } -/** - * Looks up a workflow log by its `executionId` (the id stored on table workflow cells). - * Returns the full log shape so the LogDetails sidebar can render directly without - * an extra detail fetch. - */ export function useLogByExecutionId( workspaceId: string | undefined, executionId: string | null | undefined ) { + const queryClient = useQueryClient() return useQuery({ queryKey: logKeys.byExecution(workspaceId, executionId ?? undefined), - queryFn: ({ signal }) => - fetchLogByExecutionId(workspaceId as string, executionId as string, signal), + queryFn: async ({ signal }) => { + const { data } = await requestJson(getLogByExecutionIdContract, { + params: { executionId: executionId as string }, + query: { workspaceId: workspaceId as string }, + signal, + }) + queryClient.setQueryData(logKeys.detail(data.id), data) + return data + }, enabled: Boolean(workspaceId) && Boolean(executionId), staleTime: 30 * 1000, }) } -/** - * Prefetches log detail data on hover for instant panel rendering on click. - */ -export function prefetchLogDetail(queryClient: QueryClient, logId: string) { +export function prefetchLogDetail(queryClient: QueryClient, logId: string, workspaceId: string) { queryClient.prefetchQuery({ queryKey: logKeys.detail(logId), - queryFn: ({ signal }) => fetchLogDetail(logId, signal), + queryFn: ({ signal }) => fetchLogDetail(logId, workspaceId, signal), staleTime: 30 * 1000, }) } -/** - * Fetches dashboard stats from the server-side aggregation endpoint. - * Uses SQL aggregation for efficient computation without arbitrary limits. - */ async function fetchDashboardStats( workspaceId: string, - filters: Omit, + filters: Omit, signal?: AbortSignal ): Promise { const params = new URLSearchParams() @@ -252,13 +251,9 @@ interface UseDashboardStatsOptions { refetchInterval?: number | false } -/** - * Hook for fetching dashboard stats using server-side aggregation. - * No arbitrary limits - uses SQL aggregation for accurate metrics. - */ export function useDashboardStats( workspaceId: string | undefined, - filters: Omit, + filters: Omit, options?: UseDashboardStatsOptions ) { return useQuery({ @@ -266,7 +261,7 @@ export function useDashboardStats( queryFn: ({ signal }) => fetchDashboardStats(workspaceId as string, filters, signal), enabled: Boolean(workspaceId) && (options?.enabled ?? true), refetchInterval: options?.refetchInterval ?? false, - staleTime: 0, + staleTime: 30 * 1000, placeholderData: keepPreviousData, }) } @@ -293,12 +288,10 @@ export function useExecutionSnapshot(executionId: string | undefined) { queryKey: logKeys.executionSnapshot(executionId), queryFn: ({ signal }) => fetchExecutionSnapshot(executionId as string, signal), enabled: Boolean(executionId), - staleTime: 5 * 60 * 1000, // 5 minutes - execution snapshots don't change + staleTime: 5 * 60 * 1000, }) } -type LogsPage = { logs: WorkflowLog[]; hasMore: boolean; nextPage: number | undefined } - export function useCancelExecution() { const queryClient = useQueryClient() return useMutation({ @@ -322,29 +315,47 @@ export function useCancelExecution() { queryKey: logKeys.lists(), }) + let affectedLogId: string | null = null queryClient.setQueriesData>({ queryKey: logKeys.lists() }, (old) => { if (!old) return old return { ...old, pages: old.pages.map((page) => ({ ...page, - logs: page.logs.map((log) => - log.executionId === executionId ? 
{ ...log, status: 'cancelling' } : log - ), + logs: page.logs.map((log) => { + if (log.executionId !== executionId) return log + affectedLogId = log.id + return { ...log, status: 'cancelling' } + }), })), } }) - return { previousQueries } + let previousDetail: WorkflowLogDetail | undefined + if (affectedLogId) { + previousDetail = queryClient.getQueryData(logKeys.detail(affectedLogId)) + if (previousDetail) { + queryClient.setQueryData(logKeys.detail(affectedLogId), { + ...previousDetail, + status: 'cancelling', + }) + } + } + + return { previousQueries, affectedLogId, previousDetail } }, onError: (_err, _variables, context) => { for (const [queryKey, data] of context?.previousQueries ?? []) { queryClient.setQueryData(queryKey, data) } + if (context?.affectedLogId && context.previousDetail !== undefined) { + queryClient.setQueryData(logKeys.detail(context.affectedLogId), context.previousDetail) + } }, onSettled: () => { queryClient.invalidateQueries({ queryKey: logKeys.lists() }) queryClient.invalidateQueries({ queryKey: logKeys.details() }) + queryClient.invalidateQueries({ queryKey: logKeys.byExecutionAll() }) queryClient.invalidateQueries({ queryKey: logKeys.stats() }) }, }) @@ -364,9 +375,6 @@ export function useRetryExecution() { const data = await res.json().catch(() => ({})) throw new Error(data.error || 'Failed to retry execution') } - // The ReadableStream is lazy — start() only runs when read. - // Read one chunk to trigger execution, then cancel. Execution continues - // server-side after client disconnect. const reader = res.body?.getReader() if (reader) { await reader.read() @@ -377,6 +385,7 @@ export function useRetryExecution() { onSettled: () => { queryClient.invalidateQueries({ queryKey: logKeys.lists() }) queryClient.invalidateQueries({ queryKey: logKeys.details() }) + queryClient.invalidateQueries({ queryKey: logKeys.byExecutionAll() }) queryClient.invalidateQueries({ queryKey: logKeys.stats() }) }, }) diff --git a/apps/sim/hooks/use-copy-to-clipboard.ts b/apps/sim/hooks/use-copy-to-clipboard.ts new file mode 100644 index 00000000000..751a94cf76c --- /dev/null +++ b/apps/sim/hooks/use-copy-to-clipboard.ts @@ -0,0 +1,59 @@ +'use client' + +import { useCallback, useEffect, useRef, useState } from 'react' + +interface UseCopyToClipboardOptions { + /** How long the `copied` flag stays true before resetting. Defaults to 2000ms. */ + resetMs?: number +} + +interface UseCopyToClipboardReturn { + copied: boolean + copy: (text: string) => Promise +} + +/** + * Copy text to the clipboard with a transient `copied` flag for swap-icon + * feedback (e.g. Copy → Check for ~2s). + * + * Replaces the `[copied, setCopied] + setTimeout` boilerplate that's been + * duplicated across ~30 callsites. Each `copy()` call resets the timer so + * back-to-back copies don't stack timeouts; the timer is cleared on unmount. 
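+ * Typical wiring: call `copy(text)` from a click handler and swap the Copy
+ * icon for a Check while `copied` is true (see the CopyCodeButton refactor above).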
+ * + * @example + * const { copied, copy } = useCopyToClipboard() + * + */ +export function useCopyToClipboard( + options: UseCopyToClipboardOptions = {} +): UseCopyToClipboardReturn { + const { resetMs = 2000 } = options + const [copied, setCopied] = useState(false) + const timerRef = useRef | null>(null) + + const copy = useCallback( + async (text: string): Promise => { + try { + await navigator.clipboard.writeText(text) + setCopied(true) + if (timerRef.current) clearTimeout(timerRef.current) + timerRef.current = setTimeout(() => setCopied(false), resetMs) + return true + } catch { + return false + } + }, + [resetMs] + ) + + useEffect( + () => () => { + if (timerRef.current) clearTimeout(timerRef.current) + }, + [] + ) + + return { copied, copy } +} diff --git a/apps/sim/hooks/use-task-events.test.ts b/apps/sim/hooks/use-task-events.test.ts index 2e68175b935..e81edbca6dd 100644 --- a/apps/sim/hooks/use-task-events.test.ts +++ b/apps/sim/hooks/use-task-events.test.ts @@ -9,14 +9,46 @@ import { handleTaskStatusEvent } from '@/hooks/use-task-events' describe('handleTaskStatusEvent', () => { const queryClient = { + getQueryData: vi.fn(), invalidateQueries: vi.fn().mockResolvedValue(undefined), - } satisfies Pick + removeQueries: vi.fn(), + } satisfies Pick beforeEach(() => { vi.clearAllMocks() + queryClient.getQueryData.mockReturnValue(undefined) }) - it('invalidates only the task list for completed task events', () => { + it('invalidates the task list and detail for completed task events', () => { + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'completed', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('keeps completed task detail when an unkeyed completion races an active stream', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'new-stream' }, { id: 'live-assistant:new-stream' }], + activeStreamId: 'new-stream', + resources: [], + }) + handleTaskStatusEvent( queryClient, 'ws-1', @@ -31,15 +63,333 @@ describe('handleTaskStatusEvent', () => { expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ queryKey: taskKeys.list('ws-1'), }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() }) - it('keeps list invalidation only for non-completed task events', () => { + it('keeps completed task detail when a newer optimistic stream is active', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'old-stream' }, { id: 'new-stream' }], + activeStreamId: 'new-stream', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'completed', + streamId: 'old-stream', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('keeps completed task detail when only a newer optimistic stream is cached', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'new-stream' }, { id: 
'live-assistant:new-stream' }], + activeStreamId: 'new-stream', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'completed', + streamId: 'old-stream', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('invalidates completed task detail when the active stream disagreement is only stale cache', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'new-stream' }, { id: 'old-stream' }], + activeStreamId: 'new-stream', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'completed', + streamId: 'old-stream', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('invalidates completed task detail when a missing stream may be newer server state', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'old-stream' }], + activeStreamId: 'old-stream', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'completed', + streamId: 'new-stream', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('invalidates completed task detail when the completed stream is active', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [], + activeStreamId: 'stream-1', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'completed', + streamId: 'stream-1', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('invalidates the task list and detail for metadata-changing task events', () => { + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'renamed', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('invalidates the task list and removes detail cache for deleted task events', () => { + handleTaskStatusEvent( + queryClient, + 'ws-1', + 
JSON.stringify({ + chatId: 'chat-1', + type: 'deleted', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.removeQueries).toHaveBeenCalledTimes(1) + expect(queryClient.removeQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + }) + + it('invalidates the task list and detail for started task events', () => { + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'started', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('keeps started task detail when an unkeyed started event races an active stream', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'new-stream' }, { id: 'live-assistant:new-stream' }], + activeStreamId: 'new-stream', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'started', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('keeps started task detail when the started stream is already active', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'stream-1' }], + activeStreamId: 'stream-1', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'started', + streamId: 'stream-1', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('keeps started task detail when a stale started stream is older than the active stream', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'old-stream' }, { id: 'new-stream' }], + activeStreamId: 'new-stream', + resources: [], + }) + + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'started', + streamId: 'old-stream', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('invalidates started task detail when a missing stream may be newer server state', () => { + queryClient.getQueryData.mockReturnValue({ + id: 'chat-1', + title: null, + messages: [{ id: 'old-stream' }], + activeStreamId: 'old-stream', + resources: [], + }) + handleTaskStatusEvent( queryClient, 'ws-1', JSON.stringify({ chatId: 'chat-1', type: 'started', + streamId: 'new-stream', + timestamp: Date.now(), + }) + ) + + expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2) + 
expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.list('ws-1'), + }) + expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ + queryKey: taskKeys.detail('chat-1'), + }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() + }) + + it('keeps list invalidation only for unknown task event types', () => { + handleTaskStatusEvent( + queryClient, + 'ws-1', + JSON.stringify({ + chatId: 'chat-1', + type: 'archived', timestamp: Date.now(), }) ) @@ -48,11 +398,13 @@ describe('handleTaskStatusEvent', () => { expect(queryClient.invalidateQueries).toHaveBeenCalledWith({ queryKey: taskKeys.list('ws-1'), }) + expect(queryClient.removeQueries).not.toHaveBeenCalled() }) it('does not invalidate when task event payload is invalid', () => { handleTaskStatusEvent(queryClient, 'ws-1', '{') expect(queryClient.invalidateQueries).not.toHaveBeenCalled() + expect(queryClient.removeQueries).not.toHaveBeenCalled() }) }) diff --git a/apps/sim/hooks/use-task-events.ts b/apps/sim/hooks/use-task-events.ts index 0d6d4f7d0e9..b9a5216dad4 100644 --- a/apps/sim/hooks/use-task-events.ts +++ b/apps/sim/hooks/use-task-events.ts @@ -2,13 +2,68 @@ import { useEffect } from 'react' import { createLogger } from '@sim/logger' import type { QueryClient } from '@tanstack/react-query' import { useQueryClient } from '@tanstack/react-query' -import { taskKeys } from '@/hooks/queries/tasks' +import { getLiveAssistantMessageId } from '@/lib/copilot/chat/effective-transcript' +import { type TaskChatHistory, taskKeys } from '@/hooks/queries/tasks' const logger = createLogger('TaskEvents') +const TASK_STATUS_TYPES = ['started', 'completed', 'created', 'deleted', 'renamed'] as const +type TaskStatusEventType = (typeof TASK_STATUS_TYPES)[number] +const TASK_STATUS_TYPE_SET = new Set(TASK_STATUS_TYPES) + interface TaskStatusEventPayload { chatId?: string - type?: 'started' | 'completed' | 'created' | 'deleted' | 'renamed' + type?: TaskStatusEventType + streamId?: string +} + +const DETAIL_INVALIDATING_TASK_STATUS_TYPES = new Set([ + 'started', + 'completed', + 'renamed', +]) + +function isTaskStatusEventType(value: unknown): value is TaskStatusEventType { + return typeof value === 'string' && TASK_STATUS_TYPE_SET.has(value) +} + +function isLocalOptimisticActiveStream(current: TaskChatHistory | undefined) { + if (!current?.activeStreamId) return false + const liveAssistantId = getLiveAssistantMessageId(current.activeStreamId) + return current.messages.some((message) => message.id === liveAssistantId) +} + +/** + * Returns true when the cached active stream is known to be later in the + * chronological transcript than the stream that emitted this status event. + * If either stream is absent from the transcript, callers should refetch + * instead of inferring order from incomplete cache state. 
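+ * Example: with cached messages [old-stream, new-stream] and new-stream active,
+ * an event carrying streamId old-stream is known-stale, so callers can skip it.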
+ */ +function hasNewerKnownActiveStream(current: TaskChatHistory | undefined, streamId: string) { + if (!current?.activeStreamId || current.activeStreamId === streamId) return false + + const activeIndex = current.messages.findIndex((message) => message.id === current.activeStreamId) + const eventStreamIndex = current.messages.findIndex((message) => message.id === streamId) + if (activeIndex === -1) return false + if (eventStreamIndex === -1) return false + return activeIndex > eventStreamIndex +} + +function shouldSkipDetailInvalidationForStreamEvent( + current: TaskChatHistory | undefined, + payload: TaskStatusEventPayload +) { + if (payload.type !== 'started' && payload.type !== 'completed') return false + if (!current?.activeStreamId) return false + if (!payload.streamId) return isLocalOptimisticActiveStream(current) + if (payload.type === 'started' && current.activeStreamId === payload.streamId) return true + if (current.activeStreamId === payload.streamId) return false + if (hasNewerKnownActiveStream(current, payload.streamId)) return true + return ( + payload.type === 'completed' && + isLocalOptimisticActiveStream(current) && + !current.messages.some((message) => message.id === payload.streamId) + ) } function parseTaskStatusEventPayload(data: unknown): TaskStatusEventPayload | null { @@ -30,14 +85,13 @@ function parseTaskStatusEventPayload(data: unknown): TaskStatusEventPayload | nu return { ...(typeof record.chatId === 'string' ? { chatId: record.chatId } : {}), - ...(typeof record.type === 'string' - ? { type: record.type as TaskStatusEventPayload['type'] } - : {}), + ...(isTaskStatusEventType(record.type) ? { type: record.type } : {}), + ...(typeof record.streamId === 'string' ? { streamId: record.streamId } : {}), } } export function handleTaskStatusEvent( - queryClient: Pick, + queryClient: Pick, workspaceId: string, data: unknown ): void { @@ -48,6 +102,20 @@ export function handleTaskStatusEvent( } queryClient.invalidateQueries({ queryKey: taskKeys.list(workspaceId) }) + if (!payload.chatId) return + if (payload.type === 'deleted') { + queryClient.removeQueries({ queryKey: taskKeys.detail(payload.chatId) }) + return + } + if (payload.type === 'started' || payload.type === 'completed') { + const current = queryClient.getQueryData(taskKeys.detail(payload.chatId)) + if (shouldSkipDetailInvalidationForStreamEvent(current, payload)) { + return + } + } + if (payload.type && DETAIL_INVALIDATING_TASK_STATUS_TYPES.has(payload.type)) { + queryClient.invalidateQueries({ queryKey: taskKeys.detail(payload.chatId) }) + } } /** diff --git a/apps/sim/lib/api/contracts/knowledge/search.ts b/apps/sim/lib/api/contracts/knowledge/search.ts index 291257e7b16..ea1dff75ce0 100644 --- a/apps/sim/lib/api/contracts/knowledge/search.ts +++ b/apps/sim/lib/api/contracts/knowledge/search.ts @@ -36,6 +36,24 @@ export const knowledgeSearchBodySchema = z .transform((val) => val || undefined), rerankerEnabled: z.boolean().optional().default(false), rerankerModel: rerankerModelSchema.optional().default(DEFAULT_RERANKER_MODEL), + /** + * Number of vector results sent to Cohere as the documents array for reranking. Capped at 100 + * so each rerank call stays within a single Cohere search unit (1 query × ≤100 docs); see + * `RERANK_MODEL_PRICING` in `providers/models.ts`. 
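+ * When omitted, the Knowledge block surfaces this as 'Auto (4× results,
+ * capped at 100)' via its Documents Sent to Reranker placeholder.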
+ */ + rerankerInputCount: z + .number() + .int('rerankerInputCount must be an integer') + .min(1, 'rerankerInputCount must be at least 1') + .max(100, 'rerankerInputCount cannot exceed 100') + .optional() + .nullable() + .transform((val) => val ?? undefined), + rerankerApiKey: z + .string() + .optional() + .nullable() + .transform((val) => val || undefined), }) .refine( (data) => { diff --git a/apps/sim/lib/api/contracts/logs.ts b/apps/sim/lib/api/contracts/logs.ts index b0298e349ec..6e94720f91a 100644 --- a/apps/sim/lib/api/contracts/logs.ts +++ b/apps/sim/lib/api/contracts/logs.ts @@ -34,10 +34,18 @@ const logFilterQuerySchema = z.object({ durationValue: z.coerce.number().optional(), }) +export const logSortBySchema = z.enum(['date', 'duration', 'cost', 'status']).default('date') +export const logSortOrderSchema = z.enum(['asc', 'desc']).default('desc') + export const listLogsQuerySchema = logFilterQuerySchema.extend({ - details: z.enum(['basic', 'full']).optional().default('basic'), - limit: z.coerce.number().optional().default(100), - offset: z.coerce.number().optional().default(0), + cursor: z.string().optional(), + limit: z.coerce.number().int().min(1).max(200).optional().default(100), + sortBy: logSortBySchema, + sortOrder: logSortOrderSchema, +}) + +export const logDetailQuerySchema = z.object({ + workspaceId: z.string().min(1), }) export const statsQueryParamsSchema = logFilterQuerySchema.extend({ @@ -58,55 +66,196 @@ const workflowSummarySchema = z }) .partial() -const fileSchema = z +const fileSchema = z.object({ + id: z.string(), + name: z.string(), + size: z.number(), + type: z.string(), + url: z.string(), + key: z.string(), + uploadedAt: z.string(), + expiresAt: z.string(), + storageProvider: z.enum(['s3', 'blob', 'local']).optional(), + bucketName: z.string().optional(), +}) + +const tokenBreakdownSchema = z .object({ - id: z.string(), - name: z.string(), - size: z.number(), - type: z.string(), - url: z.string(), - key: z.string(), - uploadedAt: z.string(), - expiresAt: z.string(), - storageProvider: z.enum(['s3', 'blob', 'local']).optional(), - bucketName: z.string().optional(), + total: z.number().optional(), + input: z.number().optional(), + output: z.number().optional(), + prompt: z.number().optional(), + completion: z.number().optional(), + }) + .partial() + +const modelCostSchema = z + .object({ + input: z.number().optional(), + output: z.number().optional(), + total: z.number().optional(), + tokens: tokenBreakdownSchema.optional(), + }) + .partial() + +const costSummarySchema = z + .object({ + total: z.number().optional(), + input: z.number().optional(), + output: z.number().optional(), + tokens: tokenBreakdownSchema.optional(), + models: z.record(z.string(), modelCostSchema).optional(), + pricing: z + .object({ + input: z.number(), + output: z.number(), + cachedInput: z.number().optional(), + updatedAt: z.string(), + }) + .optional(), + }) + .partial() + +const pauseSummarySchema = z.object({ + status: z.string().nullable(), + total: z.number(), + resumed: z.number(), +}) + +const blockExecutionSchema = z.object({ + id: z.string(), + blockId: z.string(), + blockName: z.string(), + blockType: z.string(), + startedAt: z.string(), + endedAt: z.string(), + durationMs: z.number(), + status: z.enum(['success', 'error', 'skipped']), + errorMessage: z.string().optional(), + errorStackTrace: z.string().optional(), + inputData: z.unknown(), + outputData: z.unknown(), + cost: costSummarySchema.optional(), + metadata: z.record(z.string(), z.unknown()).optional(), +}) + +const 
toolCallSchema = z + .object({ + id: z.string().optional(), + name: z.string().optional(), + arguments: z.unknown().optional(), + result: z.unknown().optional(), + error: z.string().optional(), + startTime: z.string().optional(), + endTime: z.string().optional(), + duration: z.number().optional(), }) .passthrough() -export const workflowLogSchema = z +type TraceSpan = { + id: string + name: string + type: string + duration?: number + durationMs?: number + startTime?: string + endTime?: string + status?: string + blockId?: string + input?: unknown + output?: unknown + tokens?: number | { total?: number; input?: number; output?: number } + relativeStartMs?: number + toolCalls?: Array> + children?: TraceSpan[] +} + +const traceSpanSchema: z.ZodType = z.lazy(() => + z + .object({ + id: z.string(), + name: z.string(), + type: z.string(), + duration: z.number().optional(), + durationMs: z.number().optional(), + startTime: z.string().optional(), + endTime: z.string().optional(), + status: z.string().optional(), + blockId: z.string().optional(), + input: z.unknown().optional(), + output: z.unknown().optional(), + tokens: z + .union([ + z.number(), + z + .object({ + total: z.number().optional(), + input: z.number().optional(), + output: z.number().optional(), + }) + .partial(), + ]) + .optional(), + relativeStartMs: z.number().optional(), + toolCalls: z.array(toolCallSchema).optional(), + children: z.array(traceSpanSchema).optional(), + }) + .passthrough() +) + +const executionDataDetailSchema = z .object({ - id: z.string(), - workflowId: z.string().nullable(), - executionId: z.string().nullable().optional(), - deploymentVersionId: z.string().nullable().optional(), - deploymentVersion: z.number().nullable().optional(), - deploymentVersionName: z.string().nullable().optional(), - level: z.string(), - status: z.string().nullable().optional(), - duration: z.string().nullable(), - trigger: z.string().nullable(), - createdAt: z.string(), - workflow: workflowSummarySchema.nullable().optional(), - jobTitle: z.string().nullable().optional(), - files: z.array(fileSchema).optional(), - cost: z.unknown().optional(), - hasPendingPause: z.boolean().nullable().optional(), - pauseSummary: z.unknown().optional(), - executionData: z.unknown().optional(), + totalDuration: z.number().nullable().optional(), + enhanced: z.literal(true).optional(), + traceSpans: z.array(traceSpanSchema).optional(), + blockExecutions: z.array(blockExecutionSchema).optional(), + finalOutput: z.unknown().optional(), + workflowInput: z.unknown().optional(), + blockInput: z.record(z.string(), z.unknown()).optional(), + trigger: z.unknown().optional(), }) .passthrough() -export type WorkflowLogData = z.output +export const workflowLogSummarySchema = z.object({ + id: z.string(), + workflowId: z.string().nullable(), + executionId: z.string().nullable(), + deploymentVersionId: z.string().nullable(), + deploymentVersion: z.number().nullable(), + deploymentVersionName: z.string().nullable(), + level: z.string(), + status: z.string().nullable(), + duration: z.string().nullable(), + trigger: z.string().nullable(), + createdAt: z.string(), + workflow: workflowSummarySchema.nullable(), + jobTitle: z.string().nullable(), + cost: costSummarySchema.nullable(), + pauseSummary: pauseSummarySchema, + hasPendingPause: z.boolean(), +}) -export const logsResponseSchema = z.object({ - data: z.array(workflowLogSchema), - total: z.number(), - page: z.number(), - pageSize: z.number(), - totalPages: z.number(), +export const workflowLogDetailSchema = 
workflowLogSummarySchema.extend({ + executionData: executionDataDetailSchema, + files: z.array(fileSchema).nullable(), }) -export type LogsResponse = z.output +export type WorkflowLogSummary = z.output +export type WorkflowLogDetail = z.output + +/** + * A row that may be either a list-view summary or a fully loaded detail. Used by + * UI surfaces that render the same log before and after its detail query resolves. + */ +export type WorkflowLogRow = WorkflowLogSummary & + Partial> + +export const listLogsResponseSchema = z.object({ + data: z.array(workflowLogSummarySchema), + nextCursor: z.string().nullable(), +}) + +export type ListLogsResponse = z.output export const segmentStatsSchema = z.object({ timestamp: z.string(), @@ -179,7 +328,7 @@ export const listLogsContract = defineRouteContract({ query: listLogsQuerySchema, response: { mode: 'json', - schema: logsResponseSchema, + schema: listLogsResponseSchema, }, }) @@ -187,10 +336,24 @@ export const getLogDetailContract = defineRouteContract({ method: 'GET', path: '/api/logs/[id]', params: logIdParamsSchema, + query: logDetailQuerySchema, + response: { + mode: 'json', + schema: z.object({ + data: workflowLogDetailSchema, + }), + }, +}) + +export const getLogByExecutionIdContract = defineRouteContract({ + method: 'GET', + path: '/api/logs/by-execution/[executionId]', + params: executionIdParamsSchema, + query: logDetailQuerySchema, response: { mode: 'json', schema: z.object({ - data: workflowLogSchema, + data: workflowLogDetailSchema, }), }, }) diff --git a/apps/sim/lib/auth/anonymous.ts b/apps/sim/lib/auth/anonymous.ts index 839e65487ec..7504ee7fd62 100644 --- a/apps/sim/lib/auth/anonymous.ts +++ b/apps/sim/lib/auth/anonymous.ts @@ -103,7 +103,3 @@ export function createAnonymousSession(): AnonymousSession { }, } } - -export function createAnonymousGetSessionResponse(): { data: AnonymousSession } { - return { data: createAnonymousSession() } -} diff --git a/apps/sim/lib/copilot/chat/attachment-preview.ts b/apps/sim/lib/copilot/chat/attachment-preview.ts new file mode 100644 index 00000000000..f4a65aa0ce0 --- /dev/null +++ b/apps/sim/lib/copilot/chat/attachment-preview.ts @@ -0,0 +1,9 @@ +export function getMothershipAttachmentPreviewUrl(file: { + key: string + media_type: string +}): string | undefined { + if (!file.media_type.startsWith('image/') && !file.media_type.startsWith('video/')) { + return undefined + } + return `/api/files/serve/${encodeURIComponent(file.key)}?context=mothership` +} diff --git a/apps/sim/lib/copilot/chat/display-message.ts b/apps/sim/lib/copilot/chat/display-message.ts index a254e5e3e94..51622070009 100644 --- a/apps/sim/lib/copilot/chat/display-message.ts +++ b/apps/sim/lib/copilot/chat/display-message.ts @@ -15,6 +15,7 @@ import { type ToolCallInfo, ToolCallStatus, } from '@/app/workspace/[workspaceId]/home/types' +import { getMothershipAttachmentPreviewUrl } from './attachment-preview' import type { PersistedContentBlock, PersistedMessage } from './persisted-message' import { withBlockTiming } from './persisted-message' @@ -91,9 +92,7 @@ function toDisplayAttachment(f: PersistedMessage['fileAttachments']): ChatMessag filename: a.filename, media_type: a.media_type, size: a.size, - previewUrl: a.media_type.startsWith('image/') - ? 
`/api/files/serve/${encodeURIComponent(a.key)}?context=mothership` - : undefined, + previewUrl: getMothershipAttachmentPreviewUrl(a), })) } diff --git a/apps/sim/lib/copilot/chat/persisted-message.test.ts b/apps/sim/lib/copilot/chat/persisted-message.test.ts index 377a8c2b0b5..de701394235 100644 --- a/apps/sim/lib/copilot/chat/persisted-message.test.ts +++ b/apps/sim/lib/copilot/chat/persisted-message.test.ts @@ -75,6 +75,102 @@ describe('persisted-message', () => { expect(persisted.requestId).toBe('sim-request-1') }) + it('redacts sim_key credential tags so persisted assistant messages never re-expose the key', () => { + const live = `Here is your key: ${JSON.stringify({ value: 'sk-sim-secret-123', type: 'sim_key' })} save it.` + const result: OrchestratorResult = { + success: true, + content: live, + requestId: 'req-1', + contentBlocks: [{ type: 'text', content: live }], + toolCalls: [], + } + + const persisted = buildPersistedAssistantMessage(result) + + expect(persisted.content).not.toContain('sk-sim-secret-123') + expect(persisted.content).toContain('"redacted":true') + const textBlock = persisted.contentBlocks?.find((b) => b.type === 'text') + expect(textBlock?.content).not.toContain('sk-sim-secret-123') + expect(textBlock?.content).toContain('"redacted":true') + }) + + it('redacts sim_key credential tags split across streamed text chunks', () => { + const chunks = [ + 'Here\'s your key:\n\n{"value": "sk-', + 'sim-secret', + '-12345', + '", "type":', + ' "sim_key"}', + '\n\nDone.', + ] + const result: OrchestratorResult = { + success: true, + content: chunks.join(''), + requestId: 'req-1', + contentBlocks: chunks.map((c) => ({ type: 'text', content: c })), + toolCalls: [], + } + + const persisted = buildPersistedAssistantMessage(result) + + expect(persisted.content).not.toContain('sk-sim-secret-12345') + expect(persisted.contentBlocks).toBeDefined() + const joined = (persisted.contentBlocks ?? []).map((b) => b.content ?? 
'').join('') + expect(joined).not.toContain('sk-sim-secret-12345') + expect(joined).toContain('"redacted":true') + }) + + it('redacts the api key from a persisted generate_api_key tool result output', () => { + const result: OrchestratorResult = { + success: true, + content: '', + requestId: 'req-1', + contentBlocks: [ + { + type: 'tool_call', + toolCall: { + id: 'tool-1', + name: 'generate_api_key', + status: 'success', + params: { name: 'workspace-key' }, + result: { + success: true, + output: { + id: 'k1', + name: 'workspace-key', + key: 'sk-sim-tool-output-secret', + }, + }, + }, + }, + ], + toolCalls: [], + } + + const persisted = buildPersistedAssistantMessage(result) + const toolBlock = persisted.contentBlocks?.find((b) => b.toolCall?.name === 'generate_api_key') + const output = toolBlock?.toolCall?.result?.output as Record | undefined + + expect(output?.key).toBe('[REDACTED]') + expect(output?.redacted).toBe(true) + expect(JSON.stringify(persisted)).not.toContain('sk-sim-tool-output-secret') + }) + + it('leaves non-sim_key credential tags untouched', () => { + const live = `${JSON.stringify({ value: 'https://oauth.example/connect', type: 'link', provider: 'slack' })}` + const result: OrchestratorResult = { + success: true, + content: live, + requestId: 'req-1', + contentBlocks: [{ type: 'text', content: live }], + toolCalls: [], + } + + const persisted = buildPersistedAssistantMessage(result) + + expect(persisted.content).toContain('https://oauth.example/connect') + }) + it('normalizes legacy tool_call and top-level toolCalls shapes', () => { const normalized = normalizeMessage({ id: 'msg-1', diff --git a/apps/sim/lib/copilot/chat/persisted-message.ts b/apps/sim/lib/copilot/chat/persisted-message.ts index 3c34fb4901f..e249ecef43f 100644 --- a/apps/sim/lib/copilot/chat/persisted-message.ts +++ b/apps/sim/lib/copilot/chat/persisted-message.ts @@ -1,4 +1,9 @@ import { generateId } from '@sim/utils/id' +import { + mergeAndRedactPersistedBlocks, + redactSensitiveContent, + redactToolCallResult, +} from '@/lib/copilot/chat/sim-key-redaction' import { MothershipStreamV1CompletionStatus, MothershipStreamV1EventType, @@ -164,11 +169,13 @@ function mapContentBlockBody(block: ContentBlock): PersistedContentBlock { state === 'pending' || state === 'executing' + const redactedResult = redactToolCallResult(block.toolCall.name, block.toolCall.result) + const toolCall: PersistedToolCall = { id: block.toolCall.id, name: block.toolCall.name, state, - ...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }), + ...(isSubagentTool && isNonTerminal ? {} : { result: redactedResult }), ...(isSubagentTool && isNonTerminal ? 
{} : block.toolCall.params @@ -202,7 +209,7 @@ export function buildPersistedAssistantMessage( const message: PersistedMessage = { id: generateId(), role: 'assistant', - content: result.content, + content: redactSensitiveContent(result.content), timestamp: new Date().toISOString(), } @@ -211,7 +218,7 @@ export function buildPersistedAssistantMessage( } if (result.contentBlocks.length > 0) { - message.contentBlocks = result.contentBlocks.map(mapContentBlock) + message.contentBlocks = mergeAndRedactPersistedBlocks(result.contentBlocks.map(mapContentBlock)) } return message diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts index 21d94e56bb5..a745f209c9e 100644 --- a/apps/sim/lib/copilot/chat/post.ts +++ b/apps/sim/lib/copilot/chat/post.ts @@ -329,6 +329,7 @@ async function persistUserMessage(params: { workspaceId, chatId, type: 'started', + streamId: userMessageId, }) } @@ -430,6 +431,7 @@ function buildOnComplete(params: { workspaceId, chatId, type: 'completed', + streamId: userMessageId, }) } } catch (error) { @@ -461,6 +463,7 @@ function buildOnError(params: { workspaceId, chatId, type: 'completed', + streamId: userMessageId, }) } } catch (error) { diff --git a/apps/sim/lib/copilot/chat/sim-key-redaction.test.ts b/apps/sim/lib/copilot/chat/sim-key-redaction.test.ts new file mode 100644 index 00000000000..70fe637e6d2 --- /dev/null +++ b/apps/sim/lib/copilot/chat/sim-key-redaction.test.ts @@ -0,0 +1,154 @@ +/** + * @vitest-environment node + */ + +import { describe, expect, it } from 'vitest' +import type { ChatMessage } from '@/app/workspace/[workspaceId]/home/types' +import { + captureRevealedSimKeys, + extractRevealedSimKeys, + restoreRevealedSimKeysForMessage, +} from './sim-key-redaction' + +const credential = (value: string) => + `${JSON.stringify({ value, type: 'sim_key' })}` +const redacted = `${JSON.stringify({ type: 'sim_key', redacted: true })}` + +describe('sim-key-redaction', () => { + describe('extractRevealedSimKeys', () => { + it('returns sim_key values in document order', () => { + const text = `first ${credential('sk-sim-A')} mid ${credential('sk-sim-B')}` + expect(extractRevealedSimKeys(text)).toEqual(['sk-sim-A', 'sk-sim-B']) + }) + + it('skips redacted entries and non-sim_key tags', () => { + const link = `${JSON.stringify({ value: 'https://x', type: 'link', provider: 'slack' })}` + const text = `${link} ${credential('sk-sim-A')} ${redacted}` + expect(extractRevealedSimKeys(text)).toEqual(['sk-sim-A']) + }) + }) + + describe('captureRevealedSimKeys', () => { + it('records new keys under each provided key', () => { + const cache = new Map() + captureRevealedSimKeys(cache, ['msg-1', 'req-1'], credential('sk-sim-A')) + expect(cache.get('msg-1')).toEqual(['sk-sim-A']) + expect(cache.get('req-1')).toEqual(['sk-sim-A']) + }) + + it('extends but never shrinks the captured list across calls', () => { + const cache = new Map() + captureRevealedSimKeys( + cache, + ['msg-1'], + `${credential('sk-sim-A')} ${credential('sk-sim-B')}` + ) + captureRevealedSimKeys(cache, ['msg-1'], credential('sk-sim-A')) + expect(cache.get('msg-1')).toEqual(['sk-sim-A', 'sk-sim-B']) + }) + + it('skips undefined keys without throwing', () => { + const cache = new Map() + captureRevealedSimKeys(cache, ['msg-1', undefined], credential('sk-sim-A')) + expect(cache.get('msg-1')).toEqual(['sk-sim-A']) + expect(cache.size).toBe(1) + }) + + it('ignores content with no credential tag', () => { + const cache = new Map() + captureRevealedSimKeys(cache, ['msg-1'], 'plain assistant 
text') + expect(cache.has('msg-1')).toBe(false) + }) + }) + + describe('restoreRevealedSimKeysForMessage', () => { + it('substitutes the live key back into a redacted message', () => { + const cache = new Map([['msg-1', ['sk-sim-A']]]) + const msg: ChatMessage = { + id: 'msg-1', + role: 'assistant', + content: `Here is your key: ${redacted} save it.`, + contentBlocks: [{ type: 'text', content: `Here is your key: ${redacted} save it.` }], + } + const restored = restoreRevealedSimKeysForMessage(msg, cache) + expect(restored.content).toContain('"sk-sim-A"') + expect(restored.content).not.toContain('"redacted":true') + expect(restored.contentBlocks?.[0].content).toContain('"sk-sim-A"') + }) + + it('substitutes multiple keys in stream order', () => { + const cache = new Map([['msg-1', ['sk-sim-A', 'sk-sim-B']]]) + const msg: ChatMessage = { + id: 'msg-1', + role: 'assistant', + content: `first ${redacted} second ${redacted}`, + } + const restored = restoreRevealedSimKeysForMessage(msg, cache) + expect(restored.content).toBe( + `first ${credential('sk-sim-A')} second ${credential('sk-sim-B')}` + ) + }) + + it('leaves a redacted tag in place if no live value is captured for that slot', () => { + const cache = new Map([['msg-1', ['sk-sim-A']]]) + const msg: ChatMessage = { + id: 'msg-1', + role: 'assistant', + content: `first ${redacted} second ${redacted}`, + } + const restored = restoreRevealedSimKeysForMessage(msg, cache) + expect(restored.content).toBe(`first ${credential('sk-sim-A')} second ${redacted}`) + }) + + it('returns the same message reference when nothing to restore', () => { + const cache = new Map() + const msg: ChatMessage = { + id: 'msg-1', + role: 'assistant', + content: 'no credentials here', + } + expect(restoreRevealedSimKeysForMessage(msg, cache)).toBe(msg) + }) + + it('does nothing for user messages', () => { + const cache = new Map([['msg-1', ['sk-sim-A']]]) + const msg: ChatMessage = { + id: 'msg-1', + role: 'user', + content: redacted, + } + expect(restoreRevealedSimKeysForMessage(msg, cache)).toBe(msg) + }) + + it('threads the cursor across separate content blocks so each block gets its matching key', () => { + const cache = new Map([['msg-1', ['sk-sim-A', 'sk-sim-B']]]) + const msg: ChatMessage = { + id: 'msg-1', + role: 'assistant', + content: `first ${redacted} (tool ran) second ${redacted}`, + contentBlocks: [ + { type: 'text', content: `first ${redacted}` }, + { type: 'tool_call', content: '' }, + { type: 'text', content: `second ${redacted}` }, + ], + } + const restored = restoreRevealedSimKeysForMessage(msg, cache) + expect(restored.contentBlocks?.[0].content).toContain('"sk-sim-A"') + expect(restored.contentBlocks?.[0].content).not.toContain('"sk-sim-B"') + expect(restored.contentBlocks?.[2].content).toContain('"sk-sim-B"') + expect(restored.contentBlocks?.[2].content).not.toContain('"sk-sim-A"') + }) + + it('isolates revealed values by message id (multiple keys across messages)', () => { + const cache = new Map([ + ['msg-1', ['sk-sim-A']], + ['msg-2', ['sk-sim-B']], + ]) + const msg1: ChatMessage = { id: 'msg-1', role: 'assistant', content: redacted } + const msg2: ChatMessage = { id: 'msg-2', role: 'assistant', content: redacted } + expect(restoreRevealedSimKeysForMessage(msg1, cache).content).toContain('sk-sim-A') + expect(restoreRevealedSimKeysForMessage(msg2, cache).content).toContain('sk-sim-B') + expect(restoreRevealedSimKeysForMessage(msg1, cache).content).not.toContain('sk-sim-B') + }) + }) +}) diff --git a/apps/sim/lib/copilot/chat/sim-key-redaction.ts 
b/apps/sim/lib/copilot/chat/sim-key-redaction.ts
new file mode 100644
index 00000000000..d5aba0f302d
--- /dev/null
+++ b/apps/sim/lib/copilot/chat/sim-key-redaction.ts
@@ -0,0 +1,263 @@
+import type { PersistedContentBlock } from '@/lib/copilot/chat/persisted-message'
+import {
+  MothershipStreamV1EventType,
+  MothershipStreamV1TextChannel,
+} from '@/lib/copilot/generated/mothership-stream-v1'
+import { GenerateApiKey } from '@/lib/copilot/generated/tool-catalog-v1'
+import { REDACTED_MARKER } from '@/lib/core/security/redaction'
+import type { ChatMessage, ContentBlock } from '@/app/workspace/[workspaceId]/home/types'
+
+/**
+ * Two-sided handling of `sim_key` API keys in the Mothership chat:
+ *
+ * - **Write side** (server, runs in `buildPersistedAssistantMessage`):
+ *   strip every revealed `<credential>` value before the row
+ *   hits Postgres. Reloading a chat days later — or pulling the row from the
+ *   DB directly — never re-exposes the key.
+ *
+ * - **Read side** (client, runs in `useChat`'s message selector): an in-memory
+ *   page-session cache captures revealed values during the live SSE stream.
+ *   When the post-stream refetch returns the redacted persisted message, the
+ *   selector re-injects the captured values so the user can still copy the
+ *   key they just generated. Cache is dropped on page unload.
+ */
+
+const CREDENTIAL_TAG_PATTERN = /<credential>([\s\S]*?)<\/credential>/g
+const REDACTED_TAG_PATTERN = /<credential>[^<]*"redacted"\s*:\s*true[^<]*<\/credential>/
+const SIM_KEY_TYPE = 'sim_key'
+const REDACTED_SIM_KEY_TAG = `<credential>${JSON.stringify({
+  type: SIM_KEY_TYPE,
+  redacted: true,
+})}</credential>`
+
+interface CredentialTagBody {
+  type?: unknown
+  value?: unknown
+  redacted?: unknown
+}
+
+function parseCredentialBody(body: string): CredentialTagBody | null {
+  try {
+    return JSON.parse(body) as CredentialTagBody
+  } catch {
+    return null
+  }
+}
+
+function hasRedactedSimKeyTag(content: string | undefined): boolean {
+  return typeof content === 'string' && REDACTED_TAG_PATTERN.test(content)
+}
+
+// Write side ---------------------------------------------------------------
+
+/**
+ * Replace every revealed `<credential>` tag in `content` with a
+ * placeholder marked `redacted: true`. Other credential types (e.g. OAuth
+ * `link`) and malformed bodies pass through unchanged.
+ */
+export function redactSensitiveContent<T>(content: T): T {
+  if (typeof content !== 'string' || !content.includes('<credential>')) return content
+  return content.replace(CREDENTIAL_TAG_PATTERN, (match, body: string) => {
+    const parsed = parseCredentialBody(body)
+    return parsed?.type === SIM_KEY_TYPE ? REDACTED_SIM_KEY_TAG : match
+  }) as T
+}
+
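A small before/after sketch of what the write side persists, assuming the tag format above (the literal key strings are examples, not repo fixtures):

```ts
// The streamed assistant text carries the revealed key inside a <credential> tag.
const streamed =
  'Here is your key: <credential>{"value":"sk-sim-abc123","type":"sim_key"}</credential> keep it safe.'

redactSensitiveContent(streamed)
// => 'Here is your key: <credential>{"type":"sim_key","redacted":true}</credential> keep it safe.'

// Non sim_key credential tags (e.g. OAuth connect links) pass through unchanged.
const link = '<credential>{"value":"https://oauth.example/connect","type":"link"}</credential>'
redactSensitiveContent(link) === link // true
```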
+/**
+ * Replace the raw `key` field in a `generate_api_key` tool result with the
+ * shared redaction marker. The persisted tool result still records the
+ * call's outcome and metadata; only the secret is stripped.
+ */
+export function redactToolCallResult(
+  toolName: string | undefined,
+  result: { success: boolean; output?: unknown; error?: string } | undefined
+): { success: boolean; output?: unknown; error?: string } | undefined {
+  if (!result || toolName !== GenerateApiKey.id) return result
+  const output = result.output
+  if (!output || typeof output !== 'object') return result
+  const record = output as Record<string, unknown>
+  if (typeof record.key !== 'string') return result
+  return {
+    ...result,
+    output: { ...record, key: REDACTED_MARKER, redacted: true },
+  }
+}
+
+function isMergeableAssistantTextBlock(block: PersistedContentBlock): boolean {
+  return (
+    block.type === MothershipStreamV1EventType.text &&
+    block.channel === MothershipStreamV1TextChannel.assistant &&
+    block.toolCall === undefined
+  )
+}
+
+/**
+ * Streaming produces one assistant-text block per token chunk, which means a
+ * `<credential>...</credential>` tag can straddle dozens of blocks. Per-block
+ * redaction can't see across that boundary and would persist the secret. So
+ * coalesce consecutive same-lane assistant-text blocks into a single block,
+ * then redact the merged content.
+ *
+ * Block timestamps for assistant text aren't user-visible (only `thinking`
+ * blocks drive the "Thought for Ns" chip), so collapsing the run is safe.
+ */
+export function mergeAndRedactPersistedBlocks(
+  blocks: PersistedContentBlock[]
+): PersistedContentBlock[] {
+  const out: PersistedContentBlock[] = []
+  let runStart = -1
+  let runLane: PersistedContentBlock['lane']
+
+  const flushRun = (endExclusive: number) => {
+    if (runStart < 0) return
+    const run = blocks.slice(runStart, endExclusive)
+    runStart = -1
+    if (run.length === 0) return
+    if (run.length === 1) {
+      const single = run[0]
+      out.push({ ...single, content: redactSensitiveContent(single.content) })
+      return
+    }
+    const head = run[0]
+    const tail = run[run.length - 1]
+    out.push({
+      ...head,
+      content: redactSensitiveContent(run.map((b) => b.content ?? '').join('')),
+      ...(tail.endedAt !== undefined ? { endedAt: tail.endedAt } : {}),
+    })
+  }
+
+  for (let i = 0; i < blocks.length; i++) {
+    const block = blocks[i]
+    const sameRun = runStart >= 0 && isMergeableAssistantTextBlock(block) && runLane === block.lane
+    if (sameRun) continue
+    flushRun(i)
+    if (isMergeableAssistantTextBlock(block)) {
+      runStart = i
+      runLane = block.lane
+    } else {
+      out.push(block)
+    }
+  }
+  flushRun(blocks.length)
+
+  return out
+}
+
+// Read side ----------------------------------------------------------------
+
+/**
+ * Page-session cache of `sim_key` credential values revealed during the live
+ * SSE stream, keyed by either the synthetic live-assistant id (used while
+ * streaming) or the persisted message's `requestId` (used after refetch).
+ * Lives in a `useRef`; never persisted; dropped on unload.
+ */
+export type RevealedSimKeysByMessage = Map<string, string[]>
+
+/**
+ * Scan an assembled assistant message for `<credential>` tags
+ * and return their values in stream order, skipping anything already redacted.
+ */
+export function extractRevealedSimKeys(content: string): string[] {
+  if (!content || !content.includes('<credential>')) return []
+  const values: string[] = []
+  for (const match of content.matchAll(CREDENTIAL_TAG_PATTERN)) {
+    const parsed = parseCredentialBody(match[1])
+    if (parsed?.type === SIM_KEY_TYPE && !parsed.redacted && typeof parsed.value === 'string') {
+      values.push(parsed.value)
+    }
+  }
+  return values
+}
+
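On the read side, stream order is what lets redacted placeholders be matched back to live values later; a short sketch of `extractRevealedSimKeys` under the same tag format (strings are illustrative):

```ts
const assembled =
  'First: <credential>{"value":"sk-sim-A","type":"sim_key"}</credential>, ' +
  'second: <credential>{"value":"sk-sim-B","type":"sim_key"}</credential>'

extractRevealedSimKeys(assembled)
// => ['sk-sim-A', 'sk-sim-B']  (the Nth captured value later fills the Nth redacted tag)

// Already-redacted placeholders and non sim_key tags are skipped:
extractRevealedSimKeys('<credential>{"type":"sim_key","redacted":true}</credential>')
// => []
```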
+/**
+ * Extend the cache entries for the given keys with any newly-revealed values.
+ * Each key in `keys` receives the same array — passing both the live-stream
+ * id and the persisted `requestId` lets the post-finalize refetch hit the
+ * cache after the message is renamed to its real UUID. The longest captured
+ * list wins so a rerun that surfaces fewer values can't shrink the entry.
+ */
+export function captureRevealedSimKeys(
+  cache: RevealedSimKeysByMessage,
+  keys: ReadonlyArray<string | undefined>,
+  content: string
+): void {
+  if (!content.includes('<credential>')) return
+  const next = extractRevealedSimKeys(content)
+  if (next.length === 0) return
+  for (const key of keys) {
+    if (!key) continue
+    const existing = cache.get(key)
+    if (!existing || next.length > existing.length) cache.set(key, next)
+  }
+}
+
+function restoreInString(
+  content: string,
+  revealedValues: string[],
+  startCursor: number
+): {
+  next: string
+  changed: boolean
+  cursor: number
+} {
+  if (!content.includes('<credential>') || revealedValues.length === 0) {
+    return { next: content, changed: false, cursor: startCursor }
+  }
+  let cursor = startCursor
+  let changed = false
+  const next = content.replace(CREDENTIAL_TAG_PATTERN, (match, body: string) => {
+    const parsed = parseCredentialBody(body)
+    if (parsed?.type === SIM_KEY_TYPE && parsed.redacted === true) {
+      const value = revealedValues[cursor]
+      cursor += 1
+      if (typeof value === 'string') {
+        changed = true
+        return `<credential>${JSON.stringify({ value, type: SIM_KEY_TYPE })}</credential>`
+      }
+    }
+    return match
+  })
+  return { next, changed, cursor }
+}
+
+/**
+ * Replace redacted `sim_key` tags in a single message with the live values
+ * captured for that message. Returns the original message reference unchanged
+ * when there's nothing to substitute, so memoized children keep their identity.
+ */
+export function restoreRevealedSimKeysForMessage(
+  message: ChatMessage,
+  cache: RevealedSimKeysByMessage
+): ChatMessage {
+  if (message.role !== 'assistant') return message
+  const revealed =
+    cache.get(message.id) ?? (message.requestId ? cache.get(message.requestId) : undefined)
+  if (!revealed || revealed.length === 0) return message
+  if (
+    !hasRedactedSimKeyTag(message.content) &&
+    !message.contentBlocks?.some((b) => hasRedactedSimKeyTag(b.content))
+  ) {
+    return message
+  }
+
+  const restoredContent = restoreInString(message.content, revealed, 0)
+  let blocksChanged = false
+  let blockCursor = 0
+  const nextBlocks: ContentBlock[] | undefined = message.contentBlocks?.map((block) => {
+    if (!hasRedactedSimKeyTag(block.content)) return block
+    const restored = restoreInString(block.content as string, revealed, blockCursor)
+    blockCursor = restored.cursor
+    if (!restored.changed) return block
+    blocksChanged = true
+    return { ...block, content: restored.next }
+  })
+
+  if (!restoredContent.changed && !blocksChanged) return message
+
+  return {
+    ...message,
+    content: restoredContent.next,
+    ...(nextBlocks ?
{ contentBlocks: nextBlocks } : {}), + } +} diff --git a/apps/sim/lib/copilot/resources/extraction.ts b/apps/sim/lib/copilot/resources/extraction.ts index 29ca644a21a..6cba5a3bee0 100644 --- a/apps/sim/lib/copilot/resources/extraction.ts +++ b/apps/sim/lib/copilot/resources/extraction.ts @@ -7,7 +7,6 @@ import { FunctionExecute, GenerateImage, GenerateVisualization, - GetWorkflowLogs, Knowledge, KnowledgeBase, UserTable, @@ -30,7 +29,6 @@ const RESOURCE_TOOL_NAMES: Set = new Set([ Knowledge.id, GenerateVisualization.id, GenerateImage.id, - GetWorkflowLogs.id, ]) export function isResourceToolName(toolName: string): boolean { @@ -214,19 +212,6 @@ export function extractResourcesFromToolResult( return resources } - case GetWorkflowLogs.id: { - const entries = Array.isArray(output) ? output : Array.isArray(result.data) ? result.data : [] - const resources: ChatResource[] = [] - for (const entry of entries) { - const rec = asRecord(entry) - const logId = rec.id as string | undefined - if (logId) { - resources.push({ type: 'log', id: logId, title: 'Log' }) - } - } - return resources - } - default: return [] } diff --git a/apps/sim/lib/copilot/tasks.ts b/apps/sim/lib/copilot/tasks.ts index 5828a711cb4..db6594ebf28 100644 --- a/apps/sim/lib/copilot/tasks.ts +++ b/apps/sim/lib/copilot/tasks.ts @@ -13,6 +13,7 @@ interface TaskStatusEvent { workspaceId: string chatId: string type: 'started' | 'completed' | 'created' | 'deleted' | 'renamed' + streamId?: string } const channel = diff --git a/apps/sim/lib/core/config/env.ts b/apps/sim/lib/core/config/env.ts index 969324591b0..14bf33ce5d4 100644 --- a/apps/sim/lib/core/config/env.ts +++ b/apps/sim/lib/core/config/env.ts @@ -430,6 +430,7 @@ export const env = createEnv({ NEXT_PUBLIC_E2B_ENABLED: z.string().optional(), NEXT_PUBLIC_BEDROCK_DEFAULT_CREDENTIALS: z.string().optional(), // Hide Bedrock credential fields when deployment uses AWS default credential chain (IAM roles, instance profiles, ECS task roles, IRSA) NEXT_PUBLIC_AZURE_CONFIGURED: z.string().optional(), // Hide Azure credential fields when endpoint/key/version are pre-configured server-side + NEXT_PUBLIC_COHERE_CONFIGURED: z.string().optional(), // Hide Cohere API key field on Knowledge block when COHERE_API_KEY is pre-configured server-side NEXT_PUBLIC_COPILOT_TRAINING_ENABLED: z.string().optional(), NEXT_PUBLIC_ENABLE_PLAYGROUND: z.string().optional(), // Enable component playground at /playground NEXT_PUBLIC_DOCUMENTATION_URL: z.string().url().optional(), // Custom documentation URL @@ -496,6 +497,7 @@ export const env = createEnv({ NEXT_PUBLIC_E2B_ENABLED: process.env.NEXT_PUBLIC_E2B_ENABLED, NEXT_PUBLIC_BEDROCK_DEFAULT_CREDENTIALS: process.env.NEXT_PUBLIC_BEDROCK_DEFAULT_CREDENTIALS, NEXT_PUBLIC_AZURE_CONFIGURED: process.env.NEXT_PUBLIC_AZURE_CONFIGURED, + NEXT_PUBLIC_COHERE_CONFIGURED: process.env.NEXT_PUBLIC_COHERE_CONFIGURED, NEXT_PUBLIC_COPILOT_TRAINING_ENABLED: process.env.NEXT_PUBLIC_COPILOT_TRAINING_ENABLED, NEXT_PUBLIC_ENABLE_PLAYGROUND: process.env.NEXT_PUBLIC_ENABLE_PLAYGROUND, NEXT_PUBLIC_POSTHOG_ENABLED: process.env.NEXT_PUBLIC_POSTHOG_ENABLED, diff --git a/apps/sim/lib/core/config/feature-flags.ts b/apps/sim/lib/core/config/feature-flags.ts index c593c2b3eda..3a69af74fd1 100644 --- a/apps/sim/lib/core/config/feature-flags.ts +++ b/apps/sim/lib/core/config/feature-flags.ts @@ -156,6 +156,14 @@ export const isOllamaConfigured = Boolean(env.OLLAMA_URL) */ export const isAzureConfigured = isTruthy(getEnv('NEXT_PUBLIC_AZURE_CONFIGURED')) +/** + * Whether a Cohere API key is 
pre-configured server-side for the Knowledge block reranker + * (`COHERE_API_KEY` or `COHERE_API_KEY_1/2/3`). When true, the Cohere API Key field is hidden + * in the Knowledge block UI. + * Set NEXT_PUBLIC_COHERE_CONFIGURED=true in self-hosted deployments that ship a Cohere key. + */ +export const isCohereConfigured = isTruthy(getEnv('NEXT_PUBLIC_COHERE_CONFIGURED')) + /** * Are invitations disabled globally * When true, workspace invitations are disabled for all users diff --git a/apps/sim/lib/knowledge/reranker.ts b/apps/sim/lib/knowledge/reranker.ts index 54b2ae02c91..b1bebc11aa8 100644 --- a/apps/sim/lib/knowledge/reranker.ts +++ b/apps/sim/lib/knowledge/reranker.ts @@ -2,6 +2,7 @@ import { createLogger } from '@sim/logger' import { getBYOKKey } from '@/lib/api-key/byok' import { getRotatingApiKey } from '@/lib/core/config/api-keys' import { env } from '@/lib/core/config/env' +import { isHosted } from '@/lib/core/config/feature-flags' import { isRetryableError, retryWithExponentialBackoff } from '@/lib/knowledge/documents/utils' import { DEFAULT_RERANKER_MODEL, @@ -56,8 +57,18 @@ class RerankAPIError extends Error { } async function resolveCohereKey( - workspaceId?: string | null + workspaceId?: string | null, + userApiKey?: string ): Promise<{ apiKey: string; isBYOK: boolean }> { + /** + * Mirrors the agent block hosted-key pattern (`injectHostedKeyIfNeeded`): + * on self-hosted the user-supplied key from the block field flows through + * unchanged; on hosted Sim we always source the key from workspace BYOK or + * platform env, so any user-supplied value is ignored. + */ + if (!isHosted && userApiKey) { + return { apiKey: userApiKey, isBYOK: false } + } if (workspaceId) { const byokResult = await getBYOKKey(workspaceId, 'cohere') if (byokResult) { @@ -77,8 +88,19 @@ async function resolveCohereKey( } } +/** + * Subset of Cohere v2/rerank response fields we read. + * Reference: https://docs.cohere.com/v2/reference/rerank + * - `results[].index` maps back to the position in the documents we sent. + * - `results[].relevance_score` is normalized 0–1. + * - `meta.warnings` is documented as an array of strings; we surface them in logs + * so issues like document truncation don't disappear silently. + */ interface CohereRerankResponse { results: Array<{ index: number; relevance_score: number }> + meta?: { + warnings?: string[] + } } /** @@ -92,6 +114,8 @@ export async function rerank( model: string topN?: number workspaceId?: string | null + /** User-supplied Cohere key from the Knowledge block field. Honored only on self-hosted. */ + apiKey?: string } ): Promise> { if (items.length === 0) return { results: [], isBYOK: false } @@ -100,7 +124,7 @@ export async function rerank( throw new Error(`Unsupported reranker model: ${options.model}`) } - const { apiKey, isBYOK } = await resolveCohereKey(options.workspaceId) + const { apiKey, isBYOK } = await resolveCohereKey(options.workspaceId, options.apiKey) const cappedItems = items.length > MAX_DOCUMENTS_PER_RERANK ? 
items.slice(0, MAX_DOCUMENTS_PER_RERANK) : items if (items.length > MAX_DOCUMENTS_PER_RERANK) { @@ -151,6 +175,13 @@ export async function rerank( } ) + if (response.meta?.warnings && response.meta.warnings.length > 0) { + logger.warn('Cohere rerank returned warnings', { + model: options.model, + warnings: response.meta.warnings, + }) + } + return { results: response.results .filter((r) => r.index >= 0 && r.index < cappedItems.length) diff --git a/apps/sim/lib/logs/fetch-log-detail.ts b/apps/sim/lib/logs/fetch-log-detail.ts new file mode 100644 index 00000000000..1a5aea4dc26 --- /dev/null +++ b/apps/sim/lib/logs/fetch-log-detail.ts @@ -0,0 +1,197 @@ +import { db } from '@sim/db' +import { + jobExecutionLogs, + pausedExecutions, + permissions, + workflow, + workflowDeploymentVersion, + workflowExecutionLogs, +} from '@sim/db/schema' +import { and, eq, type SQL } from 'drizzle-orm' + +type LookupColumn = 'id' | 'executionId' + +interface FetchLogDetailArgs { + userId: string + workspaceId: string + lookupColumn: LookupColumn + lookupValue: string +} + +/** + * Shared loader for the workflow-log detail shape returned by the by-id and + * by-execution routes. Returns `null` when no matching row exists in either + * the workflow-execution or job-execution tables for this user + workspace. + */ +export async function fetchLogDetail({ + userId, + workspaceId, + lookupColumn, + lookupValue, +}: FetchLogDetailArgs) { + const workflowMatch: SQL = + lookupColumn === 'id' + ? eq(workflowExecutionLogs.id, lookupValue) + : eq(workflowExecutionLogs.executionId, lookupValue) + + const rows = await db + .select({ + id: workflowExecutionLogs.id, + workflowId: workflowExecutionLogs.workflowId, + executionId: workflowExecutionLogs.executionId, + deploymentVersionId: workflowExecutionLogs.deploymentVersionId, + level: workflowExecutionLogs.level, + status: workflowExecutionLogs.status, + trigger: workflowExecutionLogs.trigger, + startedAt: workflowExecutionLogs.startedAt, + endedAt: workflowExecutionLogs.endedAt, + totalDurationMs: workflowExecutionLogs.totalDurationMs, + executionData: workflowExecutionLogs.executionData, + cost: workflowExecutionLogs.cost, + files: workflowExecutionLogs.files, + createdAt: workflowExecutionLogs.createdAt, + workflowName: workflow.name, + workflowDescription: workflow.description, + workflowColor: workflow.color, + workflowFolderId: workflow.folderId, + workflowUserId: workflow.userId, + workflowWorkspaceId: workflow.workspaceId, + workflowCreatedAt: workflow.createdAt, + workflowUpdatedAt: workflow.updatedAt, + deploymentVersion: workflowDeploymentVersion.version, + deploymentVersionName: workflowDeploymentVersion.name, + pausedStatus: pausedExecutions.status, + pausedTotalPauseCount: pausedExecutions.totalPauseCount, + pausedResumedCount: pausedExecutions.resumedCount, + }) + .from(workflowExecutionLogs) + .leftJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id)) + .leftJoin( + workflowDeploymentVersion, + eq(workflowDeploymentVersion.id, workflowExecutionLogs.deploymentVersionId) + ) + .leftJoin(pausedExecutions, eq(pausedExecutions.executionId, workflowExecutionLogs.executionId)) + .innerJoin( + permissions, + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, workflowExecutionLogs.workspaceId), + eq(permissions.userId, userId) + ) + ) + .where(and(workflowMatch, eq(workflowExecutionLogs.workspaceId, workspaceId))) + .limit(1) + + const log = rows[0] + + if (log) { + const workflowSummary = log.workflowId + ? 
{ + id: log.workflowId, + name: log.workflowName, + description: log.workflowDescription, + color: log.workflowColor, + folderId: log.workflowFolderId, + userId: log.workflowUserId, + workspaceId: log.workflowWorkspaceId, + createdAt: log.workflowCreatedAt?.toISOString() ?? null, + updatedAt: log.workflowUpdatedAt?.toISOString() ?? null, + } + : null + + const totalPauseCount = Number(log.pausedTotalPauseCount ?? 0) + const resumedCount = Number(log.pausedResumedCount ?? 0) + const hasPendingPause = + (totalPauseCount > 0 && resumedCount < totalPauseCount) || + (log.pausedStatus !== null && log.pausedStatus !== 'fully_resumed') + + return { + id: log.id, + workflowId: log.workflowId, + executionId: log.executionId, + deploymentVersionId: log.deploymentVersionId, + deploymentVersion: log.deploymentVersion ?? null, + deploymentVersionName: log.deploymentVersionName ?? null, + level: log.level, + status: log.status, + duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null, + trigger: log.trigger, + createdAt: log.startedAt.toISOString(), + workflow: workflowSummary, + jobTitle: null, + cost: log.cost ?? null, + pauseSummary: { + status: log.pausedStatus ?? null, + total: totalPauseCount, + resumed: resumedCount, + }, + hasPendingPause, + executionData: { + totalDuration: log.totalDurationMs, + ...((log.executionData as Record | null) ?? {}), + enhanced: true as const, + }, + files: log.files ?? null, + } + } + + const jobMatch: SQL = + lookupColumn === 'id' + ? eq(jobExecutionLogs.id, lookupValue) + : eq(jobExecutionLogs.executionId, lookupValue) + + const jobRows = await db + .select({ + id: jobExecutionLogs.id, + executionId: jobExecutionLogs.executionId, + level: jobExecutionLogs.level, + status: jobExecutionLogs.status, + trigger: jobExecutionLogs.trigger, + startedAt: jobExecutionLogs.startedAt, + endedAt: jobExecutionLogs.endedAt, + totalDurationMs: jobExecutionLogs.totalDurationMs, + executionData: jobExecutionLogs.executionData, + cost: jobExecutionLogs.cost, + createdAt: jobExecutionLogs.createdAt, + }) + .from(jobExecutionLogs) + .innerJoin( + permissions, + and( + eq(permissions.entityType, 'workspace'), + eq(permissions.entityId, jobExecutionLogs.workspaceId), + eq(permissions.userId, userId) + ) + ) + .where(and(jobMatch, eq(jobExecutionLogs.workspaceId, workspaceId))) + .limit(1) + + const jobLog = jobRows[0] + if (!jobLog) return null + + const execData = (jobLog.executionData as Record | null) ?? {} + return { + id: jobLog.id, + workflowId: null, + executionId: jobLog.executionId, + deploymentVersionId: null, + deploymentVersion: null, + deploymentVersionName: null, + level: jobLog.level, + status: jobLog.status, + duration: jobLog.totalDurationMs ? `${jobLog.totalDurationMs}ms` : null, + trigger: jobLog.trigger, + createdAt: jobLog.startedAt.toISOString(), + workflow: null, + jobTitle: ((execData.trigger as Record | undefined)?.source as string) ?? null, + cost: jobLog.cost ?? 
null, + pauseSummary: { status: null, total: 0, resumed: 0 }, + hasPendingPause: false, + executionData: { + totalDuration: jobLog.totalDurationMs, + ...execData, + enhanced: true as const, + }, + files: null, + } +} diff --git a/apps/sim/lib/mothership/inbox/executor.ts b/apps/sim/lib/mothership/inbox/executor.ts index b738a6a37ae..52236d9b959 100644 --- a/apps/sim/lib/mothership/inbox/executor.ts +++ b/apps/sim/lib/mothership/inbox/executor.ts @@ -131,11 +131,14 @@ export async function executeInboxTask(taskId: string): Promise { }) } + const userMessageId = generateId() + if (chatId) { taskPubSub?.publishStatusChanged({ workspaceId: ws.id, chatId, type: 'started', + streamId: userMessageId, }) } @@ -178,7 +181,6 @@ export async function executeInboxTask(taskId: string): Promise { } const messageContent = formatEmailAsMessage(truncatedTask, attachments) - const userMessageId = generateId() const requestPayload: Record = { message: messageContent, userId, @@ -244,6 +246,7 @@ export async function executeInboxTask(taskId: string): Promise { workspaceId: ws.id, chatId, type: 'completed', + streamId: userMessageId, }) } diff --git a/apps/sim/lib/table/sql.ts b/apps/sim/lib/table/sql.ts index d2004175f44..f854d2b5237 100644 --- a/apps/sim/lib/table/sql.ts +++ b/apps/sim/lib/table/sql.ts @@ -10,6 +10,17 @@ import { sql } from 'drizzle-orm' import { NAME_PATTERN } from './constants' import type { ColumnDefinition, ConditionOperators, Filter, JsonValue, Sort } from './types' +/** + * Error thrown when caller-supplied filter or sort input is malformed. + * Routes should map this to HTTP 400 with the message preserved. + */ +export class TableQueryValidationError extends Error { + constructor(message: string) { + super(message) + this.name = 'TableQueryValidationError' + } +} + /** * Whitelist of allowed operators for query filtering. * Only these operators can be used in filter conditions. @@ -41,7 +52,7 @@ const ALLOWED_OPERATORS = new Set([ * @param filter - Filter object with field conditions and logical operators * @param tableName - Table name for the query (e.g., 'user_table_rows') * @returns SQL WHERE clause or undefined if no filter specified - * @throws Error if field name is invalid or operator is not allowed + * @throws {TableQueryValidationError} if field name is invalid or operator is not allowed * * @example * // Simple equality @@ -110,7 +121,7 @@ export function buildFilterClause(filter: Filter, tableName: string): SQL | unde * @param tableName - Table name for the query (e.g., 'user_table_rows') * @param columns - Optional column definitions for type-aware sorting * @returns SQL ORDER BY clause or undefined if no sort specified - * @throws Error if field name is invalid + * @throws {TableQueryValidationError} if field name or sort direction is invalid * * @example * buildSortClause({ name: 'asc', age: 'desc' }, 'user_table_rows') @@ -133,7 +144,9 @@ export function buildSortClause( validateFieldName(field) if (direction !== 'asc' && direction !== 'desc') { - throw new Error(`Invalid sort direction "${direction}". Must be "asc" or "desc".`) + throw new TableQueryValidationError( + `Invalid sort direction "${direction}". Must be "asc" or "desc".` + ) } const columnType = columnTypeMap.get(field) @@ -148,15 +161,15 @@ export function buildSortClause( * Field names must match the NAME_PATTERN (alphanumeric + underscore, starting with letter/underscore). 
* * @param field - The field name to validate - * @throws Error if field name is invalid + * @throws {TableQueryValidationError} if field name is invalid */ function validateFieldName(field: string): void { if (!field || typeof field !== 'string') { - throw new Error('Field name must be a non-empty string') + throw new TableQueryValidationError('Field name must be a non-empty string') } if (!NAME_PATTERN.test(field)) { - throw new Error( + throw new TableQueryValidationError( `Invalid field name "${field}". Field names must start with a letter or underscore, followed by alphanumeric characters or underscores.` ) } @@ -166,11 +179,11 @@ function validateFieldName(field: string): void { * Validates an operator to ensure it's in the allowed list. * * @param operator - The operator to validate - * @throws Error if operator is not allowed + * @throws {TableQueryValidationError} if operator is not allowed */ function validateOperator(operator: string): void { if (!ALLOWED_OPERATORS.has(operator)) { - throw new Error( + throw new TableQueryValidationError( `Invalid operator "${operator}". Allowed operators: ${Array.from(ALLOWED_OPERATORS).join(', ')}` ) } @@ -190,7 +203,7 @@ function validateOperator(operator: string): void { * object with operators like $eq, $gt, $in, etc. * @returns Array of SQL condition fragments. Multiple conditions are returned * when the condition object contains multiple operators. - * @throws Error if field name is invalid or operator is not allowed + * @throws {TableQueryValidationError} if field name is invalid or operator is not allowed */ function buildFieldCondition( tableName: string, @@ -260,7 +273,9 @@ function buildFieldCondition( break default: - // This should never happen due to validateOperator, but added for completeness + // This should never happen due to validateOperator, but added for completeness. + // Throw a plain Error (→ 500) since reaching this default means the switch + // and ALLOWED_OPERATORS have drifted — that's a programmer error, not a caller error. 
throw new Error(`Unsupported operator: ${op}`) } } diff --git a/apps/sim/stores/logs/filters/types.ts b/apps/sim/stores/logs/filters/types.ts index 3fbd85bfaee..cf95d3bee3e 100644 --- a/apps/sim/stores/logs/filters/types.ts +++ b/apps/sim/stores/logs/filters/types.ts @@ -1,113 +1,3 @@ -import type { ProviderTiming, TokenInfo, ToolCall, TraceSpan } from '@/lib/logs/types' - -export type { ProviderTiming, TokenInfo, ToolCall, TraceSpan } - -export interface WorkflowData { - id: string - name: string - description: string | null - color: string - state: any -} - -export interface ToolCallMetadata { - toolCalls?: ToolCall[] -} - -export interface CostMetadata { - models?: Record< - string, - { - input: number - output: number - total: number - tokens?: { - input?: number - output?: number - prompt?: number - completion?: number - total?: number - } - } - > - input?: number - output?: number - total?: number - tokens?: { - input?: number - output?: number - prompt?: number - completion?: number - total?: number - } - pricing?: { - input: number - output: number - cachedInput?: number - updatedAt: string - } -} - -export interface WorkflowLog { - id: string - workflowId: string | null - executionId?: string | null - deploymentVersion?: number | null - deploymentVersionName?: string | null - level: string - status?: string | null - duration: string | null - trigger: string | null - createdAt: string - workflow?: WorkflowData | null - jobTitle?: string | null - files?: Array<{ - id: string - name: string - size: number - type: string - url: string - key: string - uploadedAt: string - expiresAt: string - storageProvider?: 's3' | 'blob' | 'local' - bucketName?: string - }> - cost?: CostMetadata - hasPendingPause?: boolean - executionData?: ToolCallMetadata & { - traceSpans?: TraceSpan[] - totalDuration?: number - blockInput?: Record - enhanced?: boolean - - blockExecutions?: Array<{ - id: string - blockId: string - blockName: string - blockType: string - startedAt: string - endedAt: string - durationMs: number - status: 'success' | 'error' | 'skipped' - errorMessage?: string - errorStackTrace?: string - inputData: unknown - outputData: unknown - cost?: CostMetadata - metadata: Record - }> - } -} - -export interface LogsResponse { - data: WorkflowLog[] - total: number - page: number - pageSize: number - totalPages: number -} - export type TimeRange = | 'Past 30 minutes' | 'Past hour' @@ -129,6 +19,7 @@ export type LogLevel = | 'cancelled' | 'all' | (string & {}) + /** Core trigger types for workflow execution */ export const CORE_TRIGGER_TYPES = [ 'manual', diff --git a/apps/sim/tools/knowledge/search.ts b/apps/sim/tools/knowledge/search.ts index 7f0ee99e933..09da5193704 100644 --- a/apps/sim/tools/knowledge/search.ts +++ b/apps/sim/tools/knowledge/search.ts @@ -55,6 +55,19 @@ export const knowledgeSearchTool: ToolConfig = { description: 'Cohere rerank model to use (one of: rerank-v4.0-pro, rerank-v4.0-fast, rerank-v3.5)', }, + rerankerInputCount: { + type: 'number', + required: false, + visibility: 'user-only', + description: + 'Number of vector results sent to the Cohere reranker (1–100). Defaults to topK × 4 capped at 100.', + }, + apiKey: { + type: 'string', + required: false, + visibility: 'user-only', + description: 'Cohere API key for reranker (self-hosted deployments only)', + }, }, schemaEnrichment: { @@ -84,13 +97,29 @@ export const knowledgeSearchTool: ToolConfig = { typeof params.rerankerModel === 'string' && params.rerankerModel.length > 0 ? 
params.rerankerModel : DEFAULT_RERANKER_MODEL + const rerankerApiKey = + typeof params.apiKey === 'string' && params.apiKey.length > 0 ? params.apiKey : undefined + const rawInputCount = + params.rerankerInputCount !== undefined && + params.rerankerInputCount !== null && + params.rerankerInputCount !== '' + ? Number(params.rerankerInputCount) + : Number.NaN + const rerankerInputCount = Number.isFinite(rawInputCount) + ? Math.max(1, Math.min(100, Math.floor(rawInputCount))) + : undefined const requestBody = { knowledgeBaseIds, query: params.query, topK: params.topK ? Math.max(1, Math.min(100, Number(params.topK))) : 10, ...(structuredFilters.length > 0 && { tagFilters: structuredFilters }), - ...(rerankerEnabled && { rerankerEnabled: true, rerankerModel }), + ...(rerankerEnabled && { + rerankerEnabled: true, + rerankerModel, + ...(rerankerInputCount !== undefined && { rerankerInputCount }), + ...(rerankerApiKey && { rerankerApiKey }), + }), ...(workflowId && { workflowId }), } diff --git a/apps/sim/tools/logs/get_execution.ts b/apps/sim/tools/logs/get_execution.ts new file mode 100644 index 00000000000..a62eef0525b --- /dev/null +++ b/apps/sim/tools/logs/get_execution.ts @@ -0,0 +1,53 @@ +import type { LogsGetExecutionParams, LogsGetExecutionResponse } from '@/tools/logs/types' +import type { ToolConfig } from '@/tools/types' + +export const logsGetExecutionTool: ToolConfig = { + id: 'logs_get_execution', + name: 'Get Execution Details', + description: + 'Fetch full execution details for a workflow run, including the per-block state snapshot.', + version: '1.0.0', + + params: { + executionId: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Execution ID returned by a workflow run', + }, + }, + + request: { + url: (params) => `/api/logs/execution/${encodeURIComponent(params.executionId)}`, + method: 'GET', + headers: () => ({ + 'Content-Type': 'application/json', + }), + }, + + transformResponse: async (response): Promise => { + const data = await response.json() + if (!response.ok) { + throw new Error(data?.error || `Request failed with status ${response.status}`) + } + return { + success: true, + output: data, + } + }, + + outputs: { + executionId: { type: 'string', description: 'Execution ID' }, + workflowId: { type: 'string', description: 'Workflow ID this execution belongs to' }, + workflowState: { type: 'json', description: 'Per-block state snapshot for the execution' }, + childWorkflowSnapshots: { + type: 'json', + description: 'Snapshots for any child workflows invoked during the run', + optional: true, + }, + executionMetadata: { + type: 'json', + description: 'Trigger, timestamps, totalDurationMs, and cost for the run', + }, + }, +} diff --git a/apps/sim/tools/logs/get_log.ts b/apps/sim/tools/logs/get_log.ts new file mode 100644 index 00000000000..92e41e79b83 --- /dev/null +++ b/apps/sim/tools/logs/get_log.ts @@ -0,0 +1,50 @@ +import type { LogsGetParams, LogsGetResponse } from '@/tools/logs/types' +import type { ToolConfig } from '@/tools/types' + +export const logsGetTool: ToolConfig = { + id: 'logs_get', + name: 'Get Log by ID', + description: 'Fetch a single workflow execution log entry by its log ID.', + version: '1.0.0', + + params: { + id: { + type: 'string', + required: true, + visibility: 'user-or-llm', + description: 'Log entry ID', + }, + }, + + request: { + url: (params) => { + const workspaceId = params._context?.workspaceId + if (!workspaceId) { + throw new Error('workspaceId is required in execution context') + } + const qs = new 
URLSearchParams({ workspaceId }) + return `/api/logs/${encodeURIComponent(params.id)}?${qs.toString()}` + }, + method: 'GET', + headers: () => ({ + 'Content-Type': 'application/json', + }), + }, + + transformResponse: async (response): Promise => { + const result = await response.json() + if (!response.ok) { + throw new Error(result?.error || `Request failed with status ${response.status}`) + } + return { + success: true, + output: { + log: result.data, + }, + } + }, + + outputs: { + log: { type: 'json', description: 'Workflow execution log entry' }, + }, +} diff --git a/apps/sim/tools/logs/index.ts b/apps/sim/tools/logs/index.ts new file mode 100644 index 00000000000..109d223c8b8 --- /dev/null +++ b/apps/sim/tools/logs/index.ts @@ -0,0 +1,3 @@ +export { logsGetExecutionTool } from '@/tools/logs/get_execution' +export { logsGetTool } from '@/tools/logs/get_log' +export { logsQueryTool } from '@/tools/logs/query' diff --git a/apps/sim/tools/logs/query.ts b/apps/sim/tools/logs/query.ts new file mode 100644 index 00000000000..8ea660ee29a --- /dev/null +++ b/apps/sim/tools/logs/query.ts @@ -0,0 +1,132 @@ +import type { LogsQueryParams, LogsQueryResponse } from '@/tools/logs/types' +import type { ToolConfig } from '@/tools/types' + +export const logsQueryTool: ToolConfig = { + id: 'logs_query', + name: 'Query Logs', + description: 'Query workflow execution logs in the current workspace with filters.', + version: '1.0.0', + + params: { + workflowIds: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Comma-separated workflow IDs to filter by', + }, + executionId: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Filter logs to a single execution ID', + }, + level: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: + "Log level filter: 'all', 'info', 'error', 'running', 'pending'. 
Comma-separated for multiple.", + }, + triggers: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Comma-separated triggers (api, webhook, schedule, manual, chat, mothership)', + }, + limit: { + type: 'number', + required: false, + visibility: 'user-or-llm', + description: 'Max logs to return (default 100, max 200)', + }, + cursor: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Opaque pagination cursor returned by a previous query', + }, + sortBy: { + type: 'string', + required: false, + visibility: 'user-only', + description: "Sort field: 'date' (default), 'duration', 'cost', 'status'", + }, + sortOrder: { + type: 'string', + required: false, + visibility: 'user-only', + description: "Sort order: 'desc' (default) or 'asc'", + }, + startDate: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'ISO 8601 timestamp; only logs at or after this time', + }, + endDate: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'ISO 8601 timestamp; only logs at or before this time', + }, + search: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Free-text search across log fields', + }, + }, + + request: { + url: (params) => { + const workspaceId = params._context?.workspaceId + if (!workspaceId) { + throw new Error('workspaceId is required in execution context') + } + const qs = new URLSearchParams({ workspaceId }) + if (params.workflowIds) qs.set('workflowIds', params.workflowIds) + if (params.executionId) qs.set('executionId', params.executionId) + if (params.level && params.level !== 'all') qs.set('level', params.level) + if (params.triggers) qs.set('triggers', params.triggers) + if (params.startDate) qs.set('startDate', params.startDate) + if (params.endDate) qs.set('endDate', params.endDate) + if (params.search) qs.set('search', params.search) + if (params.cursor) qs.set('cursor', params.cursor) + if (params.sortBy) qs.set('sortBy', params.sortBy) + if (params.sortOrder) qs.set('sortOrder', params.sortOrder) + if (params.limit !== undefined && params.limit !== null) { + qs.set('limit', String(params.limit)) + } + return `/api/logs?${qs.toString()}` + }, + method: 'GET', + headers: () => ({ + 'Content-Type': 'application/json', + }), + }, + + transformResponse: async (response): Promise => { + const result = await response.json() + if (!response.ok) { + throw new Error(result?.error || `Request failed with status ${response.status}`) + } + return { + success: true, + output: { + logs: result.data || [], + nextCursor: result.nextCursor ?? 
null, + }, + } + }, + + outputs: { + logs: { + type: 'array', + description: 'Array of workflow execution log entries', + }, + nextCursor: { + type: 'string', + description: 'Pagination cursor for the next page; null when no more results', + }, + }, +} diff --git a/apps/sim/tools/logs/types.ts b/apps/sim/tools/logs/types.ts new file mode 100644 index 00000000000..3053059b1f1 --- /dev/null +++ b/apps/sim/tools/logs/types.ts @@ -0,0 +1,48 @@ +import type { + ExecutionSnapshotData, + WorkflowLogDetail, + WorkflowLogSummary, +} from '@/lib/api/contracts/logs' +import type { ToolResponse, WorkflowToolExecutionContext } from '@/tools/types' + +export interface LogsQueryParams { + workflowIds?: string + executionId?: string + level?: string + triggers?: string + limit?: number + cursor?: string + sortBy?: 'date' | 'duration' | 'cost' | 'status' + sortOrder?: 'asc' | 'desc' + startDate?: string + endDate?: string + search?: string + _context?: WorkflowToolExecutionContext +} + +export interface LogsGetParams { + id: string + _context?: WorkflowToolExecutionContext +} + +export interface LogsGetExecutionParams { + executionId: string + _context?: WorkflowToolExecutionContext +} + +export interface LogsQueryResponse extends ToolResponse { + output: { + logs: WorkflowLogSummary[] + nextCursor: string | null + } +} + +export interface LogsGetResponse extends ToolResponse { + output: { + log: WorkflowLogDetail + } +} + +export interface LogsGetExecutionResponse extends ToolResponse { + output: ExecutionSnapshotData +} diff --git a/apps/sim/tools/openai/image.ts b/apps/sim/tools/openai/image.ts index 2e857d153f6..cd84472b044 100644 --- a/apps/sim/tools/openai/image.ts +++ b/apps/sim/tools/openai/image.ts @@ -16,7 +16,7 @@ export const imageTool: ToolConfig = { type: 'string', required: true, visibility: 'user-only', - description: 'The model to use (gpt-image-1 or dall-e-3)', + description: 'The model to use (dall-e-3, gpt-image-1, or gpt-image-2)', }, prompt: { type: 'string', @@ -28,25 +28,39 @@ export const imageTool: ToolConfig = { type: 'string', required: true, visibility: 'user-or-llm', - description: 'The size of the generated images (1024x1024, 1024x1792, or 1792x1024)', + description: + 'Image size. dall-e-3: 1024x1024, 1024x1792, or 1792x1024. gpt-image-1: auto, 1024x1024, 1536x1024, or 1024x1536. gpt-image-2: auto or any size with edges ≤3840px and multiples of 16 (e.g. 1024x1024, 1536x1024, 1024x1536, 2560x1440, 3840x2160).', }, quality: { type: 'string', required: false, visibility: 'user-or-llm', - description: 'The quality of the image (standard or hd)', + description: 'Quality. dall-e-3: standard|hd. gpt-image-1/gpt-image-2: auto|low|medium|high', }, style: { type: 'string', required: false, visibility: 'user-or-llm', - description: 'The style of the image (vivid or natural)', + description: 'The style of the image (vivid or natural), only for dall-e-3', }, background: { type: 'string', required: false, visibility: 'user-or-llm', - description: 'The background color, only for gpt-image-1', + description: + 'Background. gpt-image-1: auto|transparent|opaque. 
gpt-image-2: auto|opaque (transparent not supported)', + }, + outputFormat: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Output image format (png, jpeg, webp), only for gpt-image-1 and gpt-image-2', + }, + moderation: { + type: 'string', + required: false, + visibility: 'user-or-llm', + description: 'Moderation level (auto or low), only for gpt-image-1 and gpt-image-2', }, n: { type: 'number', @@ -73,15 +87,18 @@ export const imageTool: ToolConfig = { const body: BaseImageRequestBody = { model: params.model, prompt: params.prompt, - size: params.size || '1024x1024', + size: params.size || (params.model === 'dall-e-3' ? '1024x1024' : 'auto'), n: params.n ? Number(params.n) : 1, } if (params.model === 'dall-e-3') { if (params.quality) body.quality = params.quality if (params.style) body.style = params.style - } else if (params.model === 'gpt-image-1') { + } else if (params.model === 'gpt-image-1' || params.model === 'gpt-image-2') { + if (params.quality) body.quality = params.quality if (params.background) body.background = params.background + if (params.outputFormat) body.output_format = params.outputFormat + if (params.moderation) body.moderation = params.moderation } return body @@ -111,7 +128,7 @@ export const imageTool: ToolConfig = { } else if (data.data?.[0]?.b64_json) { base64Image = data.data[0].b64_json logger.info( - 'Found base64 encoded image in response for GPT-Image-1', + `Found base64 encoded image in response for ${modelName}`, `length: ${base64Image.length}` ) } else { diff --git a/apps/sim/tools/registry.ts b/apps/sim/tools/registry.ts index ad7dd384867..9130ac52dee 100644 --- a/apps/sim/tools/registry.ts +++ b/apps/sim/tools/registry.ts @@ -1568,6 +1568,7 @@ import { import { linkedInGetProfileTool, linkedInSharePostTool } from '@/tools/linkedin' import { linkupSearchTool } from '@/tools/linkup' import { llmChatTool } from '@/tools/llm' +import { logsGetExecutionTool, logsGetTool, logsQueryTool } from '@/tools/logs' import { loopsCreateContactPropertyTool, loopsCreateContactTool, @@ -3204,6 +3205,9 @@ export const tools: Record = { ketch_set_consent: ketchSetConsentTool, ketch_set_subscriptions: ketchSetSubscriptionsTool, linkup_search: linkupSearchTool, + logs_query: logsQueryTool, + logs_get: logsGetTool, + logs_get_execution: logsGetExecutionTool, loops_create_contact: loopsCreateContactTool, loops_create_contact_property: loopsCreateContactPropertyTool, loops_update_contact: loopsUpdateContactTool, diff --git a/helm/sim/values.yaml b/helm/sim/values.yaml index 97fbeba5761..d2fd5c0ee11 100644 --- a/helm/sim/values.yaml +++ b/helm/sim/values.yaml @@ -275,6 +275,12 @@ app: # in the Agent block UI — users just pick an Azure model and run. NEXT_PUBLIC_AZURE_CONFIGURED: "" # Set to "true" to hide Azure credential fields + # Cohere Reranker (Knowledge block) + # Set COHERE_API_KEY (or COHERE_API_KEY_1/2/3 for rotation) and NEXT_PUBLIC_COHERE_CONFIGURED=true + # to pre-configure the Cohere reranker server-side. When configured, the Cohere API Key field is + # hidden in the Knowledge block UI. 
+ NEXT_PUBLIC_COHERE_CONFIGURED: "" # Set to "true" to hide the Cohere API Key field on the Knowledge block + # AWS S3 Cloud Storage Configuration (optional - for file storage) # If configured, files will be stored in S3 instead of local storage AWS_REGION: "" # AWS region (e.g., "us-east-1") diff --git a/scripts/check-api-validation-contracts.ts b/scripts/check-api-validation-contracts.ts index 34cbacb0f6e..14a57e05fad 100644 --- a/scripts/check-api-validation-contracts.ts +++ b/scripts/check-api-validation-contracts.ts @@ -9,8 +9,8 @@ const QUERY_HOOKS_DIR = path.join(ROOT, 'apps/sim/hooks/queries') const SELECTOR_HOOKS_DIR = path.join(ROOT, 'apps/sim/hooks/selectors') const BASELINE = { - totalRoutes: 725, - zodRoutes: 725, + totalRoutes: 726, + zodRoutes: 726, nonZodRoutes: 0, } as const
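As a closing illustration of the cursor-based listing contract introduced above, here is a minimal sketch of walking `/api/logs` page by page. The route, the `workspaceId`/`limit`/`cursor` query parameters, and `listLogsResponseSchema` come from this change; the helper name and page size are arbitrary:

```ts
import { listLogsResponseSchema, type WorkflowLogSummary } from '@/lib/api/contracts/logs'

// Follow nextCursor until the server reports no further pages.
async function fetchAllLogs(workspaceId: string): Promise<WorkflowLogSummary[]> {
  const all: WorkflowLogSummary[] = []
  let cursor: string | null = null
  do {
    const qs = new URLSearchParams({ workspaceId, limit: '100' })
    if (cursor) qs.set('cursor', cursor)
    const res = await fetch(`/api/logs?${qs.toString()}`)
    if (!res.ok) throw new Error(`Logs query failed: ${res.status}`)
    const page = listLogsResponseSchema.parse(await res.json())
    all.push(...page.data)
    cursor = page.nextCursor // null when there are no more results
  } while (cursor)
  return all
}
```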