- {
- if (e.button !== 0) return
- onRowToggle(position, e.shiftKey)
- }}
- >
-
- {position + 1}
-
-
-
+
+
+ {
+ if (e.button !== 0) return
+ onRowToggle(position, e.shiftKey)
+ }}
+ >
+
+ {position + 1}
+
+
+
+
+
|
{columns.map((col, colIndex) => {
@@ -3238,7 +3252,7 @@ const DataRow = React.memo(function DataRow({
return (
onContextMenu(e, row)}>
-
+
{
@@ -3268,7 +3282,7 @@ const DataRow = React.memo(function DataRow({
type='button'
aria-label={runningCount > 0 ? `Stop ${runningCount} running` : 'Run row'}
title={runningCount > 0 ? `Stop ${runningCount} running` : 'Run row'}
- className='flex h-[20px] w-[20px] shrink-0 items-center justify-center rounded text-[var(--text-primary)] transition-colors hover-hover:bg-[var(--surface-2)]'
+ className='ml-auto flex h-[20px] w-[20px] shrink-0 items-center justify-center rounded text-[var(--text-primary)] transition-colors hover-hover:bg-[var(--surface-2)]'
onClick={() => {
if (runningCount > 0) {
onStopRow(row.id)
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts
index 73dc76e3792..9c09054d5ba 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-file-attachments.ts
@@ -126,7 +126,10 @@ export function useFileAttachments(props: UseFileAttachmentsProps) {
type: resolveFileType(file),
path: '',
uploading: true,
- previewUrl: file.type.startsWith('image/') ? URL.createObjectURL(file) : undefined,
+ previewUrl:
+ file.type.startsWith('image/') || file.type.startsWith('video/')
+ ? URL.createObjectURL(file)
+ : undefined,
}))
setAttachedFiles((prev) => [...prev, ...placeholders])
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts
index c4b0e1e5e67..411ab163f47 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/copilot/components/user-input/hooks/use-mention-data.ts
@@ -346,7 +346,7 @@ export function useMentionData(props: UseMentionDataProps): MentionDataReturn {
try {
setIsLoadingLogs(true)
const data = await requestJson(listLogsContract, {
- query: { workspaceId, limit: 50, details: 'full' },
+ query: { workspaceId, limit: 50 },
})
const items = data.data
const mapped = items.map((l) => ({
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts
index e7677a608a7..ee07dba42dd 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.test.ts
@@ -482,6 +482,312 @@ describe('groupEntriesByExecution', () => {
})
})
+describe('duration computation', () => {
+ /**
+ * Regression guard for the 18m → 20m → 22m bug.
+ *
+ * When a loop iteration contains a parallel block, the iteration's displayed
+ * duration must be wall-clock (max(endedAt) − min(startedAt)), not the sum of
+ * child durationMs. Summing over concurrent parallel branches over-counts time
+ * and causes the displayed iteration duration to climb rapidly as each branch
+ * resolves.
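+ * Worked example with the numbers used below: 5 branches of 110s that all start
+ * together finish after roughly 110s of wall-clock time, while summing durationMs
+ * would report 550s, which is the climbing-duration symptom this guards against.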
+ */
+ it('loop iteration with concurrent parallel branches uses wall-clock duration', () => {
+ const branches = 5
+ const branchDurationMs = 110_000
+ const loopIterStartMs = Date.UTC(2025, 0, 1, 0, 0, 0)
+ const loopIterEndMs = loopIterStartMs + branchDurationMs
+
+ const entries: ConsoleEntry[] = []
+ for (let branch = 0; branch < branches; branch++) {
+ entries.push(
+ makeEntry({
+ blockId: 'function-1',
+ blockName: 'Function 1',
+ executionOrder: branch + 1,
+ startedAt: new Date(loopIterStartMs).toISOString(),
+ endedAt: new Date(loopIterEndMs).toISOString(),
+ durationMs: branchDurationMs,
+ iterationType: 'parallel',
+ iterationCurrent: branch,
+ iterationTotal: branches,
+ iterationContainerId: 'parallel-1',
+ parentIterations: [
+ {
+ iterationType: 'loop',
+ iterationCurrent: 0,
+ iterationTotal: 1,
+ iterationContainerId: 'loop-1',
+ },
+ ],
+ })
+ )
+ }
+
+ const tree = buildEntryTree(entries)
+ const loopSubflow = tree.find((n) => n.entry.blockType === 'loop')
+ expect(loopSubflow).toBeDefined()
+
+ const iteration = loopSubflow!.children[0]
+ expect(iteration.nodeType).toBe('iteration')
+ expect(iteration.entry.durationMs).toBe(branchDurationMs)
+ expect(iteration.entry.durationMs).toBeLessThan(branches * branchDurationMs)
+ })
+
+ it('subflow container with concurrent children uses wall-clock duration', () => {
+ const branches = 4
+ const branchDurationMs = 60_000
+ const startMs = Date.UTC(2025, 0, 1, 0, 0, 0)
+ const endMs = startMs + branchDurationMs
+
+ const entries: ConsoleEntry[] = []
+ for (let branch = 0; branch < branches; branch++) {
+ entries.push(
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: branch + 1,
+ startedAt: new Date(startMs).toISOString(),
+ endedAt: new Date(endMs).toISOString(),
+ durationMs: branchDurationMs,
+ iterationType: 'parallel',
+ iterationCurrent: branch,
+ iterationTotal: branches,
+ iterationContainerId: 'parallel-1',
+ })
+ )
+ }
+
+ const tree = buildEntryTree(entries)
+ const subflow = tree.find((n) => n.entry.blockType === 'parallel')
+ expect(subflow).toBeDefined()
+ expect(subflow!.entry.durationMs).toBe(branchDurationMs)
+ expect(subflow!.entry.durationMs).toBeLessThan(branches * branchDurationMs)
+ })
+
+ it('sequential loop iteration uses wall-clock duration', () => {
+ const blockStart = Date.UTC(2025, 0, 1, 0, 0, 0)
+ const blockEnd = blockStart + 5_000
+
+ const entries: ConsoleEntry[] = [
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: 1,
+ startedAt: new Date(blockStart).toISOString(),
+ endedAt: new Date(blockEnd).toISOString(),
+ durationMs: 5_000,
+ iterationType: 'loop',
+ iterationCurrent: 0,
+ iterationTotal: 1,
+ iterationContainerId: 'loop-1',
+ }),
+ ]
+
+ const tree = buildEntryTree(entries)
+ const loop = tree.find((n) => n.entry.blockType === 'loop')
+ expect(loop).toBeDefined()
+ expect(loop!.children[0].entry.durationMs).toBe(5_000)
+ })
+
+ it('parallel iteration uses wall-clock duration', () => {
+ const start = Date.UTC(2025, 0, 1, 0, 0, 0)
+ const end = start + 7_500
+
+ const entries: ConsoleEntry[] = [
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: 1,
+ startedAt: new Date(start).toISOString(),
+ endedAt: new Date(end).toISOString(),
+ durationMs: 7_500,
+ iterationType: 'parallel',
+ iterationCurrent: 0,
+ iterationTotal: 1,
+ iterationContainerId: 'parallel-1',
+ }),
+ ]
+
+ const tree = buildEntryTree(entries)
+ const parallel = tree.find((n) => n.entry.blockType === 'parallel')
+ expect(parallel).toBeDefined()
+ expect(parallel!.children[0].entry.durationMs).toBe(7_500)
+ })
+
+ it('sequential loop with gaps between iterations: each iteration is wall-clock of its own children', () => {
+ const entries: ConsoleEntry[] = []
+ const iterStarts = [0, 10_000, 30_000]
+ const blockDuration = 1_000
+ const base = Date.UTC(2025, 0, 1, 0, 0, 0)
+
+ for (let i = 0; i < iterStarts.length; i++) {
+ entries.push(
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: i + 1,
+ startedAt: new Date(base + iterStarts[i]).toISOString(),
+ endedAt: new Date(base + iterStarts[i] + blockDuration).toISOString(),
+ durationMs: blockDuration,
+ iterationType: 'loop',
+ iterationCurrent: i,
+ iterationTotal: 3,
+ iterationContainerId: 'loop-1',
+ })
+ )
+ }
+
+ const tree = buildEntryTree(entries)
+ const loop = tree.find((n) => n.entry.blockType === 'loop')!
+ for (let i = 0; i < 3; i++) {
+ expect(loop.children[i].entry.durationMs).toBe(blockDuration)
+ }
+ expect(loop.entry.durationMs).toBe(iterStarts[2] + blockDuration - iterStarts[0])
+ })
+
+ it('loop-in-loop: outer iteration duration spans all inner iterations wall-clock', () => {
+ const entries: ConsoleEntry[] = []
+ const base = Date.UTC(2025, 0, 1, 0, 0, 0)
+ const innerDuration = 2_000
+ const innerCount = 3
+
+ for (let inner = 0; inner < innerCount; inner++) {
+ const start = base + inner * innerDuration
+ entries.push(
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: inner + 1,
+ startedAt: new Date(start).toISOString(),
+ endedAt: new Date(start + innerDuration).toISOString(),
+ durationMs: innerDuration,
+ iterationType: 'loop',
+ iterationCurrent: inner,
+ iterationTotal: innerCount,
+ iterationContainerId: 'inner-loop',
+ parentIterations: [
+ {
+ iterationType: 'loop',
+ iterationCurrent: 0,
+ iterationTotal: 1,
+ iterationContainerId: 'outer-loop',
+ },
+ ],
+ })
+ )
+ }
+
+ const tree = buildEntryTree(entries)
+ const outerLoop = tree.find((n) => n.entry.blockType === 'loop')!
+ const outerIter = outerLoop.children[0]
+ expect(outerIter.entry.durationMs).toBe(innerCount * innerDuration)
+ })
+
+ it('loop-in-parallel: each branch duration reflects its own loop wall-clock', () => {
+ const entries: ConsoleEntry[] = []
+ const base = Date.UTC(2025, 0, 1, 0, 0, 0)
+ const innerDuration = 1_500
+ const innerCount = 2
+ const branches = 3
+
+ for (let branch = 0; branch < branches; branch++) {
+ for (let inner = 0; inner < innerCount; inner++) {
+ const start = base + inner * innerDuration
+ entries.push(
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: branch * innerCount + inner + 1,
+ startedAt: new Date(start).toISOString(),
+ endedAt: new Date(start + innerDuration).toISOString(),
+ durationMs: innerDuration,
+ iterationType: 'loop',
+ iterationCurrent: inner,
+ iterationTotal: innerCount,
+ iterationContainerId: 'inner-loop',
+ parentIterations: [
+ {
+ iterationType: 'parallel',
+ iterationCurrent: branch,
+ iterationTotal: branches,
+ iterationContainerId: 'parallel-1',
+ },
+ ],
+ })
+ )
+ }
+ }
+
+ const tree = buildEntryTree(entries)
+ const parallelSubflow = tree.find((n) => n.entry.blockType === 'parallel')!
+ expect(parallelSubflow.children).toHaveLength(branches)
+ for (let branch = 0; branch < branches; branch++) {
+ const branchNode = parallelSubflow.children[branch]
+ expect(branchNode.entry.durationMs).toBe(innerCount * innerDuration)
+ }
+ expect(parallelSubflow.entry.durationMs).toBe(innerCount * innerDuration)
+ })
+
+ it('single-block iteration: duration equals the block durationMs', () => {
+ const start = Date.UTC(2025, 0, 1, 0, 0, 0)
+ const blockDuration = 3_141
+
+ const entries: ConsoleEntry[] = [
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: 1,
+ startedAt: new Date(start).toISOString(),
+ endedAt: new Date(start + blockDuration).toISOString(),
+ durationMs: blockDuration,
+ iterationType: 'loop',
+ iterationCurrent: 0,
+ iterationTotal: 1,
+ iterationContainerId: 'loop-1',
+ }),
+ ]
+
+ const tree = buildEntryTree(entries)
+ const loop = tree.find((n) => n.entry.blockType === 'loop')!
+ expect(loop.children[0].entry.durationMs).toBe(blockDuration)
+ expect(loop.entry.durationMs).toBe(blockDuration)
+ })
+
+ it('does not sum concurrent branch durations into iteration duration', () => {
+ const branches = 20
+ const branchDurationMs = 100_000
+ const start = Date.UTC(2025, 0, 1, 0, 0, 0)
+
+ const entries: ConsoleEntry[] = []
+ for (let branch = 0; branch < branches; branch++) {
+ const branchStart = start + branch * 5
+ entries.push(
+ makeEntry({
+ blockId: 'function-1',
+ executionOrder: branch + 1,
+ startedAt: new Date(branchStart).toISOString(),
+ endedAt: new Date(branchStart + branchDurationMs).toISOString(),
+ durationMs: branchDurationMs,
+ iterationType: 'parallel',
+ iterationCurrent: branch,
+ iterationTotal: branches,
+ iterationContainerId: 'parallel-1',
+ parentIterations: [
+ {
+ iterationType: 'loop',
+ iterationCurrent: 0,
+ iterationTotal: 1,
+ iterationContainerId: 'loop-1',
+ },
+ ],
+ })
+ )
+ }
+
+ const tree = buildEntryTree(entries)
+ const loopSubflow = tree.find((n) => n.entry.blockType === 'loop')!
+ const iteration = loopSubflow.children[0]
+
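+ // Branches start 5ms apart, so wall-clock runs from the first start to the last
+ // branch's end (branchDurationMs + (branches - 1) * 5ms), far below the
+ // 20 * 100s a naive sum of child durations would report.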
+ const wallClock = branchDurationMs + (branches - 1) * 5
+ expect(iteration.entry.durationMs).toBe(wallClock)
+ expect(iteration.entry.durationMs).toBeLessThan(branches * branchDurationMs)
+ })
+})
+
describe('flattenVisibleExecutionRows', () => {
it('only includes children for expanded nodes', () => {
const childBlock = makeEntry({
diff --git a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts
index e4c450d9c7d..347a5ffbc2f 100644
--- a/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts
+++ b/apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/terminal/utils.ts
@@ -393,9 +393,7 @@ export function buildEntryTree(entries: ConsoleEntry[], idPrefix = ''): EntryNod
const subflowEndMs = Math.max(
...allRelevantBlocks.map((b) => new Date(b.endedAt || b.timestamp).getTime())
)
- const totalDuration = allRelevantBlocks.reduce((sum, b) => sum + (b.durationMs || 0), 0)
- const subflowDuration =
- iterationType === 'parallel' ? subflowEndMs - subflowStartMs : totalDuration
+ const subflowDuration = subflowEndMs - subflowStartMs
const subflowExecutionOrder = Math.min(...allRelevantBlocks.map((b) => b.executionOrder))
const metadataSource = allRelevantBlocks[0]
@@ -449,9 +447,7 @@ export function buildEntryTree(entries: ConsoleEntry[], idPrefix = ''): EntryNod
const iterEndMs = Math.max(
...allIterEntries.map((b) => new Date(b.endedAt || b.timestamp).getTime())
)
- const iterDuration = allIterEntries.reduce((sum, b) => sum + (b.durationMs || 0), 0)
- const iterDisplayDuration =
- iterationType === 'parallel' ? iterEndMs - iterStartMs : iterDuration
+ const iterDisplayDuration = iterEndMs - iterStartMs
const iterExecutionOrder = Math.min(...allIterEntries.map((b) => b.executionOrder))
const iterMetadataSource = allIterEntries[0]
diff --git a/apps/sim/blocks/blocks/image_generator.ts b/apps/sim/blocks/blocks/image_generator.ts
index 6963cd604fd..69f94cb49e2 100644
--- a/apps/sim/blocks/blocks/image_generator.ts
+++ b/apps/sim/blocks/blocks/image_generator.ts
@@ -8,7 +8,7 @@ export const ImageGeneratorBlock: BlockConfig = {
description: 'Generate images',
authMode: AuthMode.ApiKey,
longDescription:
- 'Integrate Image Generator into the workflow. Can generate images using DALL-E 3 or GPT Image.',
+ 'Integrate Image Generator into the workflow. Can generate images using DALL-E 3, GPT Image 1, or GPT Image 2.',
docsLink: 'https://docs.sim.ai/tools/image_generator',
category: 'tools',
integrationType: IntegrationType.AI,
@@ -22,7 +22,8 @@ export const ImageGeneratorBlock: BlockConfig = {
type: 'dropdown',
options: [
{ label: 'DALL-E 3', id: 'dall-e-3' },
- { label: 'GPT Image', id: 'gpt-image-1' },
+ { label: 'GPT Image 1', id: 'gpt-image-1' },
+ { label: 'GPT Image 2', id: 'gpt-image-2' },
],
value: () => 'dall-e-3',
},
@@ -60,6 +61,22 @@ export const ImageGeneratorBlock: BlockConfig = {
condition: { field: 'model', value: 'gpt-image-1' },
dependsOn: ['model'],
},
+ {
+ id: 'size',
+ title: 'Size',
+ type: 'dropdown',
+ options: [
+ { label: 'Auto', id: 'auto' },
+ { label: 'Square (1024x1024)', id: '1024x1024' },
+ { label: 'Portrait (1024x1536)', id: '1024x1536' },
+ { label: 'Landscape (1536x1024)', id: '1536x1024' },
+ { label: '2K (2560x1440)', id: '2560x1440' },
+ { label: '4K (3840x2160)', id: '3840x2160' },
+ ],
+ value: () => 'auto',
+ condition: { field: 'model', value: 'gpt-image-2' },
+ dependsOn: ['model'],
+ },
{
id: 'quality',
title: 'Quality',
@@ -72,6 +89,20 @@ export const ImageGeneratorBlock: BlockConfig = {
condition: { field: 'model', value: 'dall-e-3' },
dependsOn: ['model'],
},
+ {
+ id: 'quality',
+ title: 'Quality',
+ type: 'dropdown',
+ options: [
+ { label: 'Auto', id: 'auto' },
+ { label: 'Low', id: 'low' },
+ { label: 'Medium', id: 'medium' },
+ { label: 'High', id: 'high' },
+ ],
+ value: () => 'auto',
+ condition: { field: 'model', value: ['gpt-image-1', 'gpt-image-2'] },
+ dependsOn: ['model'],
+ },
{
id: 'style',
title: 'Style',
@@ -97,6 +128,43 @@ export const ImageGeneratorBlock: BlockConfig = {
condition: { field: 'model', value: 'gpt-image-1' },
dependsOn: ['model'],
},
+ {
+ id: 'background',
+ title: 'Background',
+ type: 'dropdown',
+ options: [
+ { label: 'Auto', id: 'auto' },
+ { label: 'Opaque', id: 'opaque' },
+ ],
+ value: () => 'auto',
+ condition: { field: 'model', value: 'gpt-image-2' },
+ dependsOn: ['model'],
+ },
+ {
+ id: 'outputFormat',
+ title: 'Output Format',
+ type: 'dropdown',
+ options: [
+ { label: 'PNG', id: 'png' },
+ { label: 'JPEG', id: 'jpeg' },
+ { label: 'WebP', id: 'webp' },
+ ],
+ value: () => 'png',
+ condition: { field: 'model', value: ['gpt-image-1', 'gpt-image-2'] },
+ dependsOn: ['model'],
+ },
+ {
+ id: 'moderation',
+ title: 'Moderation',
+ type: 'dropdown',
+ options: [
+ { label: 'Auto', id: 'auto' },
+ { label: 'Low', id: 'low' },
+ ],
+ value: () => 'auto',
+ condition: { field: 'model', value: ['gpt-image-1', 'gpt-image-2'] },
+ dependsOn: ['model'],
+ },
{
id: 'apiKey',
title: 'API Key',
@@ -120,7 +188,25 @@ export const ImageGeneratorBlock: BlockConfig = {
}
const model = params.model || 'dall-e-3'
- const size = params.size || (model === 'gpt-image-1' ? 'auto' : '1024x1024')
+
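+ // Validate user-supplied size/quality/background against per-model allow-lists and
+ // fall back to a safe default rather than forwarding unsupported values to the API.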
+ const ALLOWED_SIZES: Record<string, string[]> = {
+ 'dall-e-3': ['1024x1024', '1024x1792', '1792x1024'],
+ 'gpt-image-1': ['auto', '1024x1024', '1536x1024', '1024x1536'],
+ 'gpt-image-2': ['auto', '1024x1024', '1536x1024', '1024x1536', '2560x1440', '3840x2160'],
+ }
+ const ALLOWED_QUALITIES: Record<string, string[]> = {
+ 'dall-e-3': ['standard', 'hd'],
+ 'gpt-image-1': ['auto', 'low', 'medium', 'high'],
+ 'gpt-image-2': ['auto', 'low', 'medium', 'high'],
+ }
+ const ALLOWED_BACKGROUNDS: Record<string, string[]> = {
+ 'gpt-image-1': ['auto', 'transparent', 'opaque'],
+ 'gpt-image-2': ['auto', 'opaque'],
+ }
+
+ const defaultSize = model === 'dall-e-3' ? '1024x1024' : 'auto'
+ const size = ALLOWED_SIZES[model]?.includes(params.size) ? params.size : defaultSize
+
const baseParams = {
prompt: params.prompt,
model,
@@ -129,16 +215,25 @@ export const ImageGeneratorBlock: BlockConfig = {
}
if (model === 'dall-e-3') {
- return {
- ...baseParams,
- quality: params.quality || 'standard',
- style: params.style || 'vivid',
- }
+ const quality = ALLOWED_QUALITIES['dall-e-3'].includes(params.quality)
+ ? params.quality
+ : 'standard'
+ const style = ['vivid', 'natural'].includes(params.style) ? params.style : 'vivid'
+ return { ...baseParams, quality, style }
}
- if (model === 'gpt-image-1') {
+ if (model === 'gpt-image-1' || model === 'gpt-image-2') {
+ const quality = ALLOWED_QUALITIES[model].includes(params.quality)
+ ? params.quality
+ : undefined
+ const background = ALLOWED_BACKGROUNDS[model].includes(params.background)
+ ? params.background
+ : undefined
return {
...baseParams,
- ...(params.background && { background: params.background }),
+ ...(quality && { quality }),
+ ...(background && { background }),
+ ...(params.outputFormat && { outputFormat: params.outputFormat }),
+ ...(params.moderation && { moderation: params.moderation }),
}
}
@@ -153,6 +248,8 @@ export const ImageGeneratorBlock: BlockConfig = {
quality: { type: 'string', description: 'Image quality level' },
style: { type: 'string', description: 'Image style' },
background: { type: 'string', description: 'Background type' },
+ outputFormat: { type: 'string', description: 'Output image format (png, jpeg, webp)' },
+ moderation: { type: 'string', description: 'Moderation level (auto or low)' },
apiKey: { type: 'string', description: 'OpenAI API key' },
},
outputs: {
diff --git a/apps/sim/blocks/blocks/knowledge.ts b/apps/sim/blocks/blocks/knowledge.ts
index 3d17e9cb402..f8a92235b2e 100644
--- a/apps/sim/blocks/blocks/knowledge.ts
+++ b/apps/sim/blocks/blocks/knowledge.ts
@@ -1,6 +1,7 @@
import { PackageSearchIcon } from '@/components/icons'
import { DEFAULT_RERANKER_MODEL, SUPPORTED_RERANKER_MODELS } from '@/lib/knowledge/reranker-models'
import type { BlockConfig } from '@/blocks/types'
+import { getCohereRerankerApiKeyCondition } from '@/blocks/utils'
export const KnowledgeBlock: BlockConfig = {
type: 'knowledge',
@@ -105,6 +106,28 @@ export const KnowledgeBlock: BlockConfig = {
and: { field: 'rerankerEnabled', value: true },
},
},
+ {
+ id: 'rerankerInputCount',
+ title: 'Documents Sent to Reranker',
+ type: 'short-input',
+ placeholder: 'Auto (4× results, capped at 100)',
+ mode: 'advanced',
+ condition: {
+ field: 'operation',
+ value: 'search',
+ and: { field: 'rerankerEnabled', value: true },
+ },
+ },
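+ // Example for the Auto default above: a search asking for 10 results sends
+ // min(4 * 10, 100) = 40 vector results to the reranker.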
+ {
+ id: 'apiKey',
+ title: 'Cohere API Key',
+ type: 'short-input',
+ placeholder: 'Enter your Cohere API key',
+ password: true,
+ connectionDroppable: false,
+ required: true,
+ condition: getCohereRerankerApiKeyCondition(),
+ },
// --- List Documents ---
{
@@ -419,6 +442,11 @@ export const KnowledgeBlock: BlockConfig = {
tagFilters: { type: 'string', description: 'Tag filter criteria' },
rerankerEnabled: { type: 'boolean', description: 'Apply Cohere reranking to search results' },
rerankerModel: { type: 'string', description: 'Cohere rerank model identifier' },
+ rerankerInputCount: {
+ type: 'number',
+ description: 'Number of vector results sent to the Cohere reranker (1–100)',
+ },
+ apiKey: { type: 'string', description: 'Cohere API key (self-hosted only)' },
documentTags: { type: 'string', description: 'Document tags' },
chunkSearch: { type: 'string', description: 'Search filter for chunks' },
chunkEnabledFilter: { type: 'string', description: 'Filter chunks by enabled status' },
diff --git a/apps/sim/blocks/blocks/logs.ts b/apps/sim/blocks/blocks/logs.ts
new file mode 100644
index 00000000000..d7665089f98
--- /dev/null
+++ b/apps/sim/blocks/blocks/logs.ts
@@ -0,0 +1,253 @@
+import { Library } from '@/components/emcn/icons'
+import type { BlockConfig } from '@/blocks/types'
+
+export const LogsBlock: BlockConfig = {
+ type: 'logs',
+ name: 'Logs',
+ description: 'Query workflow execution logs',
+ longDescription:
+ 'Search workflow execution logs in the current workspace, fetch a single log by id, or load full execution details with the per-block state snapshot.',
+ bgColor: '#EAB308',
+ bestPractices: `
+ - The block always operates on the current workspace; you cannot query other workspaces.
+ - 'Query Logs' returns summary rows. To get a full log entry (executionData, files), use 'Get Log by ID' on a row's id.
+ - Use 'Get Execution Details' (with an executionId) to inspect per-block state for a single run.
+ - Pagination is cursor-based: pass the previous response's nextCursor as Cursor to fetch the next page.
+ `,
+ icon: Library,
+ category: 'blocks',
+ docsLink: 'https://docs.sim.ai/api-reference/logs/getExecutionDetails',
+ subBlocks: [
+ {
+ id: 'operation',
+ title: 'Operation',
+ type: 'dropdown',
+ options: [
+ { label: 'Query Logs', id: 'query' },
+ { label: 'Get Log by ID', id: 'get_log' },
+ { label: 'Get Execution Details', id: 'get_execution' },
+ ],
+ placeholder: 'Select operation',
+ value: () => 'query',
+ },
+ {
+ id: 'workflowIds',
+ title: 'Workflow IDs',
+ type: 'short-input',
+ placeholder: 'Comma-separated workflow IDs',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'executionId',
+ title: 'Execution ID',
+ type: 'short-input',
+ placeholder: 'Filter by a single execution ID',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'level',
+ title: 'Level',
+ type: 'dropdown',
+ options: [
+ { label: 'All', id: 'all' },
+ { label: 'Info', id: 'info' },
+ { label: 'Error', id: 'error' },
+ { label: 'Running', id: 'running' },
+ { label: 'Pending', id: 'pending' },
+ ],
+ value: () => 'all',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'triggers',
+ title: 'Triggers',
+ type: 'short-input',
+ placeholder: 'api,webhook,schedule,manual,chat,mothership',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'limit',
+ title: 'Limit',
+ type: 'short-input',
+ placeholder: '100 (max 200)',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'startDate',
+ title: 'Start Date',
+ type: 'short-input',
+ placeholder: 'ISO 8601 timestamp',
+ mode: 'advanced',
+ wandConfig: {
+ enabled: true,
+ prompt:
+ 'Generate an ISO 8601 timestamp from the user description. Return ONLY the timestamp string.',
+ generationType: 'timestamp',
+ },
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'endDate',
+ title: 'End Date',
+ type: 'short-input',
+ placeholder: 'ISO 8601 timestamp',
+ mode: 'advanced',
+ wandConfig: {
+ enabled: true,
+ prompt:
+ 'Generate an ISO 8601 timestamp from the user description. Return ONLY the timestamp string.',
+ generationType: 'timestamp',
+ },
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'search',
+ title: 'Search',
+ type: 'short-input',
+ placeholder: 'Free-text search',
+ mode: 'advanced',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'sortBy',
+ title: 'Sort By',
+ type: 'dropdown',
+ options: [
+ { label: 'Date', id: 'date' },
+ { label: 'Duration', id: 'duration' },
+ { label: 'Cost', id: 'cost' },
+ { label: 'Status', id: 'status' },
+ ],
+ value: () => 'date',
+ mode: 'advanced',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'sortOrder',
+ title: 'Sort Order',
+ type: 'dropdown',
+ options: [
+ { label: 'Descending', id: 'desc' },
+ { label: 'Ascending', id: 'asc' },
+ ],
+ value: () => 'desc',
+ mode: 'advanced',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'cursor',
+ title: 'Cursor',
+ type: 'short-input',
+ placeholder: 'nextCursor from a previous response',
+ mode: 'advanced',
+ condition: { field: 'operation', value: 'query' },
+ },
+ {
+ id: 'logId',
+ title: 'Log ID',
+ type: 'short-input',
+ placeholder: 'Log entry ID',
+ condition: { field: 'operation', value: 'get_log' },
+ required: true,
+ },
+ {
+ id: 'executionIdLookup',
+ title: 'Execution ID',
+ type: 'short-input',
+ placeholder: 'Execution ID',
+ condition: { field: 'operation', value: 'get_execution' },
+ required: true,
+ },
+ ],
+ tools: {
+ access: ['logs_query', 'logs_get', 'logs_get_execution'],
+ config: {
+ tool: (params: Record<string, any>) => {
+ const operation = params.operation || 'query'
+ if (operation === 'get_log') return 'logs_get'
+ if (operation === 'get_execution') return 'logs_get_execution'
+ return 'logs_query'
+ },
+ params: (params: Record<string, any>) => {
+ const operation = params.operation || 'query'
+
+ if (operation === 'get_log') {
+ if (!params.logId) {
+ throw new Error('Logs Block Error: Log ID is required for get_log operation')
+ }
+ return { id: params.logId }
+ }
+
+ if (operation === 'get_execution') {
+ if (!params.executionIdLookup) {
+ throw new Error(
+ 'Logs Block Error: Execution ID is required for get_execution operation'
+ )
+ }
+ return { executionId: params.executionIdLookup }
+ }
+
+ const rawLimit =
+ params.limit !== undefined && params.limit !== null && params.limit !== ''
+ ? Number(params.limit)
+ : undefined
+ const limit = Number.isFinite(rawLimit) ? rawLimit : undefined
+
+ return {
+ workflowIds: params.workflowIds || undefined,
+ executionId: params.executionId || undefined,
+ level: params.level && params.level !== 'all' ? params.level : undefined,
+ triggers: params.triggers || undefined,
+ limit,
+ startDate: params.startDate || undefined,
+ endDate: params.endDate || undefined,
+ search: params.search || undefined,
+ cursor: params.cursor || undefined,
+ sortBy: params.sortBy || undefined,
+ sortOrder: params.sortOrder || undefined,
+ }
+ },
+ },
+ },
+ inputs: {
+ operation: { type: 'string', description: 'Operation to perform' },
+ workflowIds: { type: 'string', description: 'Comma-separated workflow IDs' },
+ executionId: { type: 'string', description: 'Execution ID filter (query operation)' },
+ level: { type: 'string', description: 'Log level filter' },
+ triggers: { type: 'string', description: 'Comma-separated triggers' },
+ limit: { type: 'number', description: 'Max logs to return (default 100, max 200)' },
+ startDate: { type: 'string', description: 'ISO 8601 lower bound' },
+ endDate: { type: 'string', description: 'ISO 8601 upper bound' },
+ search: { type: 'string', description: 'Free-text search term' },
+ sortBy: { type: 'string', description: "'date' | 'duration' | 'cost' | 'status'" },
+ sortOrder: { type: 'string', description: "'desc' | 'asc'" },
+ cursor: { type: 'string', description: 'Pagination cursor' },
+ logId: { type: 'string', description: 'Log entry ID (get_log operation)' },
+ executionIdLookup: {
+ type: 'string',
+ description: 'Execution ID (get_execution operation)',
+ },
+ },
+ outputs: {
+ logs: { type: 'json', description: 'Array of log summary entries (query operation)' },
+ nextCursor: {
+ type: 'string',
+ description: 'Cursor for next page; null when no more results (query operation)',
+ },
+ log: { type: 'json', description: 'Full log entry (get_log operation)' },
+ executionId: { type: 'string', description: 'Execution ID (get_execution operation)' },
+ workflowId: { type: 'string', description: 'Workflow ID (get_execution operation)' },
+ workflowState: {
+ type: 'json',
+ description: 'Per-block state snapshot (get_execution operation)',
+ },
+ childWorkflowSnapshots: {
+ type: 'json',
+ description: 'Snapshots for child workflows (get_execution operation)',
+ },
+ executionMetadata: {
+ type: 'json',
+ description: 'Trigger, timestamps, totalDurationMs, cost (get_execution operation)',
+ },
+ },
+}
diff --git a/apps/sim/blocks/registry.ts b/apps/sim/blocks/registry.ts
index e7ca943af3c..aacf6d49431 100644
--- a/apps/sim/blocks/registry.ts
+++ b/apps/sim/blocks/registry.ts
@@ -113,6 +113,7 @@ import { LemlistBlock } from '@/blocks/blocks/lemlist'
import { LinearBlock, LinearV2Block } from '@/blocks/blocks/linear'
import { LinkedInBlock } from '@/blocks/blocks/linkedin'
import { LinkupBlock } from '@/blocks/blocks/linkup'
+import { LogsBlock } from '@/blocks/blocks/logs'
import { LoopsBlock } from '@/blocks/blocks/loops'
import { LumaBlock } from '@/blocks/blocks/luma'
import { MailchimpBlock } from '@/blocks/blocks/mailchimp'
@@ -361,6 +362,7 @@ export const registry: Record<string, BlockConfig> = {
linear_v2: LinearV2Block,
linkedin: LinkedInBlock,
linkup: LinkupBlock,
+ logs: LogsBlock,
loops: LoopsBlock,
luma: LumaBlock,
mailchimp: MailchimpBlock,
diff --git a/apps/sim/blocks/utils.ts b/apps/sim/blocks/utils.ts
index b70ca7af504..c22596b34cd 100644
--- a/apps/sim/blocks/utils.ts
+++ b/apps/sim/blocks/utils.ts
@@ -1,5 +1,10 @@
import { toError } from '@sim/utils/errors'
-import { isAzureConfigured, isHosted, isOllamaConfigured } from '@/lib/core/config/feature-flags'
+import {
+ isAzureConfigured,
+ isCohereConfigured,
+ isHosted,
+ isOllamaConfigured,
+} from '@/lib/core/config/feature-flags'
import { getScopesForService } from '@/lib/oauth/utils'
import { buildCanonicalIndex } from '@/lib/workflows/subblocks/visibility'
import type { BlockOutput, OutputFieldDefinition, SubBlockConfig } from '@/blocks/types'
@@ -184,6 +189,27 @@ export function getApiKeyCondition() {
}
}
+/**
+ * Visibility condition for the Cohere reranker API key field on the Knowledge block.
+ * Hidden on hosted Sim (platform supplies the key via workspace BYOK or rotating env keys)
+ * and on self-hosted deployments that have set `NEXT_PUBLIC_COHERE_CONFIGURED=true` to
+ * indicate `COHERE_API_KEY` is pre-configured server-side. Otherwise shown (and required)
+ * whenever reranking is enabled for a search operation, mirroring the agent block's
+ * `getApiKeyCondition` pattern.
+ */
+export function getCohereRerankerApiKeyCondition() {
+ return () => {
+ if (isHosted || isCohereConfigured) {
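+ // A condition that can never match keeps the API key field hidden.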
+ return { field: 'operation', value: '__never_show__' }
+ }
+ return {
+ field: 'operation',
+ value: 'search',
+ and: { field: 'rerankerEnabled', value: true },
+ }
+ }
+}
+
/**
* Returns the standard provider credential subblocks used by LLM-based blocks.
* This includes: Vertex AI OAuth, API Key, Azure (OpenAI + Anthropic), Vertex AI config, and Bedrock config.
diff --git a/apps/sim/components/emcn/components/code/copy-code-button.tsx b/apps/sim/components/emcn/components/code/copy-code-button.tsx
index 5ef81dfb8a3..93ace78bf57 100644
--- a/apps/sim/components/emcn/components/code/copy-code-button.tsx
+++ b/apps/sim/components/emcn/components/code/copy-code-button.tsx
@@ -1,8 +1,8 @@
'use client'
-import { useCallback, useEffect, useRef, useState } from 'react'
import { Button, Check, Copy } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
+import { useCopyToClipboard } from '@/hooks/use-copy-to-clipboard'
interface CopyCodeButtonProps {
code: string
@@ -10,33 +10,16 @@ interface CopyCodeButtonProps {
}
export function CopyCodeButton({ code, className }: CopyCodeButtonProps) {
- const [copied, setCopied] = useState(false)
- const timerRef = useRef<ReturnType<typeof setTimeout> | null>(null)
-
- const handleCopy = useCallback(async () => {
- try {
- await navigator.clipboard.writeText(code)
- setCopied(true)
- if (timerRef.current) clearTimeout(timerRef.current)
- timerRef.current = setTimeout(() => setCopied(false), 2000)
- } catch {}
- }, [code])
-
- useEffect(
- () => () => {
- if (timerRef.current) clearTimeout(timerRef.current)
- },
- []
- )
+ const { copied, copy } = useCopyToClipboard()
return (
)
}
diff --git a/apps/sim/components/emcn/components/index.ts b/apps/sim/components/emcn/components/index.ts
index 15b6cefd77f..0f30eeb09ac 100644
--- a/apps/sim/components/emcn/components/index.ts
+++ b/apps/sim/components/emcn/components/index.ts
@@ -126,6 +126,7 @@ export {
SModalTrigger,
} from './s-modal/s-modal'
export { SecretInput, type SecretInputProps } from './secret-input/secret-input'
+export { SecretReveal, type SecretRevealProps } from './secret-reveal/secret-reveal'
export { Skeleton } from './skeleton/skeleton'
export { Slider, type SliderProps } from './slider/slider'
export { Switch } from './switch/switch'
diff --git a/apps/sim/components/emcn/components/secret-reveal/secret-reveal.tsx b/apps/sim/components/emcn/components/secret-reveal/secret-reveal.tsx
new file mode 100644
index 00000000000..1357ebfc3aa
--- /dev/null
+++ b/apps/sim/components/emcn/components/secret-reveal/secret-reveal.tsx
@@ -0,0 +1,77 @@
+/**
+ * A read-only display for a one-time secret reveal: the value renders inside
+ * a bordered code box with a copy button, or as masked dots when redacted.
+ *
+ * @remarks
+ * Use for surfaces that show a freshly-generated credential (API key, signing
+ * secret, etc.) once and then need to fall back to a redacted state on
+ * subsequent renders. Pair with `redacted` (or simply omit `value`) to render
+ * the masked state without a copy affordance.
+ *
+ * @example
+ * ```tsx
+ * import { SecretReveal } from '@/components/emcn'
+ *
+ *
+ *
+ * ```
+ */
+'use client'
+
+import { Button, Check, Copy } from '@/components/emcn'
+import { cn } from '@/lib/core/utils/cn'
+import { useCopyToClipboard } from '@/hooks/use-copy-to-clipboard'
+
+const REDACTED_DOTS = '••••••••••••••••••••••••••••••••'
+
+export interface SecretRevealProps {
+ /** Secret value to display. When absent or `redacted` is true, renders masked dots. */
+ value?: string
+ /** Force the masked state even when `value` is provided. */
+ redacted?: boolean
+ className?: string
+}
+
+export function SecretReveal({ value, className, redacted = false }: SecretRevealProps) {
+ const { copied, copy } = useCopyToClipboard()
+ const isHidden = redacted || !value
+
+ const handleCopy = () => {
+ if (isHidden || !value) return
+ copy(value)
+ }
+
+ return (
+
+
+
+ {isHidden ? REDACTED_DOTS : value}
+
+
+ {!isHidden && (
+
+ )}
+
+ )
+}
diff --git a/apps/sim/executor/execution/block-executor.ts b/apps/sim/executor/execution/block-executor.ts
index 340b2aab01a..9a3c22e8529 100644
--- a/apps/sim/executor/execution/block-executor.ts
+++ b/apps/sim/executor/execution/block-executor.ts
@@ -187,7 +187,8 @@ export class BlockExecutor {
}
}
- this.state.setBlockOutput(node.id, normalizedOutput, duration)
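+ // Strip child workflow trace spans from the output stored in execution state and
+ // returned to downstream blocks; trace spans are surfaced through block logs instead.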
+ const { childTraceSpans: _traces, ...outputForState } = normalizedOutput
+ this.state.setBlockOutput(node.id, outputForState as NormalizedBlockOutput, duration)
if (!isSentinel && blockLog) {
const childWorkflowInstanceId =
@@ -211,7 +212,7 @@ export class BlockExecutor {
)
}
- return normalizedOutput
+ return outputForState as NormalizedBlockOutput
} catch (error) {
return await this.handleBlockError(
error,
@@ -270,7 +271,6 @@ export class BlockExecutor {
}
if (ChildWorkflowError.isChildWorkflowError(error)) {
- errorOutput.childTraceSpans = error.childTraceSpans
errorOutput.childWorkflowName = error.childWorkflowName
if (error.childWorkflowSnapshotId) {
errorOutput.childWorkflowSnapshotId = error.childWorkflowSnapshotId
@@ -287,8 +287,8 @@ export class BlockExecutor {
blockLog.input = this.sanitizeInputsForLog(input)
blockLog.output = filterOutputForLog(block.metadata?.id || '', errorOutput, { block })
- if (errorOutput.childTraceSpans && Array.isArray(errorOutput.childTraceSpans)) {
- blockLog.childTraceSpans = errorOutput.childTraceSpans
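+ // Read trace spans directly from the ChildWorkflowError now that they are no longer
+ // copied onto the error output.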
+ if (ChildWorkflowError.isChildWorkflowError(error) && error.childTraceSpans.length > 0) {
+ blockLog.childTraceSpans = error.childTraceSpans
}
}
diff --git a/apps/sim/hooks/queries/logs.ts b/apps/sim/hooks/queries/logs.ts
index bd5b0e5e695..00b1aac4985 100644
--- a/apps/sim/hooks/queries/logs.ts
+++ b/apps/sim/hooks/queries/logs.ts
@@ -7,6 +7,7 @@ import {
useQuery,
useQueryClient,
} from '@tanstack/react-query'
+import { isApiClientError } from '@/lib/api/client/errors'
import { requestJson } from '@/lib/api/client/request'
import {
cancelWorkflowExecutionContract,
@@ -14,22 +15,27 @@ import {
type ExecutionSnapshotData,
getDashboardStatsContract,
getExecutionSnapshotContract,
+ getLogByExecutionIdContract,
getLogDetailContract,
listLogsContract,
type SegmentStats,
- type WorkflowLogData,
+ type WorkflowLogDetail,
+ type WorkflowLogSummary,
type WorkflowStats,
} from '@/lib/api/contracts/logs'
import { getEndDateFromTimeRange, getStartDateFromTimeRange } from '@/lib/logs/filters'
import { parseQuery, queryToApiParams } from '@/lib/logs/query-parser'
-import type { TimeRange, WorkflowLog } from '@/stores/logs/filters/types'
+import type { TimeRange } from '@/stores/logs/filters/types'
export type { DashboardStatsResponse, SegmentStats, WorkflowStats }
+export type LogSortBy = 'date' | 'duration' | 'cost' | 'status'
+export type LogSortOrder = 'asc' | 'desc'
+
export const logKeys = {
all: ['logs'] as const,
lists: () => [...logKeys.all, 'list'] as const,
- list: (workspaceId: string | undefined, filters: Omit) =>
+ list: (workspaceId: string | undefined, filters: LogFilters) =>
[...logKeys.lists(), workspaceId ?? '', filters] as const,
details: () => [...logKeys.all, 'detail'] as const,
detail: (logId: string | undefined) => [...logKeys.details(), logId ?? ''] as const,
@@ -44,7 +50,7 @@ export const logKeys = {
[...logKeys.executionSnapshots(), executionId ?? ''] as const,
}
-interface LogFilters {
+export interface LogFilters {
timeRange: TimeRange
startDate?: string
endDate?: string
@@ -54,15 +60,14 @@ interface LogFilters {
triggers: string[]
searchQuery: string
limit: number
+ sortBy: LogSortBy
+ sortOrder: LogSortOrder
}
-const toWorkflowLog = (log: WorkflowLogData): WorkflowLog => log as WorkflowLog
-
-/**
- * Applies common filter parameters to a URLSearchParams object.
- * Shared between paginated and non-paginated log fetches.
- */
-function applyFilterParams(params: URLSearchParams, filters: Omit): void {
+function applyFilterParams(
+ params: URLSearchParams,
+ filters: Omit
+): void {
if (filters.level !== 'all') {
params.set('level', filters.level)
}
@@ -99,61 +104,53 @@ function applyFilterParams(params: URLSearchParams, filters: Omit {
+): Promise {
const apiData = await requestJson(listLogsContract, {
- query: buildQueryParams(workspaceId, filters, page),
+ query: buildListQuery(workspaceId, filters, cursor),
signal,
})
- const hasMore = apiData.data.length === filters.limit && apiData.page < apiData.totalPages
return {
- logs: apiData.data.map(toWorkflowLog),
- hasMore,
- nextPage: hasMore ? page + 1 : undefined,
+ logs: apiData.data,
+ nextCursor: apiData.nextCursor,
}
}
-export async function fetchLogDetail(logId: string, signal?: AbortSignal): Promise<WorkflowLog> {
- const { data } = await requestJson(getLogDetailContract, {
- params: { id: logId },
- signal,
- })
- return toWorkflowLog(data)
-}
-
-async function fetchLogByExecutionId(
+export async function fetchLogDetail(
+ logId: string,
workspaceId: string,
- executionId: string,
signal?: AbortSignal
-): Promise<WorkflowLog | null> {
- const apiData = await requestJson(listLogsContract, {
- query: {
- workspaceId,
- executionId,
- details: 'full',
- limit: 1,
- },
+): Promise<WorkflowLogDetail> {
+ const { data } = await requestJson(getLogDetailContract, {
+ params: { id: logId },
+ query: { workspaceId },
signal,
})
- return apiData.data?.[0] ? toWorkflowLog(apiData.data[0]) : null
+ return data
}
interface UseLogsListOptions {
@@ -172,10 +169,10 @@ export function useLogsList(
fetchLogsPage(workspaceId as string, filters, pageParam, signal),
enabled: Boolean(workspaceId) && (options?.enabled ?? true),
refetchInterval: options?.refetchInterval ?? false,
- staleTime: 0,
+ staleTime: 30 * 1000,
placeholderData: keepPreviousData,
- initialPageParam: 1,
- getNextPageParam: (lastPage) => lastPage.nextPage,
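+ // Cursor-based pagination: the first page is requested with a null cursor and each
+ // following page passes the nextCursor returned by the previous response.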
+ initialPageParam: null as string | null,
+ getNextPageParam: (lastPage) => lastPage.nextCursor,
})
}
@@ -184,55 +181,57 @@ interface UseLogDetailOptions {
refetchInterval?:
| number
| false
- | ((query: { state: { data?: WorkflowLog } }) => number | false | undefined)
+ | ((query: { state: { data?: WorkflowLogDetail } }) => number | false | undefined)
}
-export function useLogDetail(logId: string | undefined, options?: UseLogDetailOptions) {
+export function useLogDetail(
+ logId: string | undefined,
+ workspaceId: string | undefined,
+ options?: UseLogDetailOptions
+) {
return useQuery({
queryKey: logKeys.detail(logId),
- queryFn: ({ signal }) => fetchLogDetail(logId as string, signal),
- enabled: Boolean(logId) && (options?.enabled ?? true),
+ queryFn: ({ signal }) => fetchLogDetail(logId as string, workspaceId as string, signal),
+ enabled: Boolean(logId) && Boolean(workspaceId) && (options?.enabled ?? true),
refetchInterval: options?.refetchInterval ?? false,
staleTime: 30 * 1000,
+ retry: (failureCount, err) =>
+ !(isApiClientError(err) && err.status === 404) && failureCount < 3,
})
}
-/**
- * Looks up a workflow log by its `executionId` (the id stored on table workflow cells).
- * Returns the full log shape so the LogDetails sidebar can render directly without
- * an extra detail fetch.
- */
export function useLogByExecutionId(
workspaceId: string | undefined,
executionId: string | null | undefined
) {
+ const queryClient = useQueryClient()
return useQuery({
queryKey: logKeys.byExecution(workspaceId, executionId ?? undefined),
- queryFn: ({ signal }) =>
- fetchLogByExecutionId(workspaceId as string, executionId as string, signal),
+ queryFn: async ({ signal }) => {
+ const { data } = await requestJson(getLogByExecutionIdContract, {
+ params: { executionId: executionId as string },
+ query: { workspaceId: workspaceId as string },
+ signal,
+ })
+ queryClient.setQueryData(logKeys.detail(data.id), data)
+ return data
+ },
enabled: Boolean(workspaceId) && Boolean(executionId),
staleTime: 30 * 1000,
})
}
-/**
- * Prefetches log detail data on hover for instant panel rendering on click.
- */
-export function prefetchLogDetail(queryClient: QueryClient, logId: string) {
+export function prefetchLogDetail(queryClient: QueryClient, logId: string, workspaceId: string) {
queryClient.prefetchQuery({
queryKey: logKeys.detail(logId),
- queryFn: ({ signal }) => fetchLogDetail(logId, signal),
+ queryFn: ({ signal }) => fetchLogDetail(logId, workspaceId, signal),
staleTime: 30 * 1000,
})
}
-/**
- * Fetches dashboard stats from the server-side aggregation endpoint.
- * Uses SQL aggregation for efficient computation without arbitrary limits.
- */
async function fetchDashboardStats(
workspaceId: string,
- filters: Omit,
+ filters: Omit,
signal?: AbortSignal
): Promise {
const params = new URLSearchParams()
@@ -252,13 +251,9 @@ interface UseDashboardStatsOptions {
refetchInterval?: number | false
}
-/**
- * Hook for fetching dashboard stats using server-side aggregation.
- * No arbitrary limits - uses SQL aggregation for accurate metrics.
- */
export function useDashboardStats(
workspaceId: string | undefined,
- filters: Omit,
+ filters: Omit,
options?: UseDashboardStatsOptions
) {
return useQuery({
@@ -266,7 +261,7 @@ export function useDashboardStats(
queryFn: ({ signal }) => fetchDashboardStats(workspaceId as string, filters, signal),
enabled: Boolean(workspaceId) && (options?.enabled ?? true),
refetchInterval: options?.refetchInterval ?? false,
- staleTime: 0,
+ staleTime: 30 * 1000,
placeholderData: keepPreviousData,
})
}
@@ -293,12 +288,10 @@ export function useExecutionSnapshot(executionId: string | undefined) {
queryKey: logKeys.executionSnapshot(executionId),
queryFn: ({ signal }) => fetchExecutionSnapshot(executionId as string, signal),
enabled: Boolean(executionId),
- staleTime: 5 * 60 * 1000, // 5 minutes - execution snapshots don't change
+ staleTime: 5 * 60 * 1000,
})
}
-type LogsPage = { logs: WorkflowLog[]; hasMore: boolean; nextPage: number | undefined }
-
export function useCancelExecution() {
const queryClient = useQueryClient()
return useMutation({
@@ -322,29 +315,47 @@ export function useCancelExecution() {
queryKey: logKeys.lists(),
})
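+ // Track which log row matched this execution so its detail cache entry can be patched
+ // optimistically and restored on error alongside the list update.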
+ let affectedLogId: string | null = null
queryClient.setQueriesData>({ queryKey: logKeys.lists() }, (old) => {
if (!old) return old
return {
...old,
pages: old.pages.map((page) => ({
...page,
- logs: page.logs.map((log) =>
- log.executionId === executionId ? { ...log, status: 'cancelling' } : log
- ),
+ logs: page.logs.map((log) => {
+ if (log.executionId !== executionId) return log
+ affectedLogId = log.id
+ return { ...log, status: 'cancelling' }
+ }),
})),
}
})
- return { previousQueries }
+ let previousDetail: WorkflowLogDetail | undefined
+ if (affectedLogId) {
+ previousDetail = queryClient.getQueryData<WorkflowLogDetail>(logKeys.detail(affectedLogId))
+ if (previousDetail) {
+ queryClient.setQueryData(logKeys.detail(affectedLogId), {
+ ...previousDetail,
+ status: 'cancelling',
+ })
+ }
+ }
+
+ return { previousQueries, affectedLogId, previousDetail }
},
onError: (_err, _variables, context) => {
for (const [queryKey, data] of context?.previousQueries ?? []) {
queryClient.setQueryData(queryKey, data)
}
+ if (context?.affectedLogId && context.previousDetail !== undefined) {
+ queryClient.setQueryData(logKeys.detail(context.affectedLogId), context.previousDetail)
+ }
},
onSettled: () => {
queryClient.invalidateQueries({ queryKey: logKeys.lists() })
queryClient.invalidateQueries({ queryKey: logKeys.details() })
+ queryClient.invalidateQueries({ queryKey: logKeys.byExecutionAll() })
queryClient.invalidateQueries({ queryKey: logKeys.stats() })
},
})
@@ -364,9 +375,6 @@ export function useRetryExecution() {
const data = await res.json().catch(() => ({}))
throw new Error(data.error || 'Failed to retry execution')
}
- // The ReadableStream is lazy — start() only runs when read.
- // Read one chunk to trigger execution, then cancel. Execution continues
- // server-side after client disconnect.
const reader = res.body?.getReader()
if (reader) {
await reader.read()
@@ -377,6 +385,7 @@ export function useRetryExecution() {
onSettled: () => {
queryClient.invalidateQueries({ queryKey: logKeys.lists() })
queryClient.invalidateQueries({ queryKey: logKeys.details() })
+ queryClient.invalidateQueries({ queryKey: logKeys.byExecutionAll() })
queryClient.invalidateQueries({ queryKey: logKeys.stats() })
},
})
diff --git a/apps/sim/hooks/use-copy-to-clipboard.ts b/apps/sim/hooks/use-copy-to-clipboard.ts
new file mode 100644
index 00000000000..751a94cf76c
--- /dev/null
+++ b/apps/sim/hooks/use-copy-to-clipboard.ts
@@ -0,0 +1,59 @@
+'use client'
+
+import { useCallback, useEffect, useRef, useState } from 'react'
+
+interface UseCopyToClipboardOptions {
+ /** How long the `copied` flag stays true before resetting. Defaults to 2000ms. */
+ resetMs?: number
+}
+
+interface UseCopyToClipboardReturn {
+ copied: boolean
+ copy: (text: string) => Promise<boolean>
+}
+
+/**
+ * Copy text to the clipboard with a transient `copied` flag for swap-icon
+ * feedback (e.g. Copy → Check for ~2s).
+ *
+ * Replaces the `[copied, setCopied] + setTimeout` boilerplate that's been
+ * duplicated across ~30 callsites. Each `copy()` call resets the timer so
+ * back-to-back copies don't stack timeouts; the timer is cleared on unmount.
+ *
+ * @example
+ * const { copied, copy } = useCopyToClipboard()
+ *
+ */
+export function useCopyToClipboard(
+ options: UseCopyToClipboardOptions = {}
+): UseCopyToClipboardReturn {
+ const { resetMs = 2000 } = options
+ const [copied, setCopied] = useState(false)
+ const timerRef = useRef<ReturnType<typeof setTimeout> | null>(null)
+
+ const copy = useCallback(
+ async (text: string): Promise<boolean> => {
+ try {
+ await navigator.clipboard.writeText(text)
+ setCopied(true)
+ if (timerRef.current) clearTimeout(timerRef.current)
+ timerRef.current = setTimeout(() => setCopied(false), resetMs)
+ return true
+ } catch {
+ return false
+ }
+ },
+ [resetMs]
+ )
+
+ useEffect(
+ () => () => {
+ if (timerRef.current) clearTimeout(timerRef.current)
+ },
+ []
+ )
+
+ return { copied, copy }
+}
diff --git a/apps/sim/hooks/use-task-events.test.ts b/apps/sim/hooks/use-task-events.test.ts
index 2e68175b935..e81edbca6dd 100644
--- a/apps/sim/hooks/use-task-events.test.ts
+++ b/apps/sim/hooks/use-task-events.test.ts
@@ -9,14 +9,46 @@ import { handleTaskStatusEvent } from '@/hooks/use-task-events'
describe('handleTaskStatusEvent', () => {
const queryClient = {
+ getQueryData: vi.fn(),
invalidateQueries: vi.fn().mockResolvedValue(undefined),
- } satisfies Pick<QueryClient, 'invalidateQueries'>
+ removeQueries: vi.fn(),
+ } satisfies Pick<QueryClient, 'getQueryData' | 'invalidateQueries' | 'removeQueries'>
beforeEach(() => {
vi.clearAllMocks()
+ queryClient.getQueryData.mockReturnValue(undefined)
})
- it('invalidates only the task list for completed task events', () => {
+ it('invalidates the task list and detail for completed task events', () => {
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'completed',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('keeps completed task detail when an unkeyed completion races an active stream', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'new-stream' }, { id: 'live-assistant:new-stream' }],
+ activeStreamId: 'new-stream',
+ resources: [],
+ })
+
handleTaskStatusEvent(
queryClient,
'ws-1',
@@ -31,15 +63,333 @@ describe('handleTaskStatusEvent', () => {
expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
queryKey: taskKeys.list('ws-1'),
})
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
})
- it('keeps list invalidation only for non-completed task events', () => {
+ it('keeps completed task detail when a newer optimistic stream is active', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'old-stream' }, { id: 'new-stream' }],
+ activeStreamId: 'new-stream',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'completed',
+ streamId: 'old-stream',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('keeps completed task detail when only a newer optimistic stream is cached', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'new-stream' }, { id: 'live-assistant:new-stream' }],
+ activeStreamId: 'new-stream',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'completed',
+ streamId: 'old-stream',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('invalidates completed task detail when the active stream disagreement is only stale cache', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'new-stream' }, { id: 'old-stream' }],
+ activeStreamId: 'new-stream',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'completed',
+ streamId: 'old-stream',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('invalidates completed task detail when a missing stream may be newer server state', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'old-stream' }],
+ activeStreamId: 'old-stream',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'completed',
+ streamId: 'new-stream',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('invalidates completed task detail when the completed stream is active', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [],
+ activeStreamId: 'stream-1',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'completed',
+ streamId: 'stream-1',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('invalidates the task list and detail for metadata-changing task events', () => {
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'renamed',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('invalidates the task list and removes detail cache for deleted task events', () => {
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'deleted',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.removeQueries).toHaveBeenCalledTimes(1)
+ expect(queryClient.removeQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ })
+
+ it('invalidates the task list and detail for started task events', () => {
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'started',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('keeps started task detail when an unkeyed started event races an active stream', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'new-stream' }, { id: 'live-assistant:new-stream' }],
+ activeStreamId: 'new-stream',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'started',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('keeps started task detail when the started stream is already active', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'stream-1' }],
+ activeStreamId: 'stream-1',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'started',
+ streamId: 'stream-1',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('keeps started task detail when a stale started stream is older than the active stream', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'old-stream' }, { id: 'new-stream' }],
+ activeStreamId: 'new-stream',
+ resources: [],
+ })
+
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'started',
+ streamId: 'old-stream',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(1)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('invalidates started task detail when a missing stream may be newer server state', () => {
+ queryClient.getQueryData.mockReturnValue({
+ id: 'chat-1',
+ title: null,
+ messages: [{ id: 'old-stream' }],
+ activeStreamId: 'old-stream',
+ resources: [],
+ })
+
handleTaskStatusEvent(
queryClient,
'ws-1',
JSON.stringify({
chatId: 'chat-1',
type: 'started',
+ streamId: 'new-stream',
+ timestamp: Date.now(),
+ })
+ )
+
+ expect(queryClient.invalidateQueries).toHaveBeenCalledTimes(2)
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.list('ws-1'),
+ })
+ expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
+ queryKey: taskKeys.detail('chat-1'),
+ })
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
+ })
+
+ it('keeps list invalidation only for unknown task event types', () => {
+ handleTaskStatusEvent(
+ queryClient,
+ 'ws-1',
+ JSON.stringify({
+ chatId: 'chat-1',
+ type: 'archived',
timestamp: Date.now(),
})
)
@@ -48,11 +398,13 @@ describe('handleTaskStatusEvent', () => {
expect(queryClient.invalidateQueries).toHaveBeenCalledWith({
queryKey: taskKeys.list('ws-1'),
})
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
})
it('does not invalidate when task event payload is invalid', () => {
handleTaskStatusEvent(queryClient, 'ws-1', '{')
expect(queryClient.invalidateQueries).not.toHaveBeenCalled()
+ expect(queryClient.removeQueries).not.toHaveBeenCalled()
})
})
diff --git a/apps/sim/hooks/use-task-events.ts b/apps/sim/hooks/use-task-events.ts
index 0d6d4f7d0e9..b9a5216dad4 100644
--- a/apps/sim/hooks/use-task-events.ts
+++ b/apps/sim/hooks/use-task-events.ts
@@ -2,13 +2,68 @@ import { useEffect } from 'react'
import { createLogger } from '@sim/logger'
import type { QueryClient } from '@tanstack/react-query'
import { useQueryClient } from '@tanstack/react-query'
-import { taskKeys } from '@/hooks/queries/tasks'
+import { getLiveAssistantMessageId } from '@/lib/copilot/chat/effective-transcript'
+import { type TaskChatHistory, taskKeys } from '@/hooks/queries/tasks'
const logger = createLogger('TaskEvents')
+const TASK_STATUS_TYPES = ['started', 'completed', 'created', 'deleted', 'renamed'] as const
+type TaskStatusEventType = (typeof TASK_STATUS_TYPES)[number]
+const TASK_STATUS_TYPE_SET = new Set<string>(TASK_STATUS_TYPES)
+
interface TaskStatusEventPayload {
chatId?: string
- type?: 'started' | 'completed' | 'created' | 'deleted' | 'renamed'
+ type?: TaskStatusEventType
+ streamId?: string
+}
+
+const DETAIL_INVALIDATING_TASK_STATUS_TYPES = new Set([
+ 'started',
+ 'completed',
+ 'renamed',
+])
+
+function isTaskStatusEventType(value: unknown): value is TaskStatusEventType {
+ return typeof value === 'string' && TASK_STATUS_TYPE_SET.has(value)
+}
+
+function isLocalOptimisticActiveStream(current: TaskChatHistory | undefined) {
+ if (!current?.activeStreamId) return false
+ const liveAssistantId = getLiveAssistantMessageId(current.activeStreamId)
+ return current.messages.some((message) => message.id === liveAssistantId)
+}
+
+/**
+ * Returns true when the cached active stream is known to be later in the
+ * chronological transcript than the stream that emitted this status event.
+ * If either stream is absent from the transcript, callers should refetch
+ * instead of inferring order from incomplete cache state.
+ */
+function hasNewerKnownActiveStream(current: TaskChatHistory | undefined, streamId: string) {
+ if (!current?.activeStreamId || current.activeStreamId === streamId) return false
+
+ const activeIndex = current.messages.findIndex((message) => message.id === current.activeStreamId)
+ const eventStreamIndex = current.messages.findIndex((message) => message.id === streamId)
+ if (activeIndex === -1) return false
+ if (eventStreamIndex === -1) return false
+ return activeIndex > eventStreamIndex
+}
+
+function shouldSkipDetailInvalidationForStreamEvent(
+ current: TaskChatHistory | undefined,
+ payload: TaskStatusEventPayload
+) {
+ if (payload.type !== 'started' && payload.type !== 'completed') return false
+ if (!current?.activeStreamId) return false
+ if (!payload.streamId) return isLocalOptimisticActiveStream(current)
+ if (payload.type === 'started' && current.activeStreamId === payload.streamId) return true
+ if (current.activeStreamId === payload.streamId) return false
+ if (hasNewerKnownActiveStream(current, payload.streamId)) return true
+ return (
+ payload.type === 'completed' &&
+ isLocalOptimisticActiveStream(current) &&
+ !current.messages.some((message) => message.id === payload.streamId)
+ )
}
function parseTaskStatusEventPayload(data: unknown): TaskStatusEventPayload | null {
@@ -30,14 +85,13 @@ function parseTaskStatusEventPayload(data: unknown): TaskStatusEventPayload | nu
return {
...(typeof record.chatId === 'string' ? { chatId: record.chatId } : {}),
- ...(typeof record.type === 'string'
- ? { type: record.type as TaskStatusEventPayload['type'] }
- : {}),
+ ...(isTaskStatusEventType(record.type) ? { type: record.type } : {}),
+ ...(typeof record.streamId === 'string' ? { streamId: record.streamId } : {}),
}
}
export function handleTaskStatusEvent(
- queryClient: Pick<QueryClient, 'invalidateQueries'>,
+ queryClient: Pick<QueryClient, 'invalidateQueries' | 'removeQueries' | 'getQueryData'>,
workspaceId: string,
data: unknown
): void {
@@ -48,6 +102,20 @@ export function handleTaskStatusEvent(
}
queryClient.invalidateQueries({ queryKey: taskKeys.list(workspaceId) })
+ if (!payload.chatId) return
+ if (payload.type === 'deleted') {
+ queryClient.removeQueries({ queryKey: taskKeys.detail(payload.chatId) })
+ return
+ }
+ if (payload.type === 'started' || payload.type === 'completed') {
+ const current = queryClient.getQueryData<TaskChatHistory>(taskKeys.detail(payload.chatId))
+ if (shouldSkipDetailInvalidationForStreamEvent(current, payload)) {
+ return
+ }
+ }
+ if (payload.type && DETAIL_INVALIDATING_TASK_STATUS_TYPES.has(payload.type)) {
+ queryClient.invalidateQueries({ queryKey: taskKeys.detail(payload.chatId) })
+ }
}
/**
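// Usage sketch (illustrative, not part of the patch): how the stream-aware skip logic
// above plays out when a 'completed' event arrives for an older stream while a newer
// stream is already live in the detail cache. The fixture shape mirrors the tests; the
// real QueryClient structurally satisfies the Pick<...> the handler accepts.
import { QueryClient } from '@tanstack/react-query'
import { taskKeys } from '@/hooks/queries/tasks'
import { handleTaskStatusEvent } from '@/hooks/use-task-events'

const queryClient = new QueryClient()
queryClient.setQueryData(taskKeys.detail('chat-1'), {
  id: 'chat-1',
  title: null,
  messages: [{ id: 'old-stream' }, { id: 'new-stream' }], // transcript order: old first
  activeStreamId: 'new-stream',
  resources: [],
})

// The cached active stream ('new-stream') sits later in the transcript than the event's
// stream, so the handler invalidates only the task list and leaves the detail cache
// (and the in-flight stream it backs) untouched.
handleTaskStatusEvent(
  queryClient,
  'ws-1',
  JSON.stringify({
    chatId: 'chat-1',
    type: 'completed',
    streamId: 'old-stream',
    timestamp: Date.now(),
  })
)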
diff --git a/apps/sim/lib/api/contracts/knowledge/search.ts b/apps/sim/lib/api/contracts/knowledge/search.ts
index 291257e7b16..ea1dff75ce0 100644
--- a/apps/sim/lib/api/contracts/knowledge/search.ts
+++ b/apps/sim/lib/api/contracts/knowledge/search.ts
@@ -36,6 +36,24 @@ export const knowledgeSearchBodySchema = z
.transform((val) => val || undefined),
rerankerEnabled: z.boolean().optional().default(false),
rerankerModel: rerankerModelSchema.optional().default(DEFAULT_RERANKER_MODEL),
+ /**
+ * Number of vector results sent to Cohere as the documents array for reranking. Capped at 100
+ * so each rerank call stays within a single Cohere search unit (1 query × ≤100 docs); see
+ * `RERANK_MODEL_PRICING` in `providers/models.ts`.
+ */
+ rerankerInputCount: z
+ .number()
+ .int('rerankerInputCount must be an integer')
+ .min(1, 'rerankerInputCount must be at least 1')
+ .max(100, 'rerankerInputCount cannot exceed 100')
+ .optional()
+ .nullable()
+ .transform((val) => val ?? undefined),
+ rerankerApiKey: z
+ .string()
+ .optional()
+ .nullable()
+ .transform((val) => val || undefined),
})
.refine(
(data) => {
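// Usage sketch (illustrative, not part of the patch): a knowledge-search request body
// that exercises the new reranker fields. Only the reranker* fields come from this hunk;
// the other field names (query, knowledgeBaseIds, topK) are assumptions about the
// surrounding schema, shown for shape only.
const searchBody = {
  query: 'refund policy for enterprise plans',
  knowledgeBaseIds: ['kb_123'],
  topK: 10,
  rerankerEnabled: true,
  // Send at most 100 vector hits to Cohere so the call stays within one search unit.
  rerankerInputCount: 50,
  // Honored only on self-hosted deployments; hosted Sim resolves the key server-side.
  rerankerApiKey: 'co-xxxxxxxx',
}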
diff --git a/apps/sim/lib/api/contracts/logs.ts b/apps/sim/lib/api/contracts/logs.ts
index b0298e349ec..6e94720f91a 100644
--- a/apps/sim/lib/api/contracts/logs.ts
+++ b/apps/sim/lib/api/contracts/logs.ts
@@ -34,10 +34,18 @@ const logFilterQuerySchema = z.object({
durationValue: z.coerce.number().optional(),
})
+export const logSortBySchema = z.enum(['date', 'duration', 'cost', 'status']).default('date')
+export const logSortOrderSchema = z.enum(['asc', 'desc']).default('desc')
+
export const listLogsQuerySchema = logFilterQuerySchema.extend({
- details: z.enum(['basic', 'full']).optional().default('basic'),
- limit: z.coerce.number().optional().default(100),
- offset: z.coerce.number().optional().default(0),
+ cursor: z.string().optional(),
+ limit: z.coerce.number().int().min(1).max(200).optional().default(100),
+ sortBy: logSortBySchema,
+ sortOrder: logSortOrderSchema,
+})
+
+export const logDetailQuerySchema = z.object({
+ workspaceId: z.string().min(1),
})
export const statsQueryParamsSchema = logFilterQuerySchema.extend({
@@ -58,55 +66,196 @@ const workflowSummarySchema = z
})
.partial()
-const fileSchema = z
+const fileSchema = z.object({
+ id: z.string(),
+ name: z.string(),
+ size: z.number(),
+ type: z.string(),
+ url: z.string(),
+ key: z.string(),
+ uploadedAt: z.string(),
+ expiresAt: z.string(),
+ storageProvider: z.enum(['s3', 'blob', 'local']).optional(),
+ bucketName: z.string().optional(),
+})
+
+const tokenBreakdownSchema = z
.object({
- id: z.string(),
- name: z.string(),
- size: z.number(),
- type: z.string(),
- url: z.string(),
- key: z.string(),
- uploadedAt: z.string(),
- expiresAt: z.string(),
- storageProvider: z.enum(['s3', 'blob', 'local']).optional(),
- bucketName: z.string().optional(),
+ total: z.number().optional(),
+ input: z.number().optional(),
+ output: z.number().optional(),
+ prompt: z.number().optional(),
+ completion: z.number().optional(),
+ })
+ .partial()
+
+const modelCostSchema = z
+ .object({
+ input: z.number().optional(),
+ output: z.number().optional(),
+ total: z.number().optional(),
+ tokens: tokenBreakdownSchema.optional(),
+ })
+ .partial()
+
+const costSummarySchema = z
+ .object({
+ total: z.number().optional(),
+ input: z.number().optional(),
+ output: z.number().optional(),
+ tokens: tokenBreakdownSchema.optional(),
+ models: z.record(z.string(), modelCostSchema).optional(),
+ pricing: z
+ .object({
+ input: z.number(),
+ output: z.number(),
+ cachedInput: z.number().optional(),
+ updatedAt: z.string(),
+ })
+ .optional(),
+ })
+ .partial()
+
+const pauseSummarySchema = z.object({
+ status: z.string().nullable(),
+ total: z.number(),
+ resumed: z.number(),
+})
+
+const blockExecutionSchema = z.object({
+ id: z.string(),
+ blockId: z.string(),
+ blockName: z.string(),
+ blockType: z.string(),
+ startedAt: z.string(),
+ endedAt: z.string(),
+ durationMs: z.number(),
+ status: z.enum(['success', 'error', 'skipped']),
+ errorMessage: z.string().optional(),
+ errorStackTrace: z.string().optional(),
+ inputData: z.unknown(),
+ outputData: z.unknown(),
+ cost: costSummarySchema.optional(),
+ metadata: z.record(z.string(), z.unknown()).optional(),
+})
+
+const toolCallSchema = z
+ .object({
+ id: z.string().optional(),
+ name: z.string().optional(),
+ arguments: z.unknown().optional(),
+ result: z.unknown().optional(),
+ error: z.string().optional(),
+ startTime: z.string().optional(),
+ endTime: z.string().optional(),
+ duration: z.number().optional(),
})
.passthrough()
-export const workflowLogSchema = z
+type TraceSpan = {
+ id: string
+ name: string
+ type: string
+ duration?: number
+ durationMs?: number
+ startTime?: string
+ endTime?: string
+ status?: string
+ blockId?: string
+ input?: unknown
+ output?: unknown
+ tokens?: number | { total?: number; input?: number; output?: number }
+ relativeStartMs?: number
+ toolCalls?: Array<Record<string, unknown>>
+ children?: TraceSpan[]
+}
+
+const traceSpanSchema: z.ZodType<TraceSpan> = z.lazy(() =>
+ z
+ .object({
+ id: z.string(),
+ name: z.string(),
+ type: z.string(),
+ duration: z.number().optional(),
+ durationMs: z.number().optional(),
+ startTime: z.string().optional(),
+ endTime: z.string().optional(),
+ status: z.string().optional(),
+ blockId: z.string().optional(),
+ input: z.unknown().optional(),
+ output: z.unknown().optional(),
+ tokens: z
+ .union([
+ z.number(),
+ z
+ .object({
+ total: z.number().optional(),
+ input: z.number().optional(),
+ output: z.number().optional(),
+ })
+ .partial(),
+ ])
+ .optional(),
+ relativeStartMs: z.number().optional(),
+ toolCalls: z.array(toolCallSchema).optional(),
+ children: z.array(traceSpanSchema).optional(),
+ })
+ .passthrough()
+)
+
+const executionDataDetailSchema = z
.object({
- id: z.string(),
- workflowId: z.string().nullable(),
- executionId: z.string().nullable().optional(),
- deploymentVersionId: z.string().nullable().optional(),
- deploymentVersion: z.number().nullable().optional(),
- deploymentVersionName: z.string().nullable().optional(),
- level: z.string(),
- status: z.string().nullable().optional(),
- duration: z.string().nullable(),
- trigger: z.string().nullable(),
- createdAt: z.string(),
- workflow: workflowSummarySchema.nullable().optional(),
- jobTitle: z.string().nullable().optional(),
- files: z.array(fileSchema).optional(),
- cost: z.unknown().optional(),
- hasPendingPause: z.boolean().nullable().optional(),
- pauseSummary: z.unknown().optional(),
- executionData: z.unknown().optional(),
+ totalDuration: z.number().nullable().optional(),
+ enhanced: z.literal(true).optional(),
+ traceSpans: z.array(traceSpanSchema).optional(),
+ blockExecutions: z.array(blockExecutionSchema).optional(),
+ finalOutput: z.unknown().optional(),
+ workflowInput: z.unknown().optional(),
+ blockInput: z.record(z.string(), z.unknown()).optional(),
+ trigger: z.unknown().optional(),
})
.passthrough()
-export type WorkflowLogData = z.output<typeof workflowLogSchema>
+export const workflowLogSummarySchema = z.object({
+ id: z.string(),
+ workflowId: z.string().nullable(),
+ executionId: z.string().nullable(),
+ deploymentVersionId: z.string().nullable(),
+ deploymentVersion: z.number().nullable(),
+ deploymentVersionName: z.string().nullable(),
+ level: z.string(),
+ status: z.string().nullable(),
+ duration: z.string().nullable(),
+ trigger: z.string().nullable(),
+ createdAt: z.string(),
+ workflow: workflowSummarySchema.nullable(),
+ jobTitle: z.string().nullable(),
+ cost: costSummarySchema.nullable(),
+ pauseSummary: pauseSummarySchema,
+ hasPendingPause: z.boolean(),
+})
-export const logsResponseSchema = z.object({
- data: z.array(workflowLogSchema),
- total: z.number(),
- page: z.number(),
- pageSize: z.number(),
- totalPages: z.number(),
+export const workflowLogDetailSchema = workflowLogSummarySchema.extend({
+ executionData: executionDataDetailSchema,
+ files: z.array(fileSchema).nullable(),
})
-export type LogsResponse = z.output<typeof logsResponseSchema>
+export type WorkflowLogSummary = z.output<typeof workflowLogSummarySchema>
+export type WorkflowLogDetail = z.output<typeof workflowLogDetailSchema>
+
+/**
+ * A row that may be either a list-view summary or a fully loaded detail. Used by
+ * UI surfaces that render the same log before and after its detail query resolves.
+ */
+export type WorkflowLogRow = WorkflowLogSummary &
+ Partial<Pick<WorkflowLogDetail, 'executionData' | 'files'>>
+
+export const listLogsResponseSchema = z.object({
+ data: z.array(workflowLogSummarySchema),
+ nextCursor: z.string().nullable(),
+})
+
+export type ListLogsResponse = z.output<typeof listLogsResponseSchema>
export const segmentStatsSchema = z.object({
timestamp: z.string(),
@@ -179,7 +328,7 @@ export const listLogsContract = defineRouteContract({
query: listLogsQuerySchema,
response: {
mode: 'json',
- schema: logsResponseSchema,
+ schema: listLogsResponseSchema,
},
})
@@ -187,10 +336,24 @@ export const getLogDetailContract = defineRouteContract({
method: 'GET',
path: '/api/logs/[id]',
params: logIdParamsSchema,
+ query: logDetailQuerySchema,
+ response: {
+ mode: 'json',
+ schema: z.object({
+ data: workflowLogDetailSchema,
+ }),
+ },
+})
+
+export const getLogByExecutionIdContract = defineRouteContract({
+ method: 'GET',
+ path: '/api/logs/by-execution/[executionId]',
+ params: executionIdParamsSchema,
+ query: logDetailQuerySchema,
response: {
mode: 'json',
schema: z.object({
- data: workflowLogSchema,
+ data: workflowLogDetailSchema,
}),
},
})
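// Usage sketch (illustrative, not part of the patch): draining the cursor-paginated
// logs list. The { data, nextCursor } envelope and the 1..200 limit bound come from the
// schemas above; the URL and the workspaceId query parameter are assumptions about how
// the route is called.
import { listLogsResponseSchema, type WorkflowLogSummary } from '@/lib/api/contracts/logs'

async function fetchAllLogs(workspaceId: string): Promise<WorkflowLogSummary[]> {
  const all: WorkflowLogSummary[] = []
  let cursor: string | undefined
  do {
    const params = new URLSearchParams({ workspaceId, limit: '200' })
    if (cursor) params.set('cursor', cursor)
    const res = await fetch(`/api/logs?${params.toString()}`)
    const page = listLogsResponseSchema.parse(await res.json())
    all.push(...page.data)
    cursor = page.nextCursor ?? undefined
  } while (cursor)
  return all
}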
diff --git a/apps/sim/lib/auth/anonymous.ts b/apps/sim/lib/auth/anonymous.ts
index 839e65487ec..7504ee7fd62 100644
--- a/apps/sim/lib/auth/anonymous.ts
+++ b/apps/sim/lib/auth/anonymous.ts
@@ -103,7 +103,3 @@ export function createAnonymousSession(): AnonymousSession {
},
}
}
-
-export function createAnonymousGetSessionResponse(): { data: AnonymousSession } {
- return { data: createAnonymousSession() }
-}
diff --git a/apps/sim/lib/copilot/chat/attachment-preview.ts b/apps/sim/lib/copilot/chat/attachment-preview.ts
new file mode 100644
index 00000000000..f4a65aa0ce0
--- /dev/null
+++ b/apps/sim/lib/copilot/chat/attachment-preview.ts
@@ -0,0 +1,9 @@
+export function getMothershipAttachmentPreviewUrl(file: {
+ key: string
+ media_type: string
+}): string | undefined {
+ if (!file.media_type.startsWith('image/') && !file.media_type.startsWith('video/')) {
+ return undefined
+ }
+ return `/api/files/serve/${encodeURIComponent(file.key)}?context=mothership`
+}
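// Usage sketch (illustrative, not part of the patch): image and video attachments get a
// same-origin preview URL through the mothership file-serving route; every other media
// type gets none.
import { getMothershipAttachmentPreviewUrl } from '@/lib/copilot/chat/attachment-preview'

getMothershipAttachmentPreviewUrl({ key: 'uploads/demo.mp4', media_type: 'video/mp4' })
// -> '/api/files/serve/uploads%2Fdemo.mp4?context=mothership'
getMothershipAttachmentPreviewUrl({ key: 'uploads/report.pdf', media_type: 'application/pdf' })
// -> undefined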
diff --git a/apps/sim/lib/copilot/chat/display-message.ts b/apps/sim/lib/copilot/chat/display-message.ts
index a254e5e3e94..51622070009 100644
--- a/apps/sim/lib/copilot/chat/display-message.ts
+++ b/apps/sim/lib/copilot/chat/display-message.ts
@@ -15,6 +15,7 @@ import {
type ToolCallInfo,
ToolCallStatus,
} from '@/app/workspace/[workspaceId]/home/types'
+import { getMothershipAttachmentPreviewUrl } from './attachment-preview'
import type { PersistedContentBlock, PersistedMessage } from './persisted-message'
import { withBlockTiming } from './persisted-message'
@@ -91,9 +92,7 @@ function toDisplayAttachment(f: PersistedMessage['fileAttachments']): ChatMessag
filename: a.filename,
media_type: a.media_type,
size: a.size,
- previewUrl: a.media_type.startsWith('image/')
- ? `/api/files/serve/${encodeURIComponent(a.key)}?context=mothership`
- : undefined,
+ previewUrl: getMothershipAttachmentPreviewUrl(a),
}))
}
diff --git a/apps/sim/lib/copilot/chat/persisted-message.test.ts b/apps/sim/lib/copilot/chat/persisted-message.test.ts
index 377a8c2b0b5..de701394235 100644
--- a/apps/sim/lib/copilot/chat/persisted-message.test.ts
+++ b/apps/sim/lib/copilot/chat/persisted-message.test.ts
@@ -75,6 +75,102 @@ describe('persisted-message', () => {
expect(persisted.requestId).toBe('sim-request-1')
})
+ it('redacts sim_key credential tags so persisted assistant messages never re-expose the key', () => {
+ const live = `Here is your key: <credential>${JSON.stringify({ value: 'sk-sim-secret-123', type: 'sim_key' })}</credential> save it.`
+ const result: OrchestratorResult = {
+ success: true,
+ content: live,
+ requestId: 'req-1',
+ contentBlocks: [{ type: 'text', content: live }],
+ toolCalls: [],
+ }
+
+ const persisted = buildPersistedAssistantMessage(result)
+
+ expect(persisted.content).not.toContain('sk-sim-secret-123')
+ expect(persisted.content).toContain('"redacted":true')
+ const textBlock = persisted.contentBlocks?.find((b) => b.type === 'text')
+ expect(textBlock?.content).not.toContain('sk-sim-secret-123')
+ expect(textBlock?.content).toContain('"redacted":true')
+ })
+
+ it('redacts sim_key credential tags split across streamed text chunks', () => {
+ const chunks = [
+ 'Here\'s your key:\n\n<credential>{"value": "sk-',
+ 'sim-secret',
+ '-12345',
+ '", "type":',
+ ' "sim_key"}',
+ '\n\nDone.',
+ ]
+ const result: OrchestratorResult = {
+ success: true,
+ content: chunks.join(''),
+ requestId: 'req-1',
+ contentBlocks: chunks.map((c) => ({ type: 'text', content: c })),
+ toolCalls: [],
+ }
+
+ const persisted = buildPersistedAssistantMessage(result)
+
+ expect(persisted.content).not.toContain('sk-sim-secret-12345')
+ expect(persisted.contentBlocks).toBeDefined()
+ const joined = (persisted.contentBlocks ?? []).map((b) => b.content ?? '').join('')
+ expect(joined).not.toContain('sk-sim-secret-12345')
+ expect(joined).toContain('"redacted":true')
+ })
+
+ it('redacts the api key from a persisted generate_api_key tool result output', () => {
+ const result: OrchestratorResult = {
+ success: true,
+ content: '',
+ requestId: 'req-1',
+ contentBlocks: [
+ {
+ type: 'tool_call',
+ toolCall: {
+ id: 'tool-1',
+ name: 'generate_api_key',
+ status: 'success',
+ params: { name: 'workspace-key' },
+ result: {
+ success: true,
+ output: {
+ id: 'k1',
+ name: 'workspace-key',
+ key: 'sk-sim-tool-output-secret',
+ },
+ },
+ },
+ },
+ ],
+ toolCalls: [],
+ }
+
+ const persisted = buildPersistedAssistantMessage(result)
+ const toolBlock = persisted.contentBlocks?.find((b) => b.toolCall?.name === 'generate_api_key')
+ const output = toolBlock?.toolCall?.result?.output as Record<string, unknown> | undefined
+
+ expect(output?.key).toBe('[REDACTED]')
+ expect(output?.redacted).toBe(true)
+ expect(JSON.stringify(persisted)).not.toContain('sk-sim-tool-output-secret')
+ })
+
+ it('leaves non-sim_key credential tags untouched', () => {
+ const live = `<credential>${JSON.stringify({ value: 'https://oauth.example/connect', type: 'link', provider: 'slack' })}</credential>`
+ const result: OrchestratorResult = {
+ success: true,
+ content: live,
+ requestId: 'req-1',
+ contentBlocks: [{ type: 'text', content: live }],
+ toolCalls: [],
+ }
+
+ const persisted = buildPersistedAssistantMessage(result)
+
+ expect(persisted.content).toContain('https://oauth.example/connect')
+ })
+
it('normalizes legacy tool_call and top-level toolCalls shapes', () => {
const normalized = normalizeMessage({
id: 'msg-1',
diff --git a/apps/sim/lib/copilot/chat/persisted-message.ts b/apps/sim/lib/copilot/chat/persisted-message.ts
index 3c34fb4901f..e249ecef43f 100644
--- a/apps/sim/lib/copilot/chat/persisted-message.ts
+++ b/apps/sim/lib/copilot/chat/persisted-message.ts
@@ -1,4 +1,9 @@
import { generateId } from '@sim/utils/id'
+import {
+ mergeAndRedactPersistedBlocks,
+ redactSensitiveContent,
+ redactToolCallResult,
+} from '@/lib/copilot/chat/sim-key-redaction'
import {
MothershipStreamV1CompletionStatus,
MothershipStreamV1EventType,
@@ -164,11 +169,13 @@ function mapContentBlockBody(block: ContentBlock): PersistedContentBlock {
state === 'pending' ||
state === 'executing'
+ const redactedResult = redactToolCallResult(block.toolCall.name, block.toolCall.result)
+
const toolCall: PersistedToolCall = {
id: block.toolCall.id,
name: block.toolCall.name,
state,
- ...(isSubagentTool && isNonTerminal ? {} : { result: block.toolCall.result }),
+ ...(isSubagentTool && isNonTerminal ? {} : { result: redactedResult }),
...(isSubagentTool && isNonTerminal
? {}
: block.toolCall.params
@@ -202,7 +209,7 @@ export function buildPersistedAssistantMessage(
const message: PersistedMessage = {
id: generateId(),
role: 'assistant',
- content: result.content,
+ content: redactSensitiveContent(result.content),
timestamp: new Date().toISOString(),
}
@@ -211,7 +218,7 @@ export function buildPersistedAssistantMessage(
}
if (result.contentBlocks.length > 0) {
- message.contentBlocks = result.contentBlocks.map(mapContentBlock)
+ message.contentBlocks = mergeAndRedactPersistedBlocks(result.contentBlocks.map(mapContentBlock))
}
return message
diff --git a/apps/sim/lib/copilot/chat/post.ts b/apps/sim/lib/copilot/chat/post.ts
index 21d94e56bb5..a745f209c9e 100644
--- a/apps/sim/lib/copilot/chat/post.ts
+++ b/apps/sim/lib/copilot/chat/post.ts
@@ -329,6 +329,7 @@ async function persistUserMessage(params: {
workspaceId,
chatId,
type: 'started',
+ streamId: userMessageId,
})
}
@@ -430,6 +431,7 @@ function buildOnComplete(params: {
workspaceId,
chatId,
type: 'completed',
+ streamId: userMessageId,
})
}
} catch (error) {
@@ -461,6 +463,7 @@ function buildOnError(params: {
workspaceId,
chatId,
type: 'completed',
+ streamId: userMessageId,
})
}
} catch (error) {
diff --git a/apps/sim/lib/copilot/chat/sim-key-redaction.test.ts b/apps/sim/lib/copilot/chat/sim-key-redaction.test.ts
new file mode 100644
index 00000000000..70fe637e6d2
--- /dev/null
+++ b/apps/sim/lib/copilot/chat/sim-key-redaction.test.ts
@@ -0,0 +1,154 @@
+/**
+ * @vitest-environment node
+ */
+
+import { describe, expect, it } from 'vitest'
+import type { ChatMessage } from '@/app/workspace/[workspaceId]/home/types'
+import {
+ captureRevealedSimKeys,
+ extractRevealedSimKeys,
+ restoreRevealedSimKeysForMessage,
+} from './sim-key-redaction'
+
+const credential = (value: string) =>
+ `<credential>${JSON.stringify({ value, type: 'sim_key' })}</credential>`
+const redacted = `<credential>${JSON.stringify({ type: 'sim_key', redacted: true })}</credential>`
+
+describe('sim-key-redaction', () => {
+ describe('extractRevealedSimKeys', () => {
+ it('returns sim_key values in document order', () => {
+ const text = `first ${credential('sk-sim-A')} mid ${credential('sk-sim-B')}`
+ expect(extractRevealedSimKeys(text)).toEqual(['sk-sim-A', 'sk-sim-B'])
+ })
+
+ it('skips redacted entries and non-sim_key tags', () => {
+ const link = `<credential>${JSON.stringify({ value: 'https://x', type: 'link', provider: 'slack' })}</credential>`
+ const text = `${link} ${credential('sk-sim-A')} ${redacted}`
+ expect(extractRevealedSimKeys(text)).toEqual(['sk-sim-A'])
+ })
+ })
+
+ describe('captureRevealedSimKeys', () => {
+ it('records new keys under each provided key', () => {
+ const cache = new Map()
+ captureRevealedSimKeys(cache, ['msg-1', 'req-1'], credential('sk-sim-A'))
+ expect(cache.get('msg-1')).toEqual(['sk-sim-A'])
+ expect(cache.get('req-1')).toEqual(['sk-sim-A'])
+ })
+
+ it('extends but never shrinks the captured list across calls', () => {
+ const cache = new Map()
+ captureRevealedSimKeys(
+ cache,
+ ['msg-1'],
+ `${credential('sk-sim-A')} ${credential('sk-sim-B')}`
+ )
+ captureRevealedSimKeys(cache, ['msg-1'], credential('sk-sim-A'))
+ expect(cache.get('msg-1')).toEqual(['sk-sim-A', 'sk-sim-B'])
+ })
+
+ it('skips undefined keys without throwing', () => {
+ const cache = new Map()
+ captureRevealedSimKeys(cache, ['msg-1', undefined], credential('sk-sim-A'))
+ expect(cache.get('msg-1')).toEqual(['sk-sim-A'])
+ expect(cache.size).toBe(1)
+ })
+
+ it('ignores content with no credential tag', () => {
+ const cache = new Map()
+ captureRevealedSimKeys(cache, ['msg-1'], 'plain assistant text')
+ expect(cache.has('msg-1')).toBe(false)
+ })
+ })
+
+ describe('restoreRevealedSimKeysForMessage', () => {
+ it('substitutes the live key back into a redacted message', () => {
+ const cache = new Map([['msg-1', ['sk-sim-A']]])
+ const msg: ChatMessage = {
+ id: 'msg-1',
+ role: 'assistant',
+ content: `Here is your key: ${redacted} save it.`,
+ contentBlocks: [{ type: 'text', content: `Here is your key: ${redacted} save it.` }],
+ }
+ const restored = restoreRevealedSimKeysForMessage(msg, cache)
+ expect(restored.content).toContain('"sk-sim-A"')
+ expect(restored.content).not.toContain('"redacted":true')
+ expect(restored.contentBlocks?.[0].content).toContain('"sk-sim-A"')
+ })
+
+ it('substitutes multiple keys in stream order', () => {
+ const cache = new Map([['msg-1', ['sk-sim-A', 'sk-sim-B']]])
+ const msg: ChatMessage = {
+ id: 'msg-1',
+ role: 'assistant',
+ content: `first ${redacted} second ${redacted}`,
+ }
+ const restored = restoreRevealedSimKeysForMessage(msg, cache)
+ expect(restored.content).toBe(
+ `first ${credential('sk-sim-A')} second ${credential('sk-sim-B')}`
+ )
+ })
+
+ it('leaves a redacted tag in place if no live value is captured for that slot', () => {
+ const cache = new Map([['msg-1', ['sk-sim-A']]])
+ const msg: ChatMessage = {
+ id: 'msg-1',
+ role: 'assistant',
+ content: `first ${redacted} second ${redacted}`,
+ }
+ const restored = restoreRevealedSimKeysForMessage(msg, cache)
+ expect(restored.content).toBe(`first ${credential('sk-sim-A')} second ${redacted}`)
+ })
+
+ it('returns the same message reference when nothing to restore', () => {
+ const cache = new Map()
+ const msg: ChatMessage = {
+ id: 'msg-1',
+ role: 'assistant',
+ content: 'no credentials here',
+ }
+ expect(restoreRevealedSimKeysForMessage(msg, cache)).toBe(msg)
+ })
+
+ it('does nothing for user messages', () => {
+ const cache = new Map([['msg-1', ['sk-sim-A']]])
+ const msg: ChatMessage = {
+ id: 'msg-1',
+ role: 'user',
+ content: redacted,
+ }
+ expect(restoreRevealedSimKeysForMessage(msg, cache)).toBe(msg)
+ })
+
+ it('threads the cursor across separate content blocks so each block gets its matching key', () => {
+ const cache = new Map([['msg-1', ['sk-sim-A', 'sk-sim-B']]])
+ const msg: ChatMessage = {
+ id: 'msg-1',
+ role: 'assistant',
+ content: `first ${redacted} (tool ran) second ${redacted}`,
+ contentBlocks: [
+ { type: 'text', content: `first ${redacted}` },
+ { type: 'tool_call', content: '' },
+ { type: 'text', content: `second ${redacted}` },
+ ],
+ }
+ const restored = restoreRevealedSimKeysForMessage(msg, cache)
+ expect(restored.contentBlocks?.[0].content).toContain('"sk-sim-A"')
+ expect(restored.contentBlocks?.[0].content).not.toContain('"sk-sim-B"')
+ expect(restored.contentBlocks?.[2].content).toContain('"sk-sim-B"')
+ expect(restored.contentBlocks?.[2].content).not.toContain('"sk-sim-A"')
+ })
+
+ it('isolates revealed values by message id (multiple keys across messages)', () => {
+ const cache = new Map([
+ ['msg-1', ['sk-sim-A']],
+ ['msg-2', ['sk-sim-B']],
+ ])
+ const msg1: ChatMessage = { id: 'msg-1', role: 'assistant', content: redacted }
+ const msg2: ChatMessage = { id: 'msg-2', role: 'assistant', content: redacted }
+ expect(restoreRevealedSimKeysForMessage(msg1, cache).content).toContain('sk-sim-A')
+ expect(restoreRevealedSimKeysForMessage(msg2, cache).content).toContain('sk-sim-B')
+ expect(restoreRevealedSimKeysForMessage(msg1, cache).content).not.toContain('sk-sim-B')
+ })
+ })
+})
diff --git a/apps/sim/lib/copilot/chat/sim-key-redaction.ts b/apps/sim/lib/copilot/chat/sim-key-redaction.ts
new file mode 100644
index 00000000000..d5aba0f302d
--- /dev/null
+++ b/apps/sim/lib/copilot/chat/sim-key-redaction.ts
@@ -0,0 +1,263 @@
+import type { PersistedContentBlock } from '@/lib/copilot/chat/persisted-message'
+import {
+ MothershipStreamV1EventType,
+ MothershipStreamV1TextChannel,
+} from '@/lib/copilot/generated/mothership-stream-v1'
+import { GenerateApiKey } from '@/lib/copilot/generated/tool-catalog-v1'
+import { REDACTED_MARKER } from '@/lib/core/security/redaction'
+import type { ChatMessage, ContentBlock } from '@/app/workspace/[workspaceId]/home/types'
+
+/**
+ * Two-sided handling of `sim_key` API keys in the Mothership chat:
+ *
+ * - **Write side** (server, runs in `buildPersistedAssistantMessage`):
+ * strip every revealed `` value before the row
+ * hits Postgres. Reloading a chat days later — or pulling the row from the
+ * DB directly — never re-exposes the key.
+ *
+ * - **Read side** (client, runs in `useChat`'s message selector): an in-memory
+ * page-session cache captures revealed values during the live SSE stream.
+ * When the post-stream refetch returns the redacted persisted message, the
+ * selector re-injects the captured values so the user can still copy the
+ * key they just generated. Cache is dropped on page unload.
+ */
+
+const CREDENTIAL_TAG_PATTERN = /<credential>([\s\S]*?)<\/credential>/g
+const REDACTED_TAG_PATTERN = /<credential>[^<]*"redacted"\s*:\s*true[^<]*<\/credential>/
+const SIM_KEY_TYPE = 'sim_key'
+const REDACTED_SIM_KEY_TAG = `<credential>${JSON.stringify({
+ type: SIM_KEY_TYPE,
+ redacted: true,
+})}</credential>`
+
+interface CredentialTagBody {
+ type?: unknown
+ value?: unknown
+ redacted?: unknown
+}
+
+function parseCredentialBody(body: string): CredentialTagBody | null {
+ try {
+ return JSON.parse(body) as CredentialTagBody
+ } catch {
+ return null
+ }
+}
+
+function hasRedactedSimKeyTag(content: string | undefined): boolean {
+ return typeof content === 'string' && REDACTED_TAG_PATTERN.test(content)
+}
+
+// Write side ---------------------------------------------------------------
+
+/**
+ * Replace every revealed sim_key `<credential>` tag in `content` with a
+ * placeholder marked `redacted: true`. Other credential types (e.g. OAuth
+ * `link`) and malformed bodies pass through unchanged.
+ */
+export function redactSensitiveContent<T extends string | undefined>(content: T): T {
+ if (typeof content !== 'string' || !content.includes('<credential>')) return content
+ return content.replace(CREDENTIAL_TAG_PATTERN, (match, body: string) => {
+ const parsed = parseCredentialBody(body)
+ return parsed?.type === SIM_KEY_TYPE ? REDACTED_SIM_KEY_TAG : match
+ }) as T
+}
+
+/**
+ * Replace the raw `key` field in a `generate_api_key` tool result with the
+ * shared redaction marker. The persisted tool result still records the
+ * call's outcome and metadata; only the secret is stripped.
+ */
+export function redactToolCallResult(
+ toolName: string | undefined,
+ result: { success: boolean; output?: unknown; error?: string } | undefined
+): { success: boolean; output?: unknown; error?: string } | undefined {
+ if (!result || toolName !== GenerateApiKey.id) return result
+ const output = result.output
+ if (!output || typeof output !== 'object') return result
+ const record = output as Record<string, unknown>
+ if (typeof record.key !== 'string') return result
+ return {
+ ...result,
+ output: { ...record, key: REDACTED_MARKER, redacted: true },
+ }
+}
+
+function isMergeableAssistantTextBlock(block: PersistedContentBlock): boolean {
+ return (
+ block.type === MothershipStreamV1EventType.text &&
+ block.channel === MothershipStreamV1TextChannel.assistant &&
+ block.toolCall === undefined
+ )
+}
+
+/**
+ * Streaming produces one assistant-text block per token chunk, which means a
+ * `...` tag can straddle dozens of blocks. Per-block
+ * redaction can't see across that boundary and would persist the secret. So
+ * coalesce consecutive same-lane assistant-text blocks into a single block,
+ * then redact the merged content.
+ *
+ * Block timestamps for assistant text aren't user-visible (only `thinking`
+ * blocks drive the "Thought for Ns" chip), so collapsing the run is safe.
+ */
+export function mergeAndRedactPersistedBlocks(
+ blocks: PersistedContentBlock[]
+): PersistedContentBlock[] {
+ const out: PersistedContentBlock[] = []
+ let runStart = -1
+ let runLane: PersistedContentBlock['lane']
+
+ const flushRun = (endExclusive: number) => {
+ if (runStart < 0) return
+ const run = blocks.slice(runStart, endExclusive)
+ runStart = -1
+ if (run.length === 0) return
+ if (run.length === 1) {
+ const single = run[0]
+ out.push({ ...single, content: redactSensitiveContent(single.content) })
+ return
+ }
+ const head = run[0]
+ const tail = run[run.length - 1]
+ out.push({
+ ...head,
+ content: redactSensitiveContent(run.map((b) => b.content ?? '').join('')),
+ ...(tail.endedAt !== undefined ? { endedAt: tail.endedAt } : {}),
+ })
+ }
+
+ for (let i = 0; i < blocks.length; i++) {
+ const block = blocks[i]
+ const sameRun = runStart >= 0 && isMergeableAssistantTextBlock(block) && runLane === block.lane
+ if (sameRun) continue
+ flushRun(i)
+ if (isMergeableAssistantTextBlock(block)) {
+ runStart = i
+ runLane = block.lane
+ } else {
+ out.push(block)
+ }
+ }
+ flushRun(blocks.length)
+
+ return out
+}
+
+// Read side ----------------------------------------------------------------
+
+/**
+ * Page-session cache of `sim_key` credential values revealed during the live
+ * SSE stream, keyed by either the synthetic live-assistant id (used while
+ * streaming) or the persisted message's `requestId` (used after refetch).
+ * Lives in a `useRef`; never persisted; dropped on unload.
+ */
+export type RevealedSimKeysByMessage = Map<string, string[]>
+
+/**
+ * Scan an assembled assistant message for sim_key `<credential>` tags
+ * and return their values in stream order, skipping anything already redacted.
+ */
+export function extractRevealedSimKeys(content: string): string[] {
+ if (!content || !content.includes('')) return []
+ const values: string[] = []
+ for (const match of content.matchAll(CREDENTIAL_TAG_PATTERN)) {
+ const parsed = parseCredentialBody(match[1])
+ if (parsed?.type === SIM_KEY_TYPE && !parsed.redacted && typeof parsed.value === 'string') {
+ values.push(parsed.value)
+ }
+ }
+ return values
+}
+
+/**
+ * Extend the cache entries for the given keys with any newly-revealed values.
+ * Every key in `keys` receives the same array — passing both the live-stream
+ * id and the persisted `requestId` lets the post-finalize refetch hit the
+ * cache after the message is renamed to its real UUID. The longest captured
+ * list wins so a rerun that surfaces fewer values can't shrink the entry.
+ */
+export function captureRevealedSimKeys(
+ cache: RevealedSimKeysByMessage,
+ keys: ReadonlyArray<string | undefined>,
+ content: string
+): void {
+ if (!content.includes('')) return
+ const next = extractRevealedSimKeys(content)
+ if (next.length === 0) return
+ for (const key of keys) {
+ if (!key) continue
+ const existing = cache.get(key)
+ if (!existing || next.length > existing.length) cache.set(key, next)
+ }
+}
+
+function restoreInString(
+ content: string,
+ revealedValues: string[],
+ startCursor: number
+): {
+ next: string
+ changed: boolean
+ cursor: number
+} {
+ if (!content.includes('<credential>') || revealedValues.length === 0) {
+ return { next: content, changed: false, cursor: startCursor }
+ }
+ let cursor = startCursor
+ let changed = false
+ const next = content.replace(CREDENTIAL_TAG_PATTERN, (match, body: string) => {
+ const parsed = parseCredentialBody(body)
+ if (parsed?.type === SIM_KEY_TYPE && parsed.redacted === true) {
+ const value = revealedValues[cursor]
+ cursor += 1
+ if (typeof value === 'string') {
+ changed = true
+ return `<credential>${JSON.stringify({ value, type: SIM_KEY_TYPE })}</credential>`
+ }
+ }
+ return match
+ })
+ return { next, changed, cursor }
+}
+
+/**
+ * Replace redacted `sim_key` tags in a single message with the live values
+ * captured for that message. Returns the original message reference unchanged
+ * when there's nothing to substitute, so memoized children keep their identity.
+ */
+export function restoreRevealedSimKeysForMessage(
+ message: ChatMessage,
+ cache: RevealedSimKeysByMessage
+): ChatMessage {
+ if (message.role !== 'assistant') return message
+ const revealed =
+ cache.get(message.id) ?? (message.requestId ? cache.get(message.requestId) : undefined)
+ if (!revealed || revealed.length === 0) return message
+ if (
+ !hasRedactedSimKeyTag(message.content) &&
+ !message.contentBlocks?.some((b) => hasRedactedSimKeyTag(b.content))
+ ) {
+ return message
+ }
+
+ const restoredContent = restoreInString(message.content, revealed, 0)
+ let blocksChanged = false
+ let blockCursor = 0
+ const nextBlocks: ContentBlock[] | undefined = message.contentBlocks?.map((block) => {
+ if (!hasRedactedSimKeyTag(block.content)) return block
+ const restored = restoreInString(block.content as string, revealed, blockCursor)
+ blockCursor = restored.cursor
+ if (!restored.changed) return block
+ blocksChanged = true
+ return { ...block, content: restored.next }
+ })
+
+ if (!restoredContent.changed && !blocksChanged) return message
+
+ return {
+ ...message,
+ content: restoredContent.next,
+ ...(nextBlocks ? { contentBlocks: nextBlocks } : {}),
+ }
+}
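// Round-trip sketch (illustrative, not part of the patch): what the write side persists
// and what the read side shows the user again within the same page session. Message
// fields are trimmed to the ones this module reads; the full ChatMessage shape lives in
// the home types.
import {
  captureRevealedSimKeys,
  redactSensitiveContent,
  restoreRevealedSimKeysForMessage,
  type RevealedSimKeysByMessage,
} from '@/lib/copilot/chat/sim-key-redaction'
import type { ChatMessage } from '@/app/workspace/[workspaceId]/home/types'

const streamed =
  'Your new key: <credential>{"value":"sk-sim-demo","type":"sim_key"}</credential>'

// Write side (server): this is what lands in Postgres.
const persisted = redactSensitiveContent(streamed)
// -> 'Your new key: <credential>{"type":"sim_key","redacted":true}</credential>'

// Read side (client): capture during the live SSE stream, then restore once the
// post-stream refetch swaps in the redacted persisted message.
const cache: RevealedSimKeysByMessage = new Map()
captureRevealedSimKeys(cache, ['live-assistant:stream-1', 'req-1'], streamed)

const refetched = { id: 'req-1', role: 'assistant', content: persisted } as ChatMessage
const restored = restoreRevealedSimKeysForMessage(refetched, cache)
// restored.content contains "sk-sim-demo" again, but only for this page session.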
diff --git a/apps/sim/lib/copilot/resources/extraction.ts b/apps/sim/lib/copilot/resources/extraction.ts
index 29ca644a21a..6cba5a3bee0 100644
--- a/apps/sim/lib/copilot/resources/extraction.ts
+++ b/apps/sim/lib/copilot/resources/extraction.ts
@@ -7,7 +7,6 @@ import {
FunctionExecute,
GenerateImage,
GenerateVisualization,
- GetWorkflowLogs,
Knowledge,
KnowledgeBase,
UserTable,
@@ -30,7 +29,6 @@ const RESOURCE_TOOL_NAMES: Set<string> = new Set([
Knowledge.id,
GenerateVisualization.id,
GenerateImage.id,
- GetWorkflowLogs.id,
])
export function isResourceToolName(toolName: string): boolean {
@@ -214,19 +212,6 @@ export function extractResourcesFromToolResult(
return resources
}
- case GetWorkflowLogs.id: {
- const entries = Array.isArray(output) ? output : Array.isArray(result.data) ? result.data : []
- const resources: ChatResource[] = []
- for (const entry of entries) {
- const rec = asRecord(entry)
- const logId = rec.id as string | undefined
- if (logId) {
- resources.push({ type: 'log', id: logId, title: 'Log' })
- }
- }
- return resources
- }
-
default:
return []
}
diff --git a/apps/sim/lib/copilot/tasks.ts b/apps/sim/lib/copilot/tasks.ts
index 5828a711cb4..db6594ebf28 100644
--- a/apps/sim/lib/copilot/tasks.ts
+++ b/apps/sim/lib/copilot/tasks.ts
@@ -13,6 +13,7 @@ interface TaskStatusEvent {
workspaceId: string
chatId: string
type: 'started' | 'completed' | 'created' | 'deleted' | 'renamed'
+ streamId?: string
}
const channel =
diff --git a/apps/sim/lib/core/config/env.ts b/apps/sim/lib/core/config/env.ts
index 969324591b0..14bf33ce5d4 100644
--- a/apps/sim/lib/core/config/env.ts
+++ b/apps/sim/lib/core/config/env.ts
@@ -430,6 +430,7 @@ export const env = createEnv({
NEXT_PUBLIC_E2B_ENABLED: z.string().optional(),
NEXT_PUBLIC_BEDROCK_DEFAULT_CREDENTIALS: z.string().optional(), // Hide Bedrock credential fields when deployment uses AWS default credential chain (IAM roles, instance profiles, ECS task roles, IRSA)
NEXT_PUBLIC_AZURE_CONFIGURED: z.string().optional(), // Hide Azure credential fields when endpoint/key/version are pre-configured server-side
+ NEXT_PUBLIC_COHERE_CONFIGURED: z.string().optional(), // Hide Cohere API key field on Knowledge block when COHERE_API_KEY is pre-configured server-side
NEXT_PUBLIC_COPILOT_TRAINING_ENABLED: z.string().optional(),
NEXT_PUBLIC_ENABLE_PLAYGROUND: z.string().optional(), // Enable component playground at /playground
NEXT_PUBLIC_DOCUMENTATION_URL: z.string().url().optional(), // Custom documentation URL
@@ -496,6 +497,7 @@ export const env = createEnv({
NEXT_PUBLIC_E2B_ENABLED: process.env.NEXT_PUBLIC_E2B_ENABLED,
NEXT_PUBLIC_BEDROCK_DEFAULT_CREDENTIALS: process.env.NEXT_PUBLIC_BEDROCK_DEFAULT_CREDENTIALS,
NEXT_PUBLIC_AZURE_CONFIGURED: process.env.NEXT_PUBLIC_AZURE_CONFIGURED,
+ NEXT_PUBLIC_COHERE_CONFIGURED: process.env.NEXT_PUBLIC_COHERE_CONFIGURED,
NEXT_PUBLIC_COPILOT_TRAINING_ENABLED: process.env.NEXT_PUBLIC_COPILOT_TRAINING_ENABLED,
NEXT_PUBLIC_ENABLE_PLAYGROUND: process.env.NEXT_PUBLIC_ENABLE_PLAYGROUND,
NEXT_PUBLIC_POSTHOG_ENABLED: process.env.NEXT_PUBLIC_POSTHOG_ENABLED,
diff --git a/apps/sim/lib/core/config/feature-flags.ts b/apps/sim/lib/core/config/feature-flags.ts
index c593c2b3eda..3a69af74fd1 100644
--- a/apps/sim/lib/core/config/feature-flags.ts
+++ b/apps/sim/lib/core/config/feature-flags.ts
@@ -156,6 +156,14 @@ export const isOllamaConfigured = Boolean(env.OLLAMA_URL)
*/
export const isAzureConfigured = isTruthy(getEnv('NEXT_PUBLIC_AZURE_CONFIGURED'))
+/**
+ * Whether a Cohere API key is pre-configured server-side for the Knowledge block reranker
+ * (`COHERE_API_KEY` or `COHERE_API_KEY_1/2/3`). When true, the Cohere API Key field is hidden
+ * in the Knowledge block UI.
+ * Set NEXT_PUBLIC_COHERE_CONFIGURED=true in self-hosted deployments that ship a Cohere key.
+ */
+export const isCohereConfigured = isTruthy(getEnv('NEXT_PUBLIC_COHERE_CONFIGURED'))
+
/**
* Are invitations disabled globally
* When true, workspace invitations are disabled for all users
diff --git a/apps/sim/lib/knowledge/reranker.ts b/apps/sim/lib/knowledge/reranker.ts
index 54b2ae02c91..b1bebc11aa8 100644
--- a/apps/sim/lib/knowledge/reranker.ts
+++ b/apps/sim/lib/knowledge/reranker.ts
@@ -2,6 +2,7 @@ import { createLogger } from '@sim/logger'
import { getBYOKKey } from '@/lib/api-key/byok'
import { getRotatingApiKey } from '@/lib/core/config/api-keys'
import { env } from '@/lib/core/config/env'
+import { isHosted } from '@/lib/core/config/feature-flags'
import { isRetryableError, retryWithExponentialBackoff } from '@/lib/knowledge/documents/utils'
import {
DEFAULT_RERANKER_MODEL,
@@ -56,8 +57,18 @@ class RerankAPIError extends Error {
}
async function resolveCohereKey(
- workspaceId?: string | null
+ workspaceId?: string | null,
+ userApiKey?: string
): Promise<{ apiKey: string; isBYOK: boolean }> {
+ /**
+ * Mirrors the agent block hosted-key pattern (`injectHostedKeyIfNeeded`):
+ * on self-hosted the user-supplied key from the block field flows through
+ * unchanged; on hosted Sim we always source the key from workspace BYOK or
+ * platform env, so any user-supplied value is ignored.
+ */
+ if (!isHosted && userApiKey) {
+ return { apiKey: userApiKey, isBYOK: false }
+ }
if (workspaceId) {
const byokResult = await getBYOKKey(workspaceId, 'cohere')
if (byokResult) {
@@ -77,8 +88,19 @@ async function resolveCohereKey(
}
}
+/**
+ * Subset of Cohere v2/rerank response fields we read.
+ * Reference: https://docs.cohere.com/v2/reference/rerank
+ * - `results[].index` maps back to the position in the documents we sent.
+ * - `results[].relevance_score` is normalized 0–1.
+ * - `meta.warnings` is documented as an array of strings; we surface them in logs
+ * so issues like document truncation don't disappear silently.
+ */
interface CohereRerankResponse {
results: Array<{ index: number; relevance_score: number }>
+ meta?: {
+ warnings?: string[]
+ }
}
/**
@@ -92,6 +114,8 @@ export async function rerank(
model: string
topN?: number
workspaceId?: string | null
+ /** User-supplied Cohere key from the Knowledge block field. Honored only on self-hosted. */
+ apiKey?: string
}
): Promise> {
if (items.length === 0) return { results: [], isBYOK: false }
@@ -100,7 +124,7 @@ export async function rerank(
throw new Error(`Unsupported reranker model: ${options.model}`)
}
- const { apiKey, isBYOK } = await resolveCohereKey(options.workspaceId)
+ const { apiKey, isBYOK } = await resolveCohereKey(options.workspaceId, options.apiKey)
const cappedItems =
items.length > MAX_DOCUMENTS_PER_RERANK ? items.slice(0, MAX_DOCUMENTS_PER_RERANK) : items
if (items.length > MAX_DOCUMENTS_PER_RERANK) {
@@ -151,6 +175,13 @@ export async function rerank(
}
)
+ if (response.meta?.warnings && response.meta.warnings.length > 0) {
+ logger.warn('Cohere rerank returned warnings', {
+ model: options.model,
+ warnings: response.meta.warnings,
+ })
+ }
+
return {
results: response.results
.filter((r) => r.index >= 0 && r.index < cappedItems.length)
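// Key-resolution sketch (illustrative, not part of the patch): a condensed paraphrase of
// the precedence the hunk above gives resolveCohereKey, not the actual implementation.
// On self-hosted, the key typed into the Knowledge block wins; hosted Sim ignores it and
// resolves workspace BYOK first, then the platform's rotating env key.
function pickCohereKey(opts: {
  isHosted: boolean
  userApiKey?: string // value from the Knowledge block field
  byokKey?: string // workspace BYOK lookup result
  envKey?: string // rotating platform COHERE_API_KEY*
}): { apiKey: string; isBYOK: boolean } {
  if (!opts.isHosted && opts.userApiKey) return { apiKey: opts.userApiKey, isBYOK: false }
  if (opts.byokKey) return { apiKey: opts.byokKey, isBYOK: true }
  if (opts.envKey) return { apiKey: opts.envKey, isBYOK: false }
  throw new Error('No Cohere API key configured')
}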
diff --git a/apps/sim/lib/logs/fetch-log-detail.ts b/apps/sim/lib/logs/fetch-log-detail.ts
new file mode 100644
index 00000000000..1a5aea4dc26
--- /dev/null
+++ b/apps/sim/lib/logs/fetch-log-detail.ts
@@ -0,0 +1,197 @@
+import { db } from '@sim/db'
+import {
+ jobExecutionLogs,
+ pausedExecutions,
+ permissions,
+ workflow,
+ workflowDeploymentVersion,
+ workflowExecutionLogs,
+} from '@sim/db/schema'
+import { and, eq, type SQL } from 'drizzle-orm'
+
+type LookupColumn = 'id' | 'executionId'
+
+interface FetchLogDetailArgs {
+ userId: string
+ workspaceId: string
+ lookupColumn: LookupColumn
+ lookupValue: string
+}
+
+/**
+ * Shared loader for the workflow-log detail shape returned by the by-id and
+ * by-execution routes. Returns `null` when no matching row exists in either
+ * the workflow-execution or job-execution tables for this user + workspace.
+ */
+export async function fetchLogDetail({
+ userId,
+ workspaceId,
+ lookupColumn,
+ lookupValue,
+}: FetchLogDetailArgs) {
+ const workflowMatch: SQL =
+ lookupColumn === 'id'
+ ? eq(workflowExecutionLogs.id, lookupValue)
+ : eq(workflowExecutionLogs.executionId, lookupValue)
+
+ const rows = await db
+ .select({
+ id: workflowExecutionLogs.id,
+ workflowId: workflowExecutionLogs.workflowId,
+ executionId: workflowExecutionLogs.executionId,
+ deploymentVersionId: workflowExecutionLogs.deploymentVersionId,
+ level: workflowExecutionLogs.level,
+ status: workflowExecutionLogs.status,
+ trigger: workflowExecutionLogs.trigger,
+ startedAt: workflowExecutionLogs.startedAt,
+ endedAt: workflowExecutionLogs.endedAt,
+ totalDurationMs: workflowExecutionLogs.totalDurationMs,
+ executionData: workflowExecutionLogs.executionData,
+ cost: workflowExecutionLogs.cost,
+ files: workflowExecutionLogs.files,
+ createdAt: workflowExecutionLogs.createdAt,
+ workflowName: workflow.name,
+ workflowDescription: workflow.description,
+ workflowColor: workflow.color,
+ workflowFolderId: workflow.folderId,
+ workflowUserId: workflow.userId,
+ workflowWorkspaceId: workflow.workspaceId,
+ workflowCreatedAt: workflow.createdAt,
+ workflowUpdatedAt: workflow.updatedAt,
+ deploymentVersion: workflowDeploymentVersion.version,
+ deploymentVersionName: workflowDeploymentVersion.name,
+ pausedStatus: pausedExecutions.status,
+ pausedTotalPauseCount: pausedExecutions.totalPauseCount,
+ pausedResumedCount: pausedExecutions.resumedCount,
+ })
+ .from(workflowExecutionLogs)
+ .leftJoin(workflow, eq(workflowExecutionLogs.workflowId, workflow.id))
+ .leftJoin(
+ workflowDeploymentVersion,
+ eq(workflowDeploymentVersion.id, workflowExecutionLogs.deploymentVersionId)
+ )
+ .leftJoin(pausedExecutions, eq(pausedExecutions.executionId, workflowExecutionLogs.executionId))
+ .innerJoin(
+ permissions,
+ and(
+ eq(permissions.entityType, 'workspace'),
+ eq(permissions.entityId, workflowExecutionLogs.workspaceId),
+ eq(permissions.userId, userId)
+ )
+ )
+ .where(and(workflowMatch, eq(workflowExecutionLogs.workspaceId, workspaceId)))
+ .limit(1)
+
+ const log = rows[0]
+
+ if (log) {
+ const workflowSummary = log.workflowId
+ ? {
+ id: log.workflowId,
+ name: log.workflowName,
+ description: log.workflowDescription,
+ color: log.workflowColor,
+ folderId: log.workflowFolderId,
+ userId: log.workflowUserId,
+ workspaceId: log.workflowWorkspaceId,
+ createdAt: log.workflowCreatedAt?.toISOString() ?? null,
+ updatedAt: log.workflowUpdatedAt?.toISOString() ?? null,
+ }
+ : null
+
+ const totalPauseCount = Number(log.pausedTotalPauseCount ?? 0)
+ const resumedCount = Number(log.pausedResumedCount ?? 0)
+ const hasPendingPause =
+ (totalPauseCount > 0 && resumedCount < totalPauseCount) ||
+ (log.pausedStatus !== null && log.pausedStatus !== 'fully_resumed')
+
+ return {
+ id: log.id,
+ workflowId: log.workflowId,
+ executionId: log.executionId,
+ deploymentVersionId: log.deploymentVersionId,
+ deploymentVersion: log.deploymentVersion ?? null,
+ deploymentVersionName: log.deploymentVersionName ?? null,
+ level: log.level,
+ status: log.status,
+ duration: log.totalDurationMs ? `${log.totalDurationMs}ms` : null,
+ trigger: log.trigger,
+ createdAt: log.startedAt.toISOString(),
+ workflow: workflowSummary,
+ jobTitle: null,
+ cost: log.cost ?? null,
+ pauseSummary: {
+ status: log.pausedStatus ?? null,
+ total: totalPauseCount,
+ resumed: resumedCount,
+ },
+ hasPendingPause,
+ executionData: {
+ totalDuration: log.totalDurationMs,
+ ...((log.executionData as Record<string, unknown> | null) ?? {}),
+ enhanced: true as const,
+ },
+ files: log.files ?? null,
+ }
+ }
+
+ const jobMatch: SQL =
+ lookupColumn === 'id'
+ ? eq(jobExecutionLogs.id, lookupValue)
+ : eq(jobExecutionLogs.executionId, lookupValue)
+
+ const jobRows = await db
+ .select({
+ id: jobExecutionLogs.id,
+ executionId: jobExecutionLogs.executionId,
+ level: jobExecutionLogs.level,
+ status: jobExecutionLogs.status,
+ trigger: jobExecutionLogs.trigger,
+ startedAt: jobExecutionLogs.startedAt,
+ endedAt: jobExecutionLogs.endedAt,
+ totalDurationMs: jobExecutionLogs.totalDurationMs,
+ executionData: jobExecutionLogs.executionData,
+ cost: jobExecutionLogs.cost,
+ createdAt: jobExecutionLogs.createdAt,
+ })
+ .from(jobExecutionLogs)
+ .innerJoin(
+ permissions,
+ and(
+ eq(permissions.entityType, 'workspace'),
+ eq(permissions.entityId, jobExecutionLogs.workspaceId),
+ eq(permissions.userId, userId)
+ )
+ )
+ .where(and(jobMatch, eq(jobExecutionLogs.workspaceId, workspaceId)))
+ .limit(1)
+
+ const jobLog = jobRows[0]
+ if (!jobLog) return null
+
+ const execData = (jobLog.executionData as Record<string, unknown> | null) ?? {}
+ return {
+ id: jobLog.id,
+ workflowId: null,
+ executionId: jobLog.executionId,
+ deploymentVersionId: null,
+ deploymentVersion: null,
+ deploymentVersionName: null,
+ level: jobLog.level,
+ status: jobLog.status,
+ duration: jobLog.totalDurationMs ? `${jobLog.totalDurationMs}ms` : null,
+ trigger: jobLog.trigger,
+ createdAt: jobLog.startedAt.toISOString(),
+ workflow: null,
+ jobTitle: ((execData.trigger as Record<string, unknown> | undefined)?.source as string) ?? null,
+ cost: jobLog.cost ?? null,
+ pauseSummary: { status: null, total: 0, resumed: 0 },
+ hasPendingPause: false,
+ executionData: {
+ totalDuration: jobLog.totalDurationMs,
+ ...execData,
+ enhanced: true as const,
+ },
+ files: null,
+ }
+}
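// Route sketch (illustrative, not part of the patch): how a detail route is expected to
// consume this loader. The file location, auth resolution, and params shape are
// assumptions; the null -> 404 mapping and the { data } envelope follow
// getLogDetailContract above.
import { NextResponse } from 'next/server'
import { fetchLogDetail } from '@/lib/logs/fetch-log-detail'

export async function GET(req: Request, { params }: { params: { id: string } }) {
  const workspaceId = new URL(req.url).searchParams.get('workspaceId')
  if (!workspaceId) {
    return NextResponse.json({ error: 'workspaceId is required' }, { status: 400 })
  }
  const userId = 'user-id-from-session' // assumption: resolved by the app's real auth helper

  const log = await fetchLogDetail({ userId, workspaceId, lookupColumn: 'id', lookupValue: params.id })
  if (!log) return NextResponse.json({ error: 'Log not found' }, { status: 404 })
  return NextResponse.json({ data: log })
}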
diff --git a/apps/sim/lib/mothership/inbox/executor.ts b/apps/sim/lib/mothership/inbox/executor.ts
index b738a6a37ae..52236d9b959 100644
--- a/apps/sim/lib/mothership/inbox/executor.ts
+++ b/apps/sim/lib/mothership/inbox/executor.ts
@@ -131,11 +131,14 @@ export async function executeInboxTask(taskId: string): Promise {
})
}
+ const userMessageId = generateId()
+
if (chatId) {
taskPubSub?.publishStatusChanged({
workspaceId: ws.id,
chatId,
type: 'started',
+ streamId: userMessageId,
})
}
@@ -178,7 +181,6 @@ export async function executeInboxTask(taskId: string): Promise {
}
const messageContent = formatEmailAsMessage(truncatedTask, attachments)
- const userMessageId = generateId()
const requestPayload: Record<string, unknown> = {
message: messageContent,
userId,
@@ -244,6 +246,7 @@ export async function executeInboxTask(taskId: string): Promise {
workspaceId: ws.id,
chatId,
type: 'completed',
+ streamId: userMessageId,
})
}
diff --git a/apps/sim/lib/table/sql.ts b/apps/sim/lib/table/sql.ts
index d2004175f44..f854d2b5237 100644
--- a/apps/sim/lib/table/sql.ts
+++ b/apps/sim/lib/table/sql.ts
@@ -10,6 +10,17 @@ import { sql } from 'drizzle-orm'
import { NAME_PATTERN } from './constants'
import type { ColumnDefinition, ConditionOperators, Filter, JsonValue, Sort } from './types'
+/**
+ * Error thrown when caller-supplied filter or sort input is malformed.
+ * Routes should map this to HTTP 400 with the message preserved.
+ */
+export class TableQueryValidationError extends Error {
+ constructor(message: string) {
+ super(message)
+ this.name = 'TableQueryValidationError'
+ }
+}
+
/**
* Whitelist of allowed operators for query filtering.
* Only these operators can be used in filter conditions.
@@ -41,7 +52,7 @@ const ALLOWED_OPERATORS = new Set([
* @param filter - Filter object with field conditions and logical operators
* @param tableName - Table name for the query (e.g., 'user_table_rows')
* @returns SQL WHERE clause or undefined if no filter specified
- * @throws Error if field name is invalid or operator is not allowed
+ * @throws {TableQueryValidationError} if field name is invalid or operator is not allowed
*
* @example
* // Simple equality
@@ -110,7 +121,7 @@ export function buildFilterClause(filter: Filter, tableName: string): SQL | unde
* @param tableName - Table name for the query (e.g., 'user_table_rows')
* @param columns - Optional column definitions for type-aware sorting
* @returns SQL ORDER BY clause or undefined if no sort specified
- * @throws Error if field name is invalid
+ * @throws {TableQueryValidationError} if field name or sort direction is invalid
*
* @example
* buildSortClause({ name: 'asc', age: 'desc' }, 'user_table_rows')
@@ -133,7 +144,9 @@ export function buildSortClause(
validateFieldName(field)
if (direction !== 'asc' && direction !== 'desc') {
- throw new Error(`Invalid sort direction "${direction}". Must be "asc" or "desc".`)
+ throw new TableQueryValidationError(
+ `Invalid sort direction "${direction}". Must be "asc" or "desc".`
+ )
}
const columnType = columnTypeMap.get(field)
@@ -148,15 +161,15 @@ export function buildSortClause(
* Field names must match the NAME_PATTERN (alphanumeric + underscore, starting with letter/underscore).
*
* @param field - The field name to validate
- * @throws Error if field name is invalid
+ * @throws {TableQueryValidationError} if field name is invalid
*/
function validateFieldName(field: string): void {
if (!field || typeof field !== 'string') {
- throw new Error('Field name must be a non-empty string')
+ throw new TableQueryValidationError('Field name must be a non-empty string')
}
if (!NAME_PATTERN.test(field)) {
- throw new Error(
+ throw new TableQueryValidationError(
`Invalid field name "${field}". Field names must start with a letter or underscore, followed by alphanumeric characters or underscores.`
)
}
@@ -166,11 +179,11 @@ function validateFieldName(field: string): void {
* Validates an operator to ensure it's in the allowed list.
*
* @param operator - The operator to validate
- * @throws Error if operator is not allowed
+ * @throws {TableQueryValidationError} if operator is not allowed
*/
function validateOperator(operator: string): void {
if (!ALLOWED_OPERATORS.has(operator)) {
- throw new Error(
+ throw new TableQueryValidationError(
`Invalid operator "${operator}". Allowed operators: ${Array.from(ALLOWED_OPERATORS).join(', ')}`
)
}
@@ -190,7 +203,7 @@ function validateOperator(operator: string): void {
* object with operators like $eq, $gt, $in, etc.
* @returns Array of SQL condition fragments. Multiple conditions are returned
* when the condition object contains multiple operators.
- * @throws Error if field name is invalid or operator is not allowed
+ * @throws {TableQueryValidationError} if field name is invalid or operator is not allowed
*/
function buildFieldCondition(
tableName: string,
@@ -260,7 +273,9 @@ function buildFieldCondition(
break
default:
- // This should never happen due to validateOperator, but added for completeness
+ // This should never happen due to validateOperator, but added for completeness.
+ // Throw a plain Error (→ 500) since reaching this default means the switch
+ // and ALLOWED_OPERATORS have drifted — that's a programmer error, not a caller error.
throw new Error(`Unsupported operator: ${op}`)
}
}
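
The docblock on TableQueryValidationError above says routes should map it to HTTP 400 with the message preserved, while the defensive plain Error stays a 500. A minimal sketch of that mapping, assuming a Next.js App Router handler; the route path, request shape, and NextResponse usage are illustrative assumptions, not part of this diff:

import { NextResponse } from 'next/server'
import {
  buildFilterClause,
  buildSortClause,
  TableQueryValidationError,
} from '@/lib/table/sql'

export async function POST(request: Request) {
  const body = await request.json()
  try {
    // Malformed caller input (bad field name, disallowed operator, invalid sort
    // direction) throws TableQueryValidationError inside these builders.
    const where = buildFilterClause(body.filter, 'user_table_rows')
    const orderBy = buildSortClause(body.sort, 'user_table_rows')
    // ...run the query using `where` / `orderBy` and return the rows...
    return NextResponse.json({ ok: true })
  } catch (error) {
    if (error instanceof TableQueryValidationError) {
      // Caller error: preserve the message and respond 400.
      return NextResponse.json({ error: error.message }, { status: 400 })
    }
    // Anything else (including the defensive plain Error in buildFieldCondition)
    // indicates a programmer error and surfaces as 500.
    return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
  }
}
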
diff --git a/apps/sim/stores/logs/filters/types.ts b/apps/sim/stores/logs/filters/types.ts
index 3fbd85bfaee..cf95d3bee3e 100644
--- a/apps/sim/stores/logs/filters/types.ts
+++ b/apps/sim/stores/logs/filters/types.ts
@@ -1,113 +1,3 @@
-import type { ProviderTiming, TokenInfo, ToolCall, TraceSpan } from '@/lib/logs/types'
-
-export type { ProviderTiming, TokenInfo, ToolCall, TraceSpan }
-
-export interface WorkflowData {
- id: string
- name: string
- description: string | null
- color: string
- state: any
-}
-
-export interface ToolCallMetadata {
- toolCalls?: ToolCall[]
-}
-
-export interface CostMetadata {
- models?: Record<
- string,
- {
- input: number
- output: number
- total: number
- tokens?: {
- input?: number
- output?: number
- prompt?: number
- completion?: number
- total?: number
- }
- }
- >
- input?: number
- output?: number
- total?: number
- tokens?: {
- input?: number
- output?: number
- prompt?: number
- completion?: number
- total?: number
- }
- pricing?: {
- input: number
- output: number
- cachedInput?: number
- updatedAt: string
- }
-}
-
-export interface WorkflowLog {
- id: string
- workflowId: string | null
- executionId?: string | null
- deploymentVersion?: number | null
- deploymentVersionName?: string | null
- level: string
- status?: string | null
- duration: string | null
- trigger: string | null
- createdAt: string
- workflow?: WorkflowData | null
- jobTitle?: string | null
- files?: Array<{
- id: string
- name: string
- size: number
- type: string
- url: string
- key: string
- uploadedAt: string
- expiresAt: string
- storageProvider?: 's3' | 'blob' | 'local'
- bucketName?: string
- }>
- cost?: CostMetadata
- hasPendingPause?: boolean
- executionData?: ToolCallMetadata & {
- traceSpans?: TraceSpan[]
- totalDuration?: number
- blockInput?: Record
- enhanced?: boolean
-
- blockExecutions?: Array<{
- id: string
- blockId: string
- blockName: string
- blockType: string
- startedAt: string
- endedAt: string
- durationMs: number
- status: 'success' | 'error' | 'skipped'
- errorMessage?: string
- errorStackTrace?: string
- inputData: unknown
- outputData: unknown
- cost?: CostMetadata
- metadata: Record
- }>
- }
-}
-
-export interface LogsResponse {
- data: WorkflowLog[]
- total: number
- page: number
- pageSize: number
- totalPages: number
-}
-
export type TimeRange =
| 'Past 30 minutes'
| 'Past hour'
@@ -129,6 +19,7 @@ export type LogLevel =
| 'cancelled'
| 'all'
| (string & {})
+
/** Core trigger types for workflow execution */
export const CORE_TRIGGER_TYPES = [
'manual',
diff --git a/apps/sim/tools/knowledge/search.ts b/apps/sim/tools/knowledge/search.ts
index 7f0ee99e933..09da5193704 100644
--- a/apps/sim/tools/knowledge/search.ts
+++ b/apps/sim/tools/knowledge/search.ts
@@ -55,6 +55,19 @@ export const knowledgeSearchTool: ToolConfig = {
description:
'Cohere rerank model to use (one of: rerank-v4.0-pro, rerank-v4.0-fast, rerank-v3.5)',
},
+ rerankerInputCount: {
+ type: 'number',
+ required: false,
+ visibility: 'user-only',
+ description:
+ 'Number of vector results sent to the Cohere reranker (1–100). Defaults to topK × 4 capped at 100.',
+ },
+ apiKey: {
+ type: 'string',
+ required: false,
+ visibility: 'user-only',
+ description: 'Cohere API key for reranker (self-hosted deployments only)',
+ },
},
schemaEnrichment: {
@@ -84,13 +97,29 @@ export const knowledgeSearchTool: ToolConfig = {
typeof params.rerankerModel === 'string' && params.rerankerModel.length > 0
? params.rerankerModel
: DEFAULT_RERANKER_MODEL
+ const rerankerApiKey =
+ typeof params.apiKey === 'string' && params.apiKey.length > 0 ? params.apiKey : undefined
+ const rawInputCount =
+ params.rerankerInputCount !== undefined &&
+ params.rerankerInputCount !== null &&
+ params.rerankerInputCount !== ''
+ ? Number(params.rerankerInputCount)
+ : Number.NaN
+ const rerankerInputCount = Number.isFinite(rawInputCount)
+ ? Math.max(1, Math.min(100, Math.floor(rawInputCount)))
+ : undefined
const requestBody = {
knowledgeBaseIds,
query: params.query,
topK: params.topK ? Math.max(1, Math.min(100, Number(params.topK))) : 10,
...(structuredFilters.length > 0 && { tagFilters: structuredFilters }),
- ...(rerankerEnabled && { rerankerEnabled: true, rerankerModel }),
+ ...(rerankerEnabled && {
+ rerankerEnabled: true,
+ rerankerModel,
+ ...(rerankerInputCount !== undefined && { rerankerInputCount }),
+ ...(rerankerApiKey && { rerankerApiKey }),
+ }),
...(workflowId && { workflowId }),
}
diff --git a/apps/sim/tools/logs/get_execution.ts b/apps/sim/tools/logs/get_execution.ts
new file mode 100644
index 00000000000..a62eef0525b
--- /dev/null
+++ b/apps/sim/tools/logs/get_execution.ts
@@ -0,0 +1,53 @@
+import type { LogsGetExecutionParams, LogsGetExecutionResponse } from '@/tools/logs/types'
+import type { ToolConfig } from '@/tools/types'
+
+export const logsGetExecutionTool: ToolConfig<LogsGetExecutionParams, LogsGetExecutionResponse> = {
+ id: 'logs_get_execution',
+ name: 'Get Execution Details',
+ description:
+ 'Fetch full execution details for a workflow run, including the per-block state snapshot.',
+ version: '1.0.0',
+
+ params: {
+ executionId: {
+ type: 'string',
+ required: true,
+ visibility: 'user-or-llm',
+ description: 'Execution ID returned by a workflow run',
+ },
+ },
+
+ request: {
+ url: (params) => `/api/logs/execution/${encodeURIComponent(params.executionId)}`,
+ method: 'GET',
+ headers: () => ({
+ 'Content-Type': 'application/json',
+ }),
+ },
+
+ transformResponse: async (response): Promise<LogsGetExecutionResponse> => {
+ const data = await response.json()
+ if (!response.ok) {
+ throw new Error(data?.error || `Request failed with status ${response.status}`)
+ }
+ return {
+ success: true,
+ output: data,
+ }
+ },
+
+ outputs: {
+ executionId: { type: 'string', description: 'Execution ID' },
+ workflowId: { type: 'string', description: 'Workflow ID this execution belongs to' },
+ workflowState: { type: 'json', description: 'Per-block state snapshot for the execution' },
+ childWorkflowSnapshots: {
+ type: 'json',
+ description: 'Snapshots for any child workflows invoked during the run',
+ optional: true,
+ },
+ executionMetadata: {
+ type: 'json',
+ description: 'Trigger, timestamps, totalDurationMs, and cost for the run',
+ },
+ },
+}
diff --git a/apps/sim/tools/logs/get_log.ts b/apps/sim/tools/logs/get_log.ts
new file mode 100644
index 00000000000..92e41e79b83
--- /dev/null
+++ b/apps/sim/tools/logs/get_log.ts
@@ -0,0 +1,50 @@
+import type { LogsGetParams, LogsGetResponse } from '@/tools/logs/types'
+import type { ToolConfig } from '@/tools/types'
+
+export const logsGetTool: ToolConfig<LogsGetParams, LogsGetResponse> = {
+ id: 'logs_get',
+ name: 'Get Log by ID',
+ description: 'Fetch a single workflow execution log entry by its log ID.',
+ version: '1.0.0',
+
+ params: {
+ id: {
+ type: 'string',
+ required: true,
+ visibility: 'user-or-llm',
+ description: 'Log entry ID',
+ },
+ },
+
+ request: {
+ url: (params) => {
+ const workspaceId = params._context?.workspaceId
+ if (!workspaceId) {
+ throw new Error('workspaceId is required in execution context')
+ }
+ const qs = new URLSearchParams({ workspaceId })
+ return `/api/logs/${encodeURIComponent(params.id)}?${qs.toString()}`
+ },
+ method: 'GET',
+ headers: () => ({
+ 'Content-Type': 'application/json',
+ }),
+ },
+
+ transformResponse: async (response): Promise<LogsGetResponse> => {
+ const result = await response.json()
+ if (!response.ok) {
+ throw new Error(result?.error || `Request failed with status ${response.status}`)
+ }
+ return {
+ success: true,
+ output: {
+ log: result.data,
+ },
+ }
+ },
+
+ outputs: {
+ log: { type: 'json', description: 'Workflow execution log entry' },
+ },
+}
diff --git a/apps/sim/tools/logs/index.ts b/apps/sim/tools/logs/index.ts
new file mode 100644
index 00000000000..109d223c8b8
--- /dev/null
+++ b/apps/sim/tools/logs/index.ts
@@ -0,0 +1,3 @@
+export { logsGetExecutionTool } from '@/tools/logs/get_execution'
+export { logsGetTool } from '@/tools/logs/get_log'
+export { logsQueryTool } from '@/tools/logs/query'
diff --git a/apps/sim/tools/logs/query.ts b/apps/sim/tools/logs/query.ts
new file mode 100644
index 00000000000..8ea660ee29a
--- /dev/null
+++ b/apps/sim/tools/logs/query.ts
@@ -0,0 +1,132 @@
+import type { LogsQueryParams, LogsQueryResponse } from '@/tools/logs/types'
+import type { ToolConfig } from '@/tools/types'
+
+export const logsQueryTool: ToolConfig<LogsQueryParams, LogsQueryResponse> = {
+ id: 'logs_query',
+ name: 'Query Logs',
+ description: 'Query workflow execution logs in the current workspace with filters.',
+ version: '1.0.0',
+
+ params: {
+ workflowIds: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Comma-separated workflow IDs to filter by',
+ },
+ executionId: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Filter logs to a single execution ID',
+ },
+ level: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description:
+ "Log level filter: 'all', 'info', 'error', 'running', 'pending'. Comma-separated for multiple.",
+ },
+ triggers: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Comma-separated triggers (api, webhook, schedule, manual, chat, mothership)',
+ },
+ limit: {
+ type: 'number',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Max logs to return (default 100, max 200)',
+ },
+ cursor: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Opaque pagination cursor returned by a previous query',
+ },
+ sortBy: {
+ type: 'string',
+ required: false,
+ visibility: 'user-only',
+ description: "Sort field: 'date' (default), 'duration', 'cost', 'status'",
+ },
+ sortOrder: {
+ type: 'string',
+ required: false,
+ visibility: 'user-only',
+ description: "Sort order: 'desc' (default) or 'asc'",
+ },
+ startDate: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'ISO 8601 timestamp; only logs at or after this time',
+ },
+ endDate: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'ISO 8601 timestamp; only logs at or before this time',
+ },
+ search: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Free-text search across log fields',
+ },
+ },
+
+ request: {
+ url: (params) => {
+ const workspaceId = params._context?.workspaceId
+ if (!workspaceId) {
+ throw new Error('workspaceId is required in execution context')
+ }
+ const qs = new URLSearchParams({ workspaceId })
+ if (params.workflowIds) qs.set('workflowIds', params.workflowIds)
+ if (params.executionId) qs.set('executionId', params.executionId)
+ if (params.level && params.level !== 'all') qs.set('level', params.level)
+ if (params.triggers) qs.set('triggers', params.triggers)
+ if (params.startDate) qs.set('startDate', params.startDate)
+ if (params.endDate) qs.set('endDate', params.endDate)
+ if (params.search) qs.set('search', params.search)
+ if (params.cursor) qs.set('cursor', params.cursor)
+ if (params.sortBy) qs.set('sortBy', params.sortBy)
+ if (params.sortOrder) qs.set('sortOrder', params.sortOrder)
+ if (params.limit !== undefined && params.limit !== null) {
+ qs.set('limit', String(params.limit))
+ }
+ return `/api/logs?${qs.toString()}`
+ },
+ method: 'GET',
+ headers: () => ({
+ 'Content-Type': 'application/json',
+ }),
+ },
+
+ transformResponse: async (response): Promise<LogsQueryResponse> => {
+ const result = await response.json()
+ if (!response.ok) {
+ throw new Error(result?.error || `Request failed with status ${response.status}`)
+ }
+ return {
+ success: true,
+ output: {
+ logs: result.data || [],
+ nextCursor: result.nextCursor ?? null,
+ },
+ }
+ },
+
+ outputs: {
+ logs: {
+ type: 'array',
+ description: 'Array of workflow execution log entries',
+ },
+ nextCursor: {
+ type: 'string',
+ description: 'Pagination cursor for the next page; null when no more results',
+ },
+ },
+}
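
For reference, the URL builder above produces output like the following (workspace ID hypothetical); the level filter is dropped when it is 'all', and limit is appended only when explicitly provided:

// params: { level: 'error', limit: 50, _context: { workspaceId: 'ws_123' } }
// resulting request URL:
// /api/logs?workspaceId=ws_123&level=error&limit=50
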
diff --git a/apps/sim/tools/logs/types.ts b/apps/sim/tools/logs/types.ts
new file mode 100644
index 00000000000..3053059b1f1
--- /dev/null
+++ b/apps/sim/tools/logs/types.ts
@@ -0,0 +1,48 @@
+import type {
+ ExecutionSnapshotData,
+ WorkflowLogDetail,
+ WorkflowLogSummary,
+} from '@/lib/api/contracts/logs'
+import type { ToolResponse, WorkflowToolExecutionContext } from '@/tools/types'
+
+export interface LogsQueryParams {
+ workflowIds?: string
+ executionId?: string
+ level?: string
+ triggers?: string
+ limit?: number
+ cursor?: string
+ sortBy?: 'date' | 'duration' | 'cost' | 'status'
+ sortOrder?: 'asc' | 'desc'
+ startDate?: string
+ endDate?: string
+ search?: string
+ _context?: WorkflowToolExecutionContext
+}
+
+export interface LogsGetParams {
+ id: string
+ _context?: WorkflowToolExecutionContext
+}
+
+export interface LogsGetExecutionParams {
+ executionId: string
+ _context?: WorkflowToolExecutionContext
+}
+
+export interface LogsQueryResponse extends ToolResponse {
+ output: {
+ logs: WorkflowLogSummary[]
+ nextCursor: string | null
+ }
+}
+
+export interface LogsGetResponse extends ToolResponse {
+ output: {
+ log: WorkflowLogDetail
+ }
+}
+
+export interface LogsGetExecutionResponse extends ToolResponse {
+ output: ExecutionSnapshotData
+}
diff --git a/apps/sim/tools/openai/image.ts b/apps/sim/tools/openai/image.ts
index 2e857d153f6..cd84472b044 100644
--- a/apps/sim/tools/openai/image.ts
+++ b/apps/sim/tools/openai/image.ts
@@ -16,7 +16,7 @@ export const imageTool: ToolConfig = {
type: 'string',
required: true,
visibility: 'user-only',
- description: 'The model to use (gpt-image-1 or dall-e-3)',
+ description: 'The model to use (dall-e-3, gpt-image-1, or gpt-image-2)',
},
prompt: {
type: 'string',
@@ -28,25 +28,39 @@ export const imageTool: ToolConfig = {
type: 'string',
required: true,
visibility: 'user-or-llm',
- description: 'The size of the generated images (1024x1024, 1024x1792, or 1792x1024)',
+ description:
+ 'Image size. dall-e-3: 1024x1024, 1024x1792, or 1792x1024. gpt-image-1: auto, 1024x1024, 1536x1024, or 1024x1536. gpt-image-2: auto or any size with edges ≤3840px and multiples of 16 (e.g. 1024x1024, 1536x1024, 1024x1536, 2560x1440, 3840x2160).',
},
quality: {
type: 'string',
required: false,
visibility: 'user-or-llm',
- description: 'The quality of the image (standard or hd)',
+ description: 'Quality. dall-e-3: standard|hd. gpt-image-1/gpt-image-2: auto|low|medium|high',
},
style: {
type: 'string',
required: false,
visibility: 'user-or-llm',
- description: 'The style of the image (vivid or natural)',
+ description: 'The style of the image (vivid or natural), only for dall-e-3',
},
background: {
type: 'string',
required: false,
visibility: 'user-or-llm',
- description: 'The background color, only for gpt-image-1',
+ description:
+ 'Background. gpt-image-1: auto|transparent|opaque. gpt-image-2: auto|opaque (transparent not supported)',
+ },
+ outputFormat: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Output image format (png, jpeg, webp), only for gpt-image-1 and gpt-image-2',
+ },
+ moderation: {
+ type: 'string',
+ required: false,
+ visibility: 'user-or-llm',
+ description: 'Moderation level (auto or low), only for gpt-image-1 and gpt-image-2',
},
n: {
type: 'number',
@@ -73,15 +87,18 @@ export const imageTool: ToolConfig = {
const body: BaseImageRequestBody = {
model: params.model,
prompt: params.prompt,
- size: params.size || '1024x1024',
+ size: params.size || (params.model === 'dall-e-3' ? '1024x1024' : 'auto'),
n: params.n ? Number(params.n) : 1,
}
if (params.model === 'dall-e-3') {
if (params.quality) body.quality = params.quality
if (params.style) body.style = params.style
- } else if (params.model === 'gpt-image-1') {
+ } else if (params.model === 'gpt-image-1' || params.model === 'gpt-image-2') {
+ if (params.quality) body.quality = params.quality
if (params.background) body.background = params.background
+ if (params.outputFormat) body.output_format = params.outputFormat
+ if (params.moderation) body.moderation = params.moderation
}
return body
@@ -111,7 +128,7 @@ export const imageTool: ToolConfig = {
} else if (data.data?.[0]?.b64_json) {
base64Image = data.data[0].b64_json
logger.info(
- 'Found base64 encoded image in response for GPT-Image-1',
+ `Found base64 encoded image in response for ${modelName}`,
`length: ${base64Image.length}`
)
} else {
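
For reference, the request-body builder above now falls back to size 'auto' for the gpt-image models (only dall-e-3 keeps the '1024x1024' default), and the shared gpt-image-1/gpt-image-2 branch forwards quality, background, output_format, and moderation when set. A worked example with hypothetical values:

// params: { model: 'gpt-image-2', prompt: 'a lighthouse at dusk', quality: 'high', outputFormat: 'webp' }
// resulting body:
// { model: 'gpt-image-2', prompt: 'a lighthouse at dusk', size: 'auto', n: 1, quality: 'high', output_format: 'webp' }
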
diff --git a/apps/sim/tools/registry.ts b/apps/sim/tools/registry.ts
index ad7dd384867..9130ac52dee 100644
--- a/apps/sim/tools/registry.ts
+++ b/apps/sim/tools/registry.ts
@@ -1568,6 +1568,7 @@ import {
import { linkedInGetProfileTool, linkedInSharePostTool } from '@/tools/linkedin'
import { linkupSearchTool } from '@/tools/linkup'
import { llmChatTool } from '@/tools/llm'
+import { logsGetExecutionTool, logsGetTool, logsQueryTool } from '@/tools/logs'
import {
loopsCreateContactPropertyTool,
loopsCreateContactTool,
@@ -3204,6 +3205,9 @@ export const tools: Record = {
ketch_set_consent: ketchSetConsentTool,
ketch_set_subscriptions: ketchSetSubscriptionsTool,
linkup_search: linkupSearchTool,
+ logs_query: logsQueryTool,
+ logs_get: logsGetTool,
+ logs_get_execution: logsGetExecutionTool,
loops_create_contact: loopsCreateContactTool,
loops_create_contact_property: loopsCreateContactPropertyTool,
loops_update_contact: loopsUpdateContactTool,
diff --git a/helm/sim/values.yaml b/helm/sim/values.yaml
index 97fbeba5761..d2fd5c0ee11 100644
--- a/helm/sim/values.yaml
+++ b/helm/sim/values.yaml
@@ -275,6 +275,12 @@ app:
# in the Agent block UI — users just pick an Azure model and run.
NEXT_PUBLIC_AZURE_CONFIGURED: "" # Set to "true" to hide Azure credential fields
+ # Cohere Reranker (Knowledge block)
+ # Set COHERE_API_KEY (or COHERE_API_KEY_1/2/3 for rotation) and NEXT_PUBLIC_COHERE_CONFIGURED=true
+ # to pre-configure the Cohere reranker server-side. When configured, the Cohere API Key field is
+ # hidden in the Knowledge block UI.
+ NEXT_PUBLIC_COHERE_CONFIGURED: "" # Set to "true" to hide the Cohere API Key field on the Knowledge block
+
# AWS S3 Cloud Storage Configuration (optional - for file storage)
# If configured, files will be stored in S3 instead of local storage
AWS_REGION: "" # AWS region (e.g., "us-east-1")
diff --git a/scripts/check-api-validation-contracts.ts b/scripts/check-api-validation-contracts.ts
index 34cbacb0f6e..14a57e05fad 100644
--- a/scripts/check-api-validation-contracts.ts
+++ b/scripts/check-api-validation-contracts.ts
@@ -9,8 +9,8 @@ const QUERY_HOOKS_DIR = path.join(ROOT, 'apps/sim/hooks/queries')
const SELECTOR_HOOKS_DIR = path.join(ROOT, 'apps/sim/hooks/selectors')
const BASELINE = {
- totalRoutes: 725,
- zodRoutes: 725,
+ totalRoutes: 726,
+ zodRoutes: 726,
nonZodRoutes: 0,
} as const