From cbb4127985ad5ea512a339a4ac945990e9a86d84 Mon Sep 17 00:00:00 2001
From: Pranjal Paliwal
Date: Tue, 14 Oct 2025 11:52:20 -0400
Subject: [PATCH 01/13] Use Vercel AI SDK for OpenAI chat component

---
 worker/package.json                       |   3 +-
 worker/src/components/core/openai-chat.ts | 205 ++++++++++++++++++++++
 worker/src/components/index.ts            |   1 +
 worker/src/types/vercel-ai.d.ts           |  29 +++
 worker/tsconfig.json                      |   3 +-
 5 files changed, 238 insertions(+), 3 deletions(-)
 create mode 100644 worker/src/components/core/openai-chat.ts
 create mode 100644 worker/src/types/vercel-ai.d.ts

diff --git a/worker/package.json b/worker/package.json
index 69d300d5..1fed5c60 100644
--- a/worker/package.json
+++ b/worker/package.json
@@ -18,12 +18,14 @@
     "test": "bun test"
   },
   "dependencies": {
+    "@ai-sdk/openai": "^0.0.40",
     "@shipsec/component-sdk": "*",
     "@grpc/grpc-js": "^1.14.0",
     "@shipsec/shared": "*",
     "@temporalio/client": "^1.11.3",
     "@temporalio/worker": "^1.11.3",
     "@temporalio/workflow": "^1.11.3",
+    "ai": "^3.2.37",
     "dotenv": "^17.2.3",
     "drizzle-orm": "^0.44.6",
     "long": "^5.3.2",
@@ -35,7 +37,6 @@
     "@types/node": "^20.16.11",
     "@types/pg": "^8.15.5",
     "@types/minio": "^7.1.1",
-    "bun-types": "^1.2.23",
     "tsx": "^4.20.6",
     "typescript": "^5.6.3"
   }
diff --git a/worker/src/components/core/openai-chat.ts b/worker/src/components/core/openai-chat.ts
new file mode 100644
index 00000000..af24dbef
--- /dev/null
+++ b/worker/src/components/core/openai-chat.ts
@@ -0,0 +1,205 @@
+import { z } from 'zod';
+import { generateText } from 'ai';
+import { createOpenAI } from '@ai-sdk/openai';
+import { componentRegistry, ComponentDefinition } from '@shipsec/component-sdk';
+
+const HARDCODED_API_KEY = 'sk-REPLACE_WITH_REAL_KEY';
+const OPENAI_API_KEY = process.env.OPENAI_API_KEY ?? HARDCODED_API_KEY;
+
+const DEFAULT_MODEL = 'gpt-4o-mini';
+const DEFAULT_TEMPERATURE = 0.7;
+const DEFAULT_MAX_TOKENS = 512;
+const DEFAULT_BASE_URL = process.env.OPENAI_BASE_URL ?? '';
+
+const inputSchema = z.object({
+  systemPrompt: z
+    .string()
+    .default('')
+    .describe('Optional system message to steer the assistant behaviour.'),
+  userPrompt: z
+    .string()
+    .min(1, 'User prompt cannot be empty')
+    .describe('Primary user prompt sent to the assistant.'),
+  model: z
+    .string()
+    .default(DEFAULT_MODEL)
+    .describe('OpenAI compatible chat model identifier.'),
+  temperature: z
+    .number()
+    .min(0)
+    .max(2)
+    .default(DEFAULT_TEMPERATURE)
+    .describe('Sampling temperature for the response (0-2).'),
+  maxTokens: z
+    .number()
+    .int()
+    .min(1)
+    .max(4096)
+    .default(DEFAULT_MAX_TOKENS)
+    .describe('Maximum number of tokens to generate in the completion.'),
+  apiBaseUrl: z
+    .string()
+    .default(DEFAULT_BASE_URL)
+    .describe('Optional override for the OpenAI-compatible API base URL.'),
+});
+
+type Input = z.infer<typeof inputSchema>;
+
+type Output = {
+  responseText: string;
+  finishReason: string | null;
+  rawResponse: unknown;
+  usage?: unknown;
+};
+
+const outputSchema = z.object({
+  responseText: z.string(),
+  finishReason: z.string().nullable(),
+  rawResponse: z.unknown(),
+  usage: z.unknown().optional(),
+});
+
+const definition: ComponentDefinition<Input, Output> = {
+  id: 'core.openai.chat',
+  label: 'OpenAI Chat Completion',
+  category: 'transform',
+  runner: { kind: 'inline' },
+  inputSchema,
+  outputSchema,
+  docs: 'Executes a one-shot chat completion using the Vercel AI SDK against an OpenAI-compatible endpoint.',
+  metadata: {
+    slug: 'openai-chat-completion',
+    version: '1.0.0',
+    type: 'process',
+    category: 'building-block',
+    description:
+      'Send a system + user prompt to an OpenAI compatible chat completion API and return the response.',
+    icon: 'MessageCircle',
+    author: {
+      name: 'ShipSecAI',
+      type: 'shipsecai',
+    },
+    inputs: [
+      {
+        id: 'systemPrompt',
+        label: 'System Prompt',
+        type: 'string',
+        required: false,
+        description: 'Optional system message that primes the model.',
+      },
+      {
+        id: 'userPrompt',
+        label: 'User Prompt',
+        type: 'string',
+        required: true,
+        description: 'User input that will be sent to the assistant.',
+      },
+    ],
+    outputs: [
+      {
+        id: 'responseText',
+        label: 'Response Text',
+        type: 'string',
+        description: 'The assistant response from the model.',
+      },
+      {
+        id: 'rawResponse',
+        label: 'Raw Response',
+        type: 'object',
+        description: 'Raw response metadata returned by the provider for debugging.',
+      },
+      {
+        id: 'usage',
+        label: 'Token Usage',
+        type: 'object',
+        description: 'Token usage metadata returned by the provider, if available.',
+      },
+    ],
+    parameters: [
+      {
+        id: 'model',
+        label: 'Model',
+        type: 'select',
+        required: true,
+        default: DEFAULT_MODEL,
+        description: 'OpenAI compatible chat model to invoke.',
+        options: [
+          { label: 'gpt-4o-mini', value: 'gpt-4o-mini' },
+          { label: 'gpt-4o', value: 'gpt-4o' },
+          { label: 'gpt-4.1-mini', value: 'gpt-4.1-mini' },
+        ],
+      },
+      {
+        id: 'temperature',
+        label: 'Temperature',
+        type: 'number',
+        required: false,
+        default: DEFAULT_TEMPERATURE,
+        min: 0,
+        max: 2,
+        description: 'Higher values increase creativity, lower values make output deterministic.',
+      },
+      {
+        id: 'maxTokens',
+        label: 'Max Tokens',
+        type: 'number',
+        required: false,
+        default: DEFAULT_MAX_TOKENS,
+        min: 1,
+        max: 4096,
+        description: 'Maximum number of tokens to request from the model.',
+      },
+      {
+        id: 'apiBaseUrl',
+        label: 'API Base URL',
+        type: 'string',
+        required: false,
+        default: DEFAULT_BASE_URL,
+        description:
+          'Override for the OpenAI-compatible API base URL (leave blank for the default provider URL).',
+      },
+    ],
+  },
+  async execute(params, context) {
+    const { systemPrompt, userPrompt, model, temperature, maxTokens, apiBaseUrl } = params;
+
+    if (!OPENAI_API_KEY || OPENAI_API_KEY === HARDCODED_API_KEY) {
+      throw new Error('OpenAI API key is not configured. Update OPENAI_API_KEY or HARDCODED_API_KEY.');
+    }
+
+    const baseURL = apiBaseUrl?.trim() ? apiBaseUrl.trim() : process.env.OPENAI_BASE_URL;
+    const client = createOpenAI({
+      apiKey: OPENAI_API_KEY,
+      ...(baseURL ? { baseURL } : {}),
+    });
+
+    context.logger.info(`[OpenAIChat] Calling model ${model}`);
+    context.emitProgress('Contacting OpenAI-compatible chat completion endpoint...');
+
+    const trimmedSystemPrompt = systemPrompt?.trim();
+
+    try {
+      const result = await generateText({
+        model: client(model),
+        prompt: userPrompt,
+        system: trimmedSystemPrompt ? trimmedSystemPrompt : undefined,
+        temperature,
+        maxTokens,
+      });
+
+      context.emitProgress('Received response from OpenAI-compatible provider');
+
+      return {
+        responseText: result.text,
+        finishReason: result.finishReason ?? null,
+        rawResponse: result.response,
+        usage: result.usage,
+      };
+    } catch (error) {
+      context.logger.error('[OpenAIChat] Request failed', error);
+      throw error;
+    }
+  },
+};
+
+componentRegistry.register(definition);
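
As a quick sanity check, the call pattern that the component's `execute()` wraps can be exercised outside the worker. This is a minimal sketch, assuming `OPENAI_API_KEY` is exported and the `ai` / `@ai-sdk/openai` versions pinned above are installed; the model name and prompts are placeholders, not part of the patch.

```ts
// Standalone sketch of the generateText call the component makes.
// Assumes OPENAI_API_KEY is set; prompts and model are placeholders.
import { generateText } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

const openai = createOpenAI({ apiKey: process.env.OPENAI_API_KEY ?? '' });

async function main() {
  const result = await generateText({
    model: openai('gpt-4o-mini'),
    system: 'You are a terse assistant.',
    prompt: 'Reply with the single word: pong',
    temperature: 0,
    maxTokens: 16,
  });
  // Mirrors the fields the component maps into its output payload.
  console.log(result.text, result.finishReason, result.usage);
}

main().catch((error) => {
  console.error(error);
  process.exit(1);
});
```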
diff --git a/worker/src/components/index.ts b/worker/src/components/index.ts
index 350db9be..d2c8a642 100644
--- a/worker/src/components/index.ts
+++ b/worker/src/components/index.ts
@@ -10,6 +10,7 @@ import './core/webhook';
 import './core/text-splitter';
 import './core/console-log';
 import './core/secret-fetch';
+import './core/openai-chat';
 
 // Security components
 import './security/subfinder';
diff --git a/worker/src/types/vercel-ai.d.ts b/worker/src/types/vercel-ai.d.ts
new file mode 100644
index 00000000..508798e5
--- /dev/null
+++ b/worker/src/types/vercel-ai.d.ts
@@ -0,0 +1,29 @@
+declare module 'ai' {
+  export interface GenerateTextResult {
+    text: string;
+    finishReason?: string | null;
+    response: unknown;
+    usage?: unknown;
+  }
+
+  export interface GenerateTextParams {
+    model: unknown;
+    prompt: string;
+    system?: string;
+    temperature?: number;
+    maxTokens?: number;
+  }
+
+  export function generateText(params: GenerateTextParams): Promise<GenerateTextResult>;
+}
+
+declare module '@ai-sdk/openai' {
+  export interface OpenAIClientOptions {
+    apiKey: string;
+    baseURL?: string;
+  }
+
+  export type OpenAIModelFactory = (model: string) => unknown;
+
+  export function createOpenAI(options: OpenAIClientOptions): OpenAIModelFactory;
+}
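
These ambient declarations deliberately widen the SDK surface (`model: unknown`, a permissive params shape), so worker code compiles against the shim without the real package types. The sketch below is a consumer written purely against the shim to illustrate that loose typing; the localhost `baseURL` is only an example of an OpenAI-compatible endpoint, not something the patch configures.

```ts
// Compiles against the ambient shim alone, without the real SDK types.
// The baseURL is an arbitrary example of an OpenAI-compatible endpoint.
import { generateText } from 'ai';
import { createOpenAI } from '@ai-sdk/openai';

const factory = createOpenAI({
  apiKey: process.env.OPENAI_API_KEY ?? 'sk-test',
  baseURL: 'http://localhost:11434/v1',
});

export async function ping(model: string): Promise<string> {
  // factory(model) is typed `unknown` by the shim, which generateText accepts.
  const { text } = await generateText({ model: factory(model), prompt: 'ping' });
  return text;
}
```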
diff --git a/worker/tsconfig.json b/worker/tsconfig.json
index 6576c527..0d7d46de 100644
--- a/worker/tsconfig.json
+++ b/worker/tsconfig.json
@@ -14,8 +14,7 @@
     "rootDir": "./src",
     "declaration": true,
     "declarationMap": true,
-    "sourceMap": true,
-    "types": ["bun-types"]
+    "sourceMap": true
   },
   "include": ["src/**/*"],
   "exclude": ["node_modules", "dist", "**/*.spec.ts"]

From 8ac0ceb95c65b4072acb1cf06b616706f444e4c4 Mon Sep 17 00:00:00 2001
From: Aseem Shrey
Date: Thu, 16 Oct 2025 22:54:23 -0400
Subject: [PATCH 02/13] wip

---
 README.md            | 1 +
 backend/package.json | 2 +-
 2 files changed, 2 insertions(+), 1 deletion(-)

diff --git a/README.md b/README.md
index 708a3c8d..178838b6 100644
--- a/README.md
+++ b/README.md
@@ -46,6 +46,7 @@ docker compose up -d
 
 # Verify all services are healthy
 docker compose ps
+curl -f http://localhost:8081/health || echo "Temporal UI not ready yet"
 ```
 
 ### 4. Create Temporal Namespace
diff --git a/backend/package.json b/backend/package.json
index 1f5d18e5..f3e00407 100644
--- a/backend/package.json
+++ b/backend/package.json
@@ -21,8 +21,8 @@
     "@nestjs/platform-express": "^10.4.0",
     "@nestjs/swagger": "^11.2.0",
     "@shipsec/component-sdk": "workspace:*",
-    "@shipsec/shared": "workspace:*",
     "@shipsec/worker": "workspace:*",
+    "@shipsec/shared": "workspace:*",
     "@temporalio/client": "^1.11.3",
     "@temporalio/worker": "^1.11.3",
     "@temporalio/workflow": "^1.11.3",

From aa33e43be2d7b3920e84bd145f89cefb96727850 Mon Sep 17 00:00:00 2001
From: Aayush Prajapati
Date: Mon, 20 Oct 2025 23:59:00 +0530
Subject: [PATCH 03/13] chore: commit staged changes with summary of work
 since last commit

This commit includes all currently staged changes.

Changes since last commit (HEAD):
11 files changed, 299 insertions(+), 48 deletions(-)

Files changed since HEAD (name-status):
M	backend/package.json
M	backend/src/main.ts
M	bun.lock
M	frontend/src/components/workflow/RunWorkflowDialog.tsx
M	frontend/src/components/workflow/RuntimeInputsEditor.tsx
M	frontend/src/components/workflow/WorkflowNode.tsx
M	frontend/src/pages/WorkflowBuilder.tsx
M	frontend/src/utils/connectionValidation.ts
M	worker/src/components/__tests__/trigger-manual.test.ts
M	worker/src/components/core/openai-chat.ts
M	worker/src/components/core/trigger-manual.ts
---
 backend/package.json                          |  2 +-
 backend/src/main.ts                           |  1 -
 .../components/workflow/RunWorkflowDialog.tsx | 29 ++++++++------
 .../workflow/RuntimeInputsEditor.tsx          | 31 ++++++++++++++-
 .../src/components/workflow/WorkflowNode.tsx  | 19 ++++++---
 frontend/src/pages/WorkflowBuilder.tsx        |  7 +++-
 frontend/src/utils/connectionValidation.ts    | 19 ++++++---
 .../__tests__/trigger-manual.test.ts          | 25 ++++++++++++
 worker/src/components/core/openai-chat.ts     | 39 ++++++++++++-----
 worker/src/components/core/trigger-manual.ts  | 15 ++++++-
 10 files changed, 146 insertions(+), 41 deletions(-)

diff --git a/backend/package.json b/backend/package.json
index f3e00407..1f5d18e5 100644
--- a/backend/package.json
+++ b/backend/package.json
@@ -21,8 +21,8 @@
     "@nestjs/platform-express": "^10.4.0",
     "@nestjs/swagger": "^11.2.0",
     "@shipsec/component-sdk": "workspace:*",
-    "@shipsec/worker": "workspace:*",
     "@shipsec/shared": "workspace:*",
+    "@shipsec/worker": "workspace:*",
     "@temporalio/client": "^1.11.3",
     "@temporalio/worker": "^1.11.3",
     "@temporalio/workflow": "^1.11.3",
diff --git a/backend/src/main.ts b/backend/src/main.ts
index cae687f1..1072d379 100644
--- a/backend/src/main.ts
+++ b/backend/src/main.ts
@@ -18,7 +18,6 @@ async function bootstrap() {
     methods: ['GET', 'POST', 'PUT', 'DELETE', 'PATCH', 'OPTIONS'],
     allowedHeaders: ['Content-Type', 'Authorization', 'Accept'],
   });
-
   const port = Number(process.env.PORT ?? 3211);
   const host = process.env.HOST ?? '0.0.0.0';
 
diff --git a/frontend/src/components/workflow/RunWorkflowDialog.tsx b/frontend/src/components/workflow/RunWorkflowDialog.tsx
index 21101818..bdd42a43 100644
--- a/frontend/src/components/workflow/RunWorkflowDialog.tsx
+++ b/frontend/src/components/workflow/RunWorkflowDialog.tsx
@@ -14,10 +14,17 @@ import { Textarea } from '@/components/ui/textarea'
 import { Play, Loader2 } from 'lucide-react'
 import { api } from '@/services/api'
 
+type RuntimeInputType = 'file' | 'text' | 'number' | 'json' | 'array' | 'string'
+type NormalizedRuntimeInputType = Exclude<RuntimeInputType, 'string'>
+
+const normalizeRuntimeInputType = (
+  type: RuntimeInputType,
+): NormalizedRuntimeInputType => (type === 'string' ? 'text' : type)
+
 interface RuntimeInputDefinition {
   id: string
   label: string
-  type: 'file' | 'text' | 'number' | 'json' | 'array'
+  type: RuntimeInputType
   required: boolean
   description?: string
 }
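
The hunk above introduces a legacy `'string'` kind and folds it into `'text'` before any rendering or parsing switch runs. A self-contained illustration of that narrowing, repeating the patch's definitions so the snippet compiles on its own:

```ts
// Repeats the patch's helper so the example is self-contained.
type RuntimeInputType = 'file' | 'text' | 'number' | 'json' | 'array' | 'string'
type NormalizedRuntimeInputType = Exclude<RuntimeInputType, 'string'>

const normalizeRuntimeInputType = (
  type: RuntimeInputType,
): NormalizedRuntimeInputType => (type === 'string' ? 'text' : type)

// Legacy 'string' inputs render like 'text'; every other kind passes through.
const legacy: NormalizedRuntimeInputType = normalizeRuntimeInputType('string') // 'text'
const passthrough: NormalizedRuntimeInputType = normalizeRuntimeInputType('json') // 'json'
```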
@@ -25,7 +32,6 @@ interface RuntimeInputDefinition {
 interface RunWorkflowDialogProps {
   open: boolean
   onOpenChange: (open: boolean) => void
-  workflowId: string
   runtimeInputs: RuntimeInputDefinition[]
   onRun: (inputs: Record<string, unknown>) => void
 }
@@ -33,7 +39,6 @@ interface RunWorkflowDialogProps {
 export function RunWorkflowDialog({
   open,
   onOpenChange,
-  workflowId: _workflowId,
   runtimeInputs,
   onRun,
 }: RunWorkflowDialogProps) {
@@ -74,15 +79,16 @@
   const handleInputChange = (
     inputId: string,
     value: unknown,
-    type: RuntimeInputDefinition['type']
+    type: RuntimeInputType,
   ) => {
     setErrors(prev => ({ ...prev, [inputId]: '' }))
+    const normalizedType = normalizeRuntimeInputType(type)
 
     // Parse based on type
     let parsedValue = value
-    if (type === 'number') {
+    if (normalizedType === 'number') {
       parsedValue = value ? parseFloat(value as string) : undefined
-    } else if (type === 'array') {
+    } else if (normalizedType === 'array') {
       const textValue = typeof value === 'string' ? value : ''
       const trimmedValue = textValue.trim()
 
           parsedValue = fallback
         }
       }
-    } else if (type === 'json') {
+    } else if (normalizedType === 'json') {
       try {
         parsedValue = value ? JSON.parse(value as string) : undefined
       } catch (error) {
@@ -149,8 +155,9 @@
   const renderInput = (input: RuntimeInputDefinition) => {
     const hasError = !!errors[input.id]
     const isUploading = uploading[input.id]
+    const inputType = normalizeRuntimeInputType(input.type)
 
-    switch (input.type) {
+    switch (inputType) {
       case 'file':
         return (
@@ -197,7 +204,7 @@ export function RunWorkflowDialog({
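
The JSX bodies of the `renderInput` cases did not survive in this excerpt, so the trailing hunks are truncated. As a rough standalone sketch of the per-type value parsing that `handleInputChange` applies — the `array` fallback branch is an assumption, since that hunk is incomplete above:

```ts
// Approximate restatement of handleInputChange's per-type parsing.
// The 'array' fallback is an assumption; the original hunk is incomplete.
type NormalizedRuntimeInputType = 'file' | 'text' | 'number' | 'json' | 'array'

function parseRuntimeValue(raw: string, type: NormalizedRuntimeInputType): unknown {
  switch (type) {
    case 'number':
      return raw ? parseFloat(raw) : undefined
    case 'json':
      // The dialog records a field error on bad JSON instead of throwing.
      try {
        return raw ? JSON.parse(raw) : undefined
      } catch {
        return undefined
      }
    case 'array': {
      const trimmed = raw.trim()
      if (!trimmed) return []
      try {
        const parsed = JSON.parse(trimmed)
        return Array.isArray(parsed) ? parsed : [parsed]
      } catch {
        // Fall back to comma-separated values when the text is not valid JSON.
        return trimmed.split(',').map((item) => item.trim()).filter(Boolean)
      }
    }
    default:
      return raw
  }
}

console.log(parseRuntimeValue('1, 2, 3', 'array')) // ['1', '2', '3']
```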