diff --git a/.dockerignore b/.dockerignore index 03802a9..e50bcb1 100644 --- a/.dockerignore +++ b/.dockerignore @@ -2,4 +2,5 @@ .github docs test -logs \ No newline at end of file +logs +.env \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 3f572e3..39c0d02 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,6 +5,8 @@ WORKDIR /app # Build arguments for optional modules ARG ENABLE_FS_MODULE=false ARG ENABLE_EVS_MODULE=false +ARG ENABLE_TDVS_MODULE=false +ARG ENABLE_TDML_MODULE=false # Copy essential files for dependency installation COPY pyproject.toml uv.lock* README.md /app/ @@ -16,8 +18,8 @@ RUN apt-get update && \ pip install uv mcpo && \ # Build uv sync command with conditional extras \ UV_EXTRAS="" && \ - if [ "$ENABLE_FS_MODULE" = "true" ]; then UV_EXTRAS="$UV_EXTRAS --extra fs"; fi && \ - if [ "$ENABLE_EVS_MODULE" = "true" ]; then UV_EXTRAS="$UV_EXTRAS --extra evs"; fi && \ + if [ "$ENABLE_FS_MODULE" = "true" ] || [ "$ENABLE_TDML_MODULE" = "true" ]; then UV_EXTRAS="$UV_EXTRAS --extra fs"; fi && \ + if [ "$ENABLE_EVS_MODULE" = "true" ] || [ "$ENABLE_TDVS_MODULE" = "true" ]; then UV_EXTRAS="$UV_EXTRAS --extra tdvs"; fi && \ uv sync $UV_EXTRAS # Copy source code before building @@ -26,8 +28,8 @@ COPY ./src /app/src # Build and install the package RUN uv build && \ pip install . && \ - if [ "$ENABLE_FS_MODULE" = "true" ]; then pip install .[fs];fi && \ - if [ "$ENABLE_EVS_MODULE" = "true" ]; then pip install .[evs];fi && \ + if [ "$ENABLE_FS_MODULE" = "true" ] || [ "$ENABLE_TDML_MODULE" = "true" ]; then pip install .[fs];fi && \ + if [ "$ENABLE_EVS_MODULE" = "true" ] || [ "$ENABLE_TDVS_MODULE" = "true" ]; then pip install .[tdvs];fi && \ apt-get purge -y build-essential gcc && \ rm -rf /var/lib/apt/lists/* @@ -35,7 +37,8 @@ RUN uv build && \ COPY . 
/app # Remove optional module directories if not enabled RUN if [ "$ENABLE_FS_MODULE" != "true" ]; then rm -rf /app/src/teradata_mcp_server/tools/fs; fi && \ - if [ "$ENABLE_EVS_MODULE" != "true" ]; then rm -rf /app/src/teradata_mcp_server/tools/evs; fi + if [ "$ENABLE_EVS_MODULE" != "true" ]; then rm -rf /app/src/teradata_mcp_server/tools/evs; fi && \ + if [ "$ENABLE_TDVS_MODULE" != "true" ]; then rm -rf /app/src/teradata_mcp_server/tools/tdvs; fi # └──────────── End build stage ────────────┘ # ┌───────────── Runtime stage ─────────────┐ diff --git a/README.md b/README.md index 45b2b9f..3ddb767 100644 --- a/README.md +++ b/README.md @@ -19,6 +19,9 @@ downloads + + docs +

@@ -29,6 +32,7 @@ ✨ Quickstart with Claude Desktop or your favorite tool in <5 minute ✨

+ ## Overview The Teradata MCP server provides sets of tools and prompts, grouped as modules for interacting with Teradata databases. Enabling AI agents and users to query, analyze, and manage their data efficiently. @@ -54,6 +58,10 @@ We are providing groupings of tools and associated helpful prompts to support al - **DBA** tools, prompts and resources to facilitate your platform administration tasks: - [DBA Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/dba/README.md) - [Security Tools](https://github.com/Teradata/teradata-mcp-server/blob/main/src/teradata_mcp_server/tools/sec/README.md) +- **Data Scientist** tools, prompts, and resources to build powerful [AI agents and workflows](./examples/app-flowise/flowise_teradata_agents/README.md) for data-driven applications. + - [Teradata Vector Store Tools](./src/teradata_mcp_server/tools/tdvs/README.md) + - [Teradataml Functions Tools](./src/teradata_mcp_server/tools/constants.py) + - [Plot Tools](./src/teradata_mcp_server/tools/plot/README.md) ## Quick start with Claude Desktop (no installation) > Prefer to use other tools? Check out our Quick Starts for [VS Code/Copilot](https://github.com/Teradata/teradata-mcp-server/blob/main/docs/server_guide/QUICK_START_VSCODE.md), [Open WebUI](https://github.com/Teradata/teradata-mcp-server/blob/main/docs/server_guide/QUICK_START_OPEN_WEBUI.md), or dive into [simple code examples](https://github.com/Teradata/teradata-mcp-server/blob/main/examples/README.md#client-applications)! 
diff --git a/docker-compose.yml b/docker-compose.yml index 55161df..4c0d2c9 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,6 +5,8 @@ services: args: - ENABLE_FS_MODULE=${ENABLE_FS_MODULE:-false} - ENABLE_EVS_MODULE=${ENABLE_EVS_MODULE:-false} + - ENABLE_TDVS_MODULE=${ENABLE_TDVS_MODULE:-false} + - ENABLE_TDML_MODULE=${ENABLE_TDML_MODULE:-false} image: teradata-mcp-server:latest environment: - DATABASE_URI=${DATABASE_URI} @@ -23,6 +25,8 @@ services: args: - ENABLE_FS_MODULE=${ENABLE_FS_MODULE:-false} - ENABLE_EVS_MODULE=${ENABLE_EVS_MODULE:-false} + - ENABLE_TDVS_MODULE=${ENABLE_TDVS_MODULE:-false} + - ENABLE_TDML_MODULE=${ENABLE_TDML_MODULE:-false} image: teradata-mcp-server:latest entrypoint: sh -c 'export MCP_TRANSPORT=streamable-http && mcpo --port 8002 --api-key "$MCPO_API_KEY" -- uv run teradata-mcp-server' environment: diff --git a/docs/client_guide/Flowise_with_teradata_mcp_Guide.md b/docs/client_guide/Flowise_with_teradata_mcp_Guide.md index fe6e6e6..e24e594 100644 --- a/docs/client_guide/Flowise_with_teradata_mcp_Guide.md +++ b/docs/client_guide/Flowise_with_teradata_mcp_Guide.md @@ -9,7 +9,8 @@ git clone https://github.com/Teradata/teradata-mcp-server.git cd teradata-mcp-server # build container from Source code docker build --build-arg ENABLE_FS_MODULE=true \ - --build-arg ENABLE_EVS_MODULE=true \ + --build-arg ENABLE_TDML_MODULE=true \ + --build-arg ENABLE_TDVS_MODULE=true \ -t teradata-mcp-server:latest . 
``` @@ -34,6 +35,8 @@ LOGMECH=TD2 #TD2 or LDAP TD_POOL_SIZE=5 TD_MAX_OVERFLOW=10 TDPOOL_TIMEOUT=30 +PROFILE=dataScientist +DATABASE_HOST=IP_OF_DB_NODE MCP_TRANSPORT=streamable-http #stdio, sse, streamable-http MCP_HOST=0.0.0.0 @@ -42,8 +45,8 @@ MCP_PATH=/mcp/ # ----- Enterprise Vector Store ---------- TD_BASE_URL=https://host/api/accounts/40c83ff23b2e #Your UES_URI, strip off the trailing /open-analytics -TD_PAT=gwxhQG2UZcDqQlp9LKWjEBfXB7 #Your PAT -TD_PEM=./demo_key.pem #Your PEM +#TD_PAT=gwxhQG2UZcDqQlp9LKWjEBfXB7 #Your PAT if you have Teradata Lake system. +TD_PEM=/root/td_ai_stack/demo_key.pem #Your PEM with full path where you kept on host VS_NAME=vs_example #Your target Vector Store Name # ------------ Flowise env varieable -------------------# @@ -73,7 +76,9 @@ services: # Default Teradata Configuration env to refer into flowise - TD_MCP_SERVER=http://teradata-mcp-server:8001/mcp ports: - - '${PORT}:${PORT}' + - "${PORT}:${PORT}" + extra_hosts: + - "dbccop1:${DATABASE_HOST}" container_name: flowise healthcheck: test: ['CMD', 'curl', '-f', 'http://localhost:${PORT}/api/v1/ping'] @@ -94,9 +99,17 @@ services: - MCP_HOST=${MCP_HOST} - MCP_PORT=${MCP_PORT} - PROFILE=${PROFILE} + - TD_BASE_URL=${TD_BASE_URL} + - TD_PAT=${TD_PAT} + - TD_PEM=${TD_PEM} + - VS_NAME=${VS_NAME} container_name: teradata-mcp-server + extra_hosts: + - "dbccop1:${DATABASE_HOST}" ports: - "${MCP_PORT}:${MCP_PORT}" + volumes: + - ${TD_PEM}:${TD_PEM} tty: true networks: default: diff --git a/docs/developer_guide/DEVELOPER_GUIDE.md b/docs/developer_guide/DEVELOPER_GUIDE.md index 1422d69..b521a98 100644 --- a/docs/developer_guide/DEVELOPER_GUIDE.md +++ b/docs/developer_guide/DEVELOPER_GUIDE.md @@ -23,7 +23,7 @@ uv sync # create venv and install project deps > Tip: add extras for full dev (feature store, EVS) if you use them: ```bash -uv sync --extra fs --extra evs +uv sync --extra fs --extra tdvs ``` ### 3) Run the server from source diff --git a/docs/server_guide/INSTALLATION.md 
b/docs/server_guide/INSTALLATION.md index 21e94c9..43c63ed 100644 --- a/docs/server_guide/INSTALLATION.md +++ b/docs/server_guide/INSTALLATION.md @@ -110,14 +110,14 @@ docker compose up export DATABASE_URI="teradata://username:password@host:1025/database" # Build with optional modules (Feature Store, Vector Store) -ENABLE_FS_MODULE=true ENABLE_EVS_MODULE=true docker compose build +ENABLE_FS_MODULE=true ENABLE_TDVS_MODULE=true docker compose build docker compose up # Run with specific profile PROFILE=dba docker compose up # Combine options -ENABLE_FS_MODULE=true PROFILE=dataScientist docker compose build +ENABLE_FS_MODULE=true ENABLE_TDVS_MODULE=true PROFILE=dataScientist docker compose build PROFILE=dataScientist docker compose up # Run in background (production) diff --git a/env b/env index 39dcc11..ec0c768 100644 --- a/env +++ b/env @@ -6,7 +6,7 @@ TD_MAX_OVERFLOW=10 TDPOOL_TIMEOUT=30 MCP_TRANSPORT=streamable-http #stdio, sse, streamable-http -MCP_HOST=127.0.0.1 +MCP_HOST=0.0.0.0 MCP_PORT=8001 MCP_PATH=/mcp/ diff --git a/examples/app-flowise/README.md b/examples/app-flowise/README.md index 9b61abc..8f0b5f6 100644 --- a/examples/app-flowise/README.md +++ b/examples/app-flowise/README.md @@ -1,5 +1,7 @@ # Flowise Example with Teradata MCP +[![Teradata Agents](https://img.shields.io/badge/Teradata--Agents-Setup-green?style=for-the-badge&logo=teradata)](./flowise_teradata_agents/README.md) + Use this example to locally test Flowise and the Teradata MCP server with recommended defaults. Refer to the [Flowise client guide](./docs/client_guide/Flowise_with_teradata_mcp_Guide.md) for walkthrough and screenshots. 
diff --git a/examples/app-flowise/docker-compose.yaml b/examples/app-flowise/docker-compose.yaml index a849c40..844cce3 100644 --- a/examples/app-flowise/docker-compose.yaml +++ b/examples/app-flowise/docker-compose.yaml @@ -1,6 +1,6 @@ services: flowise: - image: flowiseai/flowise:latest + image: flowise:latest restart: always environment: - PORT=${PORT} @@ -12,7 +12,9 @@ services: # Default Teradata Configuration env to refer into flowise - TD_MCP_SERVER=http://teradata-mcp-server:8001/mcp ports: - - '${PORT}:${PORT}' + - "${PORT}:${PORT}" + extra_hosts: + - "dbccop1:${DATABASE_HOST}" container_name: flowise healthcheck: test: ['CMD', 'curl', '-f', 'http://localhost:${PORT}/api/v1/ping'] @@ -21,14 +23,8 @@ services: retries: 5 start_period: 30s volumes: - - ${DATA_DIR:-./.flowise}:/root/.flowise + - ${DATA_DIR}/.flowise:/root/.flowise teradata-mcp-server: - build: - context: ../.. - dockerfile: Dockerfile - args: - ENABLE_FS_MODULE: "true" - ENABLE_EVS_MODULE: "true" image: teradata-mcp-server:latest restart: always environment: @@ -39,11 +35,19 @@ services: - MCP_HOST=${MCP_HOST} - MCP_PORT=${MCP_PORT} - PROFILE=${PROFILE} + - TD_BASE_URL=${TD_BASE_URL} + - TD_PAT=${TD_PAT} + - TD_PEM=${TD_PEM} + - VS_NAME=${VS_NAME} container_name: teradata-mcp-server + extra_hosts: + - "dbccop1:${DATABASE_HOST}" ports: - "${MCP_PORT}:${MCP_PORT}" + volumes: + - ${TD_PEM}:${TD_PEM} tty: true networks: default: name: td-ai-stack - external: false + external: false \ No newline at end of file diff --git a/examples/app-flowise/env b/examples/app-flowise/env index 3d3083e..6243bd1 100644 --- a/examples/app-flowise/env +++ b/examples/app-flowise/env @@ -4,6 +4,8 @@ LOGMECH=TD2 #TD2 or LDAP TD_POOL_SIZE=5 TD_MAX_OVERFLOW=10 TDPOOL_TIMEOUT=30 +PROFILE=dataScientist +DATABASE_HOST=IP_OF_DB_NODE MCP_TRANSPORT=streamable-http #stdio, sse, streamable-http MCP_HOST=0.0.0.0 @@ -14,7 +16,7 @@ MCP_PATH=/mcp/ # Inherit from current environment, or un-comment and change as needed 
#TD_VS_BASE_URL=https://host/api/accounts/40c83ff23b2e #Your UES_URI, strip off the trailing /open-analytics #TD_PAT=gwxhQG2UZcDqQlp9LKWjEBfXB7 #Your PAT token -#TD_PEM=./demo_key.pem #Your PEM file path +TD_PEM=./demo_key.pem #Your PEM with full path where you kept on host #VS_NAME=vs_example #Your target Vector Store Name # ------------ Flowise varibles -------------------# diff --git a/examples/app-flowise/flowise_teradata_agents/Customer_Lifetime_Value_V2.json b/examples/app-flowise/flowise_teradata_agents/Customer_Lifetime_Value_V2.json new file mode 100644 index 0000000..d648474 --- /dev/null +++ b/examples/app-flowise/flowise_teradata_agents/Customer_Lifetime_Value_V2.json @@ -0,0 +1,3184 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -1583.1135940554227, + "y": -227.2039996549027 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + 
"startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + 
"label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "startEphemeralMemory": "", + "startState": [ + { + "key": "main_agent_op", + "value": "None" + }, + { + "key": "main_question", + "value": "None" + }, + { + "key": "chat_history", + "value": "None" + }, + { + "key": "db_name", + "value": "FINSERV" + } + ], + "startPersistState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 104, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -1583.1135940554227, + "y": -227.2039996549027 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": -411.25224260970435, + "y": -330.00910957187307 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Teradata visualization Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + 
"name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, 
+ { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge 
base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

Always output the final answer strictly in JSON format.

Always use default database as \"{{ $flow.state.db_name }}\". Pass table_name as database.table_name.

If not sure about table/column name, call MCP tools to get table information before calling any chart related MCP tools.

Do not remove any output data. Do not add any text like \"//\" or \"many more values\".

Do not add explanations, notes, or extra text.

If using a chart, the JSON must have the structure:

{

\"type\": \"type_of_chart\",

\"title\": \"chart title\",

\"labels\": [...],

\"datasets\": [...]

}

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"base_columnDescription\",\"base_tableDDL\",\"plot_line_chart\",\"plot_pie_chart\",\"plot_polar_chart\",\"plot_radar_chart\",\"base_tablePreview\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 257, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": -411.25224260970435, + "y": -330.00910957187307 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_0", + "position": { + "x": -77.9671951206814, + "y": -328.6337066686058 + }, + "data": { + "id": "customFunctionAgentflow_0", + "label": "td_func_draw_plot", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", 
+ "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Schema as variables. Ex: Property = userid, Variable = $userid\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get custom variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_0-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "inputs_data", + "variableValue": "

{{ agentAgentflow_0 }}

" + } + ], + "customFunctionJavascriptFunction": "let chartType = \"line\";\nlet chartTitle = \"Chart\";\nlet chartData = { labels: [], datasets: [] };\nlet isValidChartData = false;\n\ntry {\n const input = JSON.parse($inputs_data);\n console.log(\"inputs data:\", $inputs_data);\n // Check if input looks like chart data (has labels and datasets arrays)\n if (\n input &&\n typeof input === \"object\" &&\n Array.isArray(input.labels) &&\n Array.isArray(input.datasets)\n ) {\n chartType = input.type || \"line\";\n chartTitle = input.title || chartType.toUpperCase() + \" Chart\";\n chartData.labels = input.labels;\n chartData.datasets = input.datasets;\n isValidChartData = true;\n }\n} catch {\n // $inputs_data not JSON, treat as plain message\n}\n\nif (!isValidChartData) {\n const message =\n typeof $inputs_data === \"string\"\n ? $inputs_data\n : \"No chart data provided.\";\n let res;\n try {\n res = JSON.parse(message);\n } catch {\n res = { message };\n }\n return res.message || message;\n}\n\n// Build QuickChart URL\nconst quickChartUrl = `https://quickchart.io/chart?c=${encodeURIComponent(\n JSON.stringify({\n type: chartType,\n data: chartData,\n options: {\n plugins: {\n legend: { position: \"top\" },\n title: { display: true, text: chartTitle },\n },\n },\n })\n)}`;\n\n// Return Markdown image (Flowise chat supports it)\nreturn `Here is your chart:\\n\\n![Chart](${quickChartUrl})`;\n", + "customFunctionUpdateState": "", + "undefined": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_0-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 191, + "height": 66, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -77.9671951206814, + "y": -328.6337066686058 + } + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": -407.8942505277363, + "y": -503.7131738818773 + }, + "data": { + "id": 
"stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "this agent will draw chart for tables data\n\nsupported chart types - line , pie, radar, polar " + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 204, + "height": 143, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -407.8942505277363, + "y": -503.7131738818773 + } + }, + { + "id": "agentAgentflow_1", + "position": { + "x": -1038.1693297983177, + "y": -360.03597062789714 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Data Exploration Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + 
"label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_1-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_1-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": 
"agentAgentflow_1-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + 
"name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

You are a data analyst assistant. Answer user questions related to database basics—tables, columns, schema, etc.—using only the default database named \"{{ $flow.state.db_name }}\" . Never ask the user which database to use; always assume and use \"{{ $flow.state.db_name }}\" . Provide clear, factual, beginner-friendly explanations focused on schemas, tables, columns, their relationships, and data basics. Always pass table_name as database.table_name.

Steps

Output Format

Examples

Example 1
User input: \"What tables are available in the database?\"
Response:
First, consider the default database, which is named \"{{ $flow.state.db_name }}.\" To answer the user's question, list all the tables that exist within this database.
The tables available in the \"{{ $flow.state.db_name }}\" database are: [table1], [table2], [table3].

Example 2
User input: \"What columns does the users table have?\"
Response:
To answer this, focus on the \"users\" table within the default \"{{ $flow.state.db_name }}\" database. Identify all columns (fields) present in this table.
The \"users\" table in the \"{{ $flow.state.db_name }}\" database contains the following columns: [column1], [column2], [column3], etc.

Notes

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"base_columnDescription\",\"base_readQuery\",\"base_tableList\",\"dba_tableSqlList\",\"base_tablePreview\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": [ + { + "key": "main_agent_op", + "value": "

{{ output }}

" + }, + { + "key": "main_question", + "value": "

{{ question }}

" + }, + { + "key": "chat_history", + "value": "

{{ chat_history }}

" + } + ], + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 222, + "height": 101, + "positionAbsolute": { + "x": -1038.1693297983177, + "y": -360.03597062789714 + }, + "selected": false, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_1", + "position": { + "x": -1040.0632008582488, + "y": -530.7834528333287 + }, + "data": { + "id": "stickyNoteAgentflow_1", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_1-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "This agent will call TD MCP Server and fetch relevent data by leveraging the MCP tools." 
+ }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_1-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 204, + "height": 143, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -1040.0632008582488, + "y": -530.7834528333287 + } + }, + { + "id": "conditionAgentAgentflow_0", + "position": { + "x": -1394.1791076070886, + "y": -248.3761208034682 + }, + "data": { + "id": "conditionAgentAgentflow_0", + "label": "Check question intent", + "version": 1.1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": [ + "ConditionAgent" + ], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_0-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + "acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_0-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "User is asking for basic details about tables, databases, columns, etc" + }, + { + "scenario": "User is asking about banking churn, customer lifetime value, CLV, correlation, charts like pie chart, historgram etc" + } + ], + "id": "conditionAgentAgentflow_0-input-conditionAgentScenarios-array", + "display": true + }, + { + "label": "Override System Prompt", + "name": "conditionAgentOverrideSystemPrompt", + "type": "boolean", + "description": "Override initial system prompt for Condition Agent", + "optional": true, + "id": "conditionAgentAgentflow_0-input-conditionAgentOverrideSystemPrompt-boolean", + "display": true + }, + { + "label": "Node System Prompt", + "name": "conditionAgentSystemPrompt", + "type": "string", + "rows": 4, + "optional": true, + "acceptVariable": true, + "default": "

You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios.

\n \n

Steps

\n
    \n
  1. Read the input string and the list of scenarios.
  2. \n
  3. Analyze the content of the input to identify its main topic or intention.
  4. \n
  5. Compare the input with each scenario: Evaluate how well the input's topic or intention aligns with each of the provided scenarios and select the one that is the best fit.
  6. \n
  7. Output the result: Return the selected scenario in the specified JSON format.
  8. \n
\n

Output Format

\n

Output should be a JSON object that names the selected scenario, like this: {\"output\": \"\"}. No explanation is needed.

\n

Examples

\n
    \n
  1. \n

    Input: {\"input\": \"Hello\", \"scenarios\": [\"user is asking about AI\", \"user is not asking about AI\"], \"instruction\": \"Your task is to check if the user is asking about AI.\"}

    \n

    Output: {\"output\": \"user is not asking about AI\"}

    \n
  2. \n
  3. \n

    Input: {\"input\": \"What is AIGC?\", \"scenarios\": [\"user is asking about AI\", \"user is asking about the weather\"], \"instruction\": \"Your task is to check and see if the user is asking a topic about AI.\"}

    \n

    Output: {\"output\": \"user is asking about AI\"}

    \n
  4. \n
  5. \n

    Input: {\"input\": \"Can you explain deep learning?\", \"scenarios\": [\"user is interested in AI topics\", \"user wants to order food\"], \"instruction\": \"Determine if the user is interested in learning about AI.\"}

    \n

    Output: {\"output\": \"user is interested in AI topics\"}

    \n
  6. \n
\n

Note

\n ", + "description": "Expert use only. Modifying this can significantly alter agent behavior. Leave default if unsure", + "show": { + "conditionAgentOverrideSystemPrompt": true + }, + "id": "conditionAgentAgentflow_0-input-conditionAgentSystemPrompt-string", + "display": false + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "azureChatOpenAI", + "conditionAgentInstructions": "

You are a classification system that analyzes user questions about customer churn and categorizes them into one of three specific scenarios: data exploration, data insights, or recommendation for churn reduction.

First, carefully analyze the user's question to understand what they are asking for. Consider the intent, specific words used, and the type of response they seem to expect. Then classify the question into exactly one of the three scenarios.

# Steps

1. Analyze the Question: Read the user's question carefully and identify key phrases, intent, and what type of information or action they are seeking.

2. Consider Each Scenario:

- Data Exploration: Questions asking to examine, view, display, or browse through data without specific analytical goals

- Data Insights: Questions seeking analytical findings, patterns, trends, correlations, or explanations about churn behavior

- Recommendation for Churn Reduction: Questions asking for actionable strategies, interventions, or suggestions to prevent or reduce customer churn

3. Make Classification: Based on your analysis, select the single most appropriate scenario that matches the user's intent.

# Output Format

Provide your response as a JSON object with two fields:

# Examples

Input: \"Can you show me the customer data for the last quarter?\"

Output:

{\n  \"output\": \"User is asking for basic details about tables, databases, columns, etc\",\n  \"isFulfilled\": true\n}

# Notes

- Focus only on the primary intent of the question - some questions may have elements of multiple scenarios, but classify based on the main ask

- Questions about viewing, displaying, or examining data fall under data exploration

- Questions seeking understanding, patterns, or analytical findings fall under data insights

- Questions asking for strategies, actions, or ways to improve fall under recommendations

- If a question combines multiple intents, prioritize based on what actionable response the user seems to want most

", + "conditionAgentInput": "

{{ question }}

", + "conditionAgentScenarios": [ + { + "scenario": "User is asking for basic details about tables, databases, columns, etc" + }, + { + "scenario": "User is asking about banking churn, customer lifetime value, CLV, correlation, charts like pie chart, historgram etc" + }, + { + "scenario": "User is asking about recommendation to reduce the churn, What are the main factors leading to a Churn or low CLV, Recommend me method to reduce Churn, Which are the important factor for Churn" + } + ], + "conditionAgentOverrideSystemPrompt": "", + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "conditionAgentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Condition 2" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 215, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": -1394.1791076070886, + "y": -248.3761208034682 + }, + "dragging": false + }, + { + "id": "agentAgentflow_2", + "position": { + "x": -1055.9219414367558, + "y": -233.82889446496387 + }, + "data": { + "id": "agentAgentflow_2", + "label": "Insights Generator Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + 
"inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_2-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_2-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_2-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": 
"agentAgentflow_2-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_2-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_2-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_2-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_2-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_2-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_2-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_2-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

As a data analyst, analyze complex questions to derive insights from the \"{{ $flow.state.db_name }}\" database, using only its tables. Do not request the database name from the user; always use \"{{ $flow.state.db_name }}\" as the default. Always pass table_name as database.table_name.

Approach each analytic task by:

Steps

  1. Interpret the user's analytical question and identify relevant tables and fields within the \"{{ $flow.state.db_name }}\" database.

  2. Outline the reasoning process, including:

    • Data exploration steps (e.g., which columns are relevant, data type considerations, handling of missing data).

    • Choice of methods/metrics (e.g., correlation calculation, aggregation, statistical testing).

    • Rationale for analytic choices (e.g., why a certain measure is suitable).

  3. Apply the analysis, describe the process and results, and then clearly state the final conclusions or insights.

Output Format

Respond in markdown with clearly labeled sections:

Examples

Example Input:
What is the correlation between tenure and churn?

Example Output:

Reasoning:

Conclusion:
There is a [placeholder:strength, e.g., \"moderate negative\"] correlation between tenure and churn, indicating that longer tenure is associated with a lower likelihood of churn.

(Real examples for more complex questions should provide detailed reasoning with multiple steps, references to specific columns, statistical tests used, and a nuanced conclusion.)

Notes

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"base_columnDescription\",\"base_readQuery\",\"base_tableList\",\"base_tablePreview\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": [ + { + "key": "main_agent_op", + "value": "

{{ output }} 

" + }, + { + "key": "main_question", + "value": "

{{ question }}

" + }, + { + "key": "chat_history", + "value": "

{{ chat_history }}

" + } + ], + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_2-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 235, + "height": 101, + "positionAbsolute": { + "x": -1055.9219414367558, + "y": -233.82889446496387 + }, + "selected": false, + "dragging": false + }, + { + "id": "agentAgentflow_3", + "position": { + "x": -1058.0666519777938, + "y": -86.74587721316122 + }, + "data": { + "id": "agentAgentflow_3", + "label": "Strategic Reasoning Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_3-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_3-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_3-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_3-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_3-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_3-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_3-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_3-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_3-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_3-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_3-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

Provide detailed expert advice to help reduce customer churn in banking, utilizing your business experience and knowledge of churn modeling.

Access and analyze data only from the default database {{ $flow.state.db_name }}. Do not request or use any other database name. For churn analysis and feature importance, use only the \"ChurnFeatureImportance\" table within this database, which contains information about key factors contributing to churn.

Always pass table_name as database.table_name.

Steps

  1. Review the \"ChurnFeatureImportance\" table in the \"{{ $flow.state.db_name }}\" database to identify the primary factors linked to banking churn.

  2. Explain how these factors contribute to customer churn, including any relevant trends or insights.

  3. Based on your business expertise and data analysis, outline actionable, evidence-based methods and strategies to reduce banking churn.

  4. Present clear reasoning for each proposed method, referencing the importance of features as identified in the data.

  5. Conclude with a concise summary of the most effective, prioritized actions.

Output Format

Respond with a structured markdown document, containing:

Examples

Example Input:
How can my bank reduce customer churn using data?

Example Output:

As a banking expert, I have analyzed your data from the \"ChurnFeatureImportance\" table in the \"{{ $flow.state.db_name }}\" database to identify main drivers of customer churn and recommend effective mitigation strategies.

Key Feature Analysis and Reasoning:

Recommended Methods to Reduce Banking Churn:

  1. Launch targeted retention programs for new customers (linked to Account Tenure).

  2. Cross-sell additional banking products relevant to customer needs (linked to Number of Products Held).

  3. Enhance complaint resolution processes for speedy and customer-focused solutions (linked to Customer Complaints).

  4. Use predictive models to proactively identify high-risk customers and offer personalized incentives.

Summary:
Prioritize targeted retention initiatives, improvement of cross-selling strategies, and swift complaint resolution—these actions are closely tied to the key factors driving churn in your data and will most effectively reduce customer attrition.

Notes

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"base_readQuery\",\"base_tableList\",\"base_tablePreview\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": [ + { + "key": "main_question", + "value": "

{{ question }}

" + }, + { + "key": "main_agent_op", + "value": "

{{ output }}

" + }, + { + "key": "chat_history", + "value": "

{{ chat_history }}

" + } + ], + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_3-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 245, + "height": 101, + "positionAbsolute": { + "x": -1058.0666519777938, + "y": -86.74587721316122 + }, + "selected": false, + "dragging": false + }, + { + "id": "conditionAgentAgentflow_1", + "position": { + "x": -707.9954069299963, + "y": -241.87491411384235 + }, + "data": { + "id": "conditionAgentAgentflow_1", + "label": "Check if Chart required", + "version": 1.1, + "name": "conditionAgentAgentflow", + "type": "ConditionAgent", + "color": "#ff8fab", + "baseClasses": [ + "ConditionAgent" + ], + "category": "Agent Flows", + "description": "Utilize an agent to split flows based on dynamic conditions", + "inputParams": [ + { + "label": "Model", + "name": "conditionAgentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "conditionAgentAgentflow_1-input-conditionAgentModel-asyncOptions", + "display": true + }, + { + "label": "Instructions", + "name": "conditionAgentInstructions", + "type": "string", + "description": "A general instructions of what the condition agent should do", + "rows": 4, + "acceptVariable": true, + "placeholder": "Determine if the user is interested in learning about AI", + "id": "conditionAgentAgentflow_1-input-conditionAgentInstructions-string", + "display": true + }, + { + "label": "Input", + "name": "conditionAgentInput", + "type": "string", + "description": "Input to be used for the condition agent", + "rows": 4, + 
"acceptVariable": true, + "default": "

{{ question }}

", + "id": "conditionAgentAgentflow_1-input-conditionAgentInput-string", + "display": true + }, + { + "label": "Scenarios", + "name": "conditionAgentScenarios", + "description": "Define the scenarios that will be used as the conditions to split the flow", + "type": "array", + "array": [ + { + "label": "Scenario", + "name": "scenario", + "type": "string", + "placeholder": "User is asking for a pizza" + } + ], + "default": [ + { + "scenario": "Chart related" + }, + { + "scenario": "General" + } + ], + "id": "conditionAgentAgentflow_1-input-conditionAgentScenarios-array", + "display": true + }, + { + "label": "Override System Prompt", + "name": "conditionAgentOverrideSystemPrompt", + "type": "boolean", + "description": "Override initial system prompt for Condition Agent", + "optional": true, + "id": "conditionAgentAgentflow_1-input-conditionAgentOverrideSystemPrompt-boolean", + "display": true + }, + { + "label": "Node System Prompt", + "name": "conditionAgentSystemPrompt", + "type": "string", + "rows": 4, + "optional": true, + "acceptVariable": true, + "default": "

You are part of a multi-agent system designed to make agent coordination and execution easy. Your task is to analyze the given input and select one matching scenario from a provided set of scenarios.

\n \n

Steps

\n
    \n
  1. Read the input string and the list of scenarios.
  2. \n
  3. Analyze the content of the input to identify its main topic or intention.
  4. \n
  5. Compare the input with each scenario: Evaluate how well the input's topic or intention aligns with each of the provided scenarios and select the one that is the best fit.
  6. \n
  7. Output the result: Return the selected scenario in the specified JSON format.
  8. \n
\n

Output Format

\n

Output should be a JSON object that names the selected scenario, like this: {\"output\": \"\"}. No explanation is needed.

\n

Examples

\n
    \n
  1. \n

    Input: {\"input\": \"Hello\", \"scenarios\": [\"user is asking about AI\", \"user is not asking about AI\"], \"instruction\": \"Your task is to check if the user is asking about AI.\"}

    \n

    Output: {\"output\": \"user is not asking about AI\"}

    \n
  2. \n
  3. \n

    Input: {\"input\": \"What is AIGC?\", \"scenarios\": [\"user is asking about AI\", \"user is asking about the weather\"], \"instruction\": \"Your task is to check and see if the user is asking a topic about AI.\"}

    \n

    Output: {\"output\": \"user is asking about AI\"}

    \n
  4. \n
  5. \n

    Input: {\"input\": \"Can you explain deep learning?\", \"scenarios\": [\"user is interested in AI topics\", \"user wants to order food\"], \"instruction\": \"Determine if the user is interested in learning about AI.\"}

    \n

    Output: {\"output\": \"user is interested in AI topics\"}

    \n
  6. \n
\n

Note

\n ", + "description": "Expert use only. Modifying this can significantly alter agent behavior. Leave default if unsure", + "show": { + "conditionAgentOverrideSystemPrompt": true + }, + "id": "conditionAgentAgentflow_1-input-conditionAgentSystemPrompt-string", + "display": false + } + ], + "inputAnchors": [], + "inputs": { + "conditionAgentModel": "azureChatOpenAI", + "conditionAgentInstructions": "

Check if the user is asking to create a chart/visualization.

Do not return any code for the chart. The chart should be picked from one of these options: 1. Line, 2. Polar, 3. Pie, 4. Radar. If no relevant option is found, just show the data in table format.

Only call the chart branch if the user is asking to plot a chart; otherwise go to generate response.

If the output is a single value, like corr: 0.89, do not use a chart for such questions.

", + "conditionAgentInput": "

{{ question }}

", + "conditionAgentScenarios": [ + { + "scenario": "Chart related" + }, + { + "scenario": "General" + } + ], + "conditionAgentOverrideSystemPrompt": "", + "conditionAgentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "conditionAgentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "conditionAgentAgentflow_1-output-0", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + }, + { + "id": "conditionAgentAgentflow_1-output-1", + "label": "Condition Agent", + "name": "conditionAgentAgentflow" + } + ], + "outputs": { + "conditionAgentAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 224, + "height": 80, + "positionAbsolute": { + "x": -707.9954069299963, + "y": -241.87491411384235 + }, + "selected": false, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_2", + "position": { + "x": -1396.6154241898391, + "y": -371.2776219063079 + }, + "data": { + "id": "stickyNoteAgentflow_2", + "label": "Sticky Note (2)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_2-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "This agent will itentify the intent of user query" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_2-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + 
"type": "stickyNote", + "width": 204, + "height": 103, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -1396.6154241898391, + "y": -371.2776219063079 + } + }, + { + "id": "stickyNoteAgentflow_3", + "position": { + "x": -419.654914353561, + "y": -39.041356192488635 + }, + "data": { + "id": "stickyNoteAgentflow_3", + "label": "Sticky Note (3)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_3-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "This agent will generate the final response." + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_3-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 204, + "height": 103, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -419.654914353561, + "y": -39.041356192488635 + } + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -411.72039726675524, + "y": -150.66612705299124 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Generate the Response", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + 
"optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String 
Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "hideCodeExecute": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + 
"display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "azureChatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are an expert assistant specializing in data analysis and business intelligence who answers questions about chart generation, tables, churn reduction recommendations, and database-related topics based on the provided context from {{$flow.state.main_agent_op}}.

Your role is to provide comprehensive, actionable insights and recommendations without generating any code. Focus on explaining concepts, suggesting approaches, and providing strategic guidance.

If the question is simple, like a scalar value, do not return too many details; keep the answer concise and clear.

# Steps

1. Analyze the Context: Review the information provided in {{$flow.state.main_agent_op}} to understand the specific question or scenario

2. Identify the Domain: Determine whether the question relates to:

- Chart generation and visualization strategies

- Table structure and data organization

- Churn reduction recommendations

- Database design or querying approaches

- Related data analysis topics

3. Reasoning Process: Work through the problem systematically:

- Assess the current situation or requirements

- Consider relevant best practices and methodologies

- Evaluate potential solutions or approaches

- Account for business context and constraints

4. Formulate Response: Provide clear, actionable guidance with specific recommendations

# Output Format

Provide your response as a well-structured explanation in paragraph form. Include:

- Clear reasoning for your recommendations

- Specific, actionable steps or strategies

- Business context and rationale

- Any important considerations or caveats

Length should be 2-4 paragraphs, comprehensive enough to be actionable but concise enough to be easily digestible.

# Examples

Input Context: \"Customer churn rate has increased 15% over the last quarter, particularly among customers who have been with us 6-12 months. What visualization approach would best show this trend and what recommendations do you have?\"

Response:

To effectively visualize this churn trend, I recommend using a combination of a time-series line chart showing churn rates over the past year with customer tenure segments as different colored lines, paired with a cohort analysis heatmap that displays retention rates by month of acquisition. This dual approach will clearly highlight both the recent increase and the specific vulnerability window at 6-12 months.

For churn reduction, focus on implementing a proactive engagement program targeting customers at the 5-month mark. This should include personalized check-ins, usage optimization consultations, and early renewal incentives. Additionally, analyze the customer journey during months 6-12 to identify specific friction points—common issues include feature adoption challenges, unclear value realization, or inadequate onboarding follow-up.

Consider segmenting your at-risk customers by usage patterns, support ticket history, and engagement metrics to create targeted retention campaigns. The visualization should also incorporate leading indicators like login frequency, feature usage depth, and support interactions to enable predictive intervention rather than reactive responses.

# Notes

- Never provide code snippets, programming syntax, or technical implementation details

- Focus on strategic and analytical guidance rather than technical execution

- When discussing charts, describe the type, structure, and business value rather than how to create them

- For database questions, emphasize design principles, query strategies, and data organization concepts

- Always tie recommendations back to business outcomes and measurable impact

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "allMessages", + "llmUserMessage": "

", + "llmReturnResponseAs": "assistantMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "llmModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 224, + "height": 73, + "selected": false, + "positionAbsolute": { + "x": -411.72039726675524, + "y": -150.66612705299124 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_4", + "position": { + "x": -1631.5837441898389, + "y": -94.45202190630799 + }, + "data": { + "id": "stickyNoteAgentflow_4", + "label": "Sticky Note (2) (4)", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_4-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "set here db_name value \ninto Start instead of None" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_4-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 204, + "height": 123, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -1631.5837441898389, + "y": -94.45202190630799 + } + } + ], + "edges": [ + { + "source": "startAgentflow_0", + 
"sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "conditionAgentAgentflow_0", + "targetHandle": "conditionAgentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-conditionAgentAgentflow_0-conditionAgentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-0", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-0-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-1", + "target": "agentAgentflow_2", + "targetHandle": "agentAgentflow_2", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-1-agentAgentflow_2-agentAgentflow_2" + }, + { + "source": "conditionAgentAgentflow_0", + "sourceHandle": "conditionAgentAgentflow_0-output-2", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_0-conditionAgentAgentflow_0-output-2-agentAgentflow_3-agentAgentflow_3" + }, + { + "source": "agentAgentflow_1", + "sourceHandle": "agentAgentflow_1-output-agentAgentflow", + "target": "conditionAgentAgentflow_1", + "targetHandle": "conditionAgentAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": 
"agentAgentflow_1-agentAgentflow_1-output-agentAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1" + }, + { + "source": "agentAgentflow_2", + "sourceHandle": "agentAgentflow_2-output-agentAgentflow", + "target": "conditionAgentAgentflow_1", + "targetHandle": "conditionAgentAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_2-agentAgentflow_2-output-agentAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1" + }, + { + "source": "agentAgentflow_3", + "sourceHandle": "agentAgentflow_3-output-agentAgentflow", + "target": "conditionAgentAgentflow_1", + "targetHandle": "conditionAgentAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#ff8fab", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_3-agentAgentflow_3-output-agentAgentflow-conditionAgentAgentflow_1-conditionAgentAgentflow_1" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-0", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-0-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "conditionAgentAgentflow_1", + "sourceHandle": "conditionAgentAgentflow_1-output-1", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#ff8fab", + "targetColor": "#64B5F6", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentAgentflow_1-conditionAgentAgentflow_1-output-1-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "agentAgentflow_0", + "sourceHandle": "agentAgentflow_0-output-agentAgentflow", + "target": "customFunctionAgentflow_0", + "targetHandle": "customFunctionAgentflow_0", + "data": { 
+ "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_0-agentAgentflow_0-output-agentAgentflow-customFunctionAgentflow_0-customFunctionAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/examples/app-flowise/flowise_teradata_agents/README.md b/examples/app-flowise/flowise_teradata_agents/README.md new file mode 100644 index 0000000..c1b9cc7 --- /dev/null +++ b/examples/app-flowise/flowise_teradata_agents/README.md @@ -0,0 +1,90 @@ +# Teradata Agents with Flowise AgentFlow and Teradata MCP Server + +[![Teradata Data Science Agent](https://img.shields.io/badge/Teradata--Data--Science--Agent-Setup%20Video-green?style=for-the-badge&logo=teradata)](https://youtu.be/xkmslhg_ulU) +[![Teradata Vector Store RAG Agent](https://img.shields.io/badge/Teradata--Vector--Store--Agent-Setup%20Video-green?style=for-the-badge&logo=teradata)](https://youtu.be/aM01xOndsvk) +[![Teradata Visualization Agent](https://img.shields.io/badge/Teradata--Visualization--Agent-Template-green?style=for-the-badge&logo=teradata)](./Teradata_visualized_Agents_V2.json) +[![Teradata Customer Lifetime Value (CLV) Demo Agent](https://img.shields.io/badge/Teradata--Customer--Lifetime--Value--Agent--Demo-Setup%20Video-green?style=for-the-badge&logo=teradata)](https://youtu.be/pYx0dn65Z2s) + + + +This repository provides a set of **Teradata Agents** designed to integrate seamlessly with **Flowise AgentFlow** using the **Teradata MCP Server.** +These agents enable intelligent workflows that combine **Teradata’s data and vector capabilities** with **LLM-powered analytics** — helping you build scalable, AI-driven data applications. + +Before getting started, make sure both **Teradata MCP Server and Flowise** containers are running as described in the setup guide below. 
+ +### 📘 Setup Guide: + +Refer to [Flowise_with_Teradata_MCP](../../../docs/client_guide/Flowise_with_teradata_mcp_Guide.md) + for detailed installation and configuration steps. + +--- + +## 🚀 Available Teradata Agents for Flowise + +### 🧠 Teradata Data Science Agent + +This agent template provides a complete **Flowise workflow** to interact with **Teradata** for data science–related use cases such as querying data, running analytics, and generating insights using LLMs. + +#### Template: +[Teradata_Data_Science_Workflow_Agents_V2.json](./Teradata_Data_Science_Workflow_Agents_V2.json) + +#### Configuration Steps: + +1. Import the JSON template into Flowise. +2. Configure your preferred LLM model and provide its credentials. +3. Save and deploy the workflow. + +**🎥 How-To Video**: + +Watch this step-by-step video tutorial — [Teradata Data Science Agent Setup](https://youtu.be/xkmslhg_ulU) + +--- +### 🧩 Teradata Vector Store RAG Agent + +This agent template provides a complete **Flowise workflow** to interact with the **Teradata Vector Store**. It supports **similarity search** and **retrieval-augmented generation (RAG) on vectorized data that already resides in Teradata**, enabling context-aware question-answering and semantic insights. + +#### Template: +[Teradata_VectorStore_RAG_Agent_V2.json](./Teradata_VectorStore_RAG_Agent_V2.json) + +#### Configuration Steps: + +1. Import the JSON template into Flowise. +2. Configure your preferred LLM model and provide its credentials. +3. Save and deploy the workflow. + +**🎥 How-To Video**: + +Watch this step-by-step video tutorial — [Teradata VectorStore RAG Agent Setup](https://youtu.be/aM01xOndsvk) + +--- +### 💼 Customer Lifetime Value (CLV) Demo Agent + +This demo agent showcases how **Flowise** and **Teradata MCP Server** can work together to calculate and visualize **Customer Lifetime Value (CLV)** using Teradata data. 
+It demonstrates practical use of LLMs for analytics, insights generation, and storytelling on customer data. + +#### Template: +[Teradata_Customer_Lifetime_Value_V2](./Customer_Lifetime_Value_V2.json) + +#### Configuration Steps: + +1. Import the JSON template into Flowise. +2. Configure your preferred LLM model and provide its credentials. +3. Save and deploy the workflow. + +**🎥 How-To Video**: + +Watch this step-by-step video tutorial — [Customer Lifetime Value (CLV) Demo Agent](https://youtu.be/pYx0dn65Z2s) + +--- +### 📊 Teradata Visualization Agent +This agent template demonstrates how to **visualize Teradata data** within a **Flowise workflow**. +It enables users to generate various types of **plots and charts** (e.g., line, pie, polar, radar) directly from Teradata query results — turning data into interactive visual insights. + +#### Template: +[Teradata_visualized_Agents_V2.json](./Teradata_visualized_Agents_V2.json) + +#### Configuration Steps: + +1. Import the JSON template into Flowise. +2. Configure your preferred LLM model and provide its credentials. +3. Save and deploy the workflow. 
diff --git a/examples/app-flowise/flowise_teradata_agents/Teradata_Data_Science_Workflow_Agents_V2.json b/examples/app-flowise/flowise_teradata_agents/Teradata_Data_Science_Workflow_Agents_V2.json new file mode 100644 index 0000000..235d946 --- /dev/null +++ b/examples/app-flowise/flowise_teradata_agents/Teradata_Data_Science_Workflow_Agents_V2.json @@ -0,0 +1,7126 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -381.31634434950655, + "y": 415.1034273614637 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + 
"type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." + }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar" + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startState": [ + { + "key": "next", + "value": "None" + }, + { + "key": 
"instruction", + "value": "None" + }, + { + "key": "answers", + "value": " " + } + ] + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 103, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -381.31634434950655, + "y": 415.1034273614637 + }, + "dragging": false + }, + { + "id": "conditionAgentflow_0", + "position": { + "x": -13.482253879966805, + "y": 313.4517182061321 + }, + "data": { + "id": "conditionAgentflow_0", + "label": "Check next worker", + "version": 1, + "name": "conditionAgentflow", + "type": "Condition", + "color": "#FFB938", + "baseClasses": [ + "Condition" + ], + "category": "Agent Flows", + "description": "Split flows based on If Else conditions", + "inputParams": [ + { + "label": "Conditions", + "name": "conditions", + "type": "array", + "description": "Values to compare", + "acceptVariable": true, + "default": [ + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

SOFTWARE

" + } + ], + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + } + ], + "default": "string" + }, + { + "label": "Value 1", + "name": "value1", + "type": "string", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Contains", + "name": "contains" + }, + { + "label": "Ends With", + "name": "endsWith" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Contains", + "name": "notContains" + }, + { + "label": "Not Equal", + "name": "notEqual" + }, + { + "label": "Regex", + "name": "regex" + }, + { + "label": "Starts With", + "name": "startsWith" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "string" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "string", + "default": "", + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "string" + }, + "hide": { + "conditions[$index].operation": [ + "isEmpty", + "notEmpty" + ] + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "number", + "default": "", + "description": "First value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Smaller", + "name": "smaller" + }, + { + "label": "Smaller Equal", + "name": "smallerEqual" + }, + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": 
"notEqual" + }, + { + "label": "Larger", + "name": "larger" + }, + { + "label": "Larger Equal", + "name": "largerEqual" + }, + { + "label": "Is Empty", + "name": "isEmpty" + }, + { + "label": "Not Empty", + "name": "notEmpty" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "number", + "default": 0, + "description": "Second value to be compared with", + "acceptVariable": true, + "show": { + "conditions[$index].type": "number" + } + }, + { + "label": "Value 1", + "name": "value1", + "type": "boolean", + "default": false, + "description": "First value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Operation", + "name": "operation", + "type": "options", + "options": [ + { + "label": "Equal", + "name": "equal" + }, + { + "label": "Not Equal", + "name": "notEqual" + } + ], + "default": "equal", + "description": "Type of operation", + "show": { + "conditions[$index].type": "boolean" + } + }, + { + "label": "Value 2", + "name": "value2", + "type": "boolean", + "default": false, + "description": "Second value to be compared with", + "show": { + "conditions[$index].type": "boolean" + } + } + ], + "id": "conditionAgentflow_0-input-conditions-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "conditions": [ + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

MetaData_Extractor

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Data_Cleaning

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Data_Exploration

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Data_Preparation

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Model_Training

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Model_Inference

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Model_Evaluation

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Text_Analytics

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

Association_Analysis

" + }, + { + "type": "string", + "value1": "

{{ $flow.state.next }}

", + "operation": "equal", + "value2": "

General

" + } + ] + }, + "outputAnchors": [ + { + "id": "conditionAgentflow_0-output-0", + "label": 0, + "name": 0, + "description": "Condition 0" + }, + { + "id": "conditionAgentflow_0-output-1", + "label": 1, + "name": 1, + "description": "Condition 1" + }, + { + "id": "conditionAgentflow_0-output-2", + "label": 2, + "name": 2, + "description": "Condition 2" + }, + { + "id": "conditionAgentflow_0-output-3", + "label": 3, + "name": 3, + "description": "Condition 3" + }, + { + "id": "conditionAgentflow_0-output-4", + "label": 4, + "name": 4, + "description": "Condition 4" + }, + { + "id": "conditionAgentflow_0-output-5", + "label": 5, + "name": 5, + "description": "Condition 5" + }, + { + "id": "conditionAgentflow_0-output-6", + "label": 6, + "name": 6, + "description": "Condition 6" + }, + { + "id": "conditionAgentflow_0-output-7", + "label": 7, + "name": 7, + "description": "Condition 7" + }, + { + "id": "conditionAgentflow_0-output-8", + "label": 8, + "name": 8, + "description": "Condition 8" + }, + { + "id": "conditionAgentflow_0-output-9", + "label": 9, + "name": 9, + "description": "Condition 9" + }, + { + "id": "conditionAgentflow_0-output-10", + "label": 10, + "name": 10, + "description": "Else" + } + ], + "outputs": { + "conditionAgentflow": "" + }, + "selected": false + }, + "type": "agentFlow", + "width": 194, + "height": 260, + "selected": false, + "positionAbsolute": { + "x": -13.482253879966805, + "y": 313.4517182061321 + }, + "dragging": false + }, + { + "id": "agentAgentflow_1", + "position": { + "x": 628.5977440272089, + "y": -183.86413884340638 + }, + "data": { + "id": "agentAgentflow_1", + "label": "Data Cleaning Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": 
"asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_1-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_1-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_1-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_1-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + 
"label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_1-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_1-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_1-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_1-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_1-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_1-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_1-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_1-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_1-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Data Cleaning Worker Agent responsible for preparing datasets for analysis by identifying and correcting data quality issues. Your tasks include handling outliers, managing missing values, and preprocessing columns using the provided MCP functions. Use supervisor instruction {{ $flow.state.instruction }} to perform task.

Your responsibilities include:

🟠 Handling Outliers

🔵 Handling Missing Values

🟢 Manipulating / Preprocessing Data

You should:

Be precise, efficient, and consistent in your cleaning operations to ensure high-quality data for modeling and analysis.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"tdml_ConvertTo\",\"tdml_GetFutileColumns\",\"tdml_GetRowsWithMissingValues\",\"tdml_GetRowsWithoutMissingValues\",\"tdml_OutlierFilterFit\",\"tdml_OutlierFilterTransform\",\"tdml_Pack\",\"tdml_SimpleImputeFit\",\"tdml_SimpleImputeTransform\",\"tdml_StringSimilarity\",\"tdml_Unpack\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "2000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_1-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 628.5977440272089, + "y": -183.86413884340638 + }, + "dragging": false + }, + { + "id": "agentAgentflow_2", + "position": { + "x": 581.3832108246423, + "y": -68.7606667093819 + }, + "data": { + "id": "agentAgentflow_2", + "label": "Data Exploration Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_2-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_2-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_2-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_2-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_2-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_2-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_2-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_2-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_2-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_2-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_2-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_2-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_2-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Data Exploration Worker Agent tasked with exploration of datasets using the provided MCP tools.

You have access to the following functions:

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"tdml_CategoricalSummary\",\"tdml_ColumnSummary\",\"tdml_Histogram\",\"tdml_MovingAverage\",\"tdml_WhichMax\",\"tdml_WhichMin\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "4000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_2-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 581.3832108246423, + "y": -68.7606667093819 + }, + "dragging": false + }, + { + "id": "agentAgentflow_3", + "position": { + "x": 515.2354654611594, + "y": 896.0368661165589 + }, + "data": { + "id": "agentAgentflow_3", + "label": "Generate Final Answer", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_3-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_3-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_3-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_3-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_3-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_3-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_3-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_3-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_3-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_3-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_3-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_3-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_3-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "5", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

generate the output for the user question: {{ question }} using the answers: {{ $flow.state.answers }}

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_3-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 219, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 515.2354654611594, + "y": 896.0368661165589 + }, + "dragging": false + }, + { + "id": "llmAgentflow_0", + "position": { + "x": -211.54573437337677, + "y": 414.78479771131543 + }, + "data": { + "id": "llmAgentflow_0", + "label": "Supervisor", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_0-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"llmAgentflow_0-input-llmMessages-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_0-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_0-input-llmMemoryWindowSize-number", + "display": true + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_0-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_0-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_0-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_0-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_0-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "azureChatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are a Data Science Supervisor Agent responsible for coordinating and overseeing a team of specialized agents in a data science pipeline. Your team includes agents for Data Cleaning, Data Exploration, Data Preparation, Model Training, Model Inference, Model Evaluation, Text Analytics, Path Analysis, and Association Analysis.

🛠️ Available Worker Agents:

  1. MetaData_Extractor – Extracts relevant tables and columns. Use first if metadata is missing.

  2. Data_Cleaning – Handles outliers, missing values, and preprocessing (e.g., outlier detection).

  3. Data_Exploration – Provides insights into dataset columns.

  4. Data_Preparation – Transforms and prepares data for modeling or analysis (e.g., target encoding fit and transform).

  5. Model_Training – Fits machine learning models, such as k-means and XGBoost.

  6. Model_Inference – Makes predictions using trained models.

  7. Model_Evaluation – Assesses model performance.

  8. Text_Analytics – Extracts insights from textual data.

  9. Path_Analysis – Performs path analysis on datasets.

  10. Association_Analysis – Conducts association analysis (e.g., Apriori, CFilter) for transactional insights.

🧠 Planning Steps for Task Execution:

  1. Understand the User Query

    • Determine if the query is generic (e.g., greetings, general questions) → respond with \"General\".

    • Otherwise, proceed to check Metadata Availability.

  2. Check for Metadata Availability

    • If column information is not known, invoke MetaData_Extractor first.

    • Prompt the user for the database name if not provided.

  3. Define the Workflow Path

    • Based on the query, identify which of the following task categories apply:

      • Data Cleaning

      • Data Exploration

      • Data Preparation

      • Model Training

      • Model Inference

      • Model Evaluation

      • Text Analytics

      • Path Analysis

      • Association Analysis

  4. Select the Appropriate Worker

    • Choose the most relevant worker to begin the task.

    • Ensure the selection minimizes the number of steps required to complete the workflow.

  5. Monitor Worker Responses

    • Analyze the result of each worker.

    • If a worker encounters an error or lacks required input, handle it gracefully, prompt the user for the missing information, and return \"FINISH\".

  6. Iterate and Delegate

    • Based on the output of each worker, determine the next logical step.

    • Continue delegating tasks until the workflow is complete.

  7. Complete the Workflow

    • Once all necessary tasks are completed, respond with \"FINISH\" and send the complete answer for the user query: {{ question }}.

    NOTE: Restrict yourself to the user question. Do not perform any additional steps.

" + } + ], + "llmEnableMemory": true, + "llmMemoryType": "windowSize", + "llmUserMessage": "

Given the conversation above, who should act next? Or should we FINISH? Select one of: Data_Cleaning , Data_Exploration , MetaData_Extractor, Data_Preparation, Model_Training, Model_Inference, Model_Evaluation, Text_Analytics, Path_Analysis, Association_Analysis, General, FINISH

", + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": [ + { + "key": "next", + "type": "enum", + "enumValues": "FINISH, Data_Cleaning , Data_Exploration , MetaData_Extractor, Data_Preparation, Model_Training, Model_Inference, Model_Evaluation, Text_Analytics, Path_Analysis, Association_Analysis, General", + "jsonSchema": "", + "description": "next worker to act" + }, + { + "key": "instructions", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The specific instructions of the sub-task the next worker should accomplish." + }, + { + "key": "reasoning", + "type": "string", + "enumValues": "", + "jsonSchema": "", + "description": "The reason why next worker is tasked to do the job" + } + ], + "llmUpdateState": [ + { + "key": "next", + "value": "

{{ output.next }}

" + }, + { + "key": "instruction", + "value": "

{{ output.instructions }}

" + } + ], + "llmModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": "0.7", + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "reasoning": "", + "llmModel": "azureChatOpenAI" + }, + "llmMemoryWindowSize": "50" + }, + "outputAnchors": [ + { + "id": "llmAgentflow_0-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 175, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": -211.54573437337677, + "y": 414.78479771131543 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": 659.3207729606347, + "y": -296.11076864656417 + }, + "data": { + "id": "agentAgentflow_0", + "label": "MetaData_Extractor", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Metadata Extractor Worker Agent responsible for retrieving column-level metadata from specified databases and tables using MCP tools.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"base_tableDDL\",\"base_tableList\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "2000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 659.3207729606347, + "y": -296.11076864656417 + }, + "dragging": false + }, + { + "id": "llmAgentflow_1", + "position": { + "x": 527.2688287061134, + "y": 795.073648768414 + }, + "data": { + "id": "llmAgentflow_1", + "label": "General Queries", + "version": 1, + "name": "llmAgentflow", + "type": "LLM", + "color": "#64B5F6", + "baseClasses": [ + "LLM" + ], + "category": "Agent Flows", + "description": "Large language models to analyze user-provided inputs and generate responses", + "inputParams": [ + { + "label": "Model", + "name": "llmModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "llmAgentflow_1-input-llmModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "llmMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "llmAgentflow_1-input-llmMessages-array", + 
"display": true + }, + { + "label": "Enable Memory", + "name": "llmEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "llmAgentflow_1-input-llmEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "llmMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "llmMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "llmMemoryType": "windowSize" + }, + "id": "llmAgentflow_1-input-llmMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "llmMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "llmMemoryType": "conversationSummaryBuffer" + }, + "id": "llmAgentflow_1-input-llmMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "llmUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "llmEnableMemory": true + }, + "id": "llmAgentflow_1-input-llmUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "llmReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "llmAgentflow_1-input-llmReturnResponseAs-options", + "display": true + }, + { + "label": "JSON Structured Output", + "name": "llmStructuredOutput", + "description": "Instruct the LLM to give output in a JSON structured schema", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string" + }, + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "String Array", + "name": "stringArray" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Enum", + "name": "enum" + }, + { + "label": "JSON Array", + "name": "jsonArray" + } + ] + }, + { + "label": "Enum Values", + "name": "enumValues", + "type": "string", + "placeholder": "value1, value2, value3", + "description": "Enum values. 
Separated by comma", + "optional": true, + "show": { + "llmStructuredOutput[$index].type": "enum" + } + }, + { + "label": "JSON Schema", + "name": "jsonSchema", + "type": "code", + "placeholder": "{\n \"answer\": {\n \"type\": \"string\",\n \"description\": \"Value of the answer\"\n },\n \"reason\": {\n \"type\": \"string\",\n \"description\": \"Reason for the answer\"\n },\n \"optional\": {\n \"type\": \"boolean\"\n },\n \"count\": {\n \"type\": \"number\"\n },\n \"children\": {\n \"type\": \"array\",\n \"items\": {\n \"type\": \"object\",\n \"properties\": {\n \"value\": {\n \"type\": \"string\",\n \"description\": \"Value of the children's answer\"\n }\n }\n }\n }\n}", + "description": "JSON schema for the structured output", + "optional": true, + "hideCodeExecute": true, + "show": { + "llmStructuredOutput[$index].type": "jsonArray" + } + }, + { + "label": "Description", + "name": "description", + "type": "string", + "placeholder": "Description of the key" + } + ], + "id": "llmAgentflow_1-input-llmStructuredOutput-array", + "display": true + }, + { + "label": "Update Flow State", + "name": "llmUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "llmAgentflow_1-input-llmUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "llmModel": "azureChatOpenAI", + "llmMessages": [ + { + "role": "system", + "content": "

You are a chatbot. Respond to user query: {{ question }}

use output of supervisor for framing your answer {{ llmAgentflow_0 }}

Note: You must always ask for the database name at the beginning of the conversation and whenever required.

" + } + ], + "llmEnableMemory": true, + "llmReturnResponseAs": "userMessage", + "llmStructuredOutput": "", + "llmUpdateState": "", + "llmModelConfig": { + "cache": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "llmModel": "azureChatOpenAI" + }, + "undefined": "" + }, + "outputAnchors": [ + { + "id": "llmAgentflow_1-output-llmAgentflow", + "label": "LLM", + "name": "llmAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 177, + "height": 72, + "selected": false, + "positionAbsolute": { + "x": 527.2688287061134, + "y": 795.073648768414 + }, + "dragging": false + }, + { + "id": "agentAgentflow_4", + "position": { + "x": 523.269991756813, + "y": 45.162725168826455 + }, + "data": { + "id": "agentAgentflow_4", + "label": "Data Preparation Agent ", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_4-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + 
"acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_4-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_4-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_4-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_4-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + 
"array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_4-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_4-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + 
}, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_4-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_4-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_4-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_4-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_4-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_4-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_4-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_4-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Data Preparation Worker Agent responsible for transforming and preparing datasets using the available MCP tools. Your goal is to interpret user queries and apply the appropriate data transformation techniques to make the data suitable for analysis or modeling. Use the supervisor instruction {{ $flow.state.instruction }} to perform the task.

You have access to the following MCP functions:

🔄 Column Selection & Transformation

📊 Encoding & Binning

🧬 Feature Engineering

📉 Dimensionality Reduction & Normalization

🔁 Reshaping Data

Your responsibilities include:

Be precise, efficient, and adaptable in your approach to data preparation, ensuring the dataset is optimized for its intended use.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"tdml_Antiselect\",\"tdml_BincodeFit\",\"tdml_BincodeTransform\",\"tdml_FillRowId\",\"tdml_NonLinearCombineFit\",\"tdml_NonLinearCombineTransform\",\"tdml_NumApply\",\"tdml_OneHotEncodingFit\",\"tdml_OneHotEncodingTransform\",\"tdml_OrdinalEncodingFit\",\"tdml_OrdinalEncodingTransform\",\"tdml_Pivoting\",\"tdml_PolynomialFeaturesFit\",\"tdml_PolynomialFeaturesTransform\",\"tdml_RandomProjectionFit\",\"tdml_RandomProjectionMinComponents\",\"tdml_RandomProjectionTransform\",\"tdml_RoundColumns\",\"tdml_RowNormalizeFit\",\"tdml_RowNormalizeTransform\",\"tdml_ScaleFit\",\"tdml_ScaleTransform\",\"tdml_StrApply\",\"tdml_TargetEncodingFit\",\"tdml_TargetEncodingTransform\",\"tdml_Unpivoting\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "2000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_4-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "selected": false, + "positionAbsolute": { + "x": 523.269991756813, + "y": 45.162725168826455 + }, + "dragging": false + }, + { + "id": "agentAgentflow_5", + "position": { + "x": 468.60858763619217, + "y": 161.3459187533254 + }, + "data": { + "id": "agentAgentflow_5", + "label": "Model Training Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_5-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_5-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_5-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_5-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_5-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_5-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_5-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_5-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_5-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_5-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_5-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_5-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_5-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_5-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_5-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Model Training Worker Agent responsible for building machine learning models using the available MCP tools. Your task is to interpret user queries and apply the most appropriate algorithm to train a model on the provided dataset. Use supervisor instruction {{ $flow.state.instruction }} to perform task.

You have access to the following model training functions:

Note : Always save output model in an output table.

Note : Do not modify the output of tool. Just directly return tool output.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"DecisionForest\",\"tdml_GLM\",\"tdml_KMeans\",\"tdml_KNN\",\"tdml_NaiveBayes\",\"tdml_OneClassSVM\",\"tdml_SVM\",\"tdml_XGBoost\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": "0.4", + "max_tokens_to_sample": "2000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_5-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "positionAbsolute": { + "x": 468.60858763619217, + "y": 161.3459187533254 + }, + "selected": false, + "dragging": false + }, + { + "id": "agentAgentflow_6", + "position": { + "x": 411.9811010655284, + "y": 288.94774970472616 + }, + "data": { + "id": "agentAgentflow_6", + "label": "Model Inference Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_6-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_6-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_6-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_6-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_6-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_6-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_6-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_6-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_6-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_6-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_6-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_6-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_6-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_6-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_6-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Model Inference/Prediction Worker Agent responsible for generating predictions using trained machine learning models via MCP tools. Your task is to interpret user queries and apply the appropriate prediction function based on the model type and dataset provided. Use supervisor instruction {{ $flow.state.instruction }} to perform task.

You have access to the following MCP prediction functions:

Note : Always save tool output in an output table.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"tdml_DecisionForestPredict\",\"tdml_GLMPredict\",\"tdml_KMeansPredict\",\"tdml_OneClassSVMPredict\",\"tdml_SVMPredict\",\"tdml_TDNaiveBayesPredict\",\"tdml_XGBoostPredict\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "2000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_6-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "positionAbsolute": { + "x": 411.9811010655284, + "y": 288.94774970472616 + }, + "selected": false, + "dragging": false + }, + { + "id": "agentAgentflow_7", + "position": { + "x": 409.9811010655284, + "y": 411.0097938417615 + }, + "data": { + "id": "agentAgentflow_7", + "label": "Model Evaluation Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_7-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_7-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_7-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_7-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_7-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_7-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_7-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_7-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_7-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_7-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_7-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_7-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_7-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_7-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_7-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Model Evaluation/Explainability Worker Agent responsible for evaluating machine learning models and explaining their behavior using the available MCP tools. Your task is to interpret user queries and apply the appropriate evaluation or explainability techniques to assess model performance and interpret predictions. Use supervisor instruction {{ $flow.state.instruction }} to perform task.

You have access to the following MCP functions:

✅ Model Evaluation

🔍 Model Explainability

Your responsibilities include:

Be thorough, insightful, and user-focused in your evaluation process, ensuring that model performance and behavior are well-understood and actionable.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"tdml_ClassificationEvaluator\",\"tdml_RegressionEvaluator\",\"tdml_ROC\",\"tdml_Shap\",\"tdml_Silhouette\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "2000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_7-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "positionAbsolute": { + "x": 409.9811010655284, + "y": 411.0097938417615 + }, + "selected": false, + "dragging": false + }, + { + "id": "agentAgentflow_8", + "position": { + "x": 440.90939838370366, + "y": 527.2356684315139 + }, + "data": { + "id": "agentAgentflow_8", + "label": "Text Analytics Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_8-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_8-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_8-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_8-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_8-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_8-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_8-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_8-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_8-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_8-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_8-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_8-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_8-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_8-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_8-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are a Text Analytics Worker Agent responsible for analyzing and interpreting text data using the available MCP tools. Your task is to understand user queries and apply the appropriate text analytics functions to extract insights, classify content, and explain linguistic patterns. Use supervisor instruction {{ $flow.state.instruction }} to perform task.

You have access to the following MCP tools:

🔍 Text Parsing & Tokenization

📊 Text Representation & Similarity

🧠 Text Classification & Sentiment

🧩 Text Structure & Patterns

Your responsibilities include:

Be analytical, precise, and context-aware in your approach to text analytics, ensuring that your outputs are insightful and actionable.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"tdml_NaiveBayesTextClassifierPredict\",\"tdml_NaiveBayesTextClassifierTrainer\",\"tdml_NERExtractor\",\"tdml_NGramSplitter\",\"tdml_SentimentExtractor\",\"tdml_TextMorph\",\"tdml_TextParser\",\"tdml_TFIDF\",\"tdml_WordEmbeddings\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "4000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_8-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "positionAbsolute": { + "x": 440.90939838370366, + "y": 527.2356684315139 + }, + "selected": false, + "dragging": false + }, + { + "id": "agentAgentflow_9", + "position": { + "x": 485.2263687619494, + "y": 651.6709552297655 + }, + "data": { + "id": "agentAgentflow_9", + "label": "Association Analysis Agent", + "version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_9-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": 
"agentAgentflow_9-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_9-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_9-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_9-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", 
+ "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_9-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_9-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": 
"string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_9-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": "agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_9-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. 
Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_9-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_9-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_9-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_9-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_9-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": 
"string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_9-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "awsChatBedrock", + "agentMessages": [ + { + "role": "system", + "content": "

You are an Association Analysis Worker Agent responsible for discovering item relationships and co-occurrence patterns in sales transaction data using the available MCP tools. Your task is to interpret user queries and apply the appropriate association analysis techniques to generate actionable insights. Use supervisor instruction {{ $flow.state.instruction }} to perform task.

You have access to the following MCP tools:

🔗 Association Rule Mining

🧮 Pairwise Item Analysis

Your responsibilities include:

Be analytical, precise, and business-aware in your approach to association analysis, ensuring that your insights support strategic decisions like product bundling, recommendation systems, or inventory planning.

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": false, + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"tdml_Apriori\",\"tdml_CFilter\",\"base_tableDDL\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "windowSize", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

{{ $flow.state.instruction }}

", + "agentReturnResponseAs": "assistantMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "region": "us-east-1", + "model": "anthropic.claude-3-haiku-20240307-v1:0", + "customModel": "", + "streaming": true, + "temperature": 0.7, + "max_tokens_to_sample": "4000", + "allowImageUploads": "", + "latencyOptimized": "", + "agentModel": "awsChatBedrock" + }, + "undefined": "" + }, + "outputAnchors": [ + { + "id": "agentAgentflow_9-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 332, + "height": 100, + "positionAbsolute": { + "x": 485.2263687619494, + "y": 651.6709552297655 + }, + "selected": false, + "dragging": false + }, + { + "id": "loopAgentflow_8", + "position": { + "x": 1652.1864675516174, + "y": 290.69580668429035 + }, + "data": { + "id": "loopAgentflow_8", + "label": "Loop to Supervisor", + "version": 1.1, + "name": "loopAgentflow", + "type": "Loop", + "color": "#FFA07A", + "hideOutput": true, + "baseClasses": [ + "Loop" + ], + "category": "Agent Flows", + "description": "Loop back to a previous node", + "inputParams": [ + { + "label": "Loop Back To", + "name": "loopBackToNode", + "type": "asyncOptions", + "loadMethod": "listPreviousNodes", + "freeSolo": true, + "id": "loopAgentflow_8-input-loopBackToNode-asyncOptions", + "display": true + }, + { + "label": "Max Loop Count", + "name": "maxLoopCount", + "type": "number", + "default": 5, + "id": "loopAgentflow_8-input-maxLoopCount-number", + "display": true + }, + { + "label": "Fallback Message", + "name": "fallbackMessage", + "type": "string", + "description": "Message to display if the loop count is exceeded", + "placeholder": "Enter your fallback message here", + "rows": 4, + "acceptVariable": true, + "optional": true, + "id": "loopAgentflow_8-input-fallbackMessage-string", + "display": true + }, + { + "label": "Update Flow State", + "name": "loopUpdateState", + 
"description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "loopAgentflow_8-input-loopUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "loopBackToNode": "llmAgentflow_0-Supervisor", + "maxLoopCount": "5", + "fallbackMessage": "", + "loopUpdateState": "", + "undefined": "" + }, + "outputAnchors": [], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 195, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1652.1864675516174, + "y": 290.69580668429035 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_0", + "position": { + "x": 1175, + "y": -275 + }, + "data": { + "id": "customFunctionAgentflow_0", + "label": "f1", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&current_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_0-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_0 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n return $var1;\n} catch (error) {\n console.error(error);\n return 'Inferencing Failed due to unknown exception';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_0-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 83, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1175, + "y": -275 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_1", + "position": { + "x": 1169.1045749621364, + "y": -164.29471342848467 + }, + "data": { + "id": "customFunctionAgentflow_1", + "label": "f2", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_1-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&current_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_1-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_1-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_1 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n \n \n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_1-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 86, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1169.1045749621364, + "y": -164.29471342848467 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_2", + "position": { + "x": 1168.662907206442, + "y": -51.686263858425704 + }, + "data": { + "id": "customFunctionAgentflow_2", + "label": "f3", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_2-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&current_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_2-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_2-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_2 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n \n \n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_2-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 86, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1168.662907206442, + "y": -51.686263858425704 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_3", + "position": { + "x": 1174.0290731289342, + "y": 59.53925697357232 + }, + "data": { + "id": "customFunctionAgentflow_3", + "label": "f4", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_3-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&current_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_3-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_3-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_4 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n \n \n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_3-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 85, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1174.0290731289342, + "y": 59.53925697357232 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_4", + "position": { + "x": 1176.204833144868, + "y": 175.43899879084063 + }, + "data": { + "id": "customFunctionAgentflow_4", + "label": "f5", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_4-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41&current_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_4-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_4-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_5 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n \n \n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_4-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 85, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1176.204833144868, + "y": 175.43899879084063 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_5", + "position": { + "x": 1171.426409963465, + "y": 307.6178570593204 + }, + "data": { + "id": "customFunctionAgentflow_5", + "label": "f6", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_5-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_5-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_5-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_6 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\ntry {\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_5-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 86, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1171.426409963465, + "y": 307.6178570593204 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_6", + "position": { + "x": 1175.0905427754058, + "y": 424.92570682056066 + }, + "data": { + "id": "customFunctionAgentflow_6", + "label": "f7", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_6-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_6-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_6-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_7 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n \n \n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_6-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 85, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1175.0905427754058, + "y": 424.92570682056066 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_7", + "position": { + "x": 1175.4181579896494, + "y": 543.070235375213 + }, + "data": { + "id": "customFunctionAgentflow_7", + "label": "f8", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_7-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_7-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_7-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_8 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n \n \n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_7-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 86, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1175.4181579896494, + "y": 543.070235375213 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_9", + "position": { + "x": 1176.2364682800883, + "y": 667.0335327583529 + }, + "data": { + "id": "customFunctionAgentflow_9", + "label": "f9", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", + "description": "Input variables can be used in the function with prefix $. 
For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_9-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_9-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_9-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "var1", + "variableValue": "

{{ agentAgentflow_9 }}

" + } + ], + "customFunctionJavascriptFunction": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Variables with the prefix $. For example: $foo\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get global variables: $vars.\n* Must return a string value at the end of function\n*/\n\n\ntry {\n\n $flow.state.answers = \"\\n\" + $flow.state.answers + \"\\n\" + $var1 ;\n \n \n return $var1;\n} catch (error) {\n console.error(error);\n return '';\n}", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_9-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 86, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": 1176.2364682800883, + "y": 667.0335327583529 + }, + "dragging": false + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "llmAgentflow_0", + "targetHandle": "llmAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#64B5F6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-llmAgentflow_0-llmAgentflow_0" + }, + { + "source": "llmAgentflow_0", + "sourceHandle": "llmAgentflow_0-output-llmAgentflow", + "target": "conditionAgentflow_0", + "targetHandle": "conditionAgentflow_0", + "data": { + "sourceColor": "#64B5F6", + "targetColor": "#FFB938", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "llmAgentflow_0-llmAgentflow_0-output-llmAgentflow-conditionAgentflow_0-conditionAgentflow_0" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-7", + "target": "agentAgentflow_8", + "targetHandle": "agentAgentflow_8", + "data": { + "sourceColor": "#FFB938", + 
"targetColor": "#4DD0E1", + "edgeLabel": "7", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-7-agentAgentflow_8-agentAgentflow_8" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-9", + "target": "agentAgentflow_9", + "targetHandle": "agentAgentflow_9", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "9", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-9-agentAgentflow_9-agentAgentflow_9" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-0", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "0", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-0-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-1", + "target": "agentAgentflow_1", + "targetHandle": "agentAgentflow_1", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-1-agentAgentflow_1-agentAgentflow_1" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-2", + "target": "agentAgentflow_2", + "targetHandle": "agentAgentflow_2", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "2", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-2-agentAgentflow_2-agentAgentflow_2" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-3", + "target": "agentAgentflow_4", + "targetHandle": "agentAgentflow_4", + "data": { + "sourceColor": 
"#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "3", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-3-agentAgentflow_4-agentAgentflow_4", + "selected": false + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-4", + "target": "agentAgentflow_5", + "targetHandle": "agentAgentflow_5", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "4", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-4-agentAgentflow_5-agentAgentflow_5" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-5", + "target": "agentAgentflow_6", + "targetHandle": "agentAgentflow_6", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "5", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-5-agentAgentflow_6-agentAgentflow_6" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-6", + "target": "agentAgentflow_7", + "targetHandle": "agentAgentflow_7", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "6", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-6-agentAgentflow_7-agentAgentflow_7" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-10", + "target": "llmAgentflow_1", + "targetHandle": "llmAgentflow_1", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#64B5F6", + "edgeLabel": "10", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-10-llmAgentflow_1-llmAgentflow_1" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-11", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + 
"data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "11", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-11-agentAgentflow_3-agentAgentflow_3" + }, + { + "source": "agentAgentflow_0", + "sourceHandle": "agentAgentflow_0-output-agentAgentflow", + "target": "customFunctionAgentflow_0", + "targetHandle": "customFunctionAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_0-agentAgentflow_0-output-agentAgentflow-customFunctionAgentflow_0-customFunctionAgentflow_0" + }, + { + "source": "customFunctionAgentflow_0", + "sourceHandle": "customFunctionAgentflow_0-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_0-customFunctionAgentflow_0-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "agentAgentflow_1", + "sourceHandle": "agentAgentflow_1-output-agentAgentflow", + "target": "customFunctionAgentflow_1", + "targetHandle": "customFunctionAgentflow_1", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_1-agentAgentflow_1-output-agentAgentflow-customFunctionAgentflow_1-customFunctionAgentflow_1" + }, + { + "source": "agentAgentflow_2", + "sourceHandle": "agentAgentflow_2-output-agentAgentflow", + "target": "customFunctionAgentflow_2", + "targetHandle": "customFunctionAgentflow_2", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_2-agentAgentflow_2-output-agentAgentflow-customFunctionAgentflow_2-customFunctionAgentflow_2" + }, + { + "source": "agentAgentflow_4", + 
"sourceHandle": "agentAgentflow_4-output-agentAgentflow", + "target": "customFunctionAgentflow_3", + "targetHandle": "customFunctionAgentflow_3", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_4-agentAgentflow_4-output-agentAgentflow-customFunctionAgentflow_3-customFunctionAgentflow_3" + }, + { + "source": "agentAgentflow_5", + "sourceHandle": "agentAgentflow_5-output-agentAgentflow", + "target": "customFunctionAgentflow_4", + "targetHandle": "customFunctionAgentflow_4", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_5-agentAgentflow_5-output-agentAgentflow-customFunctionAgentflow_4-customFunctionAgentflow_4" + }, + { + "source": "agentAgentflow_6", + "sourceHandle": "agentAgentflow_6-output-agentAgentflow", + "target": "customFunctionAgentflow_5", + "targetHandle": "customFunctionAgentflow_5", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_6-agentAgentflow_6-output-agentAgentflow-customFunctionAgentflow_5-customFunctionAgentflow_5" + }, + { + "source": "agentAgentflow_7", + "sourceHandle": "agentAgentflow_7-output-agentAgentflow", + "target": "customFunctionAgentflow_6", + "targetHandle": "customFunctionAgentflow_6", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_7-agentAgentflow_7-output-agentAgentflow-customFunctionAgentflow_6-customFunctionAgentflow_6" + }, + { + "source": "agentAgentflow_8", + "sourceHandle": "agentAgentflow_8-output-agentAgentflow", + "target": "customFunctionAgentflow_7", + "targetHandle": "customFunctionAgentflow_7", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": 
"agentAgentflow_8-agentAgentflow_8-output-agentAgentflow-customFunctionAgentflow_7-customFunctionAgentflow_7" + }, + { + "source": "agentAgentflow_9", + "sourceHandle": "agentAgentflow_9-output-agentAgentflow", + "target": "customFunctionAgentflow_9", + "targetHandle": "customFunctionAgentflow_9", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_9-agentAgentflow_9-output-agentAgentflow-customFunctionAgentflow_9-customFunctionAgentflow_9" + }, + { + "source": "customFunctionAgentflow_3", + "sourceHandle": "customFunctionAgentflow_3-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_3-customFunctionAgentflow_3-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "customFunctionAgentflow_4", + "sourceHandle": "customFunctionAgentflow_4-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_4-customFunctionAgentflow_4-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "customFunctionAgentflow_5", + "sourceHandle": "customFunctionAgentflow_5-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_5-customFunctionAgentflow_5-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "customFunctionAgentflow_6", + "sourceHandle": "customFunctionAgentflow_6-output-customFunctionAgentflow", + "target": 
"loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_6-customFunctionAgentflow_6-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "customFunctionAgentflow_7", + "sourceHandle": "customFunctionAgentflow_7-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_7-customFunctionAgentflow_7-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "customFunctionAgentflow_9", + "sourceHandle": "customFunctionAgentflow_9-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_9-customFunctionAgentflow_9-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8", + "selected": false + }, + { + "source": "customFunctionAgentflow_2", + "sourceHandle": "customFunctionAgentflow_2-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "customFunctionAgentflow_2-customFunctionAgentflow_2-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "customFunctionAgentflow_1", + "sourceHandle": "customFunctionAgentflow_1-output-customFunctionAgentflow", + "target": "loopAgentflow_8", + "targetHandle": "loopAgentflow_8", + "data": { + "sourceColor": "#E4B7FF", + "targetColor": "#FFA07A", + "isHumanInput": false + }, + "type": "agentFlow", + "id": 
"customFunctionAgentflow_1-customFunctionAgentflow_1-output-customFunctionAgentflow-loopAgentflow_8-loopAgentflow_8" + }, + { + "source": "conditionAgentflow_0", + "sourceHandle": "conditionAgentflow_0-output-10", + "target": "agentAgentflow_3", + "targetHandle": "agentAgentflow_3", + "data": { + "sourceColor": "#FFB938", + "targetColor": "#4DD0E1", + "edgeLabel": "10", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "conditionAgentflow_0-conditionAgentflow_0-output-10-agentAgentflow_3-agentAgentflow_3" + } + ] +} \ No newline at end of file diff --git a/examples/app-flowise/flowise_teradata_agents/Teradata_VectorStore_RAG_Agent_V2.json b/examples/app-flowise/flowise_teradata_agents/Teradata_VectorStore_RAG_Agent_V2.json new file mode 100644 index 0000000..adbb5a3 --- /dev/null +++ b/examples/app-flowise/flowise_teradata_agents/Teradata_VectorStore_RAG_Agent_V2.json @@ -0,0 +1,708 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -978.9779419438005, + "y": -323.75643697211603 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + "default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": 
"startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "startEphemeralMemory": false, + "startState": [], + "startPersistState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 104, + "height": 66, + "positionAbsolute": { + "x": -978.9779419438005, + "y": -323.75643697211603 + }, + "selected": false, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": -817.1987825144278, + "y": -429.58453567229395 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Teradata Enterprise Vector Store RAG", + "version": 2.2, + "name": 
"agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + "description": "Extract content 
from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "
\nYou are an **Assistant Using Teradata Vector Store ** — an expert system designed to help users query enterprise-grade vector stores.\n\n\nYou will guide the user through **three structured phases** to identify the correct vector store, formulate the appropriate question, and return an insightful response.\n\n## Your role will work through the phases\n   Perform the phases in order, and do not skip any phase.\n\n## Start With Greeting - \"Hello! and run Phase 0 and show list of vector store\" \n\n## Phase 0: List Vector store for user\n    - list vector using tool - teradata_vectorstore_ask\n    - show vector list to user where user have only access\n   \n    - Format your output as: `Phase 0 - Available Vector store list with access`\n\n    ```markdown\n    ***Phase 0 - Vector store list *** \n    ***VS List:*** `Vector store list `\n\n\n## Phase 1: Get Question Context\n    - If the user provides a question or context, use it directly.\n    - If the user does not provide a question, ask them to clarify what they want to know.\n    - If the user provides a question, confirm it with them before proceeding.\n    - Format your output as: `Phase 1 - Question Context`\n\n    ```markdown\n    ***Phase 1 - Question Context*** \n    ***Question:*** `question_text`\n\n\n## Phase 2: Query the Vector Store\n\n    - Use the teradata_vectorstore_ask tool and use argument in below format  \n    - Prompt   Do not assume information. 
Only provide information that is present in the data.\n               Format results in table format with :\n          \n      \n      \"args\": {\n            \"vector_store_name\": {{$flow.state.vector_store_name}},\n            \"ask\": {\n                \"question\": \"asked_question_string\",\n                \"prompt\": \"above_prompt\"\n            }\n        }\n    \n    - Provide the selected vector store name and the user's question/prompt.\n    - Return the vector store’s answer in a clear, readable format.\n    - If needed, summarize the answer or format it as a table.\n\n    ```markdown\n\n    ***Phase 2 - Vector Store Response***\n\n    ***Vector Store Name:*** `vs_name`\n\n    ***Question:*** `question_text`\n\n    ***Prompt:*** `prompt_text`\n\n    ***Response:*** `response_text`\n\n## Communication guidelines:\n        - Indicate the current phase clearly.\n        - Summarize outcomes before proceeding to the next phase.\n        - Keep explanations concise, professional, and easy to follow.\n        - Always return results in Markdown format with proper headings and formatting.\n\n\"\"\"
" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": true, + "agentSelectedToolConfig": { + "mcpServerConfig": "{ \n\"url\": \"http://teradata-mcp-server:8001/mcp/\"\n}", + "mcpActions": "[\"tdvs_ask\",\"tdvs_list\",\"tdvs_similarity_search\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "

", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": [], + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1-mini", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 316, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": -817.1987825144278, + "y": -429.58453567229395 + }, + "dragging": false + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": -1211.0883622291142, + "y": -422.92048632220497 + }, + "data": { + "id": "stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "This AgentWorkflow - 1. list down vector store list where you have access\n\n2. 
allow to choose vector store name to ask your question on selected vector store name" + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 204, + "height": 243, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -1211.0883622291142, + "y": -422.92048632220497 + } + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-agentAgentflow_0-agentAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/examples/app-flowise/flowise_teradata_agents/Teradata_visualized_Agents_V2.json b/examples/app-flowise/flowise_teradata_agents/Teradata_visualized_Agents_V2.json new file mode 100644 index 0000000..0aa5d9e --- /dev/null +++ b/examples/app-flowise/flowise_teradata_agents/Teradata_visualized_Agents_V2.json @@ -0,0 +1,833 @@ +{ + "nodes": [ + { + "id": "startAgentflow_0", + "type": "agentFlow", + "position": { + "x": -763, + "y": -259 + }, + "data": { + "id": "startAgentflow_0", + "label": "Start", + "version": 1.1, + "name": "startAgentflow", + "type": "Start", + "color": "#7EE787", + "hideInput": true, + "baseClasses": [ + "Start" + ], + "category": "Agent Flows", + "description": "Starting point of the agentflow", + "inputParams": [ + { + "label": "Input Type", + "name": "startInputType", + "type": "options", + "options": [ + { + "label": "Chat Input", + "name": "chatInput", + "description": "Start the conversation with chat input" + }, + { + "label": "Form Input", + "name": "formInput", + "description": "Start the workflow with form inputs" + } + ], + 
"default": "chatInput", + "id": "startAgentflow_0-input-startInputType-options", + "display": true + }, + { + "label": "Form Title", + "name": "formTitle", + "type": "string", + "placeholder": "Please Fill Out The Form", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formTitle-string", + "display": false + }, + { + "label": "Form Description", + "name": "formDescription", + "type": "string", + "placeholder": "Complete all fields below to continue", + "show": { + "startInputType": "formInput" + }, + "id": "startAgentflow_0-input-formDescription-string", + "display": false + }, + { + "label": "Form Input Types", + "name": "formInputTypes", + "description": "Specify the type of form input", + "type": "array", + "show": { + "startInputType": "formInput" + }, + "array": [ + { + "label": "Type", + "name": "type", + "type": "options", + "options": [ + { + "label": "String", + "name": "string" + }, + { + "label": "Number", + "name": "number" + }, + { + "label": "Boolean", + "name": "boolean" + }, + { + "label": "Options", + "name": "options" + } + ], + "default": "string" + }, + { + "label": "Label", + "name": "label", + "type": "string", + "placeholder": "Label for the input" + }, + { + "label": "Variable Name", + "name": "name", + "type": "string", + "placeholder": "Variable name for the input (must be camel case)", + "description": "Variable name must be camel case. For example: firstName, lastName, etc." 
+ }, + { + "label": "Add Options", + "name": "addOptions", + "type": "array", + "show": { + "formInputTypes[$index].type": "options" + }, + "array": [ + { + "label": "Option", + "name": "option", + "type": "string" + } + ] + } + ], + "id": "startAgentflow_0-input-formInputTypes-array", + "display": false + }, + { + "label": "Ephemeral Memory", + "name": "startEphemeralMemory", + "type": "boolean", + "description": "Start fresh for every execution without past chat history", + "optional": true, + "id": "startAgentflow_0-input-startEphemeralMemory-boolean", + "display": true + }, + { + "label": "Flow State", + "name": "startState", + "description": "Runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "string", + "placeholder": "Foo" + }, + { + "label": "Value", + "name": "value", + "type": "string", + "placeholder": "Bar", + "optional": true + } + ], + "id": "startAgentflow_0-input-startState-array", + "display": true + }, + { + "label": "Persist State", + "name": "startPersistState", + "type": "boolean", + "description": "Persist the state in the same session", + "optional": true, + "id": "startAgentflow_0-input-startPersistState-boolean", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "startInputType": "chatInput", + "formTitle": "", + "formDescription": "", + "formInputTypes": "", + "startEphemeralMemory": "", + "startState": "", + "startPersistState": "" + }, + "outputAnchors": [ + { + "id": "startAgentflow_0-output-startAgentflow", + "label": "Start", + "name": "startAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "width": 104, + "height": 66, + "selected": false, + "positionAbsolute": { + "x": -763, + "y": -259 + }, + "dragging": false + }, + { + "id": "agentAgentflow_0", + "position": { + "x": -603.4327999101338, + "y": -309.6339518229167 + }, + "data": { + "id": "agentAgentflow_0", + "label": "Teradata visualization Agent", + 
"version": 2.2, + "name": "agentAgentflow", + "type": "Agent", + "color": "#4DD0E1", + "baseClasses": [ + "Agent" + ], + "category": "Agent Flows", + "description": "Dynamically choose and utilize tools during runtime, enabling multi-step reasoning", + "inputParams": [ + { + "label": "Model", + "name": "agentModel", + "type": "asyncOptions", + "loadMethod": "listModels", + "loadConfig": true, + "id": "agentAgentflow_0-input-agentModel-asyncOptions", + "display": true + }, + { + "label": "Messages", + "name": "agentMessages", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Role", + "name": "role", + "type": "options", + "options": [ + { + "label": "System", + "name": "system" + }, + { + "label": "Assistant", + "name": "assistant" + }, + { + "label": "Developer", + "name": "developer" + }, + { + "label": "User", + "name": "user" + } + ] + }, + { + "label": "Content", + "name": "content", + "type": "string", + "acceptVariable": true, + "generateInstruction": true, + "rows": 4 + } + ], + "id": "agentAgentflow_0-input-agentMessages-array", + "display": true + }, + { + "label": "OpenAI Built-in Tools", + "name": "agentToolsBuiltInOpenAI", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_preview", + "description": "Search the web for the latest information" + }, + { + "label": "Code Interpreter", + "name": "code_interpreter", + "description": "Write and run Python code in a sandboxed environment" + }, + { + "label": "Image Generation", + "name": "image_generation", + "description": "Generate images based on a text prompt" + } + ], + "show": { + "agentModel": "chatOpenAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInOpenAI-multiOptions", + "display": false + }, + { + "label": "Gemini Built-in Tools", + "name": "agentToolsBuiltInGemini", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "URL Context", + "name": "urlContext", + 
"description": "Extract content from given URLs" + }, + { + "label": "Google Search", + "name": "googleSearch", + "description": "Search real-time web content" + } + ], + "show": { + "agentModel": "chatGoogleGenerativeAI" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInGemini-multiOptions", + "display": false + }, + { + "label": "Anthropic Built-in Tools", + "name": "agentToolsBuiltInAnthropic", + "type": "multiOptions", + "optional": true, + "options": [ + { + "label": "Web Search", + "name": "web_search_20250305", + "description": "Search the web for the latest information" + }, + { + "label": "Web Fetch", + "name": "web_fetch_20250910", + "description": "Retrieve full content from specified web pages" + } + ], + "show": { + "agentModel": "chatAnthropic" + }, + "id": "agentAgentflow_0-input-agentToolsBuiltInAnthropic-multiOptions", + "display": false + }, + { + "label": "Tools", + "name": "agentTools", + "type": "array", + "optional": true, + "array": [ + { + "label": "Tool", + "name": "agentSelectedTool", + "type": "asyncOptions", + "loadMethod": "listTools", + "loadConfig": true + }, + { + "label": "Require Human Input", + "name": "agentSelectedToolRequiresHumanInput", + "type": "boolean", + "optional": true + } + ], + "id": "agentAgentflow_0-input-agentTools-array", + "display": true + }, + { + "label": "Knowledge (Document Stores)", + "name": "agentKnowledgeDocumentStores", + "type": "array", + "description": "Give your agent context about different document sources. 
Document stores must be upserted in advance.", + "array": [ + { + "label": "Document Store", + "name": "documentStore", + "type": "asyncOptions", + "loadMethod": "listStores" + }, + { + "label": "Describe Knowledge", + "name": "docStoreDescription", + "type": "string", + "generateDocStoreDescription": true, + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeDocumentStores-array", + "display": true + }, + { + "label": "Knowledge (Vector Embeddings)", + "name": "agentKnowledgeVSEmbeddings", + "type": "array", + "description": "Give your agent context about different document sources from existing vector stores and embeddings", + "array": [ + { + "label": "Vector Store", + "name": "vectorStore", + "type": "asyncOptions", + "loadMethod": "listVectorStores", + "loadConfig": true + }, + { + "label": "Embedding Model", + "name": "embeddingModel", + "type": "asyncOptions", + "loadMethod": "listEmbeddings", + "loadConfig": true + }, + { + "label": "Knowledge Name", + "name": "knowledgeName", + "type": "string", + "placeholder": "A short name for the knowledge base, this is useful for the AI to know when and how to search for correct information" + }, + { + "label": "Describe Knowledge", + "name": "knowledgeDescription", + "type": "string", + "placeholder": "Describe what the knowledge base is about, this is useful for the AI to know when and how to search for correct information", + "rows": 4 + }, + { + "label": "Return Source Documents", + "name": "returnSourceDocuments", + "type": "boolean", + "optional": true + } + ], + "optional": true, + "id": "agentAgentflow_0-input-agentKnowledgeVSEmbeddings-array", + "display": true + }, + { + "label": "Enable Memory", + "name": 
"agentEnableMemory", + "type": "boolean", + "description": "Enable memory for the conversation thread", + "default": true, + "optional": true, + "id": "agentAgentflow_0-input-agentEnableMemory-boolean", + "display": true + }, + { + "label": "Memory Type", + "name": "agentMemoryType", + "type": "options", + "options": [ + { + "label": "All Messages", + "name": "allMessages", + "description": "Retrieve all messages from the conversation" + }, + { + "label": "Window Size", + "name": "windowSize", + "description": "Uses a fixed window size to surface the last N messages" + }, + { + "label": "Conversation Summary", + "name": "conversationSummary", + "description": "Summarizes the whole conversation" + }, + { + "label": "Conversation Summary Buffer", + "name": "conversationSummaryBuffer", + "description": "Summarize conversations once token limit is reached. Default to 2000" + } + ], + "optional": true, + "default": "allMessages", + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentMemoryType-options", + "display": true + }, + { + "label": "Window Size", + "name": "agentMemoryWindowSize", + "type": "number", + "default": "20", + "description": "Uses a fixed window size to surface the last N messages", + "show": { + "agentMemoryType": "windowSize" + }, + "id": "agentAgentflow_0-input-agentMemoryWindowSize-number", + "display": false + }, + { + "label": "Max Token Limit", + "name": "agentMemoryMaxTokenLimit", + "type": "number", + "default": "2000", + "description": "Summarize conversations once token limit is reached. 
Default to 2000", + "show": { + "agentMemoryType": "conversationSummaryBuffer" + }, + "id": "agentAgentflow_0-input-agentMemoryMaxTokenLimit-number", + "display": false + }, + { + "label": "Input Message", + "name": "agentUserMessage", + "type": "string", + "description": "Add an input message as user message at the end of the conversation", + "rows": 4, + "optional": true, + "acceptVariable": true, + "show": { + "agentEnableMemory": true + }, + "id": "agentAgentflow_0-input-agentUserMessage-string", + "display": true + }, + { + "label": "Return Response As", + "name": "agentReturnResponseAs", + "type": "options", + "options": [ + { + "label": "User Message", + "name": "userMessage" + }, + { + "label": "Assistant Message", + "name": "assistantMessage" + } + ], + "default": "userMessage", + "id": "agentAgentflow_0-input-agentReturnResponseAs-options", + "display": true + }, + { + "label": "Update Flow State", + "name": "agentUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "agentAgentflow_0-input-agentUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "agentModel": "azureChatOpenAI", + "agentMessages": [ + { + "role": "system", + "content": "

Always output the final answer strictly in JSON format.

Do not add explanations, notes, or extra text.

If using a chart, the JSON must have the structure:

{

\"type\": \"type_of_chart\",

\"title\": \"chart title\",

\"labels\": [...],

\"datasets\": [...]

}

" + } + ], + "agentToolsBuiltInOpenAI": "", + "agentToolsBuiltInGemini": "", + "agentToolsBuiltInAnthropic": "", + "agentTools": [ + { + "agentSelectedTool": "customMCP", + "agentSelectedToolRequiresHumanInput": "", + "agentSelectedToolConfig": { + "mcpServerConfig": "{\n \"url\": \"http://teradata-mcp-server:8001/mcp\"\n}", + "mcpActions": "[\"plot_line_chart\",\"plot_pie_chart\",\"plot_polar_chart\",\"plot_radar_chart\"]", + "agentSelectedTool": "customMCP" + } + } + ], + "agentKnowledgeDocumentStores": "", + "agentKnowledgeVSEmbeddings": "", + "agentEnableMemory": true, + "agentMemoryType": "allMessages", + "agentMemoryWindowSize": "20", + "agentMemoryMaxTokenLimit": "2000", + "agentUserMessage": "", + "agentReturnResponseAs": "userMessage", + "agentUpdateState": "", + "agentModelConfig": { + "credential": "", + "modelName": "gpt-4.1", + "temperature": 0.9, + "maxTokens": "", + "streaming": true, + "topP": "", + "frequencyPenalty": "", + "presencePenalty": "", + "timeout": "", + "basepath": "", + "baseOptions": "", + "allowImageUploads": "", + "imageResolution": "low", + "agentModel": "azureChatOpenAI" + } + }, + "outputAnchors": [ + { + "id": "agentAgentflow_0-output-agentAgentflow", + "label": "Agent", + "name": "agentAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 257, + "height": 101, + "selected": false, + "positionAbsolute": { + "x": -603.4327999101338, + "y": -309.6339518229167 + }, + "dragging": false + }, + { + "id": "customFunctionAgentflow_0", + "position": { + "x": -294.1125813802083, + "y": -246.31697591145837 + }, + "data": { + "id": "customFunctionAgentflow_0", + "label": "td_func_draw_plot", + "version": 1, + "name": "customFunctionAgentflow", + "type": "CustomFunction", + "color": "#E4B7FF", + "baseClasses": [ + "CustomFunction" + ], + "category": "Agent Flows", + "description": "Execute custom function", + "inputParams": [ + { + "label": "Input Variables", + "name": "customFunctionInputVariables", 
+ "description": "Input variables can be used in the function with prefix $. For example: $foo", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Variable Name", + "name": "variableName", + "type": "string" + }, + { + "label": "Variable Value", + "name": "variableValue", + "type": "string", + "acceptVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionInputVariables-array", + "display": true + }, + { + "label": "Javascript Function", + "name": "customFunctionJavascriptFunction", + "type": "code", + "codeExample": "/*\n* You can use any libraries imported in Flowise\n* You can use properties specified in Input Schema as variables. Ex: Property = userid, Variable = $userid\n* You can get default flow config: $flow.sessionId, $flow.chatId, $flow.chatflowId, $flow.input, $flow.state\n* You can get custom variables: $vars.\n* Must return a string value at the end of function\n*/\n\nconst fetch = require('node-fetch');\nconst url = 'https://api.open-meteo.com/v1/forecast?latitude=52.52&longitude=13.41¤t_weather=true';\nconst options = {\n method: 'GET',\n headers: {\n 'Content-Type': 'application/json'\n }\n};\ntry {\n const response = await fetch(url, options);\n const text = await response.text();\n return text;\n} catch (error) {\n console.error(error);\n return '';\n}", + "description": "The function to execute. 
Must return a string or an object that can be converted to a string.", + "id": "customFunctionAgentflow_0-input-customFunctionJavascriptFunction-code", + "display": true + }, + { + "label": "Update Flow State", + "name": "customFunctionUpdateState", + "description": "Update runtime state during the execution of the workflow", + "type": "array", + "optional": true, + "acceptVariable": true, + "array": [ + { + "label": "Key", + "name": "key", + "type": "asyncOptions", + "loadMethod": "listRuntimeStateKeys", + "freeSolo": true + }, + { + "label": "Value", + "name": "value", + "type": "string", + "acceptVariable": true, + "acceptNodeOutputAsVariable": true + } + ], + "id": "customFunctionAgentflow_0-input-customFunctionUpdateState-array", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "customFunctionInputVariables": [ + { + "variableName": "inputs_data", + "variableValue": "

{{ agentAgentflow_0 }}

" + } + ], + "customFunctionJavascriptFunction": "let chartType = \"line\";\nlet chartTitle = \"Chart\";\nlet chartData = { labels: [], datasets: [] };\nlet isValidChartData = false;\n\ntry {\n const input = JSON.parse($inputs_data);\n console.log(\"inputs data:\", $inputs_data);\n // Check if input looks like chart data (has labels and datasets arrays)\n if (\n input &&\n typeof input === \"object\" &&\n Array.isArray(input.labels) &&\n Array.isArray(input.datasets)\n ) {\n chartType = input.type || \"line\";\n chartTitle = input.title || chartType.toUpperCase() + \" Chart\";\n chartData.labels = input.labels;\n chartData.datasets = input.datasets;\n isValidChartData = true;\n }\n} catch {\n // $inputs_data not JSON, treat as plain message\n}\n\nif (!isValidChartData) {\n const message =\n typeof $inputs_data === \"string\"\n ? $inputs_data\n : \"No chart data provided.\";\n let res;\n try {\n res = JSON.parse(message);\n } catch {\n res = { message };\n }\n return res.message || message;\n}\n\n// Build QuickChart URL\nconst quickChartUrl = `https://quickchart.io/chart?c=${encodeURIComponent(\n JSON.stringify({\n type: chartType,\n data: chartData,\n options: {\n plugins: {\n legend: { position: \"top\" },\n title: { display: true, text: chartTitle },\n },\n },\n })\n)}`;\n\n// Return Markdown image (Flowise chat supports it)\nreturn `Here is your chart:\\n\\n![Chart](${quickChartUrl})`;\n", + "customFunctionUpdateState": "" + }, + "outputAnchors": [ + { + "id": "customFunctionAgentflow_0-output-customFunctionAgentflow", + "label": "Custom Function", + "name": "customFunctionAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "agentFlow", + "width": 191, + "height": 66, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -294.1125813802083, + "y": -246.31697591145837 + } + }, + { + "id": "stickyNoteAgentflow_0", + "position": { + "x": -903.1125813802083, + "y": -166.81697591145837 + }, + "data": { + "id": 
"stickyNoteAgentflow_0", + "label": "Sticky Note", + "version": 1, + "name": "stickyNoteAgentflow", + "type": "StickyNote", + "color": "#fee440", + "baseClasses": [ + "StickyNote" + ], + "category": "Agent Flows", + "description": "Add notes to the agent flow", + "inputParams": [ + { + "label": "", + "name": "note", + "type": "string", + "rows": 1, + "placeholder": "Type something here", + "optional": true, + "id": "stickyNoteAgentflow_0-input-note-string", + "display": true + } + ], + "inputAnchors": [], + "inputs": { + "note": "this agent will draw chart for tables data\n\nsupported chart types - line , pie, radar, polar " + }, + "outputAnchors": [ + { + "id": "stickyNoteAgentflow_0-output-stickyNoteAgentflow", + "label": "Sticky Note", + "name": "stickyNoteAgentflow" + } + ], + "outputs": {}, + "selected": false + }, + "type": "stickyNote", + "width": 204, + "height": 143, + "selected": false, + "dragging": false, + "positionAbsolute": { + "x": -903.1125813802083, + "y": -166.81697591145837 + } + } + ], + "edges": [ + { + "source": "startAgentflow_0", + "sourceHandle": "startAgentflow_0-output-startAgentflow", + "target": "agentAgentflow_0", + "targetHandle": "agentAgentflow_0", + "data": { + "sourceColor": "#7EE787", + "targetColor": "#4DD0E1", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "startAgentflow_0-startAgentflow_0-output-startAgentflow-agentAgentflow_0-agentAgentflow_0" + }, + { + "source": "agentAgentflow_0", + "sourceHandle": "agentAgentflow_0-output-agentAgentflow", + "target": "customFunctionAgentflow_0", + "targetHandle": "customFunctionAgentflow_0", + "data": { + "sourceColor": "#4DD0E1", + "targetColor": "#E4B7FF", + "isHumanInput": false + }, + "type": "agentFlow", + "id": "agentAgentflow_0-agentAgentflow_0-output-agentAgentflow-customFunctionAgentflow_0-customFunctionAgentflow_0" + } + ] +} \ No newline at end of file diff --git a/pyproject.toml b/pyproject.toml index a83c2fb..709f490 100644 --- a/pyproject.toml +++ 
b/pyproject.toml @@ -34,7 +34,7 @@ dependencies = [ [project.optional-dependencies] # Feature Store functionality fs = [ - "teradataml>=20.0.0.5", + "teradataml>=20.0.0.7", "tdfs4ds>=0.2.4.0", ] # Enterprise Vector Store functionality @@ -43,7 +43,7 @@ evs = [ ] # Teradata Vector Store functionality tdvs = [ - "teradatagenai>=20.0.0.0" + "teradatagenai>=20.0.0.3" ] # Development dependencies dev = [ diff --git a/src/teradata_mcp_server/config/profiles.yml b/src/teradata_mcp_server/config/profiles.yml index 5ab592a..2fcfa5b 100644 --- a/src/teradata_mcp_server/config/profiles.yml +++ b/src/teradata_mcp_server/config/profiles.yml @@ -33,6 +33,7 @@ dataScientist: - ^sec_userDbPermissions - ^dba_userSqlList - ^plot_* + - ^tdml_* prompt: - ^rag_* - ^sql_* diff --git a/src/teradata_mcp_server/tools/evs_connect.py b/src/teradata_mcp_server/tools/evs_connect.py index e10f4ef..f280d88 100644 --- a/src/teradata_mcp_server/tools/evs_connect.py +++ b/src/teradata_mcp_server/tools/evs_connect.py @@ -31,7 +31,7 @@ def get_evs() -> VectorStore: set_auth_token( base_url=os.getenv("TD_BASE_URL"), - pat_token=os.getenv("TD_PAT"), + pat_token=os.getenv("TD_PAT") or None, pem_file=os.getenv("TD_PEM") or None, ) VSManager.health() diff --git a/src/teradata_mcp_server/tools/plot/README.md b/src/teradata_mcp_server/tools/plot/README.md index 0c2e6ce..63fbf86 100644 --- a/src/teradata_mcp_server/tools/plot/README.md +++ b/src/teradata_mcp_server/tools/plot/README.md @@ -6,10 +6,10 @@ Assumes Teradata >=17.20. **Plot** tools: - - generate_line_plot - Generates json for line plot. Json is specific to chart.js . - - generate_polar_area_plot - Generates json for polar area plot. Json is specific to chart.js . - - generate_pie_plot - Generates json for pie plot. Json is specific to chart.js . - - generate_radar_plot - Generates json for radar plot. Json is specific to chart.js . + - plot_line_chart - Generates json for line plot. Json is specific to chart.js . 
+ - plot_polar_chart - Generates json for polar area plot. Json is specific to chart.js . + - plot_pie_chart - Generates json for pie plot. Json is specific to chart.js . + - plot_radar_chart - Generates json for radar plot. Json is specific to chart.js . **Base** Prompts: diff --git a/src/teradata_mcp_server/tools/tdvs/constants.py b/src/teradata_mcp_server/tools/tdvs/constants.py index 72485c1..45a9e9a 100644 --- a/src/teradata_mcp_server/tools/tdvs/constants.py +++ b/src/teradata_mcp_server/tools/tdvs/constants.py @@ -31,6 +31,6 @@ load_dotenv() TD_VS_BASE_URL = os.getenv("TD_BASE_URL", None) -TD_PAT_TOKEN = os.getenv("TD_PAT", None) -TD_PEM_FILE = os.getenv("TD_PEM", None) +TD_PAT_TOKEN = os.getenv("TD_PAT") or None +TD_PEM_FILE = os.getenv("TD_PEM") or None DATABASE_URI = os.getenv("DATABASE_URI", None) # e.g., "teradatasql://user:password@host/database" diff --git a/src/teradata_mcp_server/tools/tdvs/tdvs_utilies.py b/src/teradata_mcp_server/tools/tdvs/tdvs_utilies.py index 4b1ce3b..9850155 100644 --- a/src/teradata_mcp_server/tools/tdvs/tdvs_utilies.py +++ b/src/teradata_mcp_server/tools/tdvs/tdvs_utilies.py @@ -38,10 +38,13 @@ def create_teradataml_context(): username=conn_url.username, password=conn_url.password) logger.info("teradataml context ready.") + else: + logger.info("teradataml context already exists.") if TD_VS_BASE_URL is None: raise ValueError("TD_BASE_URL environment variable is not set.") - + + logger.info(f"Vector Store base URL: {TD_VS_BASE_URL}") if TD_PAT_TOKEN is not None and TD_PEM_FILE is not None: set_auth_token( base_url=TD_VS_BASE_URL, diff --git a/uv.lock b/uv.lock index 19db41b..c92cc3c 100644 --- a/uv.lock +++ b/uv.lock @@ -1,5 +1,5 @@ version = 1 -revision = 2 +revision = 3 requires-python = ">=3.11" resolution-markers = [ "python_full_version >= '3.12'", @@ -373,7 +373,7 @@ version = "3.24.0" source = { registry = "https://pypi.org/simple" } dependencies = [ { name = "attrs" }, - { name = "docstring-parser", marker = 
"python_full_version < '4.0'" }, + { name = "docstring-parser", marker = "python_full_version < '4'" }, { name = "rich" }, { name = "rich-rst" }, ] @@ -2318,6 +2318,9 @@ fs = [ { name = "tdfs4ds" }, { name = "teradataml" }, ] +tdvs = [ + { name = "teradatagenai" }, +] [package.dev-dependencies] dev = [ @@ -2336,11 +2339,12 @@ requires-dist = [ { name = "sqlalchemy", specifier = ">=2.0.0,<3.0.0" }, { name = "tdfs4ds", marker = "extra == 'fs'", specifier = ">=0.2.4.0" }, { name = "teradatagenai", marker = "extra == 'evs'", specifier = ">=20.0.0.0" }, - { name = "teradataml", marker = "extra == 'fs'", specifier = ">=20.0.0.5" }, + { name = "teradatagenai", marker = "extra == 'tdvs'", specifier = ">=20.0.0.3" }, + { name = "teradataml", marker = "extra == 'fs'", specifier = ">=20.0.0.7" }, { name = "teradatasqlalchemy", specifier = ">=20.0.0.0" }, { name = "types-pyyaml", marker = "extra == 'dev'" }, ] -provides-extras = ["fs", "evs", "dev"] +provides-extras = ["fs", "evs", "tdvs", "dev"] [package.metadata.requires-dev] dev = [{ name = "build", specifier = ">=1.3.0" }]