From ef0ca504b3e50b33a6810127a409250e73ea2508 Mon Sep 17 00:00:00 2001
From: Omar Salem <57724072+oomaroo02@users.noreply.github.com>
Date: Mon, 20 Oct 2025 18:28:00 +0200
Subject: [PATCH] Add files via upload
---
.../sql_graph_generator_dashboard/LICENSE.txt | 35 ++
.../sql_graph_generator_dashboard/README.md | 231 +++++++++
.../files/DATABASE_SETUP.md | 130 +++++
.../files/README_FILES.md | 180 +++++++
.../files/SETUP_GUIDE.md | 118 +++++
.../files/backend/api/main.py | 119 +++++
.../orchestration/conversation_manager.py | 211 +++++++++
.../langchain_orchestrator_v2.py | 443 +++++++++++++++++
.../orchestration/oci_direct_runnables.py | 412 ++++++++++++++++
.../backend/orchestration/oci_runnables.py | 374 +++++++++++++++
.../files/backend/requirements.txt | 22 +
.../backend/tools/genai_chart_generator.py | 341 ++++++++++++++
.../files/backend/utils/config.py | 45 ++
.../files/database/customers.csv | 16 +
.../files/database/order_items.csv | 36 ++
.../files/database/orders.csv | 21 +
.../files/database/products.csv | 16 +
.../frontend/app/components/Chat/Chat.js | 352 ++++++++++++++
.../app/components/Chat/ChatHeader.js | 91 ++++
.../app/components/Chat/ChatInputBar.js | 440 +++++++++++++++++
.../app/components/Chat/ChatPreview.js | 186 ++++++++
.../app/components/Chat/MessageContent.js | 444 ++++++++++++++++++
.../app/components/Chat/MessageItem.js | 183 ++++++++
.../app/components/Chat/MessageList.js | 94 ++++
.../app/components/Chat/TypingIndicator.js | 79 ++++
.../app/components/Chat/WelcomeScreen.js | 102 ++++
.../app/components/ClientThemeProvider.js | 13 +
.../files/frontend/app/components/NavMenu.js | 236 ++++++++++
.../app/components/Settings/ProjectCard.js | 184 ++++++++
.../app/components/Settings/ProjectModal.js | 411 ++++++++++++++++
.../app/components/TestOracleSpeech.js | 208 ++++++++
.../files/frontend/app/config/app.js | 32 ++
.../frontend/app/contexts/ChatContext.js | 294 ++++++++++++
.../app/contexts/DynamicThemeProvider.js | 41 ++
.../frontend/app/contexts/ProjectsContext.js | 162 +++++++
.../files/frontend/app/favicon.ico | Bin 0 -> 15086 bytes
.../files/frontend/app/globals.css | 40 ++
.../files/frontend/app/layout.js | 38 ++
.../files/frontend/app/page.js | 108 +++++
.../files/frontend/app/page.module.css | 0
.../files/frontend/app/services/apiClient.js | 25 +
.../app/services/genaiAgentService.js | 47 ++
.../app/services/oracleSpeechService.js | 81 ++++
.../frontend/app/services/speechService.js | 69 +++
.../files/frontend/app/theme/overrides.js | 56 +++
.../files/frontend/app/theme/palette.js | 19 +
.../files/frontend/app/theme/theme.js | 20 +
.../files/frontend/app/utils/messageUtils.js | 200 ++++++++
48 files changed, 7005 insertions(+)
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/LICENSE.txt
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/README.md
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/DATABASE_SETUP.md
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/README_FILES.md
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/SETUP_GUIDE.md
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/api/main.py
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/conversation_manager.py
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/langchain_orchestrator_v2.py
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_direct_runnables.py
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_runnables.py
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/requirements.txt
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/tools/genai_chart_generator.py
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/utils/config.py
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/customers.csv
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/order_items.csv
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/orders.csv
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/products.csv
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/Chat.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatHeader.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatInputBar.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatPreview.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageContent.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageItem.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageList.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/TypingIndicator.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/WelcomeScreen.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/ClientThemeProvider.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/NavMenu.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectCard.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectModal.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/TestOracleSpeech.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/config/app.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ChatContext.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/DynamicThemeProvider.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ProjectsContext.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/favicon.ico
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/globals.css
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/layout.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/page.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/page.module.css
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/apiClient.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/genaiAgentService.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/oracleSpeechService.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/speechService.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/overrides.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/palette.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/theme.js
create mode 100644 ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/utils/messageUtils.js
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/LICENSE.txt b/ai/gen-ai-agents/sql_graph_generator_dashboard/LICENSE.txt
new file mode 100644
index 000000000..46c0c79d9
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/LICENSE.txt
@@ -0,0 +1,35 @@
+Copyright (c) 2025 Oracle and/or its affiliates.
+
+The Universal Permissive License (UPL), Version 1.0
+
+Subject to the condition set forth below, permission is hereby granted to any
+person obtaining a copy of this software, associated documentation and/or data
+(collectively the "Software"), free of charge and under any and all copyright
+rights in the Software, and any and all patent rights owned or freely
+licensable by each licensor hereunder covering either (i) the unmodified
+Software as contributed to or provided by such licensor, or (ii) the Larger
+Works (as defined below), to deal in both
+
+(a) the Software, and
+(b) any piece of software and/or hardware listed in the lrgrwrks.txt file if
+one is included with the Software (each a "Larger Work" to which the Software
+is contributed by such licensors),
+
+without restriction, including without limitation the rights to copy, create
+derivative works of, display, perform, and distribute the Software and make,
+use, sell, offer for sale, import, export, have made, and have sold the
+Software and the Larger Work(s), and to sublicense the foregoing rights on
+either these or other terms.
+
+This license is subject to the following condition:
+The above copyright notice and either this complete permission notice or at
+a minimum a reference to the UPL must be included in all copies or
+substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/README.md b/ai/gen-ai-agents/sql_graph_generator_dashboard/README.md
new file mode 100644
index 000000000..20185e3c6
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/README.md
@@ -0,0 +1,231 @@
+# SQL Graph Generator Dashboard
+
+SQL Graph Generator Dashboard is an AI-powered assistant that enables natural language database queries and intelligent chart generation.
+It extracts data from your database using conversational queries, automatically generates appropriate visualizations, and provides multi-turn conversational context for data exploration.
+It runs as an interactive Next.js web app backed by a FastAPI server, LangChain orchestration, and Oracle Cloud Infrastructure GenAI models.
+
+Reviewed: October 13, 2025
+
+# When to use this asset?
+
+Use this asset when you want to:
+
+- Query databases using natural language instead of SQL
+- Automatically generate charts and visualizations from query results
+- Build conversational data exploration interfaces
+- Integrate OCI GenAI models with database operations
+- Demonstrate intelligent routing between data queries, visualizations, and insights
+
+Ideal for:
+
+- AI engineers building conversational data analytics tools
+- Data teams needing natural language database interfaces
+- OCI customers integrating GenAI into business intelligence workflows
+- Anyone showcasing LangChain + OCI GenAI + dynamic visualization generation
+
+# How to use this asset?
+
+This assistant can be launched via:
+
+- Next.js Web UI
+
+It supports:
+
+- Natural language to SQL conversion
+- Automatic chart generation from query results
+- Multi-turn conversations with context preservation
+- Multiple chart types: bar, line, pie, scatter, heatmap
+- Real-time data visualization using matplotlib/seaborn
+- Intelligent routing between data queries, visualizations, and insights
+
+## Setup Instructions
+
+### OCI Generative AI Model Configuration
+
+1. Go to: OCI Console → Generative AI
+2. Select your model (this demo uses OpenAI GPT OSS 120B):
+ `ocid1.generativeaimodel.oc1.eu-frankfurt-1.amaaaaaask7dceyav...`
+3. Set up an OCI Agent Runtime endpoint for SQL queries
+4. Copy the following values:
+ - MODEL_ID
+ - AGENT_ENDPOINT_ID
+ - COMPARTMENT_ID
+ - SERVICE_ENDPOINT (e.g., `https://inference.generativeai.eu-frankfurt-1.oci.oraclecloud.com`)
+5. Configure them in `backend/utils/config.py`
+
+Documentation:
+[OCI Generative AI Documentation](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm)
+
+No separate service API key is required — requests are signed with your OCI identity (the API signing key configured below).
+
+Ensure your OCI CLI credentials are configured.
+Edit or create the following config file at `~/.oci/config`:
+
+```
+[DEFAULT]
+user=ocid1.user.oc1..exampleuniqueID
+fingerprint=c6:4f:66:e7:xx:xx:xx:xx
+tenancy=ocid1.tenancy.oc1..exampleuniqueID
+region=eu-frankfurt-1
+key_file=~/.oci/oci_api_key.pem
+```
+
+### Install Dependencies
+
+Backend:
+
+```bash
+cd backend
+pip install -r requirements.txt
+```
+
+Frontend:
+
+```bash
+cd ..
+npm install
+```
+
+### Configure Database
+
+1. Set up your database connection in OCI Agent Runtime
+2. The demo uses a sample e-commerce database with tables:
+ - orders
+ - customers
+ - products
+ - order_items
+
+### Start the Application
+
+Backend (FastAPI):
+
+```bash
+cd backend
+python -m uvicorn api.main:app --reload --host 0.0.0.0 --port 8000
+```
+
+Frontend (Next.js):
+
+```bash
+npm run dev
+```
+
+Access the application at: http://localhost:3000
+
+## Key Features
+
+| Feature | Description |
+| ------------------------ | ---------------------------------------------------------------- |
+| Natural Language Queries | Ask questions like "show me the top 5 orders" |
+| Intelligent Routing | GenAI-powered routing between data queries, charts, and insights |
+| Auto Chart Generation | Automatically creates appropriate visualizations from data |
+| Multi-Turn Conversations | Maintains context across multiple queries |
+| Real-Time Visualization | Generates matplotlib/seaborn charts as base64 images |
+| Multiple Chart Types | Supports bar, line, pie, scatter, and heatmap charts |
+| OCI GenAI Integration | Uses OCI Agent Runtime and Chat API |
+| LangChain Runnables | Clean integration pattern wrapping OCI SDK calls |
+| Conversation Management | Tracks query history and data state |
+| Error Handling | Clear error messages and fallback behavior |
+
+## Architecture
+
+### Backend Components
+
+1. **Router Agent** (OCI Chat API)
+
+ - Intelligent query routing using GenAI
+ - Routes: DATA_QUERY, CHART_EDIT, INSIGHT_QA
+ - Returns structured JSON decisions
+
+2. **SQL Agent** (OCI Agent Runtime)
+
+ - Natural language to SQL conversion
+ - Database query execution
+ - Structured data extraction
+
+3. **Chart Generator** (OCI Chat API + Python Execution)
+
+ - GenAI generates matplotlib/seaborn code
+ - Safe code execution in sandboxed environment
+ - Returns base64-encoded chart images
+
+4. **Orchestrator**
+ - Coordinates agents based on routing decisions
+ - Manages conversation state
+ - Handles multi-turn context
+
+### Frontend Components
+
+1. **Chat Interface**
+
+ - Real-time message display
+ - Support for text, tables, and images
+ - Speech recognition integration
+
+2. **Service Layer**
+
+ - API communication with backend
+ - Response transformation
+ - Error handling
+
+3. **Context Management**
+ - User session handling
+ - Message history
+ - State management
+
+## Example Queries
+
+```
+"Show me the top 5 orders"
+→ Returns table with order data
+
+"Make a bar chart of those orders by total amount"
+→ Generates bar chart visualization
+
+"Show me orders grouped by region"
+→ Returns data aggregated by region
+
+"Create a pie chart of the order distribution"
+→ Generates pie chart from current data
+
+"What insights can you provide about these sales?"
+→ Provides AI-generated analysis
+```
+
+## Configuration Files
+
+Key configuration in `backend/utils/config.py`:
+
+- MODEL_ID: Your OCI GenAI model OCID
+- AGENT_ENDPOINT_ID: Your OCI Agent Runtime endpoint
+- COMPARTMENT_ID: Your OCI compartment
+- SERVICE_ENDPOINT: GenAI service endpoint URL
+- DATABASE_SCHEMA: Database table definitions
+
+## Notes
+
+- Prompts can be customized in `backend/orchestration/oci_direct_runnables.py`
+- Chart generation code is dynamically created by GenAI
+- Designed specifically for Oracle Cloud Infrastructure + Generative AI
+- Sample database schema included for e-commerce use case
+- Frontend uses Material-UI for consistent design
+
+# Useful Links
+
+- [OCI Generative AI](https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm)
+ Official documentation for Oracle Generative AI
+
+- [OCI Agent Runtime](https://docs.oracle.com/en-us/iaas/Content/generative-ai/agent-runtime.htm)
+ Documentation for OCI Agent Runtime
+
+- [LangChain Documentation](https://python.langchain.com/docs/get_started/introduction)
+ LangChain framework documentation
+
+- [Next.js Documentation](https://nextjs.org/docs)
+ Next.js framework documentation
+
+# License
+
+Copyright (c) 2025 Oracle and/or its affiliates.
+
+Licensed under the Universal Permissive License (UPL), Version 1.0.
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/DATABASE_SETUP.md b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/DATABASE_SETUP.md
new file mode 100644
index 000000000..8b07ba82a
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/DATABASE_SETUP.md
@@ -0,0 +1,130 @@
+# Database Setup
+
+## Overview
+
+This application uses OCI Agent Runtime to query your database. The sample schema is for an e-commerce database.
+
+## Database Schema
+
+### CUSTOMERS Table
+```sql
+CREATE TABLE CUSTOMERS (
+ CUSTOMER_ID NUMBER PRIMARY KEY,
+ CUSTOMER_NAME VARCHAR2(100),
+ EMAIL VARCHAR2(100),
+ SIGNUP_DATE DATE,
+ SEGMENT VARCHAR2(50),
+ COUNTRY VARCHAR2(50),
+ LIFETIME_VALUE NUMBER(10,2),
+ CREATION_DATE DATE,
+ CREATED_BY VARCHAR2(50),
+ LAST_UPDATED_DATE DATE,
+ LAST_UPDATED_BY VARCHAR2(50)
+);
+```
+
+### PRODUCTS Table
+```sql
+CREATE TABLE PRODUCTS (
+ PRODUCT_ID NUMBER PRIMARY KEY,
+ PRODUCT_NAME VARCHAR2(200),
+ CATEGORY VARCHAR2(100),
+ PRICE NUMBER(10,2),
+ COST NUMBER(10,2),
+ STOCK_QUANTITY NUMBER,
+ LAUNCH_DATE DATE,
+ CREATION_DATE DATE,
+ CREATED_BY VARCHAR2(50),
+ LAST_UPDATED_DATE DATE,
+ LAST_UPDATED_BY VARCHAR2(50)
+);
+```
+
+### ORDERS Table
+```sql
+CREATE TABLE ORDERS (
+ ORDER_ID NUMBER PRIMARY KEY,
+ CUSTOMER_ID NUMBER,
+ ORDER_DATE DATE,
+ TOTAL_AMOUNT NUMBER(10,2),
+ STATUS VARCHAR2(50),
+ REGION VARCHAR2(100),
+ SALES_REP VARCHAR2(100),
+ CREATION_DATE DATE,
+ CREATED_BY VARCHAR2(50),
+ LAST_UPDATED_DATE DATE,
+ LAST_UPDATED_BY VARCHAR2(50),
+ FOREIGN KEY (CUSTOMER_ID) REFERENCES CUSTOMERS(CUSTOMER_ID)
+);
+```
+
+### ORDER_ITEMS Table
+```sql
+CREATE TABLE ORDER_ITEMS (
+ ORDER_ITEM_ID NUMBER PRIMARY KEY,
+ ORDER_ID NUMBER,
+ PRODUCT_ID NUMBER,
+ QUANTITY NUMBER,
+ UNIT_PRICE NUMBER(10,2),
+ DISCOUNT_PERCENT NUMBER(5,2),
+ CREATION_DATE DATE,
+ CREATED_BY VARCHAR2(50),
+ LAST_UPDATED_DATE DATE,
+ LAST_UPDATED_BY VARCHAR2(50),
+ FOREIGN KEY (ORDER_ID) REFERENCES ORDERS(ORDER_ID),
+ FOREIGN KEY (PRODUCT_ID) REFERENCES PRODUCTS(PRODUCT_ID)
+);
+```
+
+## Sample Data
+
+### Sample Customers
+```sql
+INSERT INTO CUSTOMERS VALUES (1, 'Acme Corp', 'contact@acme.com', DATE '2023-01-15', 'Enterprise', 'USA', 150000, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO CUSTOMERS VALUES (2, 'TechStart Inc', 'info@techstart.com', DATE '2023-03-20', 'SMB', 'UK', 45000, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO CUSTOMERS VALUES (3, 'Global Solutions', 'sales@global.com', DATE '2023-02-10', 'Enterprise', 'Germany', 200000, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+```
+
+### Sample Products
+```sql
+INSERT INTO PRODUCTS VALUES (1, 'Enterprise Security Suite', 'Software', 3499.99, 1200, 100, DATE '2023-01-01', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO PRODUCTS VALUES (2, 'AI Analytics Platform', 'Software', 2999.99, 1000, 150, DATE '2023-02-01', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO PRODUCTS VALUES (3, 'Cloud Storage Pro', 'Cloud', 999.99, 300, 500, DATE '2023-03-01', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO PRODUCTS VALUES (4, 'Premium Consulting', 'Services', 5000, 2000, 50, DATE '2023-01-15', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO PRODUCTS VALUES (5, 'Training Program', 'Services', 2500, 800, 100, DATE '2023-02-20', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+```
+
+### Sample Orders
+```sql
+INSERT INTO ORDERS VALUES (1, 1, DATE '2024-01-15', 8999.98, 'Completed', 'North America', 'John Smith', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDERS VALUES (2, 2, DATE '2024-01-20', 2999.99, 'Completed', 'Europe', 'Sarah Johnson', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDERS VALUES (3, 3, DATE '2024-02-01', 12499.97, 'Completed', 'Europe', 'Mike Davis', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDERS VALUES (4, 1, DATE '2024-02-15', 7500, 'Processing', 'North America', 'John Smith', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDERS VALUES (5, 2, DATE '2024-03-01', 999.99, 'Completed', 'Europe', 'Sarah Johnson', SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+```
+
+### Sample Order Items
+```sql
+INSERT INTO ORDER_ITEMS VALUES (1, 1, 1, 2, 3499.99, 0, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (2, 1, 3, 2, 999.99, 10, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (3, 2, 2, 1, 2999.99, 0, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (4, 3, 1, 1, 3499.99, 0, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (5, 3, 2, 2, 2999.99, 10, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (6, 3, 5, 1, 2500, 0, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (7, 4, 4, 1, 5000, 0, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (8, 4, 5, 1, 2500, 0, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+INSERT INTO ORDER_ITEMS VALUES (9, 5, 3, 1, 999.99, 0, SYSDATE, 'SYSTEM', SYSDATE, 'SYSTEM');
+```
+
+## OCI Agent Runtime Configuration
+
+1. Create database connection in OCI Agent Runtime
+2. Configure database tool/function with:
+ - Connection string
+ - User credentials
+ - Query permissions
+3. Test connection with simple query
+4. Update AGENT_ENDPOINT_ID in config.py
+
+
+
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/README_FILES.md b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/README_FILES.md
new file mode 100644
index 000000000..3278b89a6
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/README_FILES.md
@@ -0,0 +1,180 @@
+# Files Directory - Quick Start Guide
+
+This directory contains all necessary files to run the SQL Graph Generator Dashboard.
+
+## Directory Structure
+
+```
+files/
+├── backend/
+│ ├── api/
+│ │ └── main.py # FastAPI server entry point
+│ ├── orchestration/
+│ │ ├── langchain_orchestrator_v2.py # Main orchestrator with routing logic
+│ │ ├── oci_direct_runnables.py # OCI GenAI Chat API wrappers
+│ │ ├── oci_runnables.py # OCI Agent Runtime wrappers
+│ │ └── conversation_manager.py # Conversation state management
+│ ├── tools/
+│ │ └── genai_chart_generator.py # Chart generation with GenAI
+│ ├── utils/
+│ │ └── config.py # OCI configuration (UPDATE THIS)
+│ └── requirements.txt # Python dependencies
+├── frontend/
+│ ├── services/
+│ │ └── genaiAgentService.js # Backend API communication
+│ ├── contexts/
+│ │ └── ChatContext.js # Chat state management
+│ └── package.json # Node.js dependencies
+├── database/
+│ ├── customers.csv # Sample customer data
+│ ├── products.csv # Sample product data
+│ ├── orders.csv # Sample order data
+│ └── order_items.csv # Sample order items data
+├── SETUP_GUIDE.md # Detailed setup instructions
+├── DATABASE_SETUP.md # Database schema and setup
+└── README_FILES.md # This file
+```
+
+## Quick Start (5 Steps)
+
+### 1. Update OCI Configuration
+
+Edit `backend/utils/config.py`:
+```python
+MODEL_ID = "ocid1.generativeaimodel.oc1.YOUR_REGION.YOUR_MODEL_ID"
+AGENT_ENDPOINT_ID = "ocid1.genaiagentendpoint.oc1.YOUR_REGION.YOUR_ENDPOINT_ID"
+COMPARTMENT_ID = "ocid1.compartment.oc1..YOUR_COMPARTMENT_ID"
+SERVICE_ENDPOINT = "https://inference.generativeai.YOUR_REGION.oci.oraclecloud.com"
+```
+
+### 2. Setup OCI CLI
+
+Create `~/.oci/config`:
+```
+[DEFAULT]
+user=ocid1.user.oc1..YOUR_USER_OCID
+fingerprint=YOUR_FINGERPRINT
+tenancy=ocid1.tenancy.oc1..YOUR_TENANCY_OCID
+region=YOUR_REGION
+key_file=~/.oci/oci_api_key.pem
+```
+
+### 3. Install Dependencies
+
+Backend:
+```bash
+cd backend
+pip install -r requirements.txt
+```
+
+Frontend (in project root):
+```bash
+npm install
+```
+
+### 4. Setup Database
+
+The database CSV files are included in `database/` directory.
+Configure your OCI Agent Runtime to access these files or load them into your database.
+
+See `DATABASE_SETUP.md` for SQL schema.
+
+### 5. Run the Application
+
+Terminal 1 - Backend:
+```bash
+cd backend
+python -m uvicorn api.main:app --reload --host 0.0.0.0 --port 8000
+```
+
+Terminal 2 - Frontend (from project root):
+```bash
+npm run dev
+```
+
+Open: http://localhost:3000
+
+## Key Files Explained
+
+### Backend
+
+**main.py** - FastAPI server with `/query` endpoint
+- Receives natural language questions
+- Returns data, charts, or text responses
+
+**langchain_orchestrator_v2.py** - Main orchestration logic
+- Routes queries to appropriate agents
+- Manages conversation state
+- Coordinates data retrieval and chart generation
+
+**oci_direct_runnables.py** - OCI GenAI Chat API integration
+- Router for intelligent query routing
+- Uses GenAI for decision making
+
+**oci_runnables.py** - OCI Agent Runtime integration
+- SQL Agent for database queries
+- Extracts structured data from tool outputs
+
+**genai_chart_generator.py** - Chart generation
+- Uses GenAI to create matplotlib code
+- Executes code safely
+- Returns base64-encoded images
+
+**conversation_manager.py** - State management
+- Tracks conversation history
+- Maintains data context
+
+### Frontend
+
+**genaiAgentService.js** - API client
+- Communicates with backend
+- Maps response fields (chart_base64 → diagram_base64)
+
+**ChatContext.js** - React context
+- Manages chat state
+- Processes responses for display
+- Handles different message types
+
+## Configuration Tips
+
+1. **Region Consistency**: Ensure all OCIDs and endpoints use the same region
+2. **Model Selection**: OpenAI GPT OSS 120B recommended for routing and generation
+3. **Agent Tools**: Configure database tools in OCI Agent Runtime console
+4. **Permissions**: Ensure OCI user has GenAI and Agent Runtime permissions
+
+## Common Issues
+
+**Authentication Error:**
+- Check `~/.oci/config` file
+- Verify API key is uploaded to OCI Console
+- Test with: `oci iam region list`
+
+**Module Import Error:**
+- Ensure you're in the correct directory
+- Check all `__init__.py` files exist
+- Verify Python path includes backend directory
+
+**Chart Not Displaying:**
+- Check browser console for errors
+- Verify chart_base64 field in API response
+- Ensure frontend compiled successfully
+
+**SQL Agent Timeout:**
+- Check AGENT_ENDPOINT_ID is correct
+- Verify agent is deployed and active
+- Test agent in OCI Console first
+
+## Next Steps
+
+1. Customize DATABASE_SCHEMA in config.py for your database
+2. Adjust prompts in oci_direct_runnables.py for your use case
+3. Add custom chart types in genai_chart_generator.py
+4. Extend routing logic for additional query types
+
+## Support
+
+For OCI GenAI documentation:
+https://docs.oracle.com/en-us/iaas/Content/generative-ai/home.htm
+
+For OCI Agent Runtime:
+https://docs.oracle.com/en-us/iaas/Content/generative-ai/agent-runtime.htm
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/SETUP_GUIDE.md b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/SETUP_GUIDE.md
new file mode 100644
index 000000000..72ae21fc4
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/SETUP_GUIDE.md
@@ -0,0 +1,118 @@
+# Setup Guide
+
+## Prerequisites
+
+1. Oracle Cloud Infrastructure (OCI) account
+2. Python 3.8+
+3. Node.js 18+
+4. OCI CLI configured
+
+## Step 1: OCI Configuration
+
+Create `~/.oci/config`:
+
+```
+[DEFAULT]
+user=ocid1.user.oc1..YOUR_USER_OCID
+fingerprint=YOUR_FINGERPRINT
+tenancy=ocid1.tenancy.oc1..YOUR_TENANCY_OCID
+region=eu-frankfurt-1
+key_file=~/.oci/oci_api_key.pem
+```
+
+Generate API key:
+```bash
+openssl genrsa -out ~/.oci/oci_api_key.pem 2048
+openssl rsa -pubout -in ~/.oci/oci_api_key.pem -out ~/.oci/oci_api_key_public.pem
+```
+
+Upload public key to OCI Console → User Settings → API Keys
+
+## Step 2: OCI GenAI Setup
+
+1. Go to OCI Console → Generative AI
+2. Create or select a model (e.g., OpenAI GPT OSS 120B)
+3. Note the MODEL_ID
+4. Create an Agent Runtime endpoint for SQL queries
+5. Note the AGENT_ENDPOINT_ID
+6. Get your COMPARTMENT_ID
+
+## Step 3: Update Configuration
+
+Edit `backend/utils/config.py`:
+- Replace MODEL_ID with your model OCID
+- Replace AGENT_ENDPOINT_ID with your agent endpoint OCID
+- Replace COMPARTMENT_ID with your compartment OCID
+- Update region if different from eu-frankfurt-1
+
+## Step 4: Install Dependencies
+
+Backend:
+```bash
+cd backend
+pip install -r requirements.txt
+```
+
+Frontend:
+```bash
+cd ..
+npm install
+```
+
+## Step 5: Database Setup
+
+This demo uses OCI Agent Runtime with database tools.
+Configure your database connection in the OCI Agent Runtime console:
+1. Go to OCI Console → Generative AI → Agents
+2. Create or configure your agent
+3. Add database tool/function
+4. Configure connection to your database
+
+Sample schema is provided in `config.py` for reference.
+
+## Step 6: Run the Application
+
+Terminal 1 (Backend):
+```bash
+cd backend
+python -m uvicorn api.main:app --reload --host 0.0.0.0 --port 8000
+```
+
+Terminal 2 (Frontend):
+```bash
+npm run dev
+```
+
+Access: http://localhost:3000
+
+## Troubleshooting
+
+**OCI Authentication Error:**
+- Verify ~/.oci/config is correct
+- Check API key permissions in OCI Console
+- Ensure key_file points to your actual key (the SDK expands `~`; use an absolute path if in doubt)
+
+**Model Not Found:**
+- Verify MODEL_ID matches your OCI model OCID
+- Check model is in same region as config
+- Ensure compartment access permissions
+
+**Agent Endpoint Error:**
+- Verify AGENT_ENDPOINT_ID is correct
+- Check agent is deployed and active
+- Ensure database tools are configured
+
+**Chart Generation Fails:**
+- Check matplotlib/seaborn are installed
+- Verify python code execution permissions
+- Check logs for specific errors
+
+## Environment Variables (Optional)
+
+Instead of editing config.py, you can use environment variables:
+
+```bash
+export MODEL_ID="ocid1.generativeaimodel..."
+export AGENT_ENDPOINT_ID="ocid1.genaiagentendpoint..."
+export COMPARTMENT_ID="ocid1.compartment..."
+```
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/api/main.py b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/api/main.py
new file mode 100644
index 000000000..e706a936a
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/api/main.py
@@ -0,0 +1,119 @@
+"""
+FastAPI server for SQL Graph Generator Dashboard
+"""
+
+from fastapi import FastAPI, HTTPException
+from fastapi.middleware.cors import CORSMiddleware
+from pydantic import BaseModel
+from typing import Dict, Any, List, Optional
+import json
+import logging
+
+from orchestration.langchain_orchestrator_v2 import LangChainOrchestratorV2
+
# Setup logging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

app = FastAPI(title="SQL Graph Generator Dashboard", version="1.0.0")

# CORS configuration
# Only the local Next.js dev servers (ports 3000/3001) may call this API with
# credentials; extend allow_origins when deploying elsewhere.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:3000", "http://localhost:3001"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)

# Initialize LangChain orchestrator
# NOTE(review): constructed at import time, so any failure during orchestrator
# setup prevents the server from starting — confirm this is intended.
orchestrator = LangChainOrchestratorV2()
+
# Request/Response models
class QueryRequest(BaseModel):
    """Body of POST /query."""
    # Natural-language question to run through the orchestrator.
    question: str
    # Optional extra context; defaults to an empty string.
    context: Optional[str] = ""
+
class QueryResponse(BaseModel):
    """Response of POST /query; all fields except the first two are optional
    because different routes (data / visualization / text / error) populate
    different subsets."""
    # Whether the orchestrator handled the query without error.
    success: bool
    response_type: str  # "visualization", "data", "error"
    # The original user question, echoed back.
    query: Optional[str] = None
    # Raw text returned by the SQL agent.
    agent_response: Optional[str] = None
    dashboard: Optional[Dict] = None
    # Result rows as a list of column→value dicts.
    data: Optional[List[Dict]] = None
    insights: Optional[List[str]] = None
    text_response: Optional[str] = None
    # Error message when success is False.
    error: Optional[str] = None
    # Chart image encoded as base64, when a chart was produced.
    chart_base64: Optional[str] = None
    chart_config: Optional[Dict] = None
    # Which code path produced the result (e.g. "data_only").
    method: Optional[str] = None
    # SQL generated by the agent, when available.
    generated_sql: Optional[str] = None
    additional_info: Optional[str] = None
+
+@app.get("/")
+async def root():
+ return {
+ "message": "SQL Graph Generator Dashboard API",
+ "version": "1.0.0",
+ "status": "active"
+ }
+
+@app.get("/health")
+async def health_check():
+ return {"status": "healthy", "service": "sql-graph-generator"}
+
+@app.post("/query", response_model=QueryResponse)
+async def process_query(request: QueryRequest):
+ """
+ Process a user query and return data, visualization, or text response
+ """
+ try:
+ logger.info(f"Processing query: {request.question}")
+
+ result = orchestrator.process_natural_language_query(request.question)
+
+ return QueryResponse(**result)
+
+ except Exception as e:
+ logger.error(f"Error processing query: {str(e)}")
+ raise HTTPException(status_code=500, detail=str(e))
+
+@app.get("/sample-questions")
+async def get_sample_questions():
+ """
+ Get sample questions that users can ask
+ """
+ return {
+ "questions": orchestrator.get_sample_questions(),
+ "description": "Sample questions you can ask the SQL Graph Generator"
+ }
+
+@app.get("/database-schema")
+async def get_database_schema():
+ """
+ Get the database schema information
+ """
+ from utils.config import DATABASE_SCHEMA
+ return {
+ "schema": DATABASE_SCHEMA,
+ "description": "E-commerce database schema with orders, customers, products, and order_items"
+ }
+
+@app.get("/chart-types")
+async def get_supported_chart_types():
+ """
+ Get supported chart types
+ """
+ return {
+ "chart_types": [
+ {"type": "bar", "description": "Bar charts for category comparisons"},
+ {"type": "line", "description": "Line charts for trends over time"},
+ {"type": "pie", "description": "Pie charts for distributions"},
+ {"type": "scatter", "description": "Scatter plots for correlations"},
+ {"type": "heatmap", "description": "Heatmaps for correlation analysis"}
+ ]
+ }
+
+if __name__ == "__main__":
+ import uvicorn
+ uvicorn.run(app, host="0.0.0.0", port=8000)
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/conversation_manager.py b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/conversation_manager.py
new file mode 100644
index 000000000..fba047a15
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/conversation_manager.py
@@ -0,0 +1,211 @@
+"""
+Conversation History Manager for Multi-Turn Conversational Flow
+Tracks context across multiple GenAI calls for intelligent responses
+"""
+
+from typing import Dict, Any, List, Optional
+from dataclasses import dataclass, asdict
+from datetime import datetime
+import json
+
+
@dataclass
class ConversationTurn:
    """Single conversation turn with full context"""
    user_query: str
    route: str
    data: Optional[List[Dict]]
    chart_config: Optional[Dict]
    response_type: str
    agent_response: str
    generated_sql: Optional[str]
    chart_base64: Optional[str]
    timestamp: datetime
    success: bool
    method: str

    def to_dict(self) -> Dict[str, Any]:
        """Convert to dictionary for JSON serialization"""
        serialized = asdict(self)
        # datetime is not JSON-serializable; replace with ISO-8601 text.
        serialized['timestamp'] = self.timestamp.isoformat()
        if self.data:
            serialized['data_summary'] = {
                'count': len(self.data),
                'columns': list(self.data[0].keys()),
                'sample': self.data[:2],
            }
        else:
            serialized['data_summary'] = None
        return serialized

    def to_context_string(self) -> str:
        """Convert to concise context string for prompts"""
        # Long agent responses are truncated to 100 characters.
        if len(self.agent_response) > 100:
            response_part = f"Response: {self.agent_response[:100]}..."
        else:
            response_part = f"Response: {self.agent_response}"

        parts = [f"Q: {self.user_query}", f"Route: {self.route}", response_part]

        if self.data:
            columns = list(self.data[0].keys())
            parts.append(f"Data: {len(self.data)} rows with columns {columns}")

        if self.chart_config:
            kind = self.chart_config.get('chart_type', 'unknown')
            parts.append(f"Chart: {kind} chart created")

        return " | ".join(parts)
+
+
class ConversationManager:
    """
    Manages conversation history and context for multi-turn interactions
    """

    def __init__(self, max_history: int = 10):
        # Bounded list of past turns, newest last.
        self.conversation_history: List[ConversationTurn] = []
        self.max_history = max_history
        self.session_id = datetime.now().strftime("%Y%m%d_%H%M%S")

    def add_turn(self,
                 user_query: str,
                 route: str,
                 result: Dict[str, Any]) -> None:
        """Add a new conversation turn"""
        turn = ConversationTurn(
            user_query=user_query,
            route=route,
            data=result.get('data'),
            chart_config=result.get('chart_config'),
            response_type=result.get('response_type', 'unknown'),
            agent_response=result.get('agent_response', ''),
            generated_sql=result.get('generated_sql'),
            chart_base64=result.get('chart_base64'),
            timestamp=datetime.now(),
            success=result.get('success', False),
            method=result.get('method', 'unknown')
        )
        self.conversation_history.append(turn)

        # Drop the oldest turns once the cap is exceeded.
        while len(self.conversation_history) > self.max_history:
            self.conversation_history.pop(0)

        print(f" Added conversation turn: {user_query} → {route}")

    def get_context_for_prompt(self, context_window: int = 3) -> str:
        """
        Get formatted conversation context for GenAI prompts
        """
        if not self.conversation_history:
            return "No previous conversation history."

        # A falsy context_window (0/None) means "use the whole history".
        if context_window:
            recent = self.conversation_history[-context_window:]
        else:
            recent = self.conversation_history

        lines = ["Previous conversation context:"]
        lines.extend(f"{i}. {t.to_context_string()}" for i, t in enumerate(recent, 1))
        return "\n".join(lines)

    def _latest_successful(self, attribute: str):
        """Newest non-empty attribute value from a successful turn, or None."""
        for turn in reversed(self.conversation_history):
            value = getattr(turn, attribute)
            if value and turn.success:
                return value
        return None

    def get_current_data(self) -> Optional[List[Dict]]:
        """Get data from the most recent turn that has data"""
        return self._latest_successful('data')

    def get_current_chart_config(self) -> Optional[Dict]:
        """Get chart config from the most recent turn that has a chart"""
        return self._latest_successful('chart_config')

    def get_current_chart_base64(self) -> Optional[str]:
        """Get the most recent chart image"""
        return self._latest_successful('chart_base64')

    def has_data_context(self) -> bool:
        """Check if we have data in recent context"""
        return self.get_current_data() is not None

    def has_chart_context(self) -> bool:
        """Check if we have a chart in recent context"""
        return self.get_current_chart_config() is not None

    def get_data_summary(self) -> Dict[str, Any]:
        """Get summary of current data context"""
        data = self.get_current_data()
        if data is None:
            return {"has_data": False}

        return {
            "has_data": True,
            "row_count": len(data),
            "columns": list(data[0].keys()) if data else [],
            "sample_row": data[0] if data else None,
        }

    def get_chart_summary(self) -> Dict[str, Any]:
        """Get summary of current chart context"""
        chart_config = self.get_current_chart_config()
        if chart_config is None:
            return {"has_chart": False}

        return {
            "has_chart": True,
            "chart_type": chart_config.get("chart_type", "unknown"),
            "x_axis": chart_config.get("x_axis", "unknown"),
            "y_axis": chart_config.get("y_axis", "unknown"),
            "title": chart_config.get("title", ""),
        }

    def clear_history(self) -> None:
        """Clear conversation history"""
        self.conversation_history = []
        print(" Conversation history cleared")

    def export_history(self) -> List[Dict]:
        """Export conversation history as JSON-serializable format"""
        return [entry.to_dict() for entry in self.conversation_history]

    def get_recent_queries(self, count: int = 5) -> List[str]:
        """Get recent user queries for context"""
        if count:
            recent = self.conversation_history[-count:]
        else:
            recent = self.conversation_history
        return [entry.user_query for entry in recent]

    def get_last_successful_sql(self) -> Optional[str]:
        """Get the most recent successful SQL query"""
        for turn in reversed(self.conversation_history):
            is_data_turn = turn.route == "DATA_QUERY"
            if turn.generated_sql and turn.success and is_data_turn:
                return turn.generated_sql
        return None

    def should_use_existing_data(self, user_query: str) -> bool:
        """
        Determine if the query can use existing data or needs new data
        """
        query_lower = user_query.lower()

        # Keywords that suggest working with existing data
        chart_keywords = ["chart", "graph", "plot", "visualize", "show", "display"]
        edit_keywords = ["change", "modify", "edit", "update", "make it", "convert to"]
        analysis_keywords = ["analyze", "explain", "what does", "tell me about", "insights", "trends"]

        has_data = self.has_data_context()

        # Reuse existing data when we have some and the query looks like
        # chart/edit/analysis work.
        reuse_keywords = chart_keywords + edit_keywords + analysis_keywords
        if has_data and any(keyword in query_lower for keyword in reuse_keywords):
            return True

        # If query explicitly asks for new data
        new_data_keywords = ["get", "find", "show me", "list", "select", "data"]
        specific_requests = ["orders", "customers", "products", "sales"]

        if any(keyword in query_lower for keyword in new_data_keywords + specific_requests):
            return False

        return has_data  # Default to using existing data if available
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/langchain_orchestrator_v2.py b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/langchain_orchestrator_v2.py
new file mode 100644
index 000000000..771c2b510
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/langchain_orchestrator_v2.py
@@ -0,0 +1,443 @@
+"""
+LangChain orchestrator using RunnableSequence for SQL Graph Dashboard
+Router → branch(DATA_QUERY→OCI, CHART_EDIT→viz_edit, INSIGHT_QA→insight)
+"""
+
+from langchain_core.runnables import Runnable, RunnableLambda, RunnableBranch
+from langchain_core.runnables.utils import Input, Output
+from typing import Dict, Any, List, Optional
+import base64
+import json
+
+from .oci_runnables import OciSqlAgentRunnable
+from .oci_direct_runnables import RouterRunnable, VizGeneratorRunnable, InsightQARunnable
+from .conversation_manager import ConversationManager
+from tools.genai_chart_generator import GenAIChartGenerator
+
+
class ChartEditRunnable(Runnable):
    """
    Runnable for editing existing chart configurations

    Applies simple keyword-driven edits (chart type, sort direction) from the
    user's request on top of the current chart config.
    """

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Modify existing chart based on user request.

        input_data keys:
            current_chart_config: existing config dict (may be empty)
            question: the user's edit request
            data: rows the chart is based on (passed through unchanged)

        Returns a result dict with the updated config. The caller's config
        dict is never mutated in place (the original implementation edited
        it directly, leaking the change back to the caller).
        """
        current_config = input_data.get("current_chart_config", {})
        question = input_data.get("question", "")
        data = input_data.get("data", [])

        # Work on a shallow copy so the caller's dict stays untouched.
        updated_config = dict(current_config)
        question_lower = question.lower()

        # Chart-type keywords: first match wins, in this priority order
        # (same order as the original if/elif chain).
        for chart_type in ("pie", "bar", "line", "scatter"):
            if chart_type in question_lower:
                updated_config["chart_type"] = chart_type
                break

        # Sorting modifications
        if "sort" in question_lower:
            if "desc" in question_lower or "highest" in question_lower:
                updated_config["sort_direction"] = "desc"
            else:
                updated_config["sort_direction"] = "asc"

        return {
            "success": True,
            "config": updated_config,
            "data": data,
            "method": "chart_edit",
            "response_type": "visualization"
        }
+
+
class InsightQARunnable(Runnable):
    """
    Runnable for generating insights about current data

    NOTE(review): this class shadows the InsightQARunnable imported from
    .oci_direct_runnables at the top of this module, so this local
    (langchain_community-based) version is the one actually instantiated by
    the orchestrator — confirm which implementation is intended.
    """

    def __init__(self):
        # Try to build an OCI GenAI chat client; fall back to rule-based
        # insights when the client cannot be created.
        try:
            from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
            from langchain_core.messages import HumanMessage
            from utils import config

            self.genai_client = ChatOCIGenAI(
                model_id=config.MODEL_ID,
                service_endpoint=config.SERVICE_ENDPOINT,
                compartment_id=config.COMPARTMENT_ID,
                model_kwargs={
                    "temperature": 0.7,
                    "top_p": 0.9,
                    "max_tokens": 500
                }
            )
            self.oci_available = True
            print(" Insight QA Runnable initialized")
        except Exception as e:
            print(f"⚠️ Insight QA fallback mode: {e}")
            self.genai_client = None
            self.oci_available = False

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate insights about the current data

        input_data keys: "data" (list of row dicts) and "question" (str).
        Returns a result dict with "text_response" and "response_type".
        """
        data = input_data.get("data", [])
        question = input_data.get("question", "")

        if not data:
            return {
                "success": False,
                "error": "No data available for analysis",
                "response_type": "text_response"
            }

        # Create analysis prompt: row count, column names, and first 3 rows.
        data_summary = {
            "total_rows": len(data),
            "columns": list(data[0].keys()) if data else [],
            "sample_data": data[:3]
        }

        prompt = f"""Analyze this data and answer the user's question with insights.

User Question: "{question}"

Data Summary:
- Total rows: {data_summary['total_rows']}
- Columns: {data_summary['columns']}
- Sample data: {data_summary['sample_data']}

Provide a concise analysis with specific insights, trends, or patterns you observe in the data.
"""

        if self.oci_available:
            try:
                from langchain_core.messages import HumanMessage
                messages = [HumanMessage(content=prompt)]
                response = self.genai_client.invoke(messages)

                # Extract content (chat models return a message object with
                # .content; fall back to str() for anything else).
                if hasattr(response, 'content'):
                    insight_text = response.content
                else:
                    insight_text = str(response)

                return {
                    "success": True,
                    "text_response": insight_text,
                    "data": data,
                    "response_type": "text_response",
                    "method": "genai_analysis"
                }

            except Exception as e:
                # Model call failed — degrade to the rule-based summary.
                print(f" Insight generation error: {e}")
                return self._fallback_insight(data, question)
        else:
            return self._fallback_insight(data, question)

    def _fallback_insight(self, data: List[Dict], question: str) -> Dict[str, Any]:
        """Generate simple fallback insights"""
        # No-data guard (invoke() already checks, so normally unreachable).
        if not data:
            return {
                "success": True,
                "text_response": "No data available for analysis.",
                "response_type": "text_response"
            }

        insights = [
            f"Dataset contains {len(data)} records",
            f"Available fields: {', '.join(data[0].keys()) if data else 'None'}"
        ]

        # Simple numeric analysis — averages over at most the first 10 rows.
        numeric_fields = []
        for field in data[0].keys() if data else []:
            try:
                values = [float(row.get(field, 0)) for row in data[:10]]
                if values:
                    avg_val = sum(values) / len(values)
                    insights.append(f"{field} average: {avg_val:.2f}")
                    numeric_fields.append(field)
            except (ValueError, TypeError):
                # Non-numeric column: skip silently.
                pass

        if not numeric_fields:
            insights.append("No numeric fields found for statistical analysis.")

        return {
            "success": True,
            "text_response": "\n".join(insights),
            "data": data,
            "response_type": "text_response",
            "method": "fallback_analysis"
        }
+
+
class LangChainOrchestratorV2:
    """
    Clean LangChain orchestrator using RunnableSequence architecture

    One query flows: RouterRunnable → one handler (_handle_data_query,
    _handle_chart_edit, or _handle_insight_qa) → a single conversation-history
    record. History is recorded exactly once per query, in
    process_natural_language_query (the original recorded each DATA_QUERY and
    CHART_EDIT twice because the handlers also called add_turn).
    """

    def __init__(self):
        print("🚀 Initializing LangChain Orchestrator V2...")

        # Initialize all runnables
        self.router = RouterRunnable()
        self.sql_agent = OciSqlAgentRunnable()
        self.viz_generator = VizGeneratorRunnable()
        self.chart_editor = ChartEditRunnable()
        # NOTE(review): this resolves to the InsightQARunnable defined in this
        # module, which shadows the one imported from .oci_direct_runnables —
        # confirm which implementation is intended.
        self.insight_qa = InsightQARunnable()
        self.chart_generator = GenAIChartGenerator()

        # Conversation history manager
        self.conversation = ConversationManager()

        # Track current state (for backward compatibility)
        self.current_data = None
        self.current_chart_config = None

        print(" LangChain Orchestrator V2 initialized")

    def get_sample_questions(self) -> List[str]:
        """Sample questions surfaced by the /sample-questions API endpoint
        (api/main.py calls this method)."""
        return [
            "Show me all orders",
            "List the top customers by total spend",
            "Create a bar chart of sales by product",
            "What trends do you see in the current data?",
        ]

    def process_natural_language_query(self, user_question: str) -> Dict[str, Any]:
        """
        Main entry point - processes user query through the complete pipeline

        Routes the question, dispatches to the matching handler, records the
        turn once, then refreshes the cached data/chart state.
        """
        try:
            print(f" Processing query: {user_question}")

            # Step 1: Route the query with conversation context
            route_input = {
                "question": user_question,
                "context": {
                    "has_data": self.conversation.has_data_context(),
                    "has_chart": self.conversation.has_chart_context(),
                    "conversation_history": self.conversation.get_context_for_prompt(3),
                    "data_summary": self.conversation.get_data_summary(),
                    "chart_summary": self.conversation.get_chart_summary()
                }
            }

            routing_result = self.router.invoke(route_input)
            route = routing_result.get("route", "DATA_QUERY")
            print(f" Router decision: {route} (confidence: {routing_result.get('confidence', 0.5)})")
            print(f" Reasoning: {routing_result.get('reasoning', 'No reasoning')}")

            # Step 2: Branch based on route
            if route == "DATA_QUERY":
                result = self._handle_data_query(user_question)
            elif route == "CHART_EDIT":
                result = self._handle_chart_edit(user_question)
            elif route == "INSIGHT_QA":
                result = self._handle_insight_qa(user_question)
            else:
                # Fallback to data query
                result = self._handle_data_query(user_question)

            # Step 3: Record this conversation turn. This is the ONLY place a
            # turn is added, so each query produces exactly one history entry.
            self.conversation.add_turn(user_question, route, result)

            # Update backward compatibility state
            if result.get('data'):
                self.current_data = result['data']
            if result.get('chart_config'):
                self.current_chart_config = result['chart_config']

            return result

        except Exception as e:
            print(f" Orchestrator error: {e}")
            import traceback
            traceback.print_exc()
            return {
                "success": False,
                "error": str(e),
                "response_type": "error"
            }

    def _handle_data_query(self, user_question: str) -> Dict[str, Any]:
        """
        Handle DATA_QUERY route: fetch rows via the OCI SQL Agent and return
        them without any chart generation.
        """
        try:
            # Step 1: Get data from OCI SQL Agent
            sql_input = {"question": user_question}
            sql_result = self.sql_agent.invoke(sql_input)

            if not sql_result.get("success", False):
                return {
                    "success": False,
                    "error": sql_result.get("error", "SQL query failed"),
                    "response_type": "error"
                }

            data = sql_result.get("data", [])
            if not data:
                return {
                    "success": True,
                    "query": user_question,
                    "agent_response": sql_result.get("agent_response", "No data found"),
                    "response_type": "text_response",
                    "text_response": sql_result.get("agent_response", "No data found"),
                    "data": []
                }

            # DATA_QUERY only returns data - no automatic chart generation.
            # Charts should only be created when explicitly requested via CHART_EDIT.

            # Store data for conversation context. History recording is left
            # to process_natural_language_query; adding a turn here as well
            # duplicated every DATA_QUERY in the history.
            self.current_data = data

            # Return data without chart
            return {
                "success": True,
                "query": user_question,
                "agent_response": sql_result.get("agent_response", ""),
                "response_type": "data",
                "data": data,
                "generated_sql": sql_result.get("generated_sql"),
                "additional_info": sql_result.get("additional_info"),
                "method": "data_only"
            }

        except Exception as e:
            print(f" Data query handling error: {e}")
            return {
                "success": False,
                "error": str(e),
                "response_type": "error"
            }

    def _handle_chart_edit(self, user_question: str) -> Dict[str, Any]:
        """
        Handle CHART_EDIT route: fetch fresh data and generate/update a chart.
        """
        # Always get fresh data for chart requests to ensure we're using the right dataset
        print(" Getting fresh data for chart...")
        sql_input = {"question": user_question}
        sql_result = self.sql_agent.invoke(sql_input)

        if not sql_result.get("success", False):
            return {
                "success": False,
                "error": f"Failed to get data for chart: {sql_result.get('error', 'Unknown error')}",
                "response_type": "error"
            }

        current_data = sql_result.get("data", [])
        if not current_data:
            return {
                "success": False,
                "error": "No data available for chart creation",
                "response_type": "error"
            }

        # Store the new data
        self.current_data = current_data
        print(f" Retrieved {len(current_data)} rows for chart generation")

        # Get current chart config for potential reuse
        current_chart_config = self.conversation.get_current_chart_config()

        # If we have data but no chart config, create a new chart (don't redirect to data query)

        try:
            # Generate chart directly using GenAI Chart Generator
            chart_result = self.chart_generator.generate_chart(
                user_request=user_question,
                data=current_data,
                chart_params=current_chart_config or {}
            )

            if chart_result.get("success", False):
                # Store the chart config for future use. The conversation
                # turn itself is recorded once by the caller; recording it
                # here too duplicated every CHART_EDIT in the history.
                self.current_chart_config = chart_result.get("chart_config", {})

                return {
                    "success": True,
                    "query": user_question,
                    "agent_response": f"Chart created: {user_question}",
                    "response_type": "visualization",
                    "data": current_data,
                    "chart_base64": chart_result.get("chart_base64"),
                    "chart_config": chart_result.get("chart_config", {}),
                    "method": f"chart_generated_+_{chart_result.get('method', 'unknown')}"
                }
            else:
                return {
                    "success": False,
                    "error": f"Failed to update chart: {chart_result.get('error', 'Unknown error')}",
                    "response_type": "error"
                }

        except Exception as e:
            print(f" Chart edit handling error: {e}")
            return {
                "success": False,
                "error": str(e),
                "response_type": "error"
            }

    def _handle_insight_qa(self, user_question: str) -> Dict[str, Any]:
        """
        Handle INSIGHT_QA route: analyze current data (falls back to a data
        query when no data has been fetched yet).
        """
        if not self.current_data:
            # No data to analyze, redirect to data query
            return self._handle_data_query(user_question)

        try:
            insight_input = {
                "question": user_question,
                "data": self.current_data
            }

            insight_result = self.insight_qa.invoke(insight_input)

            return {
                "success": insight_result.get("success", True),
                "query": user_question,
                "agent_response": insight_result.get("text_response", "No insights generated"),
                "response_type": "text_response",
                "text_response": insight_result.get("text_response", "No insights generated"),
                "data": self.current_data,
                "method": insight_result.get("method", "insight_analysis")
            }

        except Exception as e:
            print(f" Insight QA handling error: {e}")
            return {
                "success": False,
                "error": str(e),
                "response_type": "error"
            }

    def get_current_data(self) -> Optional[List[Dict]]:
        """Get current data for transparency"""
        return self.current_data

    def get_current_chart_config(self) -> Optional[Dict]:
        """Get current chart config for transparency"""
        return self.current_chart_config

    def clear_context(self):
        """Clear current context"""
        self.current_data = None
        self.current_chart_config = None
        print(" Context cleared")
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_direct_runnables.py b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_direct_runnables.py
new file mode 100644
index 000000000..aefda1ecf
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_direct_runnables.py
@@ -0,0 +1,412 @@
+"""
+LangChain Runnables using direct OCI SDK calls for GenAI models
+Pure OCI SDK wrapped as LangChain Runnables - no langchain-community dependencies
+"""
+
+from langchain_core.runnables import Runnable
+from typing import Dict, Any, List
+import oci
+import json
+from utils import config
+
+
class OciGenAIRunnable(Runnable):
    """
    Direct OCI GenAI model calls wrapped as LangChain Runnable

    Builds a GenerativeAiInferenceClient from ~/.oci/config and sends a
    single-message generic chat request; degrades to an error result when the
    client cannot be initialized.
    """

    def __init__(self, purpose: str = "general"):
        # purpose is only used for log labelling.
        self.purpose = purpose
        try:
            # Initialize OCI GenAI client with correct endpoint
            oci_config = oci.config.from_file()
            # Override endpoint to match the model's region.
            # NOTE(review): region is hard-coded to eu-frankfurt-1 and must
            # agree with config.SERVICE_ENDPOINT — confirm for deployments in
            # other regions.
            oci_config['region'] = 'eu-frankfurt-1'
            self.genai_client = oci.generative_ai_inference.GenerativeAiInferenceClient(oci_config)

            # Set correct service endpoint
            self.genai_client.base_client.endpoint = config.SERVICE_ENDPOINT

            self.model_id = config.MODEL_ID
            self.service_endpoint = config.SERVICE_ENDPOINT
            self.compartment_id = config.COMPARTMENT_ID
            self.oci_available = True
            print(f"OCI GenAI Direct Runnable ({purpose}) initialized with endpoint: {config.SERVICE_ENDPOINT}")
        except Exception as e:
            print(f"OCI GenAI Direct Runnable ({purpose}) failed: {e}")
            self.genai_client = None
            self.oci_available = False

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Call OCI GenAI model directly.

        input_data keys: "prompt" (str), optional "max_tokens" (default 500)
        and "temperature" (default 0.7). Returns a dict with "success",
        "response" (model text), "method", and on failure "error".
        """
        prompt = input_data.get("prompt", "")
        max_tokens = input_data.get("max_tokens", 500)
        temperature = input_data.get("temperature", 0.7)

        if not self.oci_available:
            return {
                "success": False,
                "error": "OCI GenAI not available",
                "response": "",
                "method": "error"
            }

        try:
            # Create chat request using Oracle demo format for OpenAI GPT OSS 120B
            content = oci.generative_ai_inference.models.TextContent()
            content.text = prompt

            message = oci.generative_ai_inference.models.Message()
            message.role = "USER"
            message.content = [content]

            chat_request = oci.generative_ai_inference.models.GenericChatRequest()
            chat_request.api_format = oci.generative_ai_inference.models.BaseChatRequest.API_FORMAT_GENERIC
            chat_request.messages = [message]
            chat_request.max_tokens = max_tokens
            chat_request.temperature = temperature
            chat_request.frequency_penalty = 0
            chat_request.presence_penalty = 0
            chat_request.top_p = 1
            chat_request.top_k = 0

            chat_detail = oci.generative_ai_inference.models.ChatDetails()
            chat_detail.serving_mode = oci.generative_ai_inference.models.OnDemandServingMode(model_id=self.model_id)
            chat_detail.chat_request = chat_request
            chat_detail.compartment_id = self.compartment_id

            # Call OCI GenAI
            response = self.genai_client.chat(chat_detail)

            # Extract response text from the first choice's message parts.
            # (Loop variable renamed from `content`, which shadowed the
            # TextContent instance built above.)
            response_text = ""
            if hasattr(response.data, 'chat_response') and response.data.chat_response:
                if hasattr(response.data.chat_response, 'choices') and response.data.chat_response.choices:
                    choice = response.data.chat_response.choices[0]
                    if hasattr(choice, 'message') and choice.message:
                        if hasattr(choice.message, 'content') and choice.message.content:
                            for part in choice.message.content:
                                if hasattr(part, 'text'):
                                    response_text += part.text

            return {
                "success": True,
                "response": response_text.strip(),
                "method": "oci_direct",
                "model_id": self.model_id
            }

        except Exception as e:
            error_msg = str(e)
            print(f"OCI GenAI Direct call failed ({self.purpose}): {error_msg}")

            # Check for specific error types
            if "does not support" in error_msg:
                return {
                    "success": False,
                    "error": f"Model {self.model_id} API format incompatible",
                    "response": "",
                    "method": "model_error"
                }

            return {
                "success": False,
                "error": error_msg,
                "response": "",
                "method": "call_error"
            }
+
+
class RouterRunnable(Runnable):
    """
    Intelligent routing using direct OCI GenAI calls

    Classifies a user question into DATA_QUERY / CHART_EDIT / INSIGHT_QA via
    a GenAI prompt, with a keyword fallback when the model is unavailable or
    returns unparseable JSON.
    """

    def __init__(self):
        self.genai_runnable = OciGenAIRunnable("router")

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Route user query and return routing decision

        input_data keys: "question" (str) and optional "context" (dict with
        has_data / has_chart flags). Returns a dict with "route",
        "reasoning", "confidence", and "method".
        """
        user_question = input_data.get("question", "")
        context = input_data.get("context", {})

        # Create routing prompt
        prompt = f"""You are an intelligent router for a data dashboard. Analyze the user query and decide which tool to use.

Tools Available:
1. DATA_QUERY: For getting NEW data from database (show orders, get customers, list products, etc.)
2. CHART_EDIT: For creating ANY charts or visualizations (make chart, graph, pie chart, bar chart, etc.) - Will automatically get data if needed
3. INSIGHT_QA: For analyzing current data (trends, patterns, outliers)

IMPORTANT: If user asks for ANY chart/graph/visualization, always choose CHART_EDIT regardless of whether data exists or not.

Context:
- Has existing data: {context.get('has_data', False)}
- Has existing chart: {context.get('has_chart', False)}

User Query: "{user_question}"

Respond with ONLY a JSON object:
{{"route": "DATA_QUERY|CHART_EDIT|INSIGHT_QA", "reasoning": "Brief explanation", "confidence": 0.0-1.0}}"""

        if not self.genai_runnable.oci_available:
            return self._fallback_route(user_question)

        # Call OCI GenAI — low temperature for a more deterministic decision.
        genai_input = {
            "prompt": prompt,
            "max_tokens": 200,
            "temperature": 0.3
        }

        result = self.genai_runnable.invoke(genai_input)

        if result.get("success"):
            try:
                # Parse JSON response.
                # NOTE(review): assumes the model returns bare JSON; a reply
                # wrapped in markdown code fences will fail here and drop to
                # the keyword fallback — confirm the model's output format.
                route_data = json.loads(result["response"])
                return {
                    "route": route_data.get("route", "DATA_QUERY"),
                    "reasoning": route_data.get("reasoning", "GenAI routing"),
                    "confidence": route_data.get("confidence", 0.9),
                    "method": "oci_genai"
                }
            except json.JSONDecodeError:
                print(f"Failed to parse GenAI response: {result['response']}")
                return self._fallback_route(user_question)
        else:
            print(f"GenAI routing failed: {result.get('error')}")
            return self._fallback_route(user_question)

    def _fallback_route(self, user_question: str) -> Dict[str, Any]:
        """Simple rule-based fallback routing"""
        user_lower = user_question.lower()

        # Data keywords are checked first, so e.g. "show a chart" routes to
        # DATA_QUERY here (unlike the GenAI prompt, which prefers CHART_EDIT).
        if any(word in user_lower for word in ["show", "get", "find", "list", "data"]):
            return {
                "route": "DATA_QUERY",
                "reasoning": "Fallback: Detected data request",
                "confidence": 0.5,
                "method": "fallback"
            }
        elif any(word in user_lower for word in ["chart", "pie", "bar", "line", "graph"]):
            return {
                "route": "CHART_EDIT",
                "reasoning": "Fallback: Detected chart modification",
                "confidence": 0.5,
                "method": "fallback"
            }
        else:
            return {
                "route": "INSIGHT_QA",
                "reasoning": "Fallback: Default to analysis",
                "confidence": 0.3,
                "method": "fallback"
            }
+
+
class VizGeneratorRunnable(Runnable):
    """
    Generate visualization configs using direct OCI GenAI calls

    Produces a chart-config dict (chart_type / x_axis / y_axis / title /
    caption) for the given rows, with a heuristic fallback when the model is
    unavailable or returns unparseable JSON.
    """

    def __init__(self):
        self.genai_runnable = OciGenAIRunnable("viz_generator")

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate chart config from data and user question

        input_data keys: "data" (list of row dicts) and "question" (str).
        Returns {"success", "config", "method"} or an error dict.
        """
        data = input_data.get("data", [])
        question = input_data.get("question", "")

        if not data:
            return {
                "success": False,
                "error": "No data provided for visualization"
            }

        # Analyze data structure from the first row only.
        sample_row = data[0] if data else {}
        columns = list(sample_row.keys()) if sample_row else []

        # Generate chart config prompt
        prompt = f"""Generate a chart configuration for this data visualization request.

User Question: "{question}"
Data Columns: {columns}
Data Sample (first 2 rows): {data[:2]}
Total Rows: {len(data)}

Respond with ONLY a JSON object:
{{"chart_type": "bar|line|pie|scatter", "x_axis": "column_name", "y_axis": "column_name", "title": "Chart Title", "caption": "Brief insight"}}"""

        if not self.genai_runnable.oci_available:
            return self._fallback_config(data, question)

        # Call OCI GenAI — low temperature for a stable config.
        genai_input = {
            "prompt": prompt,
            "max_tokens": 300,
            "temperature": 0.3
        }

        result = self.genai_runnable.invoke(genai_input)

        if result.get("success"):
            try:
                # Parse JSON response.
                # NOTE(review): assumes bare JSON (no markdown fences);
                # anything else falls through to the heuristic fallback.
                config_data = json.loads(result["response"])
                return {
                    "success": True,
                    "config": config_data,
                    "method": "oci_genai"
                }
            except json.JSONDecodeError:
                print(f"Failed to parse viz config: {result['response']}")
                return self._fallback_config(data, question)
        else:
            print(f"Viz generation failed: {result.get('error')}")
            return self._fallback_config(data, question)

    def _fallback_config(self, data: List[Dict], question: str) -> Dict[str, Any]:
        """Generate simple fallback chart config"""
        if not data:
            return {"success": False, "error": "No data"}

        sample_row = data[0]
        columns = list(sample_row.keys())

        # Find numeric columns by attempting a float() conversion on the
        # first row's values.
        numeric_cols = []
        for col in columns:
            try:
                float(str(sample_row[col]))
                numeric_cols.append(col)
            except (ValueError, TypeError):
                pass

        # Simple config generation: first column on x, first numeric on y.
        # With a single column, x and y intentionally use the same column.
        if len(columns) >= 2:
            x_axis = columns[0]
            y_axis = numeric_cols[0] if numeric_cols else columns[1]
            chart_type = "bar"
        else:
            x_axis = columns[0]
            y_axis = columns[0]
            chart_type = "bar"

        return {
            "success": True,
            "config": {
                "chart_type": chart_type,
                "x_axis": x_axis,
                "y_axis": y_axis,
                "title": f"Chart for: {question}",
                "caption": "Fallback visualization configuration"
            },
            "method": "fallback"
        }
+
+
class InsightQARunnable(Runnable):
    """
    Generate insights about the current data using direct OCI GenAI calls.

    Falls back to simple descriptive statistics when GenAI is unavailable
    or the call fails.
    """

    def __init__(self):
        # Task-specific GenAI wrapper for the insight/Q&A role.
        self.genai_runnable = OciGenAIRunnable("insight_qa")

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """Answer the user's question with insights about the supplied rows."""
        data = input_data.get("data", [])
        question = input_data.get("question", "")

        if not data:
            return {
                "success": False,
                "error": "No data available for analysis",
                "response_type": "text_response"
            }

        # Summarize the dataset for the prompt: size, schema, small sample.
        total_rows = len(data)
        column_names = list(data[0].keys())
        sample_rows = data[:3]

        prompt = f"""Analyze this data and answer the user's question with insights.

User Question: "{question}"

Data Summary:
- Total rows: {total_rows}
- Columns: {column_names}
- Sample data: {sample_rows}

Provide a concise analysis with specific insights, trends, or patterns you observe in the data.
"""

        if not self.genai_runnable.oci_available:
            return self._fallback_insight(data, question)

        result = self.genai_runnable.invoke({
            "prompt": prompt,
            "max_tokens": 400,
            "temperature": 0.7
        })

        if not result.get("success"):
            print(f"⚠️ Insight generation failed: {result.get('error')}")
            return self._fallback_insight(data, question)

        return {
            "success": True,
            "text_response": result["response"],
            "data": data,
            "response_type": "text_response",
            "method": "oci_genai"
        }

    def _fallback_insight(self, data: List[Dict], question: str) -> Dict[str, Any]:
        """Produce basic descriptive insights without calling GenAI."""
        if not data:
            return {
                "success": True,
                "text_response": "No data available for analysis.",
                "response_type": "text_response",
                "method": "fallback"
            }

        lines = [
            f"Dataset contains {len(data)} records",
            f"Available fields: {', '.join(data[0].keys())}"
        ]

        # Average each field that parses as a number (first 10 rows only).
        for field in data[0]:
            try:
                numbers = [float(row.get(field, 0)) for row in data[:10]]
            except (ValueError, TypeError):
                continue
            if numbers:
                lines.append(f"{field} average: {sum(numbers) / len(numbers):.2f}")

        return {
            "success": True,
            "text_response": "\n".join(lines),
            "data": data,
            "response_type": "text_response",
            "method": "fallback"
        }
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_runnables.py b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_runnables.py
new file mode 100644
index 000000000..212854ad1
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/orchestration/oci_runnables.py
@@ -0,0 +1,374 @@
+"""
+LangChain Runnables that wrap OCI SDK calls for clean integration
+"""
+
+from langchain_core.runnables import Runnable
+try:
+ from langchain_oci.chat_models import ChatOCIGenAI
+except ImportError:
+ try:
+ from langchain_community.chat_models.oci_generative_ai import ChatOCIGenAI
+ except ImportError:
+ print("⚠️ Neither langchain-oci nor langchain-community ChatOCIGenAI available")
+ ChatOCIGenAI = None
+from langchain_core.messages import HumanMessage
+from typing import Dict, Any, List
+import oci
+from utils import config
+import json
+
class OciSqlAgentRunnable(Runnable):
    """
    LangChain Runnable that wraps OCI Agent Runtime SDK to extract tool_outputs reliably.

    The agent's natural-language answer arrives in the chat message, while the
    actual SQL results arrive in tool_outputs[0].result; this wrapper parses
    both into a plain dict so downstream runnables never touch the SDK.
    """

    def __init__(self):
        # Initialize OCI Agent Runtime client from the default ~/.oci config.
        try:
            oci_config = oci.config.from_file()
            # Override region to match the agent endpoint
            oci_config['region'] = 'eu-frankfurt-1'
            self.client = oci.generative_ai_agent_runtime.GenerativeAiAgentRuntimeClient(oci_config)
            self.agent_endpoint_id = config.AGENT_ENDPOINT_ID
            print("OCI SQL Agent Runnable initialized with eu-frankfurt-1")
        except Exception as e:
            # Degraded mode: invoke() returns a structured failure instead of
            # raising, so the orchestrator keeps running without the agent.
            print(f"Failed to initialize OCI Agent Runtime: {e}")
            self.client = None
            self.agent_endpoint_id = None

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Call OCI Agent and extract tool_outputs[0].result for reliable data.

        Args:
            input_data: dict with a "question" key; any non-dict input is
                stringified and used as the question.

        Returns:
            On success: {"success": True, "agent_response", "data" (list of
            rows), "generated_sql", "additional_info", "tool_outputs"}.
            On failure: {"success": False, "error", "data": [], "agent_response"}.
        """
        user_question = input_data.get("question", "") if isinstance(input_data, dict) else str(input_data)

        if not self.client or not self.agent_endpoint_id:
            return {
                "success": False,
                "error": "OCI Agent Runtime not available",
                "data": [],
                "agent_response": "Agent not initialized"
            }

        try:
            print(f"OCI SQL Agent: Executing query: {user_question}")

            # Step 1: Create a session first (required for sessionId).
            # NOTE(review): a fresh session is created per call, so
            # conversation context is not carried across invocations.
            create_session_response = self.client.create_session(
                create_session_details=oci.generative_ai_agent_runtime.models.CreateSessionDetails(
                    display_name="SQL Query Session",
                    description="Session for SQL query execution"
                ),
                agent_endpoint_id=self.agent_endpoint_id
            )
            session_id = create_session_response.data.id
            print(f"Created session: {session_id}")

            # Step 2: Create chat request with required sessionId
            chat_request = oci.generative_ai_agent_runtime.models.ChatDetails(
                user_message=user_question,
                session_id=session_id,
                should_stream=False
            )

            # Step 3: Call OCI Agent
            response = self.client.chat(
                agent_endpoint_id=self.agent_endpoint_id,
                chat_details=chat_request
            )

            # Extract message content. Defensive hasattr chain because the
            # SDK response may omit fields depending on the agent's reply.
            message_content = ""
            if hasattr(response.data, 'message') and response.data.message:
                if hasattr(response.data.message, 'content') and response.data.message.content:
                    if hasattr(response.data.message.content, 'text'):
                        message_content = response.data.message.content.text or ""

            # Extract tool outputs (where SQL data lives)
            tool_outputs = getattr(response.data, 'tool_outputs', []) or []
            data = []
            generated_sql = None
            additional_info = None

            # Only the first tool output is consumed.
            if tool_outputs and len(tool_outputs) > 0:
                result = tool_outputs[0].result if hasattr(tool_outputs[0], 'result') else None
                if result:
                    try:
                        # Parse JSON data from tool output; the payload may be
                        # a bare row list or a dict carrying data + metadata.
                        if isinstance(result, str):
                            parsed_result = json.loads(result)
                        else:
                            parsed_result = result

                        if isinstance(parsed_result, list):
                            data = parsed_result
                        elif isinstance(parsed_result, dict):
                            data = parsed_result.get('data', [])
                            generated_sql = parsed_result.get('generated_sql')
                            additional_info = parsed_result.get('additional_info')
                    except json.JSONDecodeError:
                        # If not JSON, treat as raw data
                        data = [{"result": result}]

            return {
                "success": True,
                "agent_response": message_content.strip(),
                "data": data,
                "generated_sql": generated_sql,
                "additional_info": additional_info,
                "tool_outputs": tool_outputs  # Pass through for transparency
            }

        except Exception as e:
            print(f"OCI SQL Agent error: {e}")
            return {
                "success": False,
                "error": str(e),
                "data": [],
                "agent_response": f"Error calling SQL Agent: {str(e)}"
            }
+
+
class RouterRunnable(Runnable):
    """
    LangChain Runnable for intelligent routing using ChatOCIGenAI.

    Classifies a user question as DATA_QUERY, CHART_EDIT, or INSIGHT_QA.
    Uses OCI GenAI when available; otherwise (or when the model response
    cannot be parsed) falls back to simple keyword rules.
    """

    def __init__(self):
        self.genai_client = None
        self.oci_available = False

        # ChatOCIGenAI is None when neither langchain-oci nor
        # langchain-community could be imported at module load time.
        if ChatOCIGenAI is None:
            print("ChatOCIGenAI not available - Router using fallback")
            return

        try:
            self.genai_client = ChatOCIGenAI(
                model_id=config.MODEL_ID,
                service_endpoint=config.SERVICE_ENDPOINT,
                compartment_id=config.COMPARTMENT_ID,
                model_kwargs={
                    "temperature": config.TEMPERATURE,
                    "top_p": config.TOP_P,
                    "max_tokens": config.MAX_TOKENS
                }
            )
            self.oci_available = True
            print("Router Runnable with ChatOCIGenAI initialized")
        except Exception as e:
            print(f"Router Runnable fallback mode: {e}")
            self.genai_client = None
            self.oci_available = False

    @staticmethod
    def _strip_code_fences(text: str) -> str:
        """Remove an optional surrounding markdown ```/```json fence."""
        stripped = text.strip()
        if stripped.startswith("```"):
            # Drop the opening fence line (it may carry a language tag).
            stripped = stripped.split("\n", 1)[1] if "\n" in stripped else ""
            if stripped.rstrip().endswith("```"):
                stripped = stripped.rstrip()[:-3]
        return stripped.strip()

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Route a user query and return a routing decision.

        Args:
            input_data: dict with "question" (str); any non-dict input is
                stringified and used as the question.

        Returns:
            dict with "route", "reasoning", "confidence", and "params".
        """
        user_question = input_data.get("question", "") if isinstance(input_data, dict) else str(input_data)

        # Routing prompt
        prompt = f"""You are an intelligent router for a data dashboard. Analyze the user query and decide which tool to use.

Tools Available:
1. DATA_QUERY: For getting new data from database (show orders, get customers, etc.)
2. CHART_EDIT: For modifying existing charts (make it pie chart, sort by amount, etc.)
3. INSIGHT_QA: For analyzing current data (trends, patterns, outliers)

User Query: "{user_question}"

Respond with ONLY a JSON object:
{{
    "route": "DATA_QUERY|CHART_EDIT|INSIGHT_QA",
    "reasoning": "Brief explanation",
    "confidence": 0.0-1.0,
    "params": {{}}
}}"""

        if not self.oci_available:
            return self._fallback_route(user_question)

        try:
            response = self.genai_client.invoke([HumanMessage(content=prompt)])
            content = response.content if hasattr(response, 'content') else str(response)

            try:
                # json is already imported at module level (the original local
                # import was redundant). Strip optional markdown fences that
                # chat models frequently wrap JSON in before parsing.
                route_data = json.loads(self._strip_code_fences(content))
                return {
                    "route": route_data.get("route", "DATA_QUERY"),
                    "reasoning": route_data.get("reasoning", "GenAI routing"),
                    "confidence": route_data.get("confidence", 0.9),
                    "params": route_data.get("params", {})
                }
            except json.JSONDecodeError:
                print(f"Failed to parse GenAI response: {content}")
                return self._fallback_route(user_question)

        except Exception as e:
            print(f"GenAI routing error: {e}")
            return self._fallback_route(user_question)

    def _fallback_route(self, user_question: str) -> Dict[str, Any]:
        """Simple rule-based fallback routing (data keywords win over chart)."""
        user_lower = user_question.lower()

        if any(word in user_lower for word in ["show", "get", "find", "list", "data"]):
            return {
                "route": "DATA_QUERY",
                "reasoning": "Fallback: Detected data request",
                "confidence": 0.5,
                "params": {}
            }
        elif any(word in user_lower for word in ["chart", "pie", "bar", "line", "graph"]):
            return {
                "route": "CHART_EDIT",
                "reasoning": "Fallback: Detected chart modification",
                "confidence": 0.5,
                "params": {}
            }
        else:
            return {
                "route": "INSIGHT_QA",
                "reasoning": "Fallback: Default to analysis",
                "confidence": 0.3,
                "params": {}
            }
+
+
class VizGeneratorRunnable(Runnable):
    """
    LangChain Runnable for generating visualization configs from data.

    Asks ChatOCIGenAI for a chart-config JSON; falls back to a heuristic
    config when GenAI is unavailable or its response cannot be parsed.
    """

    def __init__(self):
        self.genai_client = None
        self.oci_available = False

        # Fix: guard the import-failure case. ChatOCIGenAI is None when
        # neither langchain-oci nor langchain-community is installed, and
        # calling None(...) raised a misleading TypeError here. This also
        # makes the class consistent with RouterRunnable's guard.
        if ChatOCIGenAI is None:
            print("Viz Generator fallback mode: ChatOCIGenAI not available")
            return

        try:
            self.genai_client = ChatOCIGenAI(
                model_id=config.MODEL_ID,
                service_endpoint=config.SERVICE_ENDPOINT,
                compartment_id=config.COMPARTMENT_ID,
                model_kwargs={
                    "temperature": 0.3,
                    "top_p": 0.9,
                    "max_tokens": 1000
                }
            )
            self.oci_available = True
            print("Viz Generator Runnable initialized")
        except Exception as e:
            print(f"Viz Generator fallback mode: {e}")
            self.genai_client = None
            self.oci_available = False

    @staticmethod
    def _strip_code_fences(text: str) -> str:
        """Remove an optional surrounding markdown ```/```json fence."""
        stripped = text.strip()
        if stripped.startswith("```"):
            # Drop the opening fence line (it may carry a language tag).
            stripped = stripped.split("\n", 1)[1] if "\n" in stripped else ""
            if stripped.rstrip().endswith("```"):
                stripped = stripped.rstrip()[:-3]
        return stripped.strip()

    def invoke(self, input_data: Dict[str, Any]) -> Dict[str, Any]:
        """
        Generate chart config from data and user question.

        Args:
            input_data: dict with "data" (list of row dicts), "question"
                (str), and optional "chart_type" hint (defaults to "auto").

        Returns:
            On success: {"success": True, "config": {...}, "method": ...}.
            With no data: {"success": False, "error": ...}.
        """
        data = input_data.get("data", [])
        question = input_data.get("question", "")
        suggested_type = input_data.get("chart_type", "auto")

        if not data:
            return {
                "success": False,
                "error": "No data provided for visualization"
            }

        # Describe the data shape to the model with only a small sample.
        sample_row = data[0]
        columns = list(sample_row.keys())

        prompt = f"""Generate a chart configuration for this data visualization request.

User Question: "{question}"
Suggested Chart Type: {suggested_type}
Data Columns: {columns}
Data Sample (first 3 rows): {data[:3]}
Total Rows: {len(data)}

Respond with ONLY a JSON object:
{{
    "chart_type": "bar|line|pie|scatter",
    "x_axis": "column_name",
    "y_axis": "column_name",
    "title": "Chart Title",
    "caption": "Brief insight about the data",
    "color_field": "optional_column_for_colors"
}}"""

        if not self.oci_available:
            return self._fallback_config(data, question)

        try:
            response = self.genai_client.invoke([HumanMessage(content=prompt)])
            content = response.content if hasattr(response, 'content') else str(response)

            try:
                # json is already imported at module level (the original local
                # import was redundant). Strip optional markdown fences that
                # chat models frequently wrap JSON in before parsing.
                config_data = json.loads(self._strip_code_fences(content))
                return {
                    "success": True,
                    "config": config_data,
                    "method": "genai_generated"
                }
            except json.JSONDecodeError:
                print(f"Failed to parse viz config: {content}")
                return self._fallback_config(data, question)

        except Exception as e:
            print(f"Viz generation error: {e}")
            return self._fallback_config(data, question)

    def _fallback_config(self, data: List[Dict], question: str) -> Dict[str, Any]:
        """Generate a simple fallback chart config: bar chart of the first
        column vs. the first numeric column (or the second column)."""
        if not data:
            return {"success": False, "error": "No data"}

        sample_row = data[0]
        columns = list(sample_row.keys())

        # Detect numeric columns by attempting float conversion on the sample.
        numeric_cols = []
        for col in columns:
            try:
                float(str(sample_row[col]))
                numeric_cols.append(col)
            except (ValueError, TypeError):
                pass

        if len(columns) >= 2:
            x_axis = columns[0]
            y_axis = numeric_cols[0] if numeric_cols else columns[1]
        else:
            # Single-column data: plot the column against itself.
            x_axis = columns[0]
            y_axis = columns[0]
        chart_type = "bar"

        return {
            "success": True,
            "config": {
                "chart_type": chart_type,
                "x_axis": x_axis,
                "y_axis": y_axis,
                "title": f"Chart for: {question}",
                "caption": "Fallback visualization configuration"
            },
            "method": "fallback"
        }
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/requirements.txt b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/requirements.txt
new file mode 100644
index 000000000..d9e66384a
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/requirements.txt
@@ -0,0 +1,22 @@
+# Core dependencies
+fastapi==0.104.1
+uvicorn==0.24.0
+pydantic==2.5.0
+python-multipart==0.0.6
+
+# OCI SDK
+oci==2.119.1
+
+# LangChain
+langchain==0.1.0
+langchain-core==0.1.10
+langchain-community==0.0.13
+
+# Data visualization
+matplotlib==3.8.2
+seaborn==0.13.0
+pandas==2.1.4
+numpy==1.26.2
+
+# Utilities
+python-dotenv==1.0.0
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/tools/genai_chart_generator.py b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/tools/genai_chart_generator.py
new file mode 100644
index 000000000..1b42fe021
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/tools/genai_chart_generator.py
@@ -0,0 +1,341 @@
+"""
+GenAI-Powered Chart Generator
+Uses OCI GenAI to generate custom visualization code based on data and user requirements
+"""
+
+import json
+import oci
+import matplotlib.pyplot as plt
+import seaborn as sns
+import pandas as pd
+import numpy as np
+import io
+import base64
+from typing import Dict, Any, List, Optional
+from utils import config
+import signal
+
+
class GenAIChartGenerator:
    """
    Generate custom charts using OCI GenAI to create Python visualization code.

    Flow: build a prompt from the data and the user's request, ask the OCI
    GenAI chat API for matplotlib/seaborn code, extract the code block from
    the reply, exec() it with the DataFrame in scope, and return the
    resulting base64-encoded PNG. Falls back to a simple auto-chart whenever
    any step fails.
    """

    # Prompt template sent to the GenAI model; placeholders are filled via
    # str.format() in generate_chart(). The template instructs the model to
    # emit a ```python block that ends by assigning `chart_base64`.
    CHART_GENERATION_PROMPT = """You are an expert data visualization developer. Generate Python code to create beautiful, insightful charts.

User Request: "{user_request}"

Available Data (first 3 rows shown):
{data_preview}

Data Summary:
- Total rows: {total_rows}
- Columns: {columns}
- Numeric columns: {numeric_columns}

Requirements:
1. Create a matplotlib/seaborn visualization
2. Use the provided data variable called 'df' (pandas DataFrame)
3. Make the chart beautiful with proper titles, labels, colors
4. Return the chart as base64 image
5. Handle any data preprocessing needed
6. Choose the most appropriate chart type for the data and request

Generate ONLY Python code in this format:
```python
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
import numpy as np
import io
import base64

# Set style for beautiful charts
plt.style.use('seaborn-v0_8')
sns.set_palette("husl")

# Your visualization code here
# Use df as the DataFrame variable
# Example:
fig, ax = plt.subplots(figsize=(12, 8))

# Create your chart (customize based on user request and data)
# ... your chart code ...

# Finalize chart
plt.title("Your Chart Title", fontsize=16, fontweight='bold')
plt.tight_layout()

# Convert to base64
img_buffer = io.BytesIO()
plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight',
            facecolor='white', edgecolor='none')
img_buffer.seek(0)
img_base64 = base64.b64encode(img_buffer.getvalue()).decode('utf-8')
plt.close()

# Return the base64 string
chart_base64 = img_base64
```

Generate the complete Python code that will create an appropriate visualization."""

    def __init__(self):
        # Initialize direct OCI GenAI client using chat API.
        try:
            oci_config = oci.config.from_file()
            # Region override plus explicit endpoint so the inference client
            # targets the Frankfurt GenAI service regardless of ~/.oci config.
            oci_config['region'] = 'eu-frankfurt-1'
            self.genai_client = oci.generative_ai_inference.GenerativeAiInferenceClient(oci_config)
            self.genai_client.base_client.endpoint = config.SERVICE_ENDPOINT

            self.model_id = config.MODEL_ID
            self.compartment_id = config.COMPARTMENT_ID
            self.oci_available = True
            print("LangChain OCI GenAI Chart Generator initialized successfully")
        except Exception as e:
            # Degraded mode: generate_chart() will use the fallback path.
            print(f"LangChain OCI GenAI Chart Generator not available: {e}")
            self.genai_client = None
            self.oci_available = False

    def generate_chart(self, user_request: str, data: List[Dict], chart_params: Optional[Dict[str, Any]] = None) -> Dict[str, Any]:
        """
        Generate custom chart using GenAI-generated code.

        Args:
            user_request: natural-language description of the desired chart.
            data: list of row dicts to visualize.
            chart_params: currently unused — TODO confirm intent or remove.

        Returns:
            On success: {"success": True, "chart_base64": <png b64>,
            "generated_code"/"method"/"chart_config", ...}; on failure a
            {"success": False, "error": ...} dict.
        """
        try:
            print(f"GenAI Chart Generator: Creating chart for: {user_request}")

            if not data:
                return {
                    "success": False,
                    "error": "No data provided for chart generation"
                }

            # Prepare data summary for GenAI (only 3 rows are sent).
            df = pd.DataFrame(data)
            data_preview = df.head(3).to_dict('records')
            columns = list(df.columns)
            numeric_columns = list(df.select_dtypes(include=[np.number]).columns)

            # Create GenAI prompt
            prompt = self.CHART_GENERATION_PROMPT.format(
                user_request=user_request,
                data_preview=json.dumps(data_preview, indent=2, default=str),
                total_rows=len(df),
                columns=columns,
                numeric_columns=numeric_columns
            )

            # Call GenAI to generate code
            genai_response = self._call_genai(prompt)
            print(f"GenAI Response length: {len(genai_response)} chars")
            print(f"GenAI Response preview: {genai_response[:200]}...")

            # Extract Python code from response
            python_code = self._extract_code(genai_response)
            print(f" Extracted code length: {len(python_code) if python_code else 0} chars")

            if not python_code:
                print(" No Python code extracted, using fallback")
                return self._fallback_chart(df, user_request)

            print(f" Code preview: {python_code[:100]}...")

            # Execute the generated code
            print(" Executing generated Python code...")
            chart_result = self._execute_chart_code(python_code, df)
            print(f" Chart execution result: {chart_result.get('success', False)}")

            if chart_result["success"]:
                return {
                    "success": True,
                    "chart_base64": chart_result["chart_base64"],
                    "generated_code": python_code,
                    "method": "genai_generated",
                    "chart_config": {
                        "title": f"GenAI Chart: {user_request}",
                        "type": "custom",
                        "description": "Custom chart generated using GenAI"
                    }
                }
            else:
                print(f" Generated code failed, using fallback: {chart_result['error']}")
                return self._fallback_chart(df, user_request)

        except Exception as e:
            print(f" GenAI Chart Generation error: {e}")
            return self._fallback_chart(pd.DataFrame(data) if data else pd.DataFrame(), user_request)

    def _call_genai(self, prompt: str) -> str:
        """
        Call OCI GenAI model to generate chart code using direct Chat API.

        Returns the model's text reply; on failure returns an "Error: ..."
        string instead of raising, which downstream causes _extract_code to
        find no code and trigger the fallback chart.
        """
        try:
            print(" Creating chat request...")
            # Create chat request using Oracle demo format for OpenAI GPT OSS 120B
            content = oci.generative_ai_inference.models.TextContent()
            content.text = prompt

            message = oci.generative_ai_inference.models.Message()
            message.role = "USER"
            message.content = [content]

            chat_request = oci.generative_ai_inference.models.GenericChatRequest()
            chat_request.api_format = oci.generative_ai_inference.models.BaseChatRequest.API_FORMAT_GENERIC
            chat_request.messages = [message]
            # Low temperature: code generation should be near-deterministic.
            chat_request.max_tokens = 2000
            chat_request.temperature = 0.3
            chat_request.frequency_penalty = 0
            chat_request.presence_penalty = 0
            chat_request.top_p = 1
            chat_request.top_k = 0

            chat_detail = oci.generative_ai_inference.models.ChatDetails()
            chat_detail.serving_mode = oci.generative_ai_inference.models.OnDemandServingMode(model_id=self.model_id)
            chat_detail.chat_request = chat_request
            chat_detail.compartment_id = self.compartment_id

            # Call OCI GenAI
            print(" Calling OCI GenAI Chat API...")
            response = self.genai_client.chat(chat_detail)
            print(" Got response from OCI GenAI")

            # Extract response text from the nested response model; the
            # hasattr chain guards against fields the SDK may omit.
            response_text = ""
            if hasattr(response.data, 'chat_response') and response.data.chat_response:
                if hasattr(response.data.chat_response, 'choices') and response.data.chat_response.choices:
                    choice = response.data.chat_response.choices[0]
                    if hasattr(choice, 'message') and choice.message:
                        if hasattr(choice.message, 'content') and choice.message.content:
                            for content in choice.message.content:
                                if hasattr(content, 'text'):
                                    response_text += content.text

            return response_text.strip()

        except Exception as e:
            print(f" LangChain GenAI API call failed: {e}")
            return f"Error: {str(e)}"

    def _extract_code(self, genai_response: str) -> Optional[str]:
        """
        Extract Python code from GenAI response.

        Prefers a fenced ```python block, then any ``` block; as a last
        resort collects lines from the first code-looking line onward.
        Returns None when nothing code-like is found.
        """
        try:
            # Look for code blocks
            if "```python" in genai_response:
                start = genai_response.find("```python") + 9
                end = genai_response.find("```", start)
                if end != -1:
                    return genai_response[start:end].strip()
            elif "```" in genai_response:
                start = genai_response.find("```") + 3
                end = genai_response.find("```", start)
                if end != -1:
                    return genai_response[start:end].strip()

            # If no code blocks, try to find code patterns.
            # NOTE(review): once triggered, every subsequent line is kept,
            # including trailing prose — confirm this is acceptable.
            lines = genai_response.split('\n')
            code_lines = []
            in_code = False

            for line in lines:
                if any(keyword in line for keyword in ['import ', 'plt.', 'sns.', 'fig,', 'ax =']):
                    in_code = True
                if in_code:
                    code_lines.append(line)

            return '\n'.join(code_lines) if code_lines else None

        except Exception as e:
            print(f" Code extraction error: {e}")
            return None

    def _execute_chart_code(self, python_code: str, df: pd.DataFrame) -> Dict[str, Any]:
        """
        Safely execute the generated Python code.

        SECURITY: exec() runs model-generated code with full interpreter
        privileges. `safe_globals` only pre-seeds names — it is NOT a
        sandbox (builtins remain reachable), so arbitrary code from the
        model can do anything the process can. Only use with trusted model
        output, or replace with a real sandbox before exposing to end users.
        """
        try:
            # Create a safe execution environment
            safe_globals = {
                'plt': plt,
                'sns': sns,
                'pd': pd,
                'np': np,
                'io': io,
                'base64': base64,
                'df': df,
                'chart_base64': None
            }

            # Execute the code; the generated snippet is expected to assign
            # its output to the `chart_base64` variable.
            exec(python_code, safe_globals)

            # Get the result
            chart_base64 = safe_globals.get('chart_base64')

            if chart_base64:
                return {
                    "success": True,
                    "chart_base64": chart_base64
                }
            else:
                return {
                    "success": False,
                    "error": "No chart_base64 variable found in generated code"
                }

        except Exception as e:
            return {
                "success": False,
                "error": f"Code execution error: {str(e)}"
            }

    def _fallback_chart(self, df: pd.DataFrame, user_request: str) -> Dict[str, Any]:
        """
        Generate a simple fallback chart when GenAI fails.

        Chart choice: scatter for >=2 numeric columns, bar/line for exactly
        one numeric column (line when more than 20 rows).
        NOTE(review): with no numeric columns the figure is left empty
        except for the title — confirm whether that is intended.
        """
        try:
            fig, ax = plt.subplots(figsize=(10, 6))

            # Choose chart based on data
            if len(df.columns) >= 2:
                numeric_cols = df.select_dtypes(include=[np.number]).columns
                if len(numeric_cols) >= 2:
                    # Scatter plot for numeric data
                    ax.scatter(df[numeric_cols[0]], df[numeric_cols[1]], alpha=0.7)
                    ax.set_xlabel(numeric_cols[0])
                    ax.set_ylabel(numeric_cols[1])
                elif len(numeric_cols) == 1:
                    # Bar chart for small datasets, line chart otherwise
                    if len(df) <= 20:
                        df[numeric_cols[0]].plot(kind='bar', ax=ax)
                    else:
                        df[numeric_cols[0]].plot(kind='line', ax=ax)
                    ax.set_ylabel(numeric_cols[0])

            plt.title(f"Chart for: {user_request}", fontsize=14)
            plt.tight_layout()

            # Convert to base64
            img_buffer = io.BytesIO()
            plt.savefig(img_buffer, format='png', dpi=150, bbox_inches='tight')
            img_buffer.seek(0)
            chart_base64 = base64.b64encode(img_buffer.getvalue()).decode('utf-8')
            plt.close()

            return {
                "success": True,
                "chart_base64": chart_base64,
                "method": "fallback",
                "chart_config": {
                    "title": f"Fallback Chart: {user_request}",
                    "type": "auto",
                    "description": "Simple fallback visualization"
                }
            }

        except Exception as e:
            return {
                "success": False,
                "error": f"Fallback chart error: {str(e)}"
            }
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/utils/config.py b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/utils/config.py
new file mode 100644
index 000000000..35278f713
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/backend/utils/config.py
@@ -0,0 +1,45 @@
# OCI GenAI / Agent configuration.
# All OCIDs below are placeholders — replace with values from your tenancy.
MODEL_ID = "ocid1.generativeaimodel.oc1.eu-frankfurt-1.YOUR_MODEL_ID"
SERVICE_ENDPOINT = "https://inference.generativeai.eu-frankfurt-1.oci.oraclecloud.com"
COMPARTMENT_ID = "ocid1.compartment.oc1..YOUR_COMPARTMENT_ID"
AGENT_ENDPOINT_ID = "ocid1.genaiagentendpoint.oc1.eu-frankfurt-1.YOUR_AGENT_ENDPOINT_ID"
# NOTE(review): SQL_AGENT_ID holds an *endpoint* OCID (same placeholder as
# AGENT_ENDPOINT_ID) — confirm whether an agent OCID was intended here.
SQL_AGENT_ID = "ocid1.genaiagentendpoint.oc1.eu-frankfurt-1.YOUR_AGENT_ENDPOINT_ID"
SQL_AGENT_ENDPOINT = "https://agent-runtime.generativeai.eu-frankfurt-1.oci.oraclecloud.com"

# Default LLM sampling parameters and app behavior flags.
TEMPERATURE = 0.1
MAX_TOKENS = 1024
TOP_P = 0.9
MAX_ROWS_IN_CHART = 50
CHART_EXPORT_FORMAT = "json"
DEBUG = False
# Auth scheme identifier — presumably API-key auth via ~/.oci/config;
# verify where this value is consumed.
AUTH = "API_KEY"

# Database Schema - Customize for your database
DATABASE_SCHEMA = {
    "CUSTOMERS": [
        "CUSTOMER_ID", "CUSTOMER_NAME", "EMAIL", "SIGNUP_DATE", "SEGMENT",
        "COUNTRY", "LIFETIME_VALUE", "CREATION_DATE", "CREATED_BY",
        "LAST_UPDATED_DATE", "LAST_UPDATED_BY"
    ],
    "PRODUCTS": [
        "PRODUCT_ID", "PRODUCT_NAME", "CATEGORY", "PRICE", "COST",
        "STOCK_QUANTITY", "LAUNCH_DATE", "CREATION_DATE", "CREATED_BY",
        "LAST_UPDATED_DATE", "LAST_UPDATED_BY"
    ],
    "ORDERS": [
        "ORDER_ID", "CUSTOMER_ID", "ORDER_DATE", "TOTAL_AMOUNT", "STATUS",
        "REGION", "SALES_REP", "CREATION_DATE", "CREATED_BY",
        "LAST_UPDATED_DATE", "LAST_UPDATED_BY"
    ],
    "ORDER_ITEMS": [
        "ORDER_ITEM_ID", "ORDER_ID", "PRODUCT_ID", "QUANTITY", "UNIT_PRICE",
        "DISCOUNT_PERCENT", "CREATION_DATE", "CREATED_BY",
        "LAST_UPDATED_DATE", "LAST_UPDATED_BY"
    ]
}

# Subset of columns most relevant for charting/insights on each table.
ECOMMERCE_CORE_FIELDS = {
    "CUSTOMERS": ["CUSTOMER_ID", "CUSTOMER_NAME", "SEGMENT", "COUNTRY", "LIFETIME_VALUE"],
    "PRODUCTS": ["PRODUCT_ID", "PRODUCT_NAME", "CATEGORY", "PRICE"],
    "ORDERS": ["ORDER_ID", "CUSTOMER_ID", "ORDER_DATE", "TOTAL_AMOUNT", "STATUS", "REGION"],
    "ORDER_ITEMS": ["ORDER_ITEM_ID", "ORDER_ID", "PRODUCT_ID", "QUANTITY", "UNIT_PRICE"]
}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/customers.csv b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/customers.csv
new file mode 100644
index 000000000..c01f396a4
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/customers.csv
@@ -0,0 +1,16 @@
+CUSTOMER_ID,CUSTOMER_NAME,EMAIL,SIGNUP_DATE,SEGMENT,COUNTRY,LIFETIME_VALUE
+1001,Tech Innovators Inc,contact@techinnovators.com,2023-01-15,Enterprise,USA,25000
+1002,Global Retail Corp,orders@globalretail.com,2023-02-20,Enterprise,Canada,18500
+1003,Startup Solutions,hello@startupsol.com,2023-01-30,SMB,UK,8500
+1004,Digital Commerce Co,sales@digitalcom.com,2023-03-10,Enterprise,Australia,22000
+1005,Local Business Hub,info@localbiz.com,2023-02-05,SMB,USA,6200
+1006,European Distributors,contact@eudist.com,2023-04-12,SMB,Germany,7800
+1007,Premium Brands Ltd,premium@brands.com,2023-03-25,Enterprise,Spain,28500
+1008,Creative Studios,studio@creative.com,2023-01-08,SMB,France,9200
+1009,Asia Pacific Trade,trade@apac.com,2023-02-18,Enterprise,Japan,31000
+1010,Market Leaders Inc,leaders@market.com,2023-04-05,Enterprise,Mexico,24800
+1011,Regional Partners,partners@regional.com,2023-05-12,SMB,Brazil,5900
+1012,Innovation Labs,labs@innovation.com,2023-06-08,Enterprise,Singapore,19500
+1013,Growth Ventures,growth@ventures.com,2023-07-15,SMB,India,7100
+1014,Excellence Corp,corp@excellence.com,2023-08-22,Enterprise,South Korea,26800
+1015,Future Tech,future@tech.com,2023-09-10,SMB,Netherlands,8900
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/order_items.csv b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/order_items.csv
new file mode 100644
index 000000000..933cab359
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/order_items.csv
@@ -0,0 +1,36 @@
+order_item_id,order_id,product_id,quantity,unit_price,discount_percent
+4001,3001,2001,2,2999.99,10
+4002,3001,2009,1,5000.00,0
+4003,3002,2002,2,1899.99,0
+4004,3003,2012,1,1799.99,0
+4005,3004,2004,1,3499.99,5
+4006,3004,2010,1,2500.00,0
+4007,3005,2005,1,1299.99,0
+4008,3005,2006,1,599.99,0
+4009,3006,2007,1,1599.99,0
+4010,3007,2001,3,2999.99,15
+4011,3007,2009,1,5000.00,0
+4012,3008,2003,2,899.99,10
+4013,3008,2010,1,2500.00,0
+4014,3009,2008,2,899.99,0
+4015,3009,2011,2,1200.00,5
+4016,3010,2004,2,3499.99,10
+4017,3010,2015,1,7500.00,0
+4018,3011,2002,2,1899.99,5
+4019,3011,2005,1,1299.99,0
+4020,3012,2013,1,999.99,0
+4021,3012,2006,2,599.99,10
+4022,3013,2005,1,1299.99,0
+4023,3014,2001,1,2999.99,0
+4024,3014,2009,1,5000.00,0
+4025,3015,2012,1,1799.99,0
+4026,3015,2011,1,1200.00,0
+4027,3016,2015,1,7500.00,5
+4028,3016,2001,1,2999.99,0
+4029,3016,2010,1,2500.00,0
+4030,3017,2004,2,3499.99,8
+4031,3017,2014,1,2299.99,0
+4032,3018,2001,1,2999.99,0
+4033,3019,2012,1,1799.99,0
+4034,3020,2002,2,1899.99,5
+4035,3020,2011,1,1200.00,0
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/orders.csv b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/orders.csv
new file mode 100644
index 000000000..ff180257f
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/orders.csv
@@ -0,0 +1,21 @@
+order_id,customer_id,order_date,total_amount,status,region,sales_rep
+3001,1001,2024-01-15,8999.97,DELIVERED,North America,Sarah Chen
+3002,1002,2024-01-20,3799.98,DELIVERED,North America,Mike Johnson
+3003,1003,2024-01-25,1799.98,SHIPPED,Europe,Emma Rodriguez
+3004,1004,2024-02-01,6299.97,DELIVERED,Asia Pacific,David Kim
+3005,1005,2024-02-10,2199.98,PROCESSING,North America,Sarah Chen
+3006,1006,2024-02-15,1599.99,DELIVERED,Europe,Emma Rodriguez
+3007,1007,2024-02-20,11999.96,SHIPPED,Europe,Emma Rodriguez
+3008,1008,2024-03-01,3699.98,DELIVERED,Europe,Emma Rodriguez
+3009,1009,2024-03-05,4499.98,DELIVERED,Asia Pacific,David Kim
+3010,1010,2024-03-10,9799.97,PROCESSING,North America,Sarah Chen
+3011,1001,2024-03-15,5999.98,SHIPPED,North America,Sarah Chen
+3012,1003,2024-03-20,2699.98,DELIVERED,Europe,Emma Rodriguez
+3013,1005,2024-04-01,1299.99,PENDING,North America,Sarah Chen
+3014,1007,2024-04-05,7999.98,PROCESSING,Europe,Emma Rodriguez
+3015,1009,2024-04-10,3199.98,SHIPPED,Asia Pacific,David Kim
+3016,1012,2024-05-01,12499.97,DELIVERED,Asia Pacific,David Kim
+3017,1014,2024-05-15,8799.98,DELIVERED,Asia Pacific,David Kim
+3018,1011,2024-06-01,2999.98,SHIPPED,South America,Carlos Lopez
+3019,1013,2024-06-10,1799.99,DELIVERED,Asia Pacific,David Kim
+3020,1015,2024-07-01,4299.98,PROCESSING,Europe,Emma Rodriguez
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/products.csv b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/products.csv
new file mode 100644
index 000000000..4139547bf
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/database/products.csv
@@ -0,0 +1,16 @@
+product_id,product_name,category,price,cost,stock_quantity,launch_date
+2001,AI Analytics Platform,Software,2999.99,1200.00,100,2023-01-15
+2002,Cloud Infrastructure,Software,1899.99,800.00,150,2023-02-01
+2003,Data Visualization Tool,Software,899.99,350.00,200,2023-01-20
+2004,Enterprise Security Suite,Software,3499.99,1500.00,75,2023-03-01
+2005,Mobile App Framework,Software,1299.99,550.00,120,2023-02-15
+2006,IoT Sensor Kit,Hardware,599.99,250.00,300,2023-04-01
+2007,Smart Dashboard Display,Hardware,1599.99,700.00,80,2023-03-15
+2008,Network Monitoring Device,Hardware,899.99,400.00,150,2023-05-01
+2009,Premium Consulting,Services,5000.00,2000.00,999,2023-01-01
+2010,Training Program,Services,2500.00,800.00,999,2023-02-01
+2011,Support Package,Services,1200.00,400.00,999,2023-01-15
+2012,API Gateway,Software,1799.99,750.00,90,2023-06-01
+2013,Backup Solution,Software,999.99,420.00,180,2023-04-15
+2014,Load Balancer,Hardware,2299.99,1000.00,60,2023-07-01
+2015,Custom Integration,Services,7500.00,3000.00,999,2023-03-01
\ No newline at end of file
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/Chat.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/Chat.js
new file mode 100644
index 000000000..2bfa28082
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/Chat.js
@@ -0,0 +1,352 @@
+"use client";
+
+import { APP_CONFIG } from "../../config/app";
+import DynamicThemeProvider from "../../contexts/DynamicThemeProvider";
+import {
+ Alert,
+ alpha,
+ Box,
+ Container,
+ lighten,
+ Paper,
+ Snackbar,
+ Typography,
+} from "@mui/material";
+import { AnimatePresence, motion } from "framer-motion";
+import { useState } from "react";
+import { useChat } from "../../contexts/ChatContext";
+import { useProject } from "../../contexts/ProjectsContext";
+import ChatHeader from "./ChatHeader";
+import ChatInputBar from "./ChatInputBar";
+import MessageList from "./MessageList";
+
+const containerVariants = {
+ initial: {
+ scale: 0.8,
+ opacity: 0,
+ },
+ animate: {
+ scale: 1,
+ opacity: 1,
+ transition: {
+ type: "spring",
+ stiffness: 260,
+ damping: 20,
+ },
+ },
+};
+
+const dynamicIslandVariants = {
+ initial: {
+ y: 100,
+ opacity: 0,
+ },
+ animate: {
+ y: 0,
+ opacity: 1,
+ transition: {
+ type: "spring",
+ stiffness: 350,
+ damping: 25,
+ delay: 0.3,
+ },
+ },
+};
+
+const logoVariants = {
+ initial: {
+ opacity: 0,
+ },
+ animate: {
+ opacity: 1,
+ transition: {
+ duration: 0.3,
+ },
+ },
+ exit: {
+ opacity: 0,
+ transition: {
+ duration: 0.2,
+ },
+ },
+};
+
+export default function Chat({ onAddProject, onEditProject, onDeleteProject }) {
+ const {
+ messages,
+ connected,
+ loading,
+ error,
+ isListening,
+ isWaitingForResponse,
+ sendMessage,
+ sendAttachment,
+ clearChat,
+ toggleSpeechRecognition,
+ setError,
+ currentSpeechProvider,
+ } = useChat();
+
+ const { getCurrentProject } = useProject();
+ const currentProject = getCurrentProject();
+
+ const [isDragOver, setIsDragOver] = useState(false);
+
+ const isOracleRecording = currentSpeechProvider === "oracle" && isListening;
+
+ const handleDragOver = (e) => {
+ e.preventDefault();
+ e.stopPropagation();
+ };
+
+ const handleDragEnter = (e) => {
+ e.preventDefault();
+ e.stopPropagation();
+ setIsDragOver(true);
+ };
+
+ const handleDragLeave = (e) => {
+ e.preventDefault();
+ e.stopPropagation();
+ if (!e.currentTarget.contains(e.relatedTarget)) {
+ setIsDragOver(false);
+ }
+ };
+
+ const handleDrop = (e) => {
+ e.preventDefault();
+ e.stopPropagation();
+ setIsDragOver(false);
+
+ const files = e.dataTransfer.files;
+ if (files.length > 0) {
+ const file = files[0];
+ const isValidType =
+ file.type.startsWith("image/") || file.type === "application/pdf";
+
+ if (isValidType) {
+ window.dispatchEvent(new CustomEvent("fileDropped", { detail: file }));
+ }
+ }
+ };
+
+ const getBackgroundStyle = () => {
+ if (currentProject.backgroundImage) {
+ return {
+ backgroundImage: `url(${currentProject.backgroundImage})`,
+ backgroundSize: "cover",
+ backgroundPosition: "center",
+ backgroundRepeat: "no-repeat",
+ };
+ }
+ return {
+ backgroundColor: lighten(
+ currentProject.backgroundColor || APP_CONFIG.defaults.backgroundColor,
+ 0.5
+ ),
+ };
+ };
+
+ const hasMessages = messages.length > 0 || isWaitingForResponse;
+
+ return (
+
+
+ {isDragOver && (
+
+
+ 📎 Drop images or PDFs here
+
+
+ )}
+
+
+ {/*
+
+ */}
+
+
+
+
+
+
+
+
+
+ {hasMessages && (
+
+
+
+
+
+ )}
+
+
+ setError("")}
+ anchorOrigin={{ vertical: "bottom", horizontal: "center" }}
+ >
+ setError("")} severity="error">
+ {error}
+
+
+
+
+
+
+
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatHeader.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatHeader.js
new file mode 100644
index 000000000..2917619ca
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatHeader.js
@@ -0,0 +1,91 @@
+"use client";
+
+import { LoupeOutlined } from "@mui/icons-material";
+import { Box, IconButton, Stack, Tooltip, Typography } from "@mui/material";
+import { AnimatePresence, motion } from "framer-motion";
+import { useRouter } from "next/navigation";
+
+export default function ChatHeader({
+ messagesCount,
+ onNewChat,
+ isConnected = false,
+ projectName,
+}) {
+ const router = useRouter();
+
+ const titleVariants = {
+ initial: {
+ opacity: 0,
+ y: -10,
+ scale: 0.95,
+ },
+ animate: {
+ opacity: 1,
+ y: 0,
+ scale: 1,
+ transition: {
+ type: "spring",
+ stiffness: 400,
+ damping: 25,
+ duration: 0.4,
+ },
+ },
+ exit: {
+ opacity: 0,
+ y: -10,
+ scale: 0.95,
+ transition: {
+ duration: 0.2,
+ },
+ },
+ };
+
+ return (
+
+
+
+
+ {messagesCount > 0 && (
+
+
+ {projectName}
+
+
+ )}
+
+
+
+
+
+
+
+
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatInputBar.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatInputBar.js
new file mode 100644
index 000000000..7726d7ce8
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatInputBar.js
@@ -0,0 +1,440 @@
+"use client";
+
+import {
+ alpha,
+ Box,
+ IconButton,
+ Stack,
+ TextField,
+ Typography,
+} from "@mui/material";
+import { useTheme } from "@mui/material/styles";
+import { motion } from "framer-motion";
+import { FileText, Forward, Mic, X, Paperclip } from "lucide-react";
+import { useEffect, useRef, useState } from "react";
+
+export default function ChatInputBar({
+ onSendMessage,
+ onToggleSpeechRecognition,
+ onSendAttachment,
+ isConnected,
+ isListening,
+ isPreview,
+ currentSpeechProvider,
+}) {
+ const [input, setInput] = useState("");
+ const [audioLevels, setAudioLevels] = useState([0, 0, 0, 0, 0]);
+ const inputRef = useRef(null);
+ const audioContextRef = useRef(null);
+ const analyserRef = useRef(null);
+ const animationFrameRef = useRef(null);
+
+ const [selectedFile, setSelectedFile] = useState(null);
+ const [filePreview, setFilePreview] = useState(null);
+ const fileInputRef = useRef(null);
+
+ const theme = useTheme();
+ const isOracleListening = currentSpeechProvider === "oracle" && isListening;
+
+ const handleSendMessage = async () => {
+ if (!isConnected) return;
+
+ const hasText = input.trim();
+ const hasFile = selectedFile;
+
+ if (!hasText && !hasFile) return;
+
+ if (hasFile && onSendAttachment) {
+ const success = await onSendAttachment(selectedFile);
+ if (!success) return;
+ }
+
+ if (hasText && onSendMessage) {
+ onSendMessage(input.trim());
+ }
+
+ setInput("");
+ setSelectedFile(null);
+ setFilePreview(null);
+ };
+
+ const handleKeyPress = (e) => {
+ if (e.key === "Enter" && !e.shiftKey) {
+ e.preventDefault();
+ handleSendMessage();
+ }
+ };
+
+ const handleFileSelect = (event) => {
+ const file = event.target.files[0];
+ if (file) {
+ setSelectedFile(file);
+
+ if (file.type.startsWith("image/")) {
+ const reader = new FileReader();
+ reader.onload = (e) => setFilePreview(e.target.result);
+ reader.readAsDataURL(file);
+ } else {
+ setFilePreview(null);
+ }
+
+ event.target.value = "";
+ }
+ };
+
+ const handleRemoveFile = () => {
+ setSelectedFile(null);
+ setFilePreview(null);
+ };
+
+ useEffect(() => {
+ if (isOracleListening) {
+ const startAudioAnalysis = async () => {
+ try {
+ const stream = await navigator.mediaDevices.getUserMedia({
+ audio: { sampleRate: 16000, channelCount: 1 },
+ });
+
+ audioContextRef.current = new AudioContext();
+ analyserRef.current = audioContextRef.current.createAnalyser();
+
+ const source =
+ audioContextRef.current.createMediaStreamSource(stream);
+ source.connect(analyserRef.current);
+
+ analyserRef.current.fftSize = 256;
+ const bufferLength = analyserRef.current.frequencyBinCount;
+ const dataArray = new Uint8Array(bufferLength);
+
+ const updateLevels = () => {
+ analyserRef.current.getByteFrequencyData(dataArray);
+
+ const newLevels = [];
+ const segmentSize = Math.floor(bufferLength / 5);
+
+ for (let i = 0; i < 5; i++) {
+ const start = i * segmentSize;
+ const end = start + segmentSize;
+ let sum = 0;
+
+ for (let j = start; j < end; j++) {
+ sum += dataArray[j];
+ }
+
+ const average = sum / segmentSize;
+ const normalized = Math.min(average / 80, 3);
+ newLevels.push(normalized);
+ }
+
+ setAudioLevels(newLevels);
+ animationFrameRef.current = requestAnimationFrame(updateLevels);
+ };
+
+ updateLevels();
+ } catch (error) {
+ console.error("Error accessing microphone:", error);
+ }
+ };
+
+ startAudioAnalysis();
+ } else {
+ if (animationFrameRef.current) {
+ cancelAnimationFrame(animationFrameRef.current);
+ }
+ if (
+ audioContextRef.current &&
+ audioContextRef.current.state !== "closed"
+ ) {
+ audioContextRef.current.close();
+ audioContextRef.current = null;
+ }
+ setAudioLevels([0, 0, 0, 0, 0]);
+ }
+
+ return () => {
+ if (animationFrameRef.current) {
+ cancelAnimationFrame(animationFrameRef.current);
+ }
+ if (
+ audioContextRef.current &&
+ audioContextRef.current.state !== "closed"
+ ) {
+ audioContextRef.current.close();
+ }
+ };
+ }, [isOracleListening]);
+
+ useEffect(() => {
+ if (isConnected && inputRef.current) {
+ inputRef.current.focus();
+ }
+ }, [isConnected]);
+
+ useEffect(() => {
+ const handleFileDropped = (event) => {
+ const file = event.detail;
+ setSelectedFile(file);
+
+ if (file.type.startsWith("image/")) {
+ const reader = new FileReader();
+ reader.onload = (e) => setFilePreview(e.target.result);
+ reader.readAsDataURL(file);
+ } else {
+ setFilePreview(null);
+ }
+ };
+
+ window.addEventListener("fileDropped", handleFileDropped);
+ return () => window.removeEventListener("fileDropped", handleFileDropped);
+ }, []);
+
+ return (
+
+ {selectedFile && (
+
+
+
+
+
+ {filePreview ? (
+
+ ) : (
+
+
+
+ )}
+
+
+ {selectedFile.name}
+
+
+ )}
+
+
+ fileInputRef.current?.click()}
+ disabled={!isConnected}
+ title="Upload file"
+ sx={{
+ color: theme.palette.text.secondary,
+ "&:hover": {
+ backgroundColor: theme.palette.primary.main + "14",
+ },
+ "&:disabled": {
+ color: theme.palette.text.disabled,
+ },
+ mr: 1,
+ }}
+ >
+
+
+
+ {!isOracleListening && (
+ setInput(e.target.value)}
+ onKeyPress={handleKeyPress}
+ disabled={!isConnected || isListening}
+ multiline
+ maxRows={4}
+ inputRef={inputRef}
+ slotProps={{
+ input: {
+ disableUnderline: true,
+ sx: {
+ color: theme.palette.text.primary,
+ "::placeholder": {
+ color: theme.palette.text.secondary,
+ },
+ },
+ },
+ }}
+ sx={{ pl: 2, pr: 2, py: 0.5 }}
+ />
+ )}
+
+
+ {isOracleListening ? (
+
+ {[2, 1, 0, 1, 2].map((position, index) => (
+
+ ))}
+
+ ) : (
+
+ )}
+
+ {!isOracleListening && (
+
+
+
+ )}
+
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatPreview.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatPreview.js
new file mode 100644
index 000000000..b05f71582
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/ChatPreview.js
@@ -0,0 +1,186 @@
+"use client";
+
+import DynamicThemeProvider from "../../contexts/DynamicThemeProvider";
+import { Alert, Box, lighten, Paper, Snackbar } from "@mui/material";
+import { motion } from "framer-motion";
+import { useState } from "react";
+import ChatHeader from "../Chat/ChatHeader";
+import ChatInputBar from "../Chat/ChatInputBar";
+import MessageList from "../Chat/MessageList";
+
+const containerVariants = {
+ initial: {
+ scale: 0.8,
+ opacity: 0,
+ },
+ animate: {
+ scale: 1,
+ opacity: 1,
+ transition: {
+ type: "spring",
+ stiffness: 260,
+ damping: 20,
+ },
+ },
+};
+
+const dynamicIslandVariants = {
+ initial: {
+ y: 100,
+ opacity: 0,
+ },
+ animate: {
+ y: 0,
+ opacity: 1,
+ transition: {
+ type: "spring",
+ stiffness: 350,
+ damping: 25,
+ delay: 0.3,
+ },
+ },
+};
+
+export default function ChatPreview({ projectData }) {
+ const [error, setError] = useState("");
+
+ const messages = [];
+ const connected = true;
+ const loading = false;
+ const isListening = false;
+
+ const sendMessage = () => {};
+ const clearChat = () => {};
+ const toggleSpeechRecognition = () => {};
+
+ const getBackgroundStyle = () => {
+ if (projectData.backgroundImage) {
+ return {
+ backgroundImage: `url(${projectData.backgroundImage})`,
+ backgroundSize: "cover",
+ backgroundPosition: "center",
+ backgroundRepeat: "no-repeat",
+ };
+ }
+ return {
+ backgroundColor: lighten(projectData.backgroundColor || "#F5F5F5", 0.5),
+ };
+ };
+
+ return (
+
+
+
+
+
+
+
+
+
+
+ setError("")}
+ >
+ setError("")} severity="error">
+ {error}
+
+
+
+
+
+
+
+
+
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageContent.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageContent.js
new file mode 100644
index 000000000..ce5f0874c
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageContent.js
@@ -0,0 +1,444 @@
+"use client";
+
+import {
+ Box,
+ Button,
+ Card,
+ CardContent,
+ Dialog,
+ DialogActions,
+ DialogContent,
+ DialogTitle,
+ IconButton,
+ Paper,
+ Table,
+ TableBody,
+ TableCell,
+ TableContainer,
+ TableHead,
+ TableRow,
+ Tooltip,
+ Typography,
+} from "@mui/material";
+import { ExternalLink, Eye, FileText } from "lucide-react";
+import Markdown from "markdown-to-jsx";
+import { useState } from "react";
+
+export default function MessageContent({ message, isFromBot }) {
+ const payload = message.messagePayload;
+ const [citationDialogOpen, setCitationDialogOpen] = useState(false);
+ const [selectedCitationContent, setSelectedCitationContent] = useState("");
+
+ const isLongText = (value) => {
+ const stringValue =
+ value !== null && value !== undefined ? String(value) : "";
+ return stringValue.length > 100;
+ };
+
+ switch (payload.type) {
+ case "text":
+ return (
+
+
+ {payload.text}
+
+
+ {payload.citations && payload.citations.length > 0 && (
+
+
+ Sources:
+
+ {payload.citations.map((citation, index) => (
+
+
+
+
+
+ {citation.document_name}
+
+
+
+ {citation.page_numbers &&
+ citation.page_numbers.length > 0 && (
+
+ pages {citation.page_numbers.join(", ")}
+
+ )}
+
+
+
+ {
+ setSelectedCitationContent(citation.content);
+ setCitationDialogOpen(true);
+ }}
+ >
+
+
+
+
+
+
+ window.open(citation.source_url, "_blank")
+ }
+ >
+
+
+
+
+
+ ))}
+
+
+
+ )}
+
+ );
+
+ case "diagram":
+ return (
+
+ {payload.text && payload.text.trim() && (
+
+ {payload.text}
+
+ )}
+
+
+ );
+
+ case "sql_result":
+ return (
+
+ {payload.generatedQuery && (
+
+
+ Generated Query:
+
+
+ {payload.generatedQuery}
+
+
+ )}
+
+ {payload.executionResult &&
+ payload.executionResult.length > 0 &&
+ (() => {
+ const data = payload.executionResult;
+ const columns = Object.keys(data[0]);
+
+ const getColumnWidth = (columnName, columnIndex) => {
+ const columnValues = data.map(
+ (row) => Object.values(row)[columnIndex]
+ );
+ const allValues = [columnName, ...columnValues];
+
+ const lengths = allValues.map(
+ (val) => String(val || "").length
+ );
+ const avgLength =
+ lengths.reduce((sum, len) => sum + len, 0) / lengths.length;
+ const maxLength = Math.max(...lengths);
+ const headerLength = columnName.length;
+
+ const headerMinWidth = Math.max(headerLength * 8, 100);
+
+ let calculatedWidth;
+ if (maxLength > 200) {
+ calculatedWidth = { minWidth: "280px", maxWidth: "350px" };
+ } else if (maxLength > 100) {
+ calculatedWidth = { minWidth: "200px", maxWidth: "280px" };
+ } else if (avgLength > 30) {
+ calculatedWidth = { minWidth: "150px", maxWidth: "200px" };
+ } else if (avgLength > 15) {
+ calculatedWidth = { minWidth: "120px", maxWidth: "150px" };
+ } else {
+ calculatedWidth = { minWidth: "100px", maxWidth: "120px" };
+ }
+
+ const finalMinWidth = Math.max(
+ parseInt(calculatedWidth.minWidth),
+ headerMinWidth
+ );
+
+ return {
+ minWidth: `${finalMinWidth}px`,
+ maxWidth: calculatedWidth.maxWidth,
+ };
+ };
+
+ return (
+
+
+ Results:
+
+
+
+
+
+ {columns.map((key, index) => {
+ const widths = getColumnWidth(key, index);
+ return (
+
+ {key}
+
+ );
+ })}
+
+
+
+ {data.map((row, rowIndex) => (
+
+ {Object.values(row).map((value, cellIndex) => {
+ const stringValue =
+ value !== null && value !== undefined
+ ? String(value)
+ : "";
+ const isLong = isLongText(value);
+ const widths = getColumnWidth(
+ columns[cellIndex],
+ cellIndex
+ );
+
+ return (
+
+ {isLong ? (
+
+ {stringValue}
+
+ ) : (
+ 50 && {
+ whiteSpace: "pre-wrap",
+ overflow: "visible",
+ textOverflow: "clip",
+ }),
+ }}
+ title={stringValue}
+ >
+ {stringValue}
+
+ )}
+
+ );
+ })}
+
+ ))}
+
+
+
+
+ );
+ })()}
+
+ );
+
+ case "card":
+ return (
+
+
+ {payload.cards &&
+ payload.cards.map((card, idx) => (
+
+ {card.title && (
+ {card.title}
+ )}
+ {card.description && (
+ {card.description}
+ )}
+ {card.url && (
+
+
+ {card.url}
+
+
+ )}
+
+ ))}
+
+
+ );
+
+ case "attachment":
+ const attachment = payload.attachment;
+
+ if (attachment.type.startsWith("image/")) {
+ return (
+
+ );
+ }
+
+ return (
+
+ Attachment: {attachment.type} -{" "}
+
+ {attachment.title || "View"}
+
+
+ );
+
+ default:
+ return (
+
+ Unsupported message type: {payload.type}
+
+ );
+ }
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageItem.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageItem.js
new file mode 100644
index 000000000..ff8259e6f
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageItem.js
@@ -0,0 +1,183 @@
+"use client";
+
+import { Box, useTheme } from "@mui/material";
+import { motion } from "framer-motion";
+import { useChat } from "../../contexts/ChatContext";
+import MessageContent from "./MessageContent";
+
+export default function MessageItem({ message }) {
+ const theme = useTheme();
+ const { speakMessage, cancelAudio, playingMessageId, setPlayingMessageId } =
+ useChat();
+
+ const isFromBot = message.from;
+
+ const primaryColor = theme.palette.primary.main;
+ const primaryLight = theme.palette.primary.light;
+ const primaryDark = theme.palette.primary.dark;
+
+ const messageId = `${message.userId}-${message.date}`;
+ const isPlaying = playingMessageId === messageId;
+
+ const handlePlayAudio = (message) => {
+ if (isPlaying) {
+ cancelAudio();
+ } else {
+ if (playingMessageId) {
+ cancelAudio();
+ }
+ if (speakMessage(message)) {
+ setPlayingMessageId(messageId);
+ }
+ }
+ };
+
+ const botMessageVariants = {
+ initial: {
+ opacity: 0,
+ x: -30,
+ scale: 0.96,
+ },
+ animate: {
+ opacity: 1,
+ x: 0,
+ scale: 1,
+ transition: {
+ type: "spring",
+ stiffness: 300,
+ damping: 30,
+ duration: 0.5,
+ },
+ },
+ };
+
+ const userMessageVariants = {
+ initial: {
+ opacity: 0,
+ x: 30,
+ scale: 0.96,
+ },
+ animate: {
+ opacity: 1,
+ x: 0,
+ scale: 1,
+ transition: {
+ type: "spring",
+ stiffness: 320,
+ damping: 25,
+ duration: 0.4,
+ },
+ },
+ };
+
+ return (
+
+ {isFromBot ? (
+
+
+
+
+ {/*
+ handlePlayAudio(message)}
+ sx={{
+ width: 24,
+ height: 24,
+ backgroundColor: "rgba(0, 0, 0, 0.1)",
+ "&:hover": {
+ backgroundColor: "rgba(0, 0, 0, 0.2)",
+ },
+ }}
+ >
+ {isPlaying ? (
+
+ ) : (
+
+ )}
+
+ */}
+
+ ) : (
+
+
+
+
+
+ )}
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageList.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageList.js
new file mode 100644
index 000000000..a75c314fb
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/MessageList.js
@@ -0,0 +1,94 @@
+"use client";
+
+import { Box, CircularProgress, List } from "@mui/material";
+import { AnimatePresence, motion } from "framer-motion";
+import { useEffect, useRef } from "react";
+import MessageItem from "./MessageItem";
+import TypingIndicator from "./TypingIndicator";
+import WelcomeScreen from "./WelcomeScreen";
+
+export default function MessageList({
+ messages,
+ loading,
+ projectName,
+ logoUrl,
+ onSendMessage,
+ isWaitingForResponse = false,
+}) {
+ const messagesEndRef = useRef(null);
+
+ useEffect(() => {
+ scrollToBottom();
+ }, [messages, isWaitingForResponse]);
+
+ const scrollToBottom = () => {
+ messagesEndRef.current?.scrollIntoView({ behavior: "smooth" });
+ };
+
+ const welcomeVariants = {
+ initial: {
+ opacity: 1,
+ scale: 1,
+ },
+ exit: {
+ opacity: 0,
+ scale: 0.95,
+ transition: {
+ duration: 0.3,
+ ease: "easeInOut",
+ },
+ },
+ };
+
+ if (loading && messages.length === 0) {
+ return (
+
+
+
+ );
+ }
+
+ const hasMessages = messages.length > 0 || isWaitingForResponse;
+
+ return (
+
+
+ {!hasMessages && (
+
+
+
+ )}
+
+
+
+ {messages.map((msg, idx) => (
+
+ ))}
+ {isWaitingForResponse && }
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/TypingIndicator.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/TypingIndicator.js
new file mode 100644
index 000000000..17029f596
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/TypingIndicator.js
@@ -0,0 +1,79 @@
+"use client";
+
+import { Box } from "@mui/material";
+import { motion } from "framer-motion";
+
+export default function TypingIndicator() {
+ const dotVariants = {
+ animate: {
+ scale: [1, 1.2, 1],
+ opacity: [0.4, 1, 0.4],
+ transition: {
+ duration: 1.4,
+ repeat: Infinity,
+ ease: "easeInOut",
+ },
+ },
+ };
+
+ const containerVariants = {
+ animate: {
+ transition: {
+ staggerChildren: 0.2,
+ repeat: Infinity,
+ },
+ },
+ };
+
+ return (
+
+
+
+ {[0, 1, 2].map((index) => (
+
+ ))}
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/WelcomeScreen.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/WelcomeScreen.js
new file mode 100644
index 000000000..f14621a89
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Chat/WelcomeScreen.js
@@ -0,0 +1,102 @@
+"use client";
+
+import { Box, Button, Typography, alpha, darken, lighten } from "@mui/material";
+import { useTheme } from "@mui/material/styles";
+
+const SUGGESTIONS = [
+ "show the top 5 orders",
+ "show the number of order per region?",
+];
+
+export default function WelcomeScreen({ projectName, logoUrl, onSendMessage }) {
+ const theme = useTheme();
+
+ const handleSuggestionClick = (suggestion) => {
+ if (onSendMessage) {
+ onSendMessage(suggestion);
+ }
+ };
+
+ return (
+
+
+
+
+ {projectName}
+
+
+ How can I help you today?
+
+
+
+ {SUGGESTIONS.map((suggestion, index) => (
+
+ ))}
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/ClientThemeProvider.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/ClientThemeProvider.js
new file mode 100644
index 000000000..df9556b00
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/ClientThemeProvider.js
@@ -0,0 +1,13 @@
+"use client";
+import { CssBaseline, ThemeProvider } from "@mui/material";
+import { ProjectProvider } from "../contexts/ProjectsContext";
+import theme from "../theme/theme";
+
+export default function ClientThemeProvider({ children }) {
+ return (
+
+
+ {children}
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/NavMenu.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/NavMenu.js
new file mode 100644
index 000000000..55a242865
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/NavMenu.js
@@ -0,0 +1,236 @@
+"use client";
+
+import { Add, Delete, Edit } from "@mui/icons-material";
+import { Avatar, Box, IconButton, Paper, Stack, Tooltip } from "@mui/material";
+import { motion } from "framer-motion";
+import { useRouter } from "next/navigation";
+import { useProject } from "../contexts/ProjectsContext";
+
+const menuVariants = {
+ initial: {
+ x: -100,
+ opacity: 0,
+ },
+ animate: {
+ x: 0,
+ opacity: 1,
+ transition: {
+ type: "spring",
+ stiffness: 350,
+ damping: 25,
+ delay: 0.2,
+ },
+ },
+};
+
+export default function NavMenu({
+ onAddProject,
+ onEditProject,
+ onDeleteProject,
+}) {
+ const router = useRouter();
+ const { projects, switchProject, getCurrentProject } = useProject();
+ const currentProject = getCurrentProject();
+
+ const handleProjectSwitch = (projectId) => {
+ switchProject(projectId);
+ router.push(`/?projectId=${projectId}`);
+ };
+
+ return (
+
+
+ {projects.map((project) => (
+
+
+
+ {project.name}
+
+
+ {
+ e.stopPropagation();
+ handleProjectSwitch(project.id);
+ onEditProject(project);
+ }}
+ sx={{
+ width: 24,
+ height: 24,
+ backgroundColor: "white",
+ color: "black",
+ boxShadow: "0 2px 6px rgba(0, 0, 0, 0.12)",
+ "&:hover": {
+ backgroundColor: "rgba(255,255,255,0.8)",
+ },
+ }}
+ >
+
+
+ {project.id !== "default" && (
+ {
+ e.stopPropagation();
+ onDeleteProject(project.id);
+ }}
+ sx={{
+ width: 24,
+ height: 24,
+ backgroundColor: "white",
+ color: "red",
+ boxShadow: "0 2px 6px rgba(0, 0, 0, 0.12)",
+ "&:hover": {
+ backgroundColor: "rgba(255,255,255,0.8)",
+ },
+ }}
+ >
+
+
+ )}
+
+
+ }
+ placement="right"
+ leaveDelay={100}
+ componentsProps={{
+ tooltip: {
+ sx: {
+ backgroundColor: "transparent",
+ padding: 0,
+ margin: 0,
+ "& .MuiTooltip-arrow": {
+ display: "none",
+ },
+ },
+ },
+ }}
+ >
+ handleProjectSwitch(project.id)}
+ sx={{
+ backgroundColor:
+ currentProject.id === project.id
+ ? project.mainColor
+ : "transparent",
+ padding: 0,
+ display: "flex",
+ alignItems: "center",
+ justifyContent: "center",
+ "&:hover": {
+ backgroundColor:
+ currentProject.id === project.id
+ ? project.mainColor
+ : "rgba(0, 0, 0, 0.04)",
+ },
+ }}
+ >
+
+ {project.name.charAt(0).toUpperCase()}
+
+
+
+
+ ))}
+
+
+
+
+
+ = 8 ? "Maximum 8 projects allowed" : "Add Project"
+ }
+ placement="right"
+ >
+
+
+
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectCard.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectCard.js
new file mode 100644
index 000000000..fe52a7d84
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectCard.js
@@ -0,0 +1,184 @@
+"use client";
+
+import { useProject } from "../../contexts/ProjectsContext";
+import { Box, Card, Typography, alpha, useTheme } from "@mui/material";
+import { motion } from "framer-motion";
+import { MessageSquare, Settings } from "lucide-react";
+import { useRouter } from "next/navigation";
+
+export default function ProjectCard({ project, onEdit }) {
+ const router = useRouter();
+ const { switchProject } = useProject();
+ const theme = useTheme();
+
+ const handleEditClick = (e) => {
+ e.stopPropagation();
+ onEdit(project);
+ };
+
+ const handleGoToChat = (e) => {
+ e.stopPropagation();
+ switchProject(project.id);
+ router.push(`/?projectId=${project.id}`);
+ };
+
+ return (
+ theme.spacing(4, 4, 4, 1),
+ }}
+ >
+ {}
+
+
+
+
+
+
+
+ {project.logoUrl ? (
+
+ ) : (
+
+ {project.name.charAt(0).toUpperCase()}
+
+ )}
+
+
+ {project.name}
+
+
+ {}
+
+
+
+ Go to chat
+
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectModal.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectModal.js
new file mode 100644
index 000000000..9b9f542fb
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/Settings/ProjectModal.js
@@ -0,0 +1,411 @@
+"use client";
+
+import { APP_CONFIG } from "../../config/app";
+import CloseIcon from "@mui/icons-material/Close";
+import DeleteIcon from "@mui/icons-material/Delete";
+import SaveIcon from "@mui/icons-material/Save";
+import {
+ Box,
+ Button,
+ Dialog,
+ DialogActions,
+ DialogContent,
+ DialogContentText,
+ DialogTitle,
+ FormControl,
+ FormControlLabel,
+ FormLabel,
+ IconButton,
+ Radio,
+ RadioGroup,
+ Stack,
+ TextField,
+ Typography,
+} from "@mui/material";
+import { useEffect, useState } from "react";
+import ChatPreview from "../Chat/ChatPreview";
+
+export default function ProjectModal({
+ open,
+ onClose,
+ project = null,
+ onSave,
+ onDelete,
+}) {
+ const isNewProject = !project;
+ const [formData, setFormData] = useState({
+ name: "",
+ logoUrl: APP_CONFIG.defaults.logoUrl,
+ mainColor: APP_CONFIG.defaults.color,
+ backgroundColor: APP_CONFIG.defaults.backgroundColor,
+ backgroundImage: APP_CONFIG.defaults.image,
+ backgroundType: "image",
+ speechProvider: APP_CONFIG.defaults.speechProvider,
+ });
+ const [errors, setErrors] = useState({});
+ const [deleteDialogOpen, setDeleteDialogOpen] = useState(false);
+
+ useEffect(() => {
+ if (project) {
+ setFormData({
+ name: project.name,
+ logoUrl: project.logoUrl || APP_CONFIG.defaults.logoUrl,
+ mainColor: project.mainColor || APP_CONFIG.defaults.color,
+ backgroundColor:
+ project.backgroundColor || APP_CONFIG.defaults.backgroundColor,
+ backgroundImage: project.backgroundImage || APP_CONFIG.defaults.image,
+ backgroundType: "image",
+ speechProvider:
+ project.speechProvider || APP_CONFIG.defaults.speechProvider,
+ });
+
+ if (
+ !APP_CONFIG.availableColors.includes(
+ project.mainColor || APP_CONFIG.defaults.color
+ )
+ ) {
+ setFormData((prev) => ({
+ ...prev,
+ mainColor: APP_CONFIG.defaults.color,
+ }));
+ }
+ } else {
+ setFormData({
+ name: "",
+ logoUrl: APP_CONFIG.defaults.logoUrl,
+ mainColor: APP_CONFIG.defaults.color,
+ backgroundColor: APP_CONFIG.defaults.backgroundColor,
+ backgroundImage: APP_CONFIG.defaults.image,
+ backgroundType: "image",
+ speechProvider: APP_CONFIG.defaults.speechProvider,
+ });
+ }
+ setErrors({});
+ }, [project, open]);
+
+ const handleInputChange = (e) => {
+ const { name, value } = e.target;
+ setFormData((prev) => ({ ...prev, [name]: value }));
+
+ if (errors[name]) {
+ setErrors((prev) => ({ ...prev, [name]: undefined }));
+ }
+ };
+
+ const handleMainColorSelect = (colorValue) => {
+ setFormData((prev) => ({
+ ...prev,
+ mainColor: colorValue,
+ }));
+ };
+
+ const handleImageSelect = (imageUrl) => {
+ setFormData((prev) => ({
+ ...prev,
+ backgroundImage: imageUrl,
+ backgroundType: "image",
+ }));
+ };
+
+ const validateForm = () => {
+ const newErrors = {};
+
+ if (!formData.name.trim()) {
+ newErrors.name = "Name is required";
+ }
+
+ if (formData.logoUrl && !isValidUrl(formData.logoUrl)) {
+ newErrors.logoUrl = "Please enter a valid URL";
+ }
+
+ setErrors(newErrors);
+ return Object.keys(newErrors).length === 0;
+ };
+
+ const isValidUrl = (string) => {
+ try {
+ new URL(string);
+ return true;
+ } catch (_) {
+ return false;
+ }
+ };
+
+ const handleSave = () => {
+ if (!validateForm()) return;
+ onSave(formData);
+ onClose();
+ };
+
+ const handleDelete = () => {
+ onDelete(project.id);
+ setDeleteDialogOpen(false);
+ onClose();
+ };
+
+ const handleClose = () => {
+ onClose();
+ setDeleteDialogOpen(false);
+ };
+
+ return (
+ <>
+
+ e.stopPropagation()}
+ >
+
+
+
+ {isNewProject ? "Create New Project" : "Edit Project"}
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ Main Color
+
+
+
+ {APP_CONFIG.availableColors.map((color) => (
+ handleMainColorSelect(color)}
+ sx={{
+ width: 40,
+ height: 40,
+ bgcolor: color,
+ borderRadius: 1,
+ cursor: "pointer",
+ border: "1px solid rgba(0, 0, 0, 0.1)",
+ transform:
+ formData.mainColor === color
+ ? "scale(1.2)"
+ : "scale(1)",
+ transition: "transform 0.15s ease-out",
+ "&:hover": {
+ transform:
+ formData.mainColor === color
+ ? "scale(1.2)"
+ : "scale(1.05)",
+ },
+ }}
+ />
+ ))}
+
+
+
+
+
+ Background
+
+
+
+ {APP_CONFIG.availableImages.map((image, index) => (
+ handleImageSelect(image)}
+ sx={{
+ width: 50,
+ height: 50,
+ borderRadius: 1,
+ cursor: "pointer",
+ backgroundImage: `url(${image})`,
+ backgroundSize: "cover",
+ backgroundPosition: "center",
+ transform:
+ formData.backgroundType === "image" &&
+ formData.backgroundImage === image
+ ? "scale(1.2)"
+ : "scale(1)",
+ transition: "transform 0.15s ease-out",
+ "&:hover": {
+ transform:
+ formData.backgroundType === "image" &&
+ formData.backgroundImage === image
+ ? "scale(1.2)"
+ : "scale(1.05)",
+ },
+ }}
+ />
+ ))}
+
+
+
+
+
+ Speech Service
+
+
+ }
+ label="Browser"
+ />
+ }
+ label="Oracle"
+ />
+
+
+
+
+
+
+ {!isNewProject && project?.id !== "default" && (
+ }
+ onClick={() => setDeleteDialogOpen(true)}
+ >
+ Delete
+
+ )}
+
+
+
+ }
+ onClick={handleSave}
+ >
+ {isNewProject ? "Create" : "Save changes"}
+
+
+
+
+
+
+
+
+
+
+
+
+ >
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/TestOracleSpeech.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/TestOracleSpeech.js
new file mode 100644
index 000000000..12c38a819
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/components/TestOracleSpeech.js
@@ -0,0 +1,208 @@
+"use client";
+
+import { Box, Button, Chip, Paper, Typography } from "@mui/material";
+import { Mic, MicOff } from "lucide-react";
+import { useEffect, useState } from "react";
+import createOracleSpeechService from "../services/oracleSpeechService";
+
+export default function TestOracleSpeech() {
+ const [speechService, setSpeechService] = useState(null);
+ const [isRecording, setIsRecording] = useState(false);
+ const [transcript, setTranscript] = useState("");
+ const [logs, setLogs] = useState([]);
+ const [error, setError] = useState("");
+ const [serviceState, setServiceState] = useState({});
+
+ useEffect(() => {
+ const service = createOracleSpeechService();
+ setSpeechService(service);
+
+ if (!service.isSupported()) {
+ setError("Tu navegador no soporta las APIs necesarias");
+ }
+
+ const interval = setInterval(() => {
+ if (service.getState) {
+ setServiceState(service.getState());
+ }
+ }, 1000);
+
+ return () => {
+ clearInterval(interval);
+ if (service) {
+ service.disconnect();
+ }
+ };
+ }, []);
+
+ const addLog = (message, type = "info") => {
+ const timestamp = new Date().toLocaleTimeString();
+ setLogs((prev) => [...prev, { timestamp, message, type }]);
+ };
+
+ const handleStartRecording = async () => {
+ if (!speechService) return;
+
+ setError("");
+ setTranscript("");
+ addLog("Iniciando grabación...", "info");
+
+ try {
+ const success = await speechService.startRecording(
+ (result) => {
+ addLog(
+ `Transcripción: "${result.transcript}" (Final: ${result.isFinal})`,
+ "success"
+ );
+ if (result.isFinal) {
+ setTranscript((prev) => prev + " " + result.transcript);
+ }
+ },
+ (error) => {
+ addLog(`Error: ${error}`, "error");
+ setError(error);
+ setIsRecording(false);
+ }
+ );
+
+ if (success) {
+ setIsRecording(true);
+ addLog("Grabación iniciada exitosamente", "success");
+ } else {
+ addLog("Fallo al iniciar grabación", "error");
+ }
+ } catch (err) {
+ addLog(`Error no capturado: ${err.message}`, "error");
+ setError(err.message);
+ }
+
+ if (speechService.getState) {
+ setServiceState(speechService.getState());
+ }
+ };
+
+ const handleStopRecording = async () => {
+ if (!speechService) return;
+
+ addLog("Deteniendo grabación...", "info");
+ await speechService.stopRecording();
+ setIsRecording(false);
+ addLog("Grabación detenida", "info");
+
+ if (speechService.getState) {
+ setServiceState(speechService.getState());
+ }
+ };
+
+ const clearLogs = () => {
+ setLogs([]);
+ setTranscript("");
+ setError("");
+ };
+
+ return (
+
+
+ Prueba Oracle Speech Service
+
+
+ {}
+
+
+ Estado del Servicio:
+
+
+
+
+
+
+
+
+
+ {}
+
+ : }
+ disabled={!!error && !isRecording}
+ >
+ {isRecording ? "Detener" : "Grabar"}
+
+
+
+
+ {}
+ {error && (
+
+ {error}
+
+ )}
+
+ {}
+ {transcript && (
+
+
+ Transcripción:
+
+ {transcript}
+
+ )}
+
+ {}
+
+
+ Logs:
+
+ {logs.map((log, index) => (
+
+
+ [{log.timestamp}] {log.message}
+
+
+ ))}
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/config/app.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/config/app.js
new file mode 100644
index 000000000..e885755d4
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/config/app.js
@@ -0,0 +1,32 @@
+// Central UI configuration for the dashboard frontend: the swatch and
+// background palettes offered in the project-settings dialog, and the
+// fallback values applied when a project omits a field.
+export const APP_CONFIG = {
+ // Swatches shown in the "Main Color" picker (ProjectModal).
+ availableColors: [
+ "#2E2E2E",
+ "#2979FF",
+ "#34C759",
+ "#FF9500",
+ "#FF375F",
+ "#AF52DE",
+ "#8E8E93",
+ "#FFD60A",
+ ],
+ // Background images offered in the picker; paths are served statically.
+ availableImages: [
+ "/background.png",
+ "/backgrounds/radial-sky-blue.jpg",
+ "/backgrounds/sonoma.jpg",
+ "/backgrounds/imac-blue.jpg",
+ "/backgrounds/imac-green.jpg",
+ "/backgrounds/imac-orange.jpg",
+ "/backgrounds/imac-pink.jpg",
+ "/backgrounds/imac-purple.jpg",
+ "/backgrounds/imac-silver.jpg",
+ "/backgrounds/imac-yellow.jpg",
+ ],
+ // Defaults used by ProjectsContext / ProjectModal when a field is unset.
+ defaults: {
+ // NOTE(review): "#3a3632" is not one of availableColors, so the swatch
+ // grid shows no selection for the default color — confirm intentional.
+ color: "#3a3632",
+ image: "/backgrounds/imac-blue.jpg",
+ backgroundColor: "#F5F5F5",
+ name: "OCI Generative AI Agents",
+ logoUrl: "",
+ speechProvider: "browser",
+ },
+};
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ChatContext.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ChatContext.js
new file mode 100644
index 000000000..9c1e13fd9
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ChatContext.js
@@ -0,0 +1,294 @@
+"use client";
+
+import {
+ createContext,
+ useCallback,
+ useContext,
+ useEffect,
+ useState,
+} from "react";
+import createGenaiAgentService from "../services/genaiAgentService";
+import createOracleSpeechService from "../services/oracleSpeechService";
+import createSpeechService from "../services/speechService";
+import { createUserMessage } from "../utils/messageUtils";
+import { useProject } from "./ProjectsContext";
+
+const ChatContext = createContext(null);
+
/**
 * Access the chat state and actions supplied by ChatProvider.
 * @throws {Error} when invoked outside a ChatProvider subtree.
 */
export const useChat = () => {
  const ctx = useContext(ChatContext);
  if (ctx) return ctx;
  throw new Error("useChat must be used within a ChatProvider");
};
+
// Instantiate the speech-recognition backend for the given provider id.
// Any id other than "oracle" falls back to the browser implementation,
// matching the original switch's "browser"/default behavior.
const createSpeechServiceFactory = (speechProvider) =>
  speechProvider === "oracle"
    ? createOracleSpeechService()
    : createSpeechService();
+
+export const ChatProvider = ({ children }) => {
+ const { getCurrentProject } = useProject();
+ const [messages, setMessages] = useState([]);
+ const [connected, setConnected] = useState(true);
+ const [loading, setLoading] = useState(false);
+ const [error, setError] = useState("");
+ const [userId, setUserId] = useState("");
+ const [isListening, setIsListening] = useState(false);
+ const [isWaitingForResponse, setIsWaitingForResponse] = useState(false);
+ const [genaiService, setGenaiService] = useState(null);
+ const [speechService, setSpeechService] = useState(null);
+ const [currentSpeechProvider, setCurrentSpeechProvider] = useState("browser");
+ const [playingMessageId, setPlayingMessageId] = useState(null);
+
+ useEffect(() => {
+ const storedUserId =
+ typeof window !== "undefined"
+ ? window.localStorage.getItem("chatUserId")
+ : null;
+
+ const newUserId =
+ storedUserId || `user${Math.random().toString(36).substring(2, 10)}`;
+
+ setUserId(newUserId);
+
+ if (typeof window !== "undefined") {
+ window.localStorage.setItem("chatUserId", newUserId);
+ }
+ }, []);
+
+ useEffect(() => {
+ const currentProject = getCurrentProject();
+ const provider = currentProject.speechProvider || "browser";
+
+ if (currentSpeechProvider !== provider) {
+ setSpeechService(createSpeechServiceFactory(provider));
+ setCurrentSpeechProvider(provider);
+ setMessages([]);
+ setIsWaitingForResponse(false);
+ }
+ }, [getCurrentProject, currentSpeechProvider]);
+
+ useEffect(() => {
+ if (!userId) return;
+ setGenaiService(createGenaiAgentService());
+ }, [userId]);
+
+ const sendMessage = useCallback(
+ async (text) => {
+ if (!text.trim() || !genaiService) return false;
+
+ const message = createUserMessage(text, userId);
+ setMessages((prev) => [...prev, message]);
+ setIsWaitingForResponse(true);
+ setError("");
+
+ try {
+ const response = await genaiService.sendMessage(text);
+
+ const botMessage = {
+ userId: "bot",
+ messagePayload: processResponse(response),
+ date: new Date().toISOString(),
+ from: { type: "bot" },
+ };
+
+ setMessages((prev) => [...prev, botMessage]);
+ setIsWaitingForResponse(false);
+ return true;
+ } catch (error) {
+ setError(`Error: ${error.message}`);
+ setIsWaitingForResponse(false);
+ return false;
+ }
+ },
+ [genaiService, userId]
+ );
+
+ const processResponse = (apiResponse) => {
+ const { answer, diagram_base64, citations } = apiResponse;
+
+ if (diagram_base64) {
+ return {
+ type: "diagram",
+ text: answer,
+ diagram_base64: diagram_base64,
+ };
+ }
+
+ try {
+ const parsed = JSON.parse(answer);
+ if (parsed.executionResult) {
+ return {
+ type: "sql_result",
+ generatedQuery: parsed.generatedQuery || "",
+ executionResult: parsed.executionResult || [],
+ text: `Query executed: ${parsed.generatedQuery || "SQL query"}`,
+ };
+ }
+ } catch {}
+
+ return {
+ type: "text",
+ text: answer,
+ citations: citations || [],
+ };
+ };
+
+ const sendAttachment = useCallback(
+ async (file) => {
+ if (!file || !genaiService) return false;
+
+ setIsWaitingForResponse(true);
+ setError("");
+
+ // Step 1: Show file preview in UI
+ const attachmentMessage = {
+ userId: userId,
+ messagePayload: {
+ type: "attachment",
+ attachment: {
+ type: file.type,
+ title: file.name,
+ url: URL.createObjectURL(file),
+ },
+ },
+ date: new Date().toISOString(),
+ from: { type: "user" },
+ };
+ setMessages((prev) => [...prev, attachmentMessage]);
+
+ // Step 2: Upload file to FastAPI backend
+ try {
+ const formData = new FormData();
+ formData.append("message", "Extract text from uploaded file");
+ formData.append("file", file);
+
+ const response = await fetch(
+ `${process.env.NEXT_PUBLIC_GENAI_API_URL}/chat`,
+ {
+ method: "POST",
+ body: formData,
+ }
+ );
+
+ const data = await response.json();
+
+ const botMessage = {
+ userId: "bot",
+ messagePayload: {
+ type: "text",
+ text: data.text || "No response from server.",
+ },
+ date: new Date().toISOString(),
+ from: { type: "bot" },
+ };
+
+ setMessages((prev) => [...prev, botMessage]);
+ setIsWaitingForResponse(false);
+ return true;
+ } catch (error) {
+ console.error("Attachment upload error:", error);
+ setError("Error uploading file.");
+ setIsWaitingForResponse(false);
+ return false;
+ }
+ },
+ [genaiService, userId]
+ );
+
+
+ const clearChat = useCallback(() => {
+ setMessages([]);
+ setIsWaitingForResponse(false);
+ }, []);
+
+ // Toggle microphone capture on/off. Routes to the Oracle WebSocket
+ // service (startRecording/stopRecording) or the browser Web Speech API
+ // (startListening/stopListening) based on the active provider.
+ const toggleSpeechRecognition = useCallback(() => {
+ if (!speechService || !speechService.isSupported()) {
+ setError("Speech recognition is not supported");
+ return;
+ }
+
+ // Already listening: stop via whichever stop method the service exposes.
+ if (isListening) {
+ if (speechService.stopListening) {
+ speechService.stopListening();
+ } else if (speechService.stopRecording) {
+ speechService.stopRecording();
+ }
+ setIsListening(false);
+ return;
+ }
+
+ let started = false;
+
+ if (currentSpeechProvider === "oracle") {
+ // NOTE(review): oracleSpeechService.startRecording is declared async,
+ // so `started` here is a Promise (always truthy), not a boolean —
+ // confirm whether an `await` was intended.
+ started = speechService.startRecording(
+ (result) => {
+ // Only forward finalized, non-empty transcripts to the agent.
+ if (result.isFinal && result.transcript) {
+ sendMessage(result.transcript);
+ }
+ },
+ (error) => {
+ setIsListening(false);
+ setError(`Speech recognition error: ${error}`);
+ }
+ );
+ } else {
+ started = speechService.startListening(
+ (result) => {
+ if (result.stopped) {
+ setIsListening(false);
+ return;
+ }
+
+ // NOTE(review): `result.stopped` is unreachable here — the branch
+ // above already returned for it; only `result.isFinal` matters.
+ if (result.isFinal || result.stopped) {
+ setIsListening(false);
+ if (result.transcript) {
+ sendMessage(result.transcript);
+ }
+ }
+ },
+ (error) => {
+ setIsListening(false);
+ setError(`Speech recognition error: ${error}`);
+ }
+ );
+ }
+
+ setIsListening(started);
+ }, [isListening, sendMessage, speechService, currentSpeechProvider]);
+
+ useEffect(() => {
+ if (!speechService) {
+ setSpeechService(createSpeechServiceFactory("browser"));
+ }
+ }, [speechService]);
+
+ const value = {
+ messages,
+ connected,
+ loading,
+ error,
+ isListening,
+ isWaitingForResponse,
+ userId,
+ sendMessage,
+ sendAttachment,
+ clearChat,
+ toggleSpeechRecognition,
+ setError,
+ currentSpeechProvider,
+ speakMessage: () => false,
+ cancelAudio: () => setPlayingMessageId(null),
+ playingMessageId,
+ setPlayingMessageId,
+ };
+
+ return {children};
+};
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/DynamicThemeProvider.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/DynamicThemeProvider.js
new file mode 100644
index 000000000..5ea9b6a40
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/DynamicThemeProvider.js
@@ -0,0 +1,41 @@
+"use client";
+
+import { createTheme, ThemeProvider } from "@mui/material/styles";
+import { useMemo } from "react";
+import { APP_CONFIG } from "../config/app";
+
+export default function DynamicThemeProvider({
+ children,
+ projectConfig = null,
+}) {
+ const theme = useMemo(() => {
+ const baseTheme = createTheme({
+ palette: {
+ mode: "light",
+ primary: {
+ main: projectConfig?.mainColor || APP_CONFIG.defaults.color,
+ light: projectConfig?.mainColor || APP_CONFIG.defaults.color,
+ dark: projectConfig?.mainColor || APP_CONFIG.defaults.color,
+ contrastText: "#FFFFFF",
+ },
+ secondary: {
+ main: "#3FB37F",
+ light: "#52C08D",
+ dark: "#36A071",
+ contrastText: "#EDEBE6",
+ },
+ background: {
+ default: "#F5F5F5",
+ paper: "#FFFFFF",
+ },
+ },
+ shape: {
+ borderRadius: 14,
+ },
+ });
+
+ return baseTheme;
+ }, [projectConfig]);
+
+ return {children};
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ProjectsContext.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ProjectsContext.js
new file mode 100644
index 000000000..3717469fc
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/contexts/ProjectsContext.js
@@ -0,0 +1,162 @@
+"use client";
+
+import { createContext, useContext, useEffect, useState } from "react";
+import { APP_CONFIG } from "../config/app";
+
+const ProjectContext = createContext(null);
+
/**
 * Access the projects state and actions supplied by ProjectProvider.
 * @throws {Error} when invoked outside a ProjectProvider subtree.
 */
export const useProject = () => {
  const ctx = useContext(ProjectContext);
  if (ctx) return ctx;
  throw new Error("useProject must be used within a ProjectProvider");
};
+
+export const ProjectProvider = ({ children }) => {
+ const [projects, setProjects] = useState([]);
+ const [currentProjectId, setCurrentProjectId] = useState("");
+
+ useEffect(() => {
+ if (typeof window !== "undefined") {
+ const storedProjects = localStorage.getItem("chatProjects");
+ const storedCurrentProjectId = localStorage.getItem("currentProjectId");
+
+ if (!storedProjects) {
+ const defaultProject = {
+ id: "default",
+ name: APP_CONFIG.defaults.name,
+ logoUrl: APP_CONFIG.defaults.logoUrl,
+ mainColor: APP_CONFIG.defaults.color,
+ backgroundColor: APP_CONFIG.defaults.backgroundColor,
+ backgroundImage: APP_CONFIG.defaults.image,
+ speechProvider: APP_CONFIG.defaults.speechProvider,
+ };
+
+ setProjects([defaultProject]);
+ setCurrentProjectId("default");
+
+ localStorage.setItem("chatProjects", JSON.stringify([defaultProject]));
+ localStorage.setItem("currentProjectId", "default");
+ } else {
+ const parsedProjects = JSON.parse(storedProjects);
+ const migratedProjects = parsedProjects.map((project) => ({
+ ...project,
+ mainColor:
+ project.mainColor ||
+ project.backgroundColor ||
+ APP_CONFIG.defaults.color,
+ backgroundColor:
+ project.backgroundColor || APP_CONFIG.defaults.backgroundColor,
+ speechProvider:
+ project.speechProvider || APP_CONFIG.defaults.speechProvider,
+ }));
+
+ setProjects(migratedProjects);
+ setCurrentProjectId(storedCurrentProjectId || "default");
+ }
+ }
+ }, []);
+
+ useEffect(() => {
+ if (typeof window !== "undefined" && projects.length > 0) {
+ localStorage.setItem("chatProjects", JSON.stringify(projects));
+ }
+ }, [projects]);
+
+ useEffect(() => {
+ if (typeof window !== "undefined" && currentProjectId) {
+ localStorage.setItem("currentProjectId", currentProjectId);
+ }
+ }, [currentProjectId]);
+
+ const createProject = (projectData) => {
+ if (projects.length >= 8) {
+ console.warn("Maximum number of projects (8) reached");
+ return null;
+ }
+
+ const newProject = {
+ id: Date.now().toString(),
+ name: APP_CONFIG.defaults.name,
+ logoUrl: APP_CONFIG.defaults.logoUrl,
+ mainColor: APP_CONFIG.defaults.color,
+ backgroundColor: APP_CONFIG.defaults.backgroundColor,
+ speechProvider: APP_CONFIG.defaults.speechProvider,
+ ...projectData,
+ };
+
+ setProjects((prev) => [...prev, newProject]);
+ return newProject.id;
+ };
+
+ const updateProject = (id, projectData) => {
+ setProjects((prev) =>
+ prev.map((project) =>
+ project.id === id ? { ...project, ...projectData } : project
+ )
+ );
+ };
+
+ const deleteProject = (id) => {
+ if (id === "default") return false;
+
+ setProjects((prev) => prev.filter((project) => project.id !== id));
+
+ if (currentProjectId === id) {
+ setCurrentProjectId("default");
+ }
+
+ return true;
+ };
+
+ const getCurrentProject = () => {
+ const project =
+ projects.find((p) => p.id === currentProjectId) || projects[0];
+
+ if (project?.id === "default") {
+ return {
+ ...project,
+ name: APP_CONFIG.defaults.name,
+ logoUrl: APP_CONFIG.defaults.logoUrl,
+ mainColor: APP_CONFIG.defaults.color,
+ backgroundColor: APP_CONFIG.defaults.backgroundColor,
+ backgroundImage: APP_CONFIG.defaults.image,
+ speechProvider: APP_CONFIG.defaults.speechProvider,
+ };
+ }
+
+ return (
+ project || {
+ id: "default",
+ name: APP_CONFIG.defaults.name,
+ logoUrl: APP_CONFIG.defaults.logoUrl,
+ mainColor: APP_CONFIG.defaults.color,
+ backgroundColor: APP_CONFIG.defaults.backgroundColor,
+ speechProvider: APP_CONFIG.defaults.speechProvider,
+ }
+ );
+ };
+
+ const switchProject = (id) => {
+ if (currentProjectId !== id) {
+ setCurrentProjectId(id);
+ }
+ };
+
+ return (
+
+ {children}
+
+ );
+};
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/favicon.ico b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/favicon.ico
new file mode 100644
index 0000000000000000000000000000000000000000..b0090fb52b81227b1457674aba279f5eeb3a5fb4
GIT binary patch
literal 15086
zcmeHO>u*#=6rTb@kSJ>Og9d1c7_|m^A1w{OTDx~{sYFA>P2>xr2_hvBzbRkrY9bHg
z>mT6liy9$*F#%r%+P!m^iYej;!N(eOZ
zUU@1SsA!;~frE(a5pTvKQwqlyq+HPU`3x;27O9543TeB_
zMXzdzb9ldu{NEBK@Us3Q-ijf1D5mu+$t2l9M0u7(oGE&1ucmRa6Bth;4I??@lt|WT
zlC8H|8jFm5OcZ`>y~^#sq35gPgju2+$OZ-_B--=TI%{kba}+N20`mmo4vaHl+0)o8
z=H%*!`pEG+G(#M~TpXD=od>Wd$Hf}aLe_r0x%c*Leb9JpFV+yPNy%7_mRX7seMvKg
zzSy+qKlpf0bunqNd3ZkYg3dNz%^uDvo3L?vPbPk{Te>OQv(wraHN!rjgnAknbE-1w
zOZl7Zj%HZ*b%v*sAHgI(Adh;6dNNgtTu<>_dtWHSU
zj40gNwxI2F;Xb6u4%LQFP6y7{flcOn`UtzqVJ8dh_6^#xhxk0GLgalu-tUwZ7`+)T%>maqV0aQ$-8tr)i4ei~NC^2l8H
zFRcxCPKWJ}V9zYGktb;XpzFOipnfa*Bip|XDyHCtoIi{19oB3e7bb9F|C?^#lUTRV
z{*C0Qt#%#GxxY!;kv|wf-KvM4?7pdJOnEY({WJECt05P%1K0^uTwD6ByMHE@v_ID9
zye@C+V18YIulT5;HTFQB5DIRl8_|W=I=|>9A0Uj`@Kbq%d3Y|)(jR1-
zOA}R2dT%geb8}($*Qutp(Vh1b9KS+55$AY&gCAR7@33F6tM$SJ~tVneWJVmHR|I1<^lQmzr2+XKW~4OzR$b`~AKl@8M6
zi=RuoQ7NowprV0_1}Yku^cry72PgGH^EhKO9MF>{@aGEUO{sj4e$QqsPB|jkULKj@
zID_~S5~+5aA^IZHlJ0j~LmWi@2hIc(XQDXeSQ2PJ%dJDii+Fl*oooX^-^J=e;suS{{g74CGZgW}
zH>ouwLngnC&O_YVC6S9hTUSs{=b|wr&~Q5tGr{?1C&}_-aN|U{-;yDl?pQ#Fb_{3L
zH}M^OlWvGt@=77GN#XW3oU;XZUJl~0xBKHfbgw2&HsR*NQbI9&S1;~qKnw@Gj|AgK
z_A276Khb+5@cG8uz1Q;Dl5lipagtNi$?vV700+(A#Ju97e@XcFnfQ*K(RcAH#raay
zrFmOq_RVt7?w)+)q<1A`?;pj4GQ{ZCltyBZ3*BIw9NQjtTBX^#@UxwuEGO@?6yPF??ky$a>1nq96xoDQV_0M!oQ3gCyRSfN-?cx6I6FEgU?UK(
z+Lz5w6oQL*Vca_*!)Jf&ey^W;&<4S8SX~sKD@2~3rh)fJ?JxD+Yv=kO_$8kX@D0!P
zUF3hFDBl9#`0|)i$l413V}Ul{e>5Yu!|#6xhwFdcm}=Tw@NkU5?|MF3?tg)2cc%aK
z^pF@Qdo9M_KOaWtYnpzNHY?vU=;EvvimtuZyrMl)ZEN&4#7j4V_ZLWKFa`z0m|dDP
zuyKcoS3dhMPYsKK;L2NpGuTmj*SQ_eN$9IY`xOa#`v~$&z%@koGJH*PzLGB>8terw
zx%7rLbgeVApJ9q#T=MYaeDRdOO|ExpOZZ*dl>W}CX6(^ibX12sG%5Xomc}48e%2L?
z-S_abU&<2ZL~8sn=0r<8X)e@rd!@T4;Wo_?+D`>jqAps`qbpPMhd4~$ClL#!cqpPl
Vt~Ov)U|6b$(i4@@Lv8#t{{a$t8?FEV
literal 0
HcmV?d00001
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/globals.css b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/globals.css
new file mode 100644
index 000000000..00cf4305a
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/globals.css
@@ -0,0 +1,40 @@
+html,
+body {
+ margin: 0;
+ padding: 0;
+}
+
+body,
+h1,
+h2,
+h3,
+h4,
+h5,
+h6 {
+ margin: 0;
+}
+
+* {
+ box-sizing: border-box;
+}
+
+.markdown-content > * + * {
+ margin-top: 1rem;
+}
+
+.markdown-content ol,
+.markdown-content ul {
+ margin: 1rem 0;
+ padding-left: 1.5rem;
+}
+
+.markdown-content {
+ font-family: var(--font-roboto);
+}
+
+* span {
+ font-family: var(--font-exo2), sans-serif !important;
+ font-size: 0.88rem !important;
+ line-height: 1.5 !important;
+ letter-spacing: 0.01px !important;
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/layout.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/layout.js
new file mode 100644
index 000000000..070af89e9
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/layout.js
@@ -0,0 +1,38 @@
+import { AppRouterCacheProvider } from "@mui/material-nextjs/v15-appRouter";
+import { Exo_2, Roboto } from "next/font/google";
+import { ProjectProvider } from "./contexts/ProjectsContext";
+import DynamicThemeProvider from "./contexts/DynamicThemeProvider";
+import "./globals.css";
+
+const roboto = Roboto({
+ weight: ["300", "400", "500", "700"],
+ subsets: ["latin"],
+ display: "swap",
+ variable: "--font-roboto",
+});
+
+const exo2 = Exo_2({
+ weight: ["300", "400", "500", "600", "700"],
+ subsets: ["latin"],
+ display: "swap",
+ variable: "--font-exo2",
+});
+
+export const metadata = {
+ title: "OCI Generative AI Agents",
+ description: "Chat with our AI assistant",
+};
+
+export default function RootLayout({ children }) {
+ return (
+
+
+
+
+ {children}
+
+
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/page.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/page.js
new file mode 100644
index 000000000..98b7c8af5
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/page.js
@@ -0,0 +1,108 @@
+"use client";
+
+import { Box, CircularProgress } from "@mui/material";
+import { useRouter, useSearchParams } from "next/navigation";
+import { Suspense, useEffect, useState } from "react";
+import Chat from "./components/Chat/Chat";
+import ProjectModal from "./components/Settings/ProjectModal";
+import { ChatProvider } from "./contexts/ChatContext";
+import { useProject } from "./contexts/ProjectsContext";
+
+function HomeContent() {
+ const searchParams = useSearchParams();
+ const projectId = searchParams.get("projectId");
+ const router = useRouter();
+ const { switchProject, createProject, updateProject, deleteProject } =
+ useProject();
+
+ const [modalOpen, setModalOpen] = useState(false);
+ const [selectedProject, setSelectedProject] = useState(null);
+
+ useEffect(() => {
+ if (projectId) {
+ switchProject(projectId);
+ }
+ }, [projectId, switchProject]);
+
+ const handleAddProject = () => {
+ setSelectedProject(null);
+ setModalOpen(true);
+ };
+
+ const handleEditProject = (project) => {
+ setSelectedProject(project);
+ setModalOpen(true);
+ };
+
+ const handleSaveProject = (formData) => {
+ if (selectedProject) {
+ updateProject(selectedProject.id, formData);
+ setModalOpen(false);
+ setSelectedProject(null);
+ } else {
+ const newProjectId = createProject(formData);
+ if (newProjectId) {
+ switchProject(newProjectId);
+ router.push(`/?projectId=${newProjectId}`);
+ setModalOpen(false);
+ setSelectedProject(null);
+ } else {
+ alert("Maximum number of projects (8) reached");
+ }
+ }
+ };
+
+ const handleDeleteProject = (projectId) => {
+ deleteProject(projectId);
+ setModalOpen(false);
+ setSelectedProject(null);
+ };
+
+ const handleCloseModal = () => {
+ setModalOpen(false);
+ setSelectedProject(null);
+ };
+
+ return (
+ <>
+
+
+
+
+
+ >
+ );
+}
+
+function LoadingFallback() {
+ return (
+
+
+
+ );
+}
+
+export default function Home() {
+ return (
+ }>
+
+
+ );
+}
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/page.module.css b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/page.module.css
new file mode 100644
index 000000000..e69de29bb
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/apiClient.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/apiClient.js
new file mode 100644
index 000000000..a06e0b2ed
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/apiClient.js
@@ -0,0 +1,25 @@
+"use client";
+
// Base URL for the dashboard API; injected at build time by Next.js.
const API_BASE_URL = process.env.NEXT_PUBLIC_API_URL;
// Fix: removed the leftover debug console.log of API_BASE_URL that ran on
// every module load.

/**
 * Perform a fetch against the configured API and parse the JSON body.
 *
 * @param {string} endpoint - Path appended to API_BASE_URL (e.g. "/query").
 * @param {RequestInit} [options] - Standard fetch options.
 * @returns {Promise<any>} Parsed JSON response body.
 * @throws {Error} When the request fails or the server replies non-2xx.
 */
async function request(endpoint, options = {}) {
  const url = `${API_BASE_URL}${endpoint}`;

  try {
    const response = await fetch(url, options);

    if (!response.ok) {
      // Include the status text so callers get a meaningful message
      // (the original discarded it).
      throw new Error(
        `API error: ${response.status} ${response.statusText}`.trim()
      );
    }

    return await response.json();
  } catch (error) {
    // Log once here with the full URL, then rethrow so callers can react.
    console.error(`Request failed: ${url}`, error);
    throw error;
  }
}

export default {
  request,
};
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/genaiAgentService.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/genaiAgentService.js
new file mode 100644
index 000000000..10a86c091
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/genaiAgentService.js
@@ -0,0 +1,47 @@
+"use client";
+
// Factory for the GenAI agent HTTP client used by the chat UI.
// Fix: removed the dead `sessionId` local, which was declared but never
// read or written anywhere in the service.
const createGenaiAgentService = () => {
  const API_BASE_URL =
    process.env.NEXT_PUBLIC_GENAI_API_URL || "http://localhost:8000";

  /**
   * Send a user question to the backend /query endpoint.
   *
   * @param {string} text - Natural-language question for the agent.
   * @returns {Promise<object>} Response normalized for the chat UI.
   * @throws {Error} When the backend replies with a non-2xx status.
   */
  const sendMessage = async (text) => {
    const payload = {
      question: text,
      context: "",
    };

    const response = await fetch(`${API_BASE_URL}/query`, {
      method: "POST",
      headers: {
        "Content-Type": "application/json",
      },
      body: JSON.stringify(payload),
    });

    if (!response.ok) {
      throw new Error(`API Error: ${response.status}`);
    }

    const data = await response.json();

    // Transform backend response to match frontend expectations.
    return {
      answer: data.agent_response || data.text_response || "No response",
      response_type: data.response_type,
      query: data.query,
      dashboard: data.dashboard,
      data: data.data,
      insights: data.insights,
      // Map chart_base64 to diagram_base64 for frontend display.
      diagram_base64: data.chart_base64,
      chart_config: data.chart_config,
      method: data.method,
    };
  };

  return {
    sendMessage,
  };
};

export default createGenaiAgentService;
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/oracleSpeechService.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/oracleSpeechService.js
new file mode 100644
index 000000000..610d5492c
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/oracleSpeechService.js
@@ -0,0 +1,81 @@
+"use client";
+
+const createOracleSpeechService = () => {
+ let ws = null;
+ let mediaRecorder = null;
+ let mediaStream = null;
+ let isRecording = false;
+
+ const startRecording = async (onResult, onError) => {
+ try {
+ ws = new WebSocket("ws://localhost:3001/ws/speech");
+
+ ws.onopen = () => {};
+ ws.onmessage = (event) => {
+ const data = JSON.parse(event.data);
+ if (data.type === "ready") {
+ }
+ if (data.type === "transcription") {
+ onResult({ transcript: data.text, isFinal: data.isFinal });
+ } else if (data.type === "error") {
+ console.error("[Cliente] Oracle WS error:", data.message);
+ onError(data.message);
+ }
+ };
+ ws.onerror = (err) => {
+ console.error("[Cliente][oracleSpeechService] 🔴 WS error:", err);
+ onError("WebSocket error");
+ };
+ ws.onclose = (ev) => {};
+
+ await new Promise((resolve) => {
+ ws.onopen = resolve;
+ });
+
+ mediaStream = await navigator.mediaDevices.getUserMedia({
+ audio: { sampleRate: 16000, channelCount: 1 },
+ });
+
+ mediaRecorder = new MediaRecorder(mediaStream);
+ mediaRecorder.ondataavailable = (event) => {
+ if (event.data.size > 0 && ws.readyState === WebSocket.OPEN) {
+ event.data.arrayBuffer().then((buffer) => {
+ ws.send(buffer);
+ });
+ }
+ };
+ mediaRecorder.onstart = () =>
+ (mediaRecorder.onstop = () =>
+ (mediaRecorder.onerror = (e) =>
+ console.error("[Cliente] Recorder error:", e)));
+
+ mediaRecorder.start(500);
+ isRecording = true;
+ return true;
+ } catch (error) {
+ console.error("[Cliente] startRecording error:", error);
+ onError(error.message);
+ return false;
+ }
+ };
+
+ const stopRecording = () => {
+ if (mediaRecorder) mediaRecorder.stop();
+ if (mediaStream) mediaStream.getTracks().forEach((t) => t.stop());
+ if (ws) ws.close();
+ isRecording = false;
+ return true;
+ };
+
+ const isSupported = () =>
+ !!(navigator.mediaDevices && window.MediaRecorder && window.WebSocket);
+
+ return {
+ isSupported,
+ startRecording,
+ stopRecording,
+ isRecording: () => isRecording,
+ };
+};
+
+export default createOracleSpeechService;
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/speechService.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/speechService.js
new file mode 100644
index 000000000..c48a74319
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/services/speechService.js
@@ -0,0 +1,69 @@
/**
 * Factory for a browser speech-recognition service built on the Web Speech
 * API (SpeechRecognition / webkitSpeechRecognition).
 */
const createSpeechService = () => {
  // Active recognizer instance, or null when idle.
  let activeRecognizer = null;

  /** True when the browser provides a speech recognition constructor. */
  const isSupported = () =>
    "SpeechRecognition" in window || "webkitSpeechRecognition" in window;

  /**
   * Begin a single recognition session.
   *
   * @param {(r: {transcript?: string, isFinal?: boolean, stopped?: boolean}) => void} onResult
   * @param {(message: string) => void} onError
   * @returns {boolean} true when the session started, false otherwise.
   */
  const startListening = (onResult, onError) => {
    if (!isSupported()) {
      onError("Speech recognition is not supported in your browser");
      return false;
    }

    try {
      const RecognizerCtor =
        window.SpeechRecognition || window.webkitSpeechRecognition;
      activeRecognizer = new RecognizerCtor();

      activeRecognizer.lang = "en-US";
      activeRecognizer.interimResults = true;
      activeRecognizer.continuous = false;

      activeRecognizer.onresult = (event) => {
        // Concatenate the best alternative of every result so far.
        const results = Array.from(event.results);
        const transcript = results
          .map((result) => result[0].transcript)
          .join("");
        onResult({
          transcript,
          isFinal: results[results.length - 1].isFinal,
        });
      };

      activeRecognizer.onend = () => onResult({ stopped: true });

      activeRecognizer.onerror = (event) => {
        console.error("Speech recognition error", event);
        onError(event.error);
      };

      activeRecognizer.start();
      return true;
    } catch (err) {
      console.error("Error starting speech recognition:", err);
      onError(err.message);
      return false;
    }
  };

  /** Stop the active session, if any. @returns {boolean} whether one was active. */
  const stopListening = () => {
    if (!activeRecognizer) return false;
    activeRecognizer.stop();
    activeRecognizer = null;
    return true;
  };

  return {
    isSupported,
    startListening,
    stopListening,
  };
};

export default createSpeechService;
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/overrides.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/overrides.js
new file mode 100644
index 000000000..59343cb7d
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/overrides.js
@@ -0,0 +1,56 @@
/**
 * MUI component style overrides derived from the resolved theme.
 * Flat buttons, tightened list items, and subtle translucent borders.
 *
 * @param {object} theme - A fully-constructed MUI theme.
 * @returns {object} The `components` section for the theme.
 */
export const getComponentOverrides = (theme) => {
  // Shared translucent white used for hover fills, borders, and dividers.
  const subtleWhite = "rgba(255,255,255,0.1)";

  return {
    MuiButton: {
      styleOverrides: {
        root: {
          textTransform: "none",
          borderRadius: theme.shape.borderRadius * 2,
          boxShadow: "none",
          "&:hover": { boxShadow: "none" },
        },
      },
    },
    MuiAvatar: {
      styleOverrides: {
        root: { fontSize: "0.875rem", fontWeight: 600 },
      },
    },
    MuiListItemButton: {
      styleOverrides: {
        root: {
          borderRadius: "4px",
          margin: "2px 0",
          "&:hover": { backgroundColor: subtleWhite },
        },
      },
    },
    MuiListItemIcon: {
      styleOverrides: {
        root: { minWidth: "36px" },
      },
    },
    MuiDivider: {
      styleOverrides: {
        root: { borderColor: subtleWhite },
      },
    },
    MuiCard: {
      styleOverrides: {
        root: {
          backgroundColor: theme.palette.background.paper,
          boxShadow: "none",
          border: `1px solid ${subtleWhite}`,
        },
      },
    },
  };
};
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/palette.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/palette.js
new file mode 100644
index 000000000..f9481c420
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/palette.js
@@ -0,0 +1,19 @@
/**
 * Application color palette in MUI format: a neutral grey primary with a
 * green secondary accent over light backgrounds.
 */
const primary = {
  main: "#525252",
  light: "#737373",
  dark: "#404040",
  contrastText: "#FFFFFF",
};

const secondary = {
  main: "#3FB37F",
  light: "#52C08D",
  dark: "#36A071",
  contrastText: "#EDEBE6",
};

const background = {
  default: "#F5F5F5",
  paper: "#FFFFFF",
};

export const palette = {
  mode: "light",
  primary,
  secondary,
  background,
};
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/theme.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/theme.js
new file mode 100644
index 000000000..04038c60e
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/theme/theme.js
@@ -0,0 +1,20 @@
+"use client";
+
+import { createTheme } from "@mui/material/styles";
+import { getComponentOverrides } from "./overrides";
+import { palette } from "./palette";
+
+const theme = createTheme({
+ palette,
+ typography: {
+ fontFamily: "var(--font-roboto)",
+ },
+ shape: {
+ borderRadius: 14,
+ },
+ spacing: 8,
+});
+
+theme.components = getComponentOverrides(theme);
+
+export default theme;
diff --git a/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/utils/messageUtils.js b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/utils/messageUtils.js
new file mode 100644
index 000000000..75a04111a
--- /dev/null
+++ b/ai/gen-ai-agents/sql_graph_generator_dashboard/files/frontend/app/utils/messageUtils.js
@@ -0,0 +1,200 @@
/**
 * Build a user-authored text message envelope.
 *
 * @param {string} text - Raw input text; surrounding whitespace is trimmed.
 * @param {string} userId - Identifier of the sending user.
 * @returns {object} Message envelope with an ISO timestamp.
 */
export const createUserMessage = (text, userId) => ({
  userId,
  messagePayload: {
    type: "text",
    text: text.trim(),
  },
  date: new Date().toISOString(),
  from: null, // a null sender marks the message as user-authored
});
+
/**
 * Build an autocomplete-suggestion request payload.
 *
 * @param {string} query - Partial text to fetch suggestions for.
 * @param {string} userId - Identifier of the requesting user.
 * @returns {object} Suggestion request with a fixed threshold of 5.
 */
export const createSuggestionRequest = (query, userId) => ({
  userId,
  messagePayload: {
    type: "suggest",
    query,
    threshold: 5,
  },
});
+
/**
 * True when the message was sent by the bot.
 *
 * Returns a strict boolean; the previous `a && b` form leaked `null`/
 * `undefined` to callers when `from` was absent (truthiness is unchanged).
 *
 * @param {object} message
 * @returns {boolean}
 */
export const isFromBot = (message) => message.from?.type === "bot";
+
/**
 * True when the message was sent by the user (user messages carry
 * `from: null`).
 *
 * @param {object} message
 * @returns {boolean}
 */
export const isFromUser = (message) => message.from === null;
+
/**
 * Payload type of a message, or "unknown" when no payload/type is present.
 *
 * @param {object} message
 * @returns {string}
 */
export const getMessageType = (message) =>
  message.messagePayload?.type || "unknown";
+
/**
 * Human-readable summary text for a message payload.
 *
 * @param {object} message
 * @returns {string} Display text; "" when there is no payload.
 */
export const getMessageText = (message) => {
  const payload = message.messagePayload;
  if (!payload) return "";

  switch (payload.type) {
    case "text":
      return payload.text;
    case "card":
      if (payload.cards && payload.cards.length > 0) {
        const card = payload.cards[0];
        return card.title || card.description || "Card message";
      }
      return "Card message";
    case "attachment":
      // BUGFIX: guard against a missing attachment object instead of
      // throwing a TypeError on payload.attachment.type.
      return `Attachment: ${payload.attachment?.type ?? "unknown"}`;
    default:
      return `Message of type: ${payload.type}`;
  }
};
+
/**
 * Format a timestamp as a locale hour:minute string.
 *
 * @param {string} dateString - Date input accepted by `new Date()`.
 * @returns {string} Localized time, or "" when the input is unparsable.
 */
export const formatMessageTime = (dateString) => {
  try {
    const date = new Date(dateString);
    // BUGFIX: Date construction never throws; an unparsable input yields an
    // Invalid Date that would render as "Invalid Date". Return "" instead,
    // matching the intent of the original try/catch.
    if (Number.isNaN(date.getTime())) return "";
    return date.toLocaleTimeString([], { hour: "2-digit", minute: "2-digit" });
  } catch (e) {
    return "";
  }
};
+
/**
 * Extract fenced code blocks (```lang\ncode\n```) from markdown text.
 *
 * @param {string} text
 * @returns {{language: string, code: string}[]} One entry per fenced block,
 *   in document order; language is "" when no tag was given.
 */
export const extractCodeBlocks = (text) => {
  const fencePattern = /```(\w+)?\n([\s\S]*?)\n```/g;
  return Array.from(text.matchAll(fencePattern), ([, language, code]) => ({
    language: language || "",
    code,
  }));
};
+
/**
 * Convert markdown links `[label](url)` into HTML anchors.
 *
 * BUGFIX: the replacement string was a bare '$1', which kept only the label
 * and silently discarded the URL. Emit an anchor tag (opening in a new tab)
 * so the link survives rendering.
 *
 * @param {string} text
 * @returns {string}
 */
export const convertMarkdownLinks = (text) => {
  const linkRegex = /\[([^\]]+)\]\(([^)]+)\)/g;
  return text.replace(
    linkRegex,
    '<a href="$2" target="_blank" rel="noopener noreferrer">$1</a>'
  );
};
+
/**
 * Prepare raw message text for display: shield fenced code blocks behind
 * placeholders, run the markdown-link conversion on the rest, then restore
 * the code blocks.
 *
 * @param {string} text
 * @returns {string} Processed text; "" for empty/missing input.
 */
export const processMessageContent = (text) => {
  if (!text) return "";

  const codeBlocks = extractCodeBlocks(text);
  let processedText = text;

  // Replace each fenced block (first remaining occurrence per pass) with a
  // placeholder so link conversion cannot alter code content.
  codeBlocks.forEach((block, index) => {
    const placeholder = `__CODE_BLOCK_${index}__`;
    processedText = processedText.replace(
      /```(\w+)?\n([\s\S]*?)\n```/,
      placeholder
    );
  });

  processedText = convertMarkdownLinks(processedText);

  // Restore the code blocks. BUGFIX: use a replacer function so that "$"
  // sequences inside the code (e.g. "$&", "$1") are inserted literally
  // instead of being interpreted as special replacement patterns.
  codeBlocks.forEach((block, index) => {
    const placeholder = `__CODE_BLOCK_${index}__`;
    const formattedCode = `${block.code}
`;
    processedText = processedText.replace(placeholder, () => formattedCode);
  });

  return processedText;
};
+
/**
 * Format a conversation timestamp relative to now: time of day within 24h,
 * "Yesterday" within 48h, otherwise a short month/day date.
 *
 * @param {string} dateString - Date input accepted by `new Date()`.
 * @returns {string} Relative label, or "" when the input is unparsable.
 */
export const formatConversationTime = (dateString) => {
  try {
    const date = new Date(dateString);
    // BUGFIX: Date construction never throws; reject unparsable input
    // explicitly instead of rendering "Invalid Date".
    if (Number.isNaN(date.getTime())) return "";

    const now = new Date();
    const diffInHours = Math.floor((now - date) / (1000 * 60 * 60));

    if (diffInHours < 24) {
      return date.toLocaleTimeString([], {
        hour: "2-digit",
        minute: "2-digit",
      });
    } else if (diffInHours < 48) {
      return "Yesterday";
    } else {
      return date.toLocaleDateString([], { month: "short", day: "numeric" });
    }
  } catch (e) {
    return "";
  }
};
+
/**
 * Shorten text to at most `maxLength` characters, trimming trailing
 * whitespace at the cut and appending "..." when truncated.
 *
 * @param {string} text
 * @param {number} [maxLength=60]
 * @returns {string} "" for empty input; otherwise the (possibly cut) text.
 */
export const truncateText = (text, maxLength = 60) => {
  if (!text) return "";
  return text.length <= maxLength
    ? text
    : `${text.substring(0, maxLength).trim()}...`;
};
+
+export const sanitizeHtml = (html) => {
+ if (!html) return "";
+
+ return html
+ .replace(/