diff --git a/.cursor/rules/coding_standards.mdc b/.cursor/rules/coding_standards.mdc deleted file mode 100644 index e6e9ec96..00000000 --- a/.cursor/rules/coding_standards.mdc +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Coding Standards -globs: *.ts,*.tsx,*.js,*.jsx ---- -# Coding Standards for Starknet Agent - -## Naming Conventions -- Variables and functions: Use `camelCase` (e.g., `fetchData`, `generateEmbeddings`). -- Classes and components: Use `PascalCase` (e.g., `RagAgent`, `ChatInterface`). -- Constants: Use `UPPER_CASE` with underscores (e.g., `DEFAULT_CHAT_MODEL`). -- Type interfaces: Use `PascalCase` with `I` prefix (e.g., `IAgentConfig`). -- Ingester classes: Use `PascalCase` with `Ingester` suffix (e.g., `CairoBookIngester`). -- Pipeline components: Use descriptive names ending with their role (e.g., `QueryProcessor`, `DocumentRetriever`). - -## Indentation and Formatting -- Use 2 spaces for indentation (no tabs). -- Keep lines under 100 characters where possible. -- Place opening braces on the same line as the statement (e.g., `if (condition) {`). -- Use Prettier for consistent formatting across the codebase. -- Run `pnpm format:write` before committing changes. - -## Imports and Structure -- Group external imports first, followed by internal modules. -- Use barrel exports (index.ts files) to simplify imports. -- Prefer destructured imports when importing multiple items from a single module. -- Order imports alphabetically within their groups. -- Use relative paths for imports within the same package, absolute paths for cross-package imports. - -## Comments -- Add JSDoc comments for functions and classes, especially in the agent pipeline and ingester components. -- Use `//` for single-line comments and `/* ... */` for multi-line comments. -- Document ingester classes with clear descriptions of the source and processing approach. -- Include explanations for complex algorithms or non-obvious design decisions. -- For the RAG pipeline components, document the input/output expectations clearly. - -## TypeScript Usage -- Use explicit typing for function parameters and return values. -- Prefer interfaces over types for object definitions. -- Use generics where appropriate, especially in the pipeline components and ingester classes. -- Example: `function processQuery(query: T): Promise` -- Use abstract classes for base implementations (e.g., `BaseIngester`). -- Leverage type guards for safe type narrowing. -- Use discriminated unions for state management, especially in the UI components. - -## Error Handling -- Wrap async operations in `try/catch` blocks. -- Log errors with context using the logger utility (e.g., `logger.error('Failed to retrieve documents:', error)`). -- Use custom error classes for specific error types in the agent pipeline and ingestion process. -- Implement proper cleanup in error handlers, especially for file operations in ingesters. -- Ensure errors are propagated appropriately and handled at the right level of abstraction. -- Use async/await with proper error handling rather than promise chains where possible. - -## Testing -- Write unit tests for utility functions, pipeline components, and ingester classes. -- Use Jest for testing framework. -- Mock external dependencies (LLMs, vector stores, etc.) using jest-mock-extended. -- Aim for high test coverage in core agent functionality and ingestion processes. -- Test each ingester implementation separately. -- Use descriptive test names that explain the behavior being tested. 
-- Follow the AAA pattern (Arrange, Act, Assert) for test structure. - -## Code Organization -- Keep files focused on a single responsibility. -- Group related functionality in directories. -- Separate business logic from UI components. -- Organize ingesters by source type in dedicated directories. -- Follow the template method pattern for ingester implementations. -- Use the factory pattern for creating appropriate instances based on configuration. -- Implement dependency injection for easier testing and component replacement. diff --git a/.cursor/rules/common_patterns.mdc b/.cursor/rules/common_patterns.mdc deleted file mode 100644 index a075e63a..00000000 --- a/.cursor/rules/common_patterns.mdc +++ /dev/null @@ -1,95 +0,0 @@ ---- -description: Common Patterns -globs: *.ts,*.tsx,*.js,*.jsx ---- -# Common Patterns in Starknet Agent - -## RAG Pipeline Architecture -- Core pattern for information retrieval and response generation. -- Steps in the RAG pipeline: - 1. **Query Processor**: `packages/agents/src/pipeline/queryProcessor.ts` - - Analyzes user queries and chat history - - Reformulates queries to optimize document retrieval - 2. **Document Retriever**: `packages/agents/src/pipeline/documentRetriever.ts` - - Converts queries to vector embeddings - - Searches vector database using cosine similarity - - Returns relevant document chunks with metadata - 3. **Answer Generator**: `packages/agents/src/pipeline/answerGenerator.ts` - - Uses LLMs to generate comprehensive responses - - Includes source citations in the response - - Handles different conversation contexts - 4. **RAG Pipeline**: `packages/agents/src/pipeline/ragPipeline.ts` - - Orchestrates the entire process flow - - Manages error handling and logging - -## Factory Pattern -- Used for creating RAG agents with different configurations. -- Example: `packages/agents/src/ragAgentFactory.ts` - - Creates different agent instances based on focus mode. - - Configures appropriate vector stores and prompt templates. -- Also used in the ingester package: `packages/ingester/src/IngesterFactory.ts` - - Creates appropriate ingester instances based on documentation source. - - Enables easy addition of new document sources. - -## Template Method Pattern -- Used in the ingester package for standardizing the ingestion process. -- Example: `packages/ingester/src/BaseIngester.ts` - - Defines the skeleton of the ingestion algorithm in a method. - - Defers some steps to subclasses (download, extract, process). - - Ensures consistent process flow while allowing customization. - - Common workflow: Download → Extract → Process → Generate Embeddings → Store - -## WebSocket Streaming Architecture -- Used for real-time streaming of agent responses. -- Example: `packages/backend/src/websocket/` - - Components: - - `connectionManager.ts`: Manages WebSocket connections and sessions - - `messageHandler.ts`: Processes incoming messages and routes to appropriate handlers - - Flow: Connection → Authentication → Message Handling → Response Streaming - - Enables real-time, chunk-by-chunk delivery of LLM responses - -## Repository Pattern -- Used for database interactions. -- Example: `packages/agents/src/db/vectorStore.ts` - - Abstracts MongoDB vector search operations - - Provides methods for similarity search and filtering - - Handles connection pooling and error handling -- Used in ingester for vector store operations: `packages/ingester/src/utils/vectorStoreUtils.ts` - -## Configuration Management -- Centralized configuration using TOML files. 
-- Example: `packages/agents/src/config.ts` and `packages/agents/sample.config.toml` - - Loads configuration from files and environment variables. - - Provides typed access to configuration values. - - Supports multiple LLM providers (OpenAI, Anthropic, etc.) - - Configures multiple vector databases for different focus modes - -## Dependency Injection -- Used for providing services to components. -- Example: `packages/agents/src/ragAgentFactory.ts` - - Injects vector stores, LLM providers, and config settings into pipeline components - - Makes testing easier by allowing mock implementations - - Enables flexible configuration of different agent types - -## Focus Mode Implementation -- Pattern for targeting specific document sources. -- Example: `packages/agents/src/config/agentConfigs.ts` - - Defines different focus modes (Starknet Ecosystem, Cairo Book, etc.) - - Configures different vector stores for each mode - - Customizes prompts and retrieval parameters per mode - - Enables specialized knowledge domains - -## React Hooks for State Management -- Custom hooks for managing UI state and WebSocket communication. -- Example: `packages/ui/lib/hooks/` - - Encapsulates WebSocket connection logic. - - Manages chat history and UI state. - - Handles real-time streaming of responses. - -## Error Handling and Logging -- Centralized error handling with detailed logging. -- Example: `packages/agents/src/utils/logger.ts` - - Configurable log levels based on environment - - Context-rich error messages with timestamps and stack traces - - Proper error propagation through the pipeline -- Used throughout the codebase for consistent error reporting. diff --git a/.cursor/rules/documentation.mdc b/.cursor/rules/documentation.mdc deleted file mode 100644 index bda8aed0..00000000 --- a/.cursor/rules/documentation.mdc +++ /dev/null @@ -1,46 +0,0 @@ ---- -description: Documentation -globs: ---- -# Documentation for Starknet Agent - -## External Resources -- Starknet Documentation: [https://docs.starknet.io](https://docs.starknet.io) - - Referenced in the agent's knowledge base. -- Cairo Book: [https://book.cairo-lang.org](https://book.cairo-lang.org) - - Core resource for Cairo language information. -- MongoDB Atlas Vector Search: [https://www.mongodb.com/docs/atlas/vector-search/](https://www.mongodb.com/docs/atlas/vector-search/) - - Used for vector database implementation. -- Anthropic Claude API: [https://docs.anthropic.com/claude/reference/getting-started-with-the-api](https://docs.anthropic.com/claude/reference/getting-started-with-the-api) - - Used for LLM integration. - -## Internal Documentation -- Architecture Overview: `docs/architecture/README.md` - - Explains the RAG pipeline architecture. -- API Integration Guide: `API_INTEGRATION.md` - - Details how to integrate with the agent's API. -- Contributing Guidelines: `CONTRIBUTING.md` - - Instructions for contributing to the project. - -## Code Documentation -- JSDoc comments are used throughout the codebase, especially in: - - `packages/agents/src/pipeline/`: Documents the RAG pipeline components. - - `packages/agents/src/core/`: Documents core agent functionality. - - `packages/backend/src/websocket/`: Documents WebSocket communication. - -## Configuration Documentation -- Sample configuration: `packages/agents/sample.config.toml` - - Documents available configuration options. -- Environment variables: `.env.example` files - - Documents required environment variables. 
- -## Database Schema -- MongoDB collections structure is documented in: - - `packages/agents/src/db/`: Database interaction code. - - Vector embeddings format and schema. - -## Deployment Documentation -- Docker deployment: `docker-compose.yaml` and related Dockerfiles - - Instructions for containerized deployment. -- Production hosting: `docker-compose.prod-hosted.yml` - - Configuration for production environments. diff --git a/.cursor/rules/imports.mdc b/.cursor/rules/imports.mdc deleted file mode 100644 index 6b2e3e33..00000000 --- a/.cursor/rules/imports.mdc +++ /dev/null @@ -1,128 +0,0 @@ ---- -description: Cairo Imports -globs: *.ts,*.tsx,*.js,*.jsx ---- -# Imports in Cairo Coder - -## External Libraries - -### Backend and Agent Libraries -- `express`: Web server framework. - - Used in: `packages/backend/src/routes/cairocoder.ts` - - Import: `import express, { Router } from 'express';` -- `cors`: CORS middleware for Express. - - Used in: `packages/backend/src/server.ts` - - Import: `import cors from 'cors';` -- `mongodb`: MongoDB client for database operations. - - Used in: `packages/agents/src/db/vectorStore.ts` - - Import: `import { MongoClient } from 'mongodb';` -- `@langchain/core`: LangChain core libraries. - - Used in: `packages/agents/src/core/agentFactory.ts` - - Import: `import { BaseMessage } from '@langchain/core/messages';` - - Import: `import { Embeddings } from '@langchain/core/embeddings';` - - Import: `import { Document } from '@langchain/core/documents';` -- `anthropic`: Anthropic Claude API client. - - Used in: `packages/agents/src/models/` - - Import: `import Anthropic from '@anthropic-ai/sdk';` -- `openai`: OpenAI API client. - - Used in: `packages/agents/src/models/` - - Import: `import OpenAI from 'openai';` -- `@google/generative-ai`: Google AI API client. - - Used in: `packages/agents/src/models/` - - Import: `import { GoogleGenerativeAI } from '@google/generative-ai';` -- `uuid`: For generating unique identifiers. - - Used in: `packages/backend/src/routes/cairocoder.ts` - - Import: `import { v4 as uuidv4 } from 'uuid';` -- `toml`: For parsing TOML configuration files. - - Used in: `packages/agents/src/config/settings.ts` - - Import: `import toml from '@iarna/toml';` - -### Data Ingestion Libraries -- `axios`: HTTP client for downloading documentation. - - Used in: `packages/ingester/src/ingesters/MarkdownIngester.ts` - - Import: `import axios from 'axios';` -- `adm-zip`: For handling ZIP archives. - - Used in: `packages/ingester/src/ingesters/MarkdownIngester.ts` - - Import: `import AdmZip from 'adm-zip';` -- `fs/promises`: Node.js file system promises API. - - Used in: `packages/ingester/src/ingesters/MarkdownIngester.ts` - - Import: `import * as fs from 'fs/promises';` - -## Internal Modules - -### Agent Modules -- `core/pipeline`: RAG pipeline components. - - Used in: `packages/agents/src/core/agentFactory.ts` - - Import: `import { RagPipeline } from './pipeline/ragPipeline';` -- `config`: Configuration management. - - Used in: `packages/agents/src/core/agentFactory.ts` - - Import: `import { getAgentConfig } from '../config/agent';` -- `db`: Database interaction. - - Used in: `packages/agents/src/core/agentFactory.ts` - - Import: `import { VectorStore } from '../db/vectorStore';` -- `types`: Type definitions. - - Used in: `packages/agents/src/core/agentFactory.ts` - - Import: `import { LLMConfig } from '../types';` -- `utils`: Utility functions. 
- - Used in: `packages/backend/src/app.ts` - - Import: `import { logger } from '@cairo-coder/agents/utils/index';` - -### Backend Modules -- `routes`: API route definitions. - - Used in: `packages/backend/src/server.ts` - - Import: `import routes from '../routes';` -- `config`: Server configuration. - - Used in: `packages/backend/src/server.ts` - - Import: `import { initializeLLMConfig } from './config/llm';` - - Import: `import { getPort } from '@cairo-coder/agents/config/settings';` -- `cairocoder`: Main endpoint handler. - - Used in: `packages/backend/src/routes/index.ts` - - Import: `import cairocoderRouter from './cairocoder';` - -### Ingester Modules -- `BaseIngester`: Abstract base class for all ingesters. - - Used in: `packages/ingester/src/ingesters/MarkdownIngester.ts` - - Import: `import { BaseIngester } from '../BaseIngester';` -- `IngesterFactory`: Factory for creating ingesters. - - Used in: `packages/ingester/src/chat/completionsEmbeddings.ts` - - Import: `import { IngesterFactory } from './IngesterFactory';` -- `utils`: Utility functions for ingestion. - - Used in: `packages/ingester/src/ingesters/MarkdownIngester.ts` - - Import: `import { processDocFiles } from '../utils/fileUtils';` - - Import: `import { isInsideCodeBlock, calculateHash } from '../utils/contentUtils';` - -## Common Import Patterns - -### For Backend API Routes -```typescript -import express, { Router } from 'express'; -import { AIMessage, HumanMessage, SystemMessage, BaseMessage } from '@langchain/core/messages'; -import { v4 as uuidv4 } from 'uuid'; -import { - getVectorDbConfig, - logger, - RagAgentFactory, - LLMConfig, - VectorStore, -} from '@cairo-coder/agents'; -``` - -### For Agent Core -```typescript -import { BaseMessage } from '@langchain/core/messages'; -import { Embeddings } from '@langchain/core/embeddings'; -import { getAgentConfig } from '../config/agent'; -import { RagPipeline } from './pipeline/ragPipeline'; -import { VectorStore } from '../db/vectorStore'; -import { LLMConfig } from '../types'; -``` - -### For Ingesters -```typescript -import * as fs from 'fs/promises'; -import * as path from 'path'; -import { Document } from '@langchain/core/documents'; -import { BookChunk, DocumentSource } from '@cairo-coder/agents/types/index'; -import { BaseIngester } from '../BaseIngester'; -import { BookConfig, BookPageDto, ParsedSection } from '../utils/types'; -``` diff --git a/.cursor/rules/navigation.mdc b/.cursor/rules/navigation.mdc deleted file mode 100644 index 7b9f3adc..00000000 --- a/.cursor/rules/navigation.mdc +++ /dev/null @@ -1,105 +0,0 @@ -# Navigation Rules for Cairo Coder Codebase - -## Project Structure Overview - -The Cairo Coder codebase is organized as a monorepo with multiple packages: - -- `packages/agents`: Contains the core agent logic for RAG-based Cairo code generation -- `packages/backend`: Express-based server handling API endpoints for code generation -- `packages/ingester`: Tools for ingesting and processing Cairo documentation for the vector database -- `packages/typescript-config`: Shared TypeScript configuration - -## Key Directories and Files - -### Agent Logic -- `packages/agents/src/core/pipeline`: Contains the RAG pipeline implementation - - `ragPipeline.ts`: Orchestrates the entire RAG process - - `queryProcessor.ts`: Processes and reformulates user queries - - `documentRetriever.ts`: Retrieves relevant documents from vector database - - `answerGenerator.ts`: Generates Cairo code based on retrieved documents -- `packages/agents/src/core`: Core agent 
functionality - - `agentFactory.ts`: Factory for creating RAG agents -- `packages/agents/src/db`: Database interaction logic -- `packages/agents/src/config`: Configuration handling -- `packages/agents/src/utils`: Utility functions -- `packages/agents/src/types`: TypeScript type definitions - -### Backend -- `packages/backend/src/api`: API routes and handlers - - `cairocoder.ts`: Main handler for the Cairo code generation endpoint - - `routes.ts`: API route definitions - - `config.ts`: API configuration -- `packages/backend/src/app.ts`: Express application setup -- `packages/backend/src/server.ts`: Server initialization -- `packages/backend/src/config`: Server configuration - -### Ingestion System -- `packages/ingester/src/`: Source code for the ingester package - - `BaseIngester.ts`: Abstract base class for all ingesters - - `IngesterFactory.ts`: Factory for creating ingesters based on source - - `generateEmbeddings.ts`: Main script for generating embeddings - - `ingesters/`: Source-specific ingester implementations - - `CairoBookIngester.ts`: Ingester for Cairo Book - - `CairoByExampleIngester.ts`: Ingester for Cairo By Example - - `StarknetDocsIngester.ts`: Ingester for Starknet Docs (may be used for Cairo docs) - - `StarknetFoundryIngester.ts`: Ingester for Starknet Foundry - - `OpenZeppelinDocsIngester.ts`: Ingester for OpenZeppelin Docs - - `MarkdownIngester.ts`: Base ingester for Markdown format docs - - `AsciiDocIngester.ts`: Base ingester for AsciiDoc format docs - - `utils/`: Utility functions for ingestion - - `shared.ts`: Shared types and interfaces - -## Navigation Patterns - -1. **Following the RAG Pipeline Flow**: - - Start at `packages/agents/src/core/agentFactory.ts` - - Explore the pipeline components in `packages/agents/src/core/pipeline/` - - See how code generation happens in `packages/agents/src/core/pipeline/answerGenerator.ts` - -2. **Understanding API Endpoints**: - - Start at `packages/backend/src/app.ts` - - Follow through to `packages/backend/src/api/routes.ts` - - Examine the main handler in `packages/backend/src/api/cairocoder.ts` - -3. **Configuration Flow**: - - Look for configuration files in the root directory - - Check `packages/agents/src/config/` for agent configuration - - See how configuration is loaded in backend via `packages/backend/src/config/` - -## Common Navigation Tasks - -- **To understand the RAG process**: Follow the pipeline components in `packages/agents/src/core/pipeline/` -- **To see how user queries are processed**: Start at the API handlers in `packages/backend/src/api/cairocoder.ts` -- **To understand data ingestion**: Check the ingester implementations in `packages/ingester/src/ingesters/` -- **To modify embeddings generation**: Look at `packages/ingester/src/chat/completionsEmbeddings.ts` - -## Key Files for Common Tasks - -### Adding a New Documentation Source -1. Create a new ingester in `packages/ingester/src/ingesters/` -2. Update `IngesterFactory.ts` to include the new source -3. Update the document source types in `packages/agents/src/types/` - -### Modifying the RAG Pipeline -1. Update the relevant component in `packages/agents/src/core/pipeline/` -2. Adjust the pipeline configuration in `packages/agents/src/core/agentFactory.ts` if needed - -### Adding or Modifying API Endpoints -1. Update route definitions in `packages/backend/src/api/routes.ts` -2. Implement handlers in `packages/backend/src/api/cairocoder.ts` or add new handlers - -### Running Ingestion -1. 
Use the script at `packages/ingester/src/chat/completionsEmbeddings.ts` -2. Or run `pnpm generate-embeddings` from the project root - -## Key Endpoints - -- `/chat/completions`: Main endpoint for Cairo code generation - - Accepts POST requests with messages array in OpenAI format - - Returns generated Cairo code - -## Docker and Deployment Files - -- `docker-compose.yml`: Main Docker Compose configuration -- `backend.dockerfile`: Dockerfile for the backend service -- `ingest.dockerfile`: Dockerfile for running ingestion tasks diff --git a/.cursor/rules/project_instructions.mdc b/.cursor/rules/project_instructions.mdc deleted file mode 100644 index 58e60960..00000000 --- a/.cursor/rules/project_instructions.mdc +++ /dev/null @@ -1,73 +0,0 @@ ---- -description: Project Instructions -globs: ---- -# Starknet Agent Project Instructions - -## Overview -- Starknet Agent is an AI-powered search engine specifically designed for the Starknet Ecosystem. -- It uses Retrieval-Augmented Generation (RAG) to provide accurate, source-cited answers to questions about Starknet and Cairo. -- The project is built with TypeScript, Node.js, Express, MongoDB Atlas (vector search), and Next.js. -- Originally forked from Perplexica, adapted for the Starknet ecosystem. - -## Architecture -- Monorepo structure with multiple packages: - - `packages/agents/`: Core RAG pipeline (query processing, document retrieval, answer generation) - - `packages/backend/`: Express server with WebSocket support for real-time streaming - - `packages/ui/`: Next.js frontend application with chat interface - - `packages/ingester/`: Data ingestion tools for documentation sources - - `packages/typescript-config/`: Shared TypeScript configuration - -## RAG Pipeline Flow -1. **Query Processing**: Analyzes and reformulates user queries to improve retrieval -2. **Document Retrieval**: Searches vector database for relevant documents using cosine similarity -3. **Answer Generation**: Uses LLMs to generate comprehensive responses with source citations -4. **Real-time Streaming**: Delivers responses to the UI as they're generated - -## Focus Modes -- **Starknet Ecosystem**: Searches across all indexed resources -- **Cairo Book**: Focuses on the Cairo programming language book -- **Starknet Docs**: Targets official Starknet documentation -- **Starknet Foundry**: Searches Starknet Foundry documentation -- **Cairo By Example**: Provides examples from Cairo By Example resource -- **OpenZeppelin Docs**: Searches OpenZeppelin's Starknet documentation - -## Ingestion System -- The ingester package handles downloading, processing, and storing documentation. -- Supported documentation sources: - - Cairo Book - - Starknet Docs - - Starknet Foundry - - Cairo By Example - - OpenZeppelin Docs -- Modular architecture with a `BaseIngester` abstract class and source-specific implementations. -- Follows the template method pattern for standardized ingestion process. -- Run ingestion with `pnpm generate-embeddings` or `pnpm generate-embeddings:yes` from the project root. -- Weekly automated embedding generation via GitHub Actions. 
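
To illustrate the template-method flow, here is a minimal, hypothetical sketch. The `SketchIngester` base class and its hook names are stand-ins for illustration only; the real abstract API is defined in `BaseIngester.ts`.

```typescript
import { Document } from '@langchain/core/documents';

// Simplified, hypothetical stand-in for the real BaseIngester, showing the
// template-method flow: a fixed workflow with source-specific steps.
abstract class SketchIngester {
  // Template method: Download → Extract → Process (embedding/storage omitted here).
  async ingest(): Promise<Document[]> {
    const pages = await this.downloadDocs();
    const sections = this.extractSections(pages);
    return this.createChunks(sections);
  }
  protected abstract downloadDocs(): Promise<string[]>;
  protected abstract extractSections(pages: string[]): string[];
  protected abstract createChunks(sections: string[]): Document[];
}

// Hypothetical source-specific ingester built on the sketch base class.
class MyDocsIngester extends SketchIngester {
  protected async downloadDocs(): Promise<string[]> {
    return ['# My Docs\nSome markdown page...']; // e.g. fetched with axios/adm-zip
  }
  protected extractSections(pages: string[]): string[] {
    return pages.flatMap((page) => page.split(/\n(?=#)/));
  }
  protected createChunks(sections: string[]): Document[] {
    return sections.map(
      (pageContent, chunkNumber) =>
        new Document({ pageContent, metadata: { source: 'my_docs', chunkNumber } }),
    );
  }
}
```

A real ingester extends `BaseIngester` instead of the sketch base and is registered in `IngesterFactory` so it can be selected by documentation source.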
- -## Development Workflow -- Use `pnpm dev` or `turbo dev` to start the development server -- MongoDB Atlas with vector search capabilities required for embeddings storage -- Configuration is managed through TOML files (copy `sample.config.toml` to `config.toml`) -- Docker is used for containerization and deployment -- Add new documentation sources by extending the `BaseIngester` class and registering in `IngesterFactory` - -## Commands -- **Build**: `pnpm build`, `pnpm --filter @cairo-coder/agents build` -- **Dev**: `pnpm dev` (starts all services with auto-reload) -- **Test**: `pnpm --filter @cairo-coder/agents test` -- **Single test**: `pnpm --filter @cairo-coder/agents test -- -t "test name pattern"` -- **Type check**: `pnpm --filter @cairo-coder/backend check-types` - -## Testing -- Jest is used for all testing -- Run tests with `pnpm test` or `turbo test` -- Test files are located in `__tests__/` directories -- Mock external dependencies, especially LLM and database calls -- Test each ingester implementation separately with mocked vector stores - -## Deployment -- Docker Compose is used for deployment -- Production configuration in `docker-compose.prod-hosted.yml` -- Environment variables should be set according to `.env.example` files -- Ingestion can be run as a separate process using `ingest.dockerfile` diff --git a/.gitignore b/.gitignore index 9325d5f3..4aaaa8e1 100644 --- a/.gitignore +++ b/.gitignore @@ -24,7 +24,6 @@ packages/ui/.env # Config files (legacy) config.toml -packages/backend/config.toml # Log files logs/ diff --git a/API_DOCUMENTATION.md b/API_DOCUMENTATION.md new file mode 100644 index 00000000..b34d729b --- /dev/null +++ b/API_DOCUMENTATION.md @@ -0,0 +1,251 @@ +# Cairo Coder API + +This document describes the publicly exposed HTTP API served by the Cairo Coder backend. The API is intentionally compatible with OpenAI's Chat Completions interface, with a few Cairo-specific extensions. + +## Base URL + +The service listens on `http://:3001` by default (see `uvicorn` invocation in `python/src/cairo_coder/server/app.py`). Adjust for your environment as needed. + +## Authentication + +No authentication is enforced by the server. Add your own gateway (API key, OAuth, etc.) if you are exposing Cairo Coder outside a trusted network. + +## Common Conventions + +- **Content type**: All non-streaming responses use `application/json`. Streaming responses use `text/event-stream`. +- **Charset**: UTF-8. +- **Request body**: JSON-encoded payloads. +- **Date & time**: Epoch seconds (`created` fields). +- **Errors**: Consistent JSON envelope described in [Error Handling](#error-handling). + +### Headers + +- `Content-Type: application/json` — required for JSON POSTs. +- `Accept: application/json` or `text/event-stream` depending on `stream` usage. +- `x-mcp-mode: true` or `mcp: true` — optional. When present (any value), the request runs in **MCP mode**, returning raw documentation snippets instead of synthesized answers. See [MCP Mode](#mcp-mode). + +## Health Check + +### `GET /` + +Returns a basic readiness signal. + +**Response** `200 OK` + +```json +{ "status": "ok" } +``` + +## Agent Directory + +### `GET /v1/agents` + +Lists every agent registered in Cairo Coder. 
+ +**Response** `200 OK` + +```json +[ + { + "id": "cairo-coder", + "name": "Cairo Coder", + "description": "General Cairo programming assistant", + "sources": [ + "cairo_book", + "starknet_docs", + "starknet_foundry", + "cairo_by_example", + "openzeppelin_docs", + "corelib_docs", + "scarb_docs" + ] + }, + { + "id": "scarb-assistant", + "name": "Scarb Assistant", + "description": "Specialized assistant for Scarb build tool", + "sources": ["scarb_docs"] + } +] +``` + +`sources` values correspond to the internal `DocumentSource` enum: + +| Source ID | Description | +| ------------------- | ------------------------------------------ | +| `cairo_book` | Cairo book reference | +| `starknet_docs` | Starknet official documentation | +| `starknet_foundry` | Starknet Foundry resources | +| `cairo_by_example` | Cairo by Example guides | +| `openzeppelin_docs` | OpenZeppelin Cairo contracts documentation | +| `corelib_docs` | Cairo core library docs | +| `scarb_docs` | Scarb package manager documentation | + +## Chat Completions + +Cairo Coder mirrors OpenAI's Chat Completions API. Three route variants exist for backward compatibility, sharing the same payload structure and behaviour: + +- `POST /v1/agents/{agent_id}/chat/completions` — target a specific agent. +- `POST /v1/chat/completions` — default agent (`cairo-coder`). +- `POST /chat/completions` — legacy alias of the above. + +### Request Schema + +```json +{ + "model": "cairo-coder", + "messages": [ + { "role": "system", "content": "Optional instructions" }, + { "role": "user", "content": "Your Cairo/StarkNet question" } + ], + "stream": false, + "temperature": 0.2, + "max_tokens": 1024, + "top_p": 1.0, + "n": 1, + "stop": null, + "presence_penalty": 0, + "frequency_penalty": 0, + "user": "optional-tracking-id" +} +``` + +Field notes: + +- `messages` **must** contain at least one entry. The final message must have `role: "user"`; otherwise the server returns `400`. +- Roles accepted: `system`, `user`, `assistant`. Setting `name` on a message is optional. +- Only `stream`, `messages`, and the optional MCP headers influence behaviour today. Other OpenAI fields are accepted for compatibility but currently ignored. +- `model` defaults to `"cairo-coder"`. When using the agent-specific endpoint you can still pass the field; the server ignores mismatches and uses the `agent_id` route parameter. +- Set `stream: true` to receive Server-Sent Events (SSE) chunks. The default is `false` (single JSON response). +- `history` is implied by providing earlier `assistant`/`system` messages in `messages`; the backend keeps only the last 10 history items when rebuilding context. + +### Non-Streaming Response + +**Request** + +```bash +curl -X POST http://localhost:3001/v1/agents/cairo-coder/chat/completions \ + -H 'Content-Type: application/json' \ + -d '{ + "messages": [ + {"role": "system", "content": "You are a Cairo mentor."}, + {"role": "user", "content": "How do I define a storage variable?"} + ] + }' +``` + +**Response** `200 OK` + +```json +{ + "id": "fa43012d-2d0c-4ad2-82c9-2e1ec7aaa43d", + "object": "chat.completion", + "created": 1718123456, + "model": "cairo-coder", + "choices": [ + { + "index": 0, + "message": { + "role": "assistant", + "content": "" + }, + "finish_reason": "stop" + } + ], + "usage": { + "prompt_tokens": 123, + "completion_tokens": 512, + "total_tokens": 635 + } +} +``` + +`usage` fields are aggregated from all DSPy sub-modules. If usage data is unavailable the server returns zeros. 
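
Because the endpoint is wire-compatible with OpenAI's Chat Completions, any OpenAI client library can be pointed at it. A minimal TypeScript sketch using the official `openai` npm package follows; the placeholder API key is arbitrary, since the server itself enforces no authentication.

```typescript
import OpenAI from 'openai';

const client = new OpenAI({
  baseURL: 'http://localhost:3001/v1', // default Cairo Coder port
  apiKey: 'not-used',                  // no auth enforced by the server
});

async function ask(question: string) {
  const completion = await client.chat.completions.create({
    model: 'cairo-coder',
    messages: [
      { role: 'system', content: 'You are a Cairo mentor.' },
      { role: 'user', content: question },
    ],
  });
  console.log(completion.choices[0].message.content);
  console.log(completion.usage); // prompt/completion/total token counts
  return completion;
}

ask('How do I define a storage variable?').catch(console.error);
```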
+ +### Streaming Response + +Set `"stream": true` to receive SSE chunks that match OpenAI's `chat.completion.chunk` format. Each SSE frame is emitted as `data: {JSON}\n\n`, ending with `data: [DONE]\n\n`. + +**Request** + +```bash +curl -N -X POST http://localhost:3001/v1/chat/completions \ + -H 'Content-Type: application/json' \ + -d '{ + "stream": true, + "messages": [ + {"role": "user", "content": "Explain felt252 arithmetic."} + ] + }' +``` + +**Stream** (abridged) + +```json +data: {"id":"...","object":"chat.completion.chunk","created":1718123456,"model":"cairo-coder","choices":[{"index":0,"delta":{"role":"assistant"},"finish_reason":null}]} + +data: {"id":"...","object":"chat.completion.chunk","created":1718123456,"model":"cairo-coder","choices":[{"index":0,"delta":{"content":"Felt252 is the base field..."},"finish_reason":null}]} + +data: {"id":"...","object":"chat.completion.chunk","created":1718123456,"model":"cairo-coder","choices":[{"index":0,"delta":{},"finish_reason":"stop"}]} + +data: [DONE] +``` + +### Agent Selection + +`POST /v1/agents/{agent_id}/chat/completions` validates that `{agent_id}` exists. Unknown IDs return `404 Not Found` with an OpenAI-style error payload. When the `agent_id` is omitted (`/v1/chat/completions` or `/chat/completions`) the server falls back to `cairo-coder`. + +## MCP Mode + +Setting either `mcp` or `x-mcp-mode` headers triggers **Model Context Protocol mode**, bypassing the LLM synthesiser: + +- Non-streaming responses still use the standard `chat.completion` envelope, but `choices[0].message.content` contains curated documentation blocks instead of prose answers. +- Streaming responses emit the same SSE wrapper; the payloads contain the formatted documentation as incremental `delta.content` strings. +- MCP mode does not consume generation tokens (`usage.completion_tokens` reflects only retrieval/query processing). + +Example non-streaming request: + +```bash +curl -X POST http://localhost:3001/v1/chat/completions \ + -H 'Content-Type: application/json' \ + -H 'x-mcp-mode: true' \ + -d '{"messages": [{"role": "user", "content": "Selectors"}]}' +``` + +## Error Handling + +All errors return HTTP status codes with a JSON envelope compatible with OpenAI: + +```json +{ + "error": { + "message": "Agent 'foo' not found", + "type": "invalid_request_error", + "code": "agent_not_found", + "param": "agent_id" + } +} +``` + +Common cases: + +- `400 Bad Request` — validation failures (empty `messages`, last message not from `user`). +- `404 Not Found` — unknown agent id. +- `500 Internal Server Error` — unexpected backend issues. + +## Versioning & Compatibility + +- Current API version: `1.0.0` (see FastAPI metadata). +- The server aims to stay wire-compatible with OpenAI's Chat Completions (`/v1/chat/completions`). +- `POST /chat/completions` exists for older clients; prefer `/v1/…` for new integrations. + +## Production Checklist + +1. Front the service with TLS and authentication if exposed publicly. +2. Monitor latency and token usage via the returned `usage` object and application logs. +3. Warm up the vector store connection (handled automatically by the FastAPI lifespan event) before routing traffic. +4. If you swap or add agents, ensure they are registered in `python/src/cairo_coder/agents/registry.py`; they appear immediately in `/v1/agents` responses. + +## Support + +For bugs or feature requests open an issue in the Cairo Coder repository. Mention the API version, endpoint, and reproduction steps. 
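
As a companion to the curl examples above, here is a minimal TypeScript sketch of consuming the SSE stream with the official `openai` npm package, assuming the default local port and no gateway authentication.

```typescript
import OpenAI from 'openai';

const client = new OpenAI({ baseURL: 'http://localhost:3001/v1', apiKey: 'not-used' });

async function streamAnswer(question: string): Promise<void> {
  const stream = await client.chat.completions.create({
    model: 'cairo-coder',
    messages: [{ role: 'user', content: question }],
    stream: true,
  });

  // Each iteration yields one chat.completion.chunk, as shown in the stream example above.
  for await (const chunk of stream) {
    process.stdout.write(chunk.choices[0]?.delta?.content ?? '');
  }
  process.stdout.write('\n');
}

streamAnswer('Explain felt252 arithmetic.').catch(console.error);
```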
diff --git a/CLAUDE.md b/CLAUDE.md index be7e88af..332a6980 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -23,7 +23,6 @@ Cairo Coder is an open-source Cairo language code generation service using Retri - `pnpm test` - Run all tests across packages - `pnpm --filter @cairo-coder/agents test` - Run tests for specific package - `pnpm --filter @cairo-coder/agents test -- -t "test name"` - Run specific test -- `pnpm --filter @cairo-coder/backend check-types` - Type check specific package ### Documentation Ingestion @@ -40,7 +39,6 @@ Cairo Coder is an open-source Cairo language code generation service using Retri ### Monorepo Structure - **packages/agents**: Core RAG pipeline orchestrating query processing, document retrieval, and code generation -- **packages/backend**: Express API server providing OpenAI-compatible endpoints - **packages/ingester**: Documentation processing system using template method pattern - **packages/typescript-config**: Shared TypeScript configuration diff --git a/MIGRATION_GUIDE.md b/MIGRATION_GUIDE.md deleted file mode 100644 index 081187ed..00000000 --- a/MIGRATION_GUIDE.md +++ /dev/null @@ -1,168 +0,0 @@ -# Configuration Migration Guide - -This guide helps you migrate from the old multi-file configuration system to the new centralized environment variable approach. - -## What Changed - -### Before (Multiple Config Files) - -- `python/.env` - Python backend secrets -- `python/config.toml` - Python backend configuration -- `packages/agents/config.toml` - TypeScript ingester configuration -- `packages/backend/.env` - Backend service variables -- Manual host switching between `localhost` and `postgres` - -### After (Single Source of Truth) - -- `.env` - All configuration in one place -- Environment variables take precedence -- Automatic host detection for Docker vs local development -- No secrets in Docker images - -## Migration Steps - -1. **Backup your existing configuration** - - ```bash - cp python/.env python/.env.backup - cp python/config.toml python/config.toml.backup - cp packages/agents/config.toml packages/agents/config.toml.backup - ``` - -2. **Create the new .env file** - - ```bash - cp .env.example .env - ``` - -3. **Migrate your credentials** - - Copy these values from your old config files to the new `.env`: - - ```bash - # From [VECTOR_DB] section - POSTGRES_USER="your-value" - POSTGRES_PASSWORD="your-value" - POSTGRES_DB="your-value" - POSTGRES_HOST="postgres" # or "localhost" for local dev - POSTGRES_PORT="5432" - POSTGRES_TABLE_NAME="documents" - - # From [GENERAL] section - PORT="3001" - SIMILARITY_MEASURE="cosine" - - # From [PROVIDERS] section - DEFAULT_CHAT_PROVIDER="gemini" - DEFAULT_CHAT_MODEL="Gemini Flash 2.5" - DEFAULT_FAST_CHAT_PROVIDER="gemini" - DEFAULT_FAST_CHAT_MODEL="Gemini Flash 2.5" - DEFAULT_EMBEDDING_PROVIDER="openai" - DEFAULT_EMBEDDING_MODEL="Text embedding 3 large" - - # From [API_KEYS] section - OPENAI_API_KEY="your-key" - ANTHROPIC_API_KEY="your-key" - GEMINI_API_KEY="your-key" - DEEPSEEK_API_KEY="your-key" - GROQ_API_KEY="your-key" - - # From [VERSIONS] section - STARKNET_FOUNDRY_VERSION="0.38.0" - SCARB_VERSION="2.11.2" - - # From python/.env (if using LangSmith) - LANGSMITH_API_KEY="your-key" - ``` - -4. **Remove old configuration files** - - ```bash - rm python/.env python/config.toml - rm packages/agents/config.toml - rm packages/backend/.env - ``` - -5. 
**Update your Docker images** - ```bash - docker compose build --no-cache - ``` - -## Key Differences - -### Database Host Configuration - -**Before:** Manual switching in config files - -```toml -# For local development -POSTGRES_HOST = "localhost" - -# For Docker -POSTGRES_HOST = "postgres" -``` - -**After:** Automatic detection - -```bash -# .env file always uses "postgres" -POSTGRES_HOST="postgres" - -# For local development, override with: -export POSTGRES_HOST=localhost -``` - -### API Keys - -**Before:** Scattered across multiple files - -- `python/.env`: Some keys -- `packages/agents/config.toml`: Other keys - -**After:** All in one `.env` file - -```bash -# All API keys in one place -OPENAI_API_KEY="" -ANTHROPIC_API_KEY="" -GEMINI_API_KEY="" -``` - -## Troubleshooting - -### "Config file not found" warnings - -These warnings are normal and can be ignored. The system now uses environment variables by default. - -### Database connection issues - -Ensure your `.env` file has the correct database credentials and that the `POSTGRES_HOST` is set correctly: - -- Docker: `POSTGRES_HOST="postgres"` -- Local: `export POSTGRES_HOST=localhost` - -### Missing API keys - -Check that all required API keys are present in your `.env` file. The application will show clear error messages for missing keys. - -## Python-Specific Changes - -### Before - -- `python/config.toml` - Python-specific configuration -- `ConfigManager.load_config(path)` - Could load from custom paths -- `toml` dependency required - -### After - -- `ConfigManager.load_config()` - No parameters, environment only -- No TOML file support -- `toml` dependency removed from pyproject.toml - -## Benefits of the New System - -1. **Security**: No secrets in Docker images or config files -2. **Simplicity**: One `.env` file to configure everything -3. **Flexibility**: Easy to override with environment variables -4. **Consistency**: Same configuration approach across all services -5. **No file I/O**: Faster startup, no file permission issues diff --git a/README.md b/README.md index 95b3cd74..9364ab31 100644 --- a/README.md +++ b/README.md @@ -106,7 +106,7 @@ curl -X POST http://localhost:3001/v1/chat/completions \ }' ``` -For a full list of parameters and agent-specific endpoints, see the [API Documentation](./packages/backend/API_DOCUMENTATION.md). +For a full list of parameters and agent-specific endpoints, see the [API Documentation](./API_DOCUMENTATION.md). ## Architecture @@ -119,7 +119,7 @@ The project is organized as a monorepo with multiple packages: - **python/**: The core RAG agent and API server implementation using DSPy and FastAPI. - **packages/ingester/**: (TypeScript) Data ingestion tools for Cairo documentation sources. - **packages/typescript-config/**: Shared TypeScript configuration. -- **(Legacy)** `packages/agents` & `packages/backend`: The original Langchain-based TypeScript implementation. +- **(Legacy)** `packages/agents`: The original Langchain-based TypeScript implementation. ### RAG Pipeline (Python/DSPy) diff --git a/README.old.md b/README.old.md deleted file mode 100644 index 0501e30f..00000000 --- a/README.old.md +++ /dev/null @@ -1,63 +0,0 @@ -# Legacy TypeScript Backend Instructions - -**Note:** These instructions are for the original TypeScript backend, which has been superseded by the Python implementation. The Python backend is now the recommended and default service. Use these instructions only if you need to run the legacy service for a specific reason. - -## Installation (TypeScript) - -1. 
**Prerequisites**: Ensure Docker is installed and running. - -2. **Clone the Repository**: - - ```bash - git clone https://github.com/KasarLabs/cairo-coder.git - cd cairo-coder - ``` - -3. **Install Dependencies**: - - ```bash - pnpm install - ``` - -4. **Configure Backend (`packages/agents/config.toml`)**: - Inside the `packages/agents` package, copy `sample.config.toml` to `config.toml`. Fill in your OpenAI or Gemini API keys. - -5. **Configure PostgreSQL Database**: - - **a. Database Container Initialization (`.env` file):** - Create a `.env` file in the root directory with the following PostgreSQL configuration: - - ```toml - POSTGRES_USER="YOUR_POSTGRES_USER" - POSTGRES_PASSWORD="YOUR_POSTGRES_PASSWORD" - POSTGRES_DB="YOUR_POSTGRES_DB" - ``` - - **b. Application Connection Settings (`config.toml` file):** - In `packages/agents/config.toml`, configure the database connection section to match the `.env` file: - - ```toml - [VECTOR_DB] - POSTGRES_USER="YOUR_POSTGRES_USER" - POSTGRES_PASSWORD="YOUR_POSTGRES_PASSWORD" - POSTGRES_DB="YOUR_POSTGRES_DB" - POSTGRES_HOST="postgres" - POSTGRES_PORT="5432" - ``` - -6. **Configure LangSmith (Optional)**: - Create a `.env` file in `packages/backend` with your LangSmith credentials. See the main `README.md` for more details on the variables. - -7. **Run the Application**: - ```bash - docker compose up postgres backend - ``` - The API will be available at `http://localhost:3001/v1/chat/completions`. - -## Running the Ingester (TypeScript) - -After you have the main application running, run the ingester to process and embed documentation from various sources. - -```bash -docker compose --profile ingester up -``` diff --git a/backend.old.dockerfile b/backend.old.dockerfile deleted file mode 100644 index 06d9f63c..00000000 --- a/backend.old.dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM node:20 AS base -ENV PNPM_HOME="/pnpm" -ENV PATH="$PNPM_HOME:$PATH" -RUN corepack enable - -WORKDIR /app - -# Copy root workspace files -COPY pnpm-workspace.yaml ./ -COPY package.json ./ -COPY pnpm-lock.yaml ./ -COPY turbo.json ./ - -# Copy backend & agents packages -COPY packages/backend ./packages/backend -COPY packages/agents ./packages/agents - -# Copy shared TypeScript config -COPY packages/typescript-config ./packages/typescript-config - -RUN mkdir /app/data && \ - pnpm install --frozen-lockfile && \ - pnpm install -g turbo - -CMD ["turbo", "start"] diff --git a/ingester.dockerfile b/ingester.dockerfile index 15239317..2475f16d 100644 --- a/ingester.dockerfile +++ b/ingester.dockerfile @@ -11,8 +11,7 @@ COPY package.json ./ COPY pnpm-lock.yaml ./ COPY turbo.json ./ -# Copy backend & agents packages -COPY packages/backend ./packages/backend +# Copy ingester & agents packages COPY packages/ingester ./packages/ingester COPY packages/agents ./packages/agents diff --git a/package.json b/package.json index be783c99..cf696f4c 100644 --- a/package.json +++ b/package.json @@ -4,9 +4,7 @@ "version": "0.2.1", "scripts": { "build": "turbo run build", - "dev": "turbo run dev", "lint": "turbo run lint", - "start": "turbo run start", "generate-embeddings": "turbo run generate-embeddings", "generate-embeddings:yes": "turbo run generate-embeddings:yes", "clean": "find packages -type d -name 'dist' -exec rm -rf {} +; find packages -type d -name '.turbo' -exec rm -rf {} +", diff --git a/packages/backend/API_DOCUMENTATION.md b/packages/backend/API_DOCUMENTATION.md deleted file mode 100644 index a3c73bf2..00000000 --- a/packages/backend/API_DOCUMENTATION.md +++ /dev/null @@ -1,224 
+0,0 @@ -# Cairo Coder API Documentation - -## Overview - -The Cairo Coder backend provides OpenAI-compatible chat completion endpoints with support for multiple specialized agents. Each agent can be configured with specific documentation sources, prompts, and parameters. - -## Base URL - -```text -http://localhost:3001/v1 -``` - -## Endpoints - -### 1. Legacy Chat Completions (Default Agent) - -**Endpoint:** `POST /v1/chat/completions` - -This endpoint maintains backward compatibility and uses the default "Cairo Coder" agent. - -**Request Body:** - -```json -{ - "model": "cairo-coder", - "messages": [ - { - "role": "user", - "content": "How do I create a struct in Cairo?" - } - ], - "stream": false, - "temperature": 0.7 -} -``` - -**Response:** - -```json -{ - "id": "chatcmpl-123456", - "object": "chat.completion", - "created": 1234567890, - "model": "cairo-coder", - "choices": [ - { - "index": 0, - "message": { - "role": "assistant", - "content": "Here's how to create a struct in Cairo..." - }, - "finish_reason": "stop" - } - ], - "usage": { - "prompt_tokens": 10, - "completion_tokens": 50, - "total_tokens": 60 - } -} -``` - -### 2. List Available Agents - -**Endpoint:** `GET /v1/agents` - -Returns a list of all available agents with their configurations. - -**Response:** - -```json -{ - "agents": [ - { - "id": "cairo-coder", - "name": "Cairo Coder", - "description": "Default Cairo language and smart contract assistant with full documentation access", - "pipeline": "rag", - "sources": [ - "CAIRO_BOOK", - "CAIRO_BY_EXAMPLE", - "STARKNET_FOUNDRY", - "CORELIB_DOCS", - "OPENZEPPELIN_DOCS", - "SCARB_DOCS" - ] - }, - { - "id": "scarb-assistant", - "name": "Scarb Assistant", - "description": "Specialized Scarb build tool assistance", - "pipeline": "rag", - "sources": ["SCARB_DOCS"] - } - ], - "total": 3 -} -``` - -### 3. Agent-Specific Chat Completions - -**Endpoint:** `POST /v1/agents/:agentId/chat/completions` - -Use a specific agent for chat completions. - -**Parameters:** - -- `agentId` (path parameter): The ID of the agent to use (e.g., "cairo-coder", "scarb-assistant") - -**Request Body:** Same as legacy endpoint - -#### Example: Cairo Coder - -```bash -curl -X POST http://localhost:3001/v1/agents/cairo-coder/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "messages": [ - {"role": "user", "content": "How do I create a Vec in Cairo?"} - ], - "stream": false - }' -``` - -#### Example: Scarb Assistant - -```bash -curl -X POST http://localhost:3001/v1/agents/scarb-assistant/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "messages": [ - {"role": "user", "content": "How do I add a dependency in Scarb?"} - ], - "stream": false - }' -``` - -## Streaming Responses - -All chat completion endpoints support streaming responses by setting `"stream": true` in the request body. When streaming is enabled, the response will be sent as Server-Sent Events (SSE). 
- -**Example:** - -```bash -curl -X POST http://localhost:3001/v1/agents/cairo-coder/chat/completions \ - -H "Content-Type: application/json" \ - -d '{ - "messages": [{"role": "user", "content": "Explain Cairo arrays"}], - "stream": true - }' -``` - -**Streaming Response Format:** - -```text -data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1234567890,"model":"cairo-coder","choices":[{"index":0,"delta":{"content":"Arrays in Cairo"},"finish_reason":null}]} - -data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1234567890,"model":"cairo-coder","choices":[{"index":0,"delta":{"content":" are..."},"finish_reason":null}]} - -data: {"id":"chatcmpl-123","object":"chat.completion.chunk","created":1234567890,"model":"cairo-coder","choices":[{"index":0,"delta":{},"finish_reason":"stop"}],"usage":{"prompt_tokens":10,"completion_tokens":20,"total_tokens":30}} - -data: [DONE] -``` - -## MCP Mode - -All endpoints support Model Context Protocol (MCP) mode for retrieving raw documentation chunks without LLM generation. Enable MCP mode by adding the header `x-mcp-mode: true` or `mcp: true`. - -**Example:** - -```bash -curl -X POST http://localhost:3001/v1/agents/scarb-assistant/chat/completions \ - -H "Content-Type: application/json" \ - -H "x-mcp-mode: true" \ - -d '{ - "messages": [{"role": "user", "content": "Scarb workspace configuration"}] - }' -``` - -In MCP mode, the response includes a `sources` field with retrieved documentation chunks. - -## Error Handling - -All endpoints return errors in OpenAI-compatible format: - -```json -{ - "error": { - "message": "Error description", - "type": "error_type", - "param": "parameter_name", - "code": "error_code" - } -} -``` - -Common error types: - -- `invalid_request_error`: Invalid request parameters -- `server_error`: Internal server error -- `rate_limit_error`: Rate limit exceeded - -HTTP Status Codes: - -- `200`: Success -- `400`: Bad Request -- `404`: Agent not found -- `429`: Rate limit exceeded -- `500`: Internal server error - -## Token Usage - -All responses include token usage information in the `usage` field. Additionally, the total token count is returned in the `x-total-tokens` response header. - -## Agent Configuration - -Agents can be configured with: - -- **Sources**: Specific documentation sources to search -- **Prompts**: Custom prompts for query processing and response generation -- **Parameters**: Search parameters like `maxSourceCount` and `similarityThreshold` -- **Pipeline**: Processing pipeline type (`rag`, `mcp`, or custom) - -Agent configurations are loaded from TOML files and can be extended or modified without code changes. 
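
To make that shape concrete, here is an illustrative TypeScript sketch of a single agent entry. Only `maxSourceCount`, `similarityThreshold`, the source IDs, and the `rag`/`mcp` pipeline values come from the description above; the remaining field names are assumptions, not the real schema.

```typescript
// Illustrative only — the actual agent schema lives in the TOML configuration files.
interface AgentConfig {
  id: string;                        // e.g. 'scarb-assistant'
  name: string;
  description: string;
  sources: string[];                 // documentation sources, e.g. ['SCARB_DOCS']
  prompts?: {
    queryProcessing?: string;        // custom prompt for query reformulation
    generation?: string;             // custom prompt for response generation
  };
  parameters?: {
    maxSourceCount?: number;         // maximum number of retrieved chunks
    similarityThreshold?: number;    // minimum similarity for a chunk to be kept
  };
  pipeline: 'rag' | 'mcp' | string;  // processing pipeline type
}
```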
diff --git a/packages/backend/__tests__/server.test.ts b/packages/backend/__tests__/server.test.ts deleted file mode 100644 index d2bc7f9b..00000000 --- a/packages/backend/__tests__/server.test.ts +++ /dev/null @@ -1,118 +0,0 @@ -import { createApplication } from '../src/server'; -import { Container } from '../src/config/context'; -import express from 'express'; -import { Server } from 'http'; -import supertest from 'supertest'; - -// Mock the LLM config initialization -jest.mock('../src/config/llm', () => ({ - initializeLLMConfig: jest.fn().mockResolvedValue({ - defaultLLM: {}, - fastLLM: {}, - embeddings: {}, - }), -})); - -// Mock config to avoid the getStarknetFoundryVersion issue -jest.mock('@cairo-coder/agents/config/settings', () => ({ - getPort: jest.fn().mockReturnValue(3001), - getStarknetFoundryVersion: jest.fn().mockReturnValue('0.1.0'), - getScarbVersion: jest.fn().mockReturnValue('0.1.0'), - getSimilarityMeasure: jest.fn().mockReturnValue('cosine'), - getVectorDbConfig: jest.fn().mockReturnValue({ - POSTGRES_USER: 'test', - POSTGRES_PASSWORD: 'test', - POSTGRES_DB: 'test', - POSTGRES_HOST: 'localhost', - POSTGRES_PORT: '5432', - }), - getOpenaiApiKey: jest.fn().mockReturnValue('test-key'), - getAnthropicApiKey: jest.fn().mockReturnValue(''), - getGeminiApiKey: jest.fn().mockReturnValue('test-key'), - getGroqApiKey: jest.fn().mockReturnValue(''), - getDeepseekApiKey: jest.fn().mockReturnValue(''), - getHostedModeConfig: jest.fn().mockReturnValue({ - DEFAULT_CHAT_PROVIDER: 'gemini', - DEFAULT_CHAT_MODEL: 'Gemini Flash 2.5', - DEFAULT_FAST_CHAT_PROVIDER: 'gemini', - DEFAULT_FAST_CHAT_MODEL: 'Gemini Flash 2.5', - DEFAULT_EMBEDDING_PROVIDER: 'openai', - DEFAULT_EMBEDDING_MODEL: 'Text embedding 3 large', - }), -})); - -// Mock HTTP handling to avoid actual initialization -jest.mock('../src/config/http', () => ({ - initializeHTTP: jest.fn(), -})); - -describe('Server', () => { - let container: Container; - let server: Server; - - beforeEach(() => { - // Reset container instance - (Container as any).instance = undefined; - container = Container.getInstance(); - - // Set up container with minimal configuration - container.setContext({ - config: { - port: 3001, - models: { - defaultLLM: {} as any, - fastLLM: {} as any, - embeddings: {} as any, - }, - cors: { - origin: '*', - }, - }, - }); - }); - - afterEach(() => { - // Close server if it's running - if (server) { - server.close(); - } - }); - - it('should create an HTTP server and container', async () => { - // Act - const result = await createApplication(); - server = result.server; - - // Assert - expect(result.server).toBeDefined(); - expect(result.container).toBeDefined(); - expect(result.container).toBe(Container.getInstance()); - }); - - it('should set up CORS', async () => { - // Arrange - const result = await createApplication(); - server = result.server; - const app = Container.getInstance().getContext().app as express.Express; - - // Act - Send request with Origin header - const response = await supertest(app) - .options('/') - .set('Origin', 'http://localhost:3000') - .set('Access-Control-Request-Method', 'GET'); - - // Assert - CORS headers should be present - expect(response.headers['access-control-allow-origin']).toBeDefined(); - }); - - it('should set up the correct Container context', async () => { - // Act - await createApplication(); - const context = Container.getInstance().getContext(); - - // Assert - expect(context.app).toBeDefined(); - expect(context.config).toBeDefined(); - 
expect(context.config.port).toBe(3001); - }); -}); diff --git a/packages/backend/__tests__/types/context.test.ts b/packages/backend/__tests__/types/context.test.ts deleted file mode 100644 index 4252a98c..00000000 --- a/packages/backend/__tests__/types/context.test.ts +++ /dev/null @@ -1,85 +0,0 @@ -import { Container } from '../../src/config/context'; -import { Express } from 'express'; -import { ServerConfig, ServerContext } from '../../src/types'; - -describe('Container', () => { - // Clear the Container instance before each test - beforeEach(() => { - // Access the private static instance property using type assertion - (Container as any).instance = undefined; - }); - - it('should be a singleton', () => { - // Get two instances and check they are the same object - const instance1 = Container.getInstance(); - const instance2 = Container.getInstance(); - - expect(instance1).toBe(instance2); - }); - - it('should set and get context', () => { - // Arrange - const container = Container.getInstance(); - const mockConfig: ServerConfig = { - port: 3000, - models: { - defaultLLM: {} as any, - fastLLM: {} as any, - embeddings: {} as any, - }, - cors: { - origin: '*', - }, - }; - - const partialContext: Partial = { - config: mockConfig, - }; - - // Act - container.setContext(partialContext); - const context = container.getContext(); - - // Assert - expect(context.config).toBe(mockConfig); - }); - - it('should merge partial context with existing context', () => { - // Arrange - const container = Container.getInstance(); - const initialConfig: ServerConfig = { - port: 3000, - models: { - defaultLLM: {} as any, - fastLLM: {} as any, - embeddings: {} as any, - }, - cors: { - origin: '*', - }, - }; - - // Set initial context - container.setContext({ config: initialConfig }); - - // Create mock Express app - const mockApp = {} as Express; - - // Act - add app to context - container.setContext({ app: mockApp }); - const context = container.getContext(); - - // Assert - should have both config and app - expect(context.config).toBe(initialConfig); - expect(context.app).toBe(mockApp); - }); - - it('should initialize with empty context', () => { - // Arrange & Act - const container = Container.getInstance(); - const context = container.getContext(); - - // Assert - expect(context).toEqual({}); - }); -}); diff --git a/packages/backend/jest.config.js b/packages/backend/jest.config.js deleted file mode 100644 index 8ddf4c7c..00000000 --- a/packages/backend/jest.config.js +++ /dev/null @@ -1,32 +0,0 @@ -module.exports = { - preset: 'ts-jest', - testEnvironment: 'node', - roots: ['/src', '/__tests__'], - testMatch: ['**/__tests__/**/*.ts', '**/?(*.)+(spec|test).ts'], - transform: { - '^.+\\.ts$': [ - 'ts-jest', - { - tsconfig: 'tsconfig.test.json', - }, - ], - }, - moduleNameMapper: { - '^@/(.*)$': '/src/$1', - }, - setupFilesAfterEnv: ['/jest.setup.js'], - collectCoverageFrom: [ - 'src/**/*.ts', - '!src/**/*.d.ts', - '!src/**/*.interface.ts', - '!src/**/*.types.ts', - ], - coverageThreshold: { - global: { - branches: 70, - functions: 70, - lines: 70, - statements: 70, - }, - }, -}; diff --git a/packages/backend/jest.setup.js b/packages/backend/jest.setup.js deleted file mode 100644 index c4c86e13..00000000 --- a/packages/backend/jest.setup.js +++ /dev/null @@ -1,34 +0,0 @@ -// Increase timeout for all tests to 10 seconds -jest.setTimeout(10000); - -// Silence console.log during tests -global.console = { - ...global.console, - // Keep error and warn for debugging tests - log: jest.fn(), - info: jest.fn(), - 
debug: jest.fn(), -}; - -// Mock process.env for testing -process.env.NODE_ENV = 'test'; - -// Add custom matchers if needed -expect.extend({ - toBeWithinRange(received, floor, ceiling) { - const pass = received >= floor && received <= ceiling; - if (pass) { - return { - message: () => - `expected ${received} not to be within range ${floor} - ${ceiling}`, - pass: true, - }; - } else { - return { - message: () => - `expected ${received} to be within range ${floor} - ${ceiling}`, - pass: false, - }; - } - }, -}); diff --git a/packages/backend/package.json b/packages/backend/package.json deleted file mode 100644 index 40024caf..00000000 --- a/packages/backend/package.json +++ /dev/null @@ -1,52 +0,0 @@ -{ - "name": "@cairo-coder/backend", - "version": "1.0.0", - "private": true, - "scripts": { - "build": "tsc -p tsconfig.json", - "dev": "LOG_LEVEL=debug nodemon src/app.ts", - "start": "node dist/src/app.js", - "test": "jest", - "test:watch": "jest --watch", - "test:coverage": "jest --coverage", - "check-types": "tsc --noEmit" - }, - "dependencies": { - "@cairo-coder/agents": "workspace:*", - "@iarna/toml": "^2.2.5", - "@langchain/anthropic": "^0.2.18", - "@langchain/community": "^0.3.32", - "@langchain/core": "^0.2.36", - "@langchain/google-genai": "^0.1.8", - "@langchain/openai": "^0.0.25", - "@types/node": "^20", - "cors": "^2.8.5", - "dotenv": "^16.4.7", - "express": "^4.21.2", - "node-fetch": "2.7.0", - "uuid": "^11.1.0", - "winston": "^3.17.0", - "ws": "^8.18.1" - }, - "exports": { - "./*": { - "types": "./src/*.ts", - "default": "./dist/src/*.js" - } - }, - "devDependencies": { - "@cairo-coder/typescript-config": "workspace:*", - "@types/cors": "^2.8.17", - "@types/express": "^4.17.21", - "@types/jest": "^29.5.14", - "@types/supertest": "^6.0.2", - "jest": "^29.7.0", - "jest-mock-extended": "4.0.0-beta1", - "nodemon": "^3.1.9", - "prettier": "^3.5.2", - "supertest": "^7.0.0", - "ts-jest": "^29.2.5", - "ts-node": "^10.9.2", - "typescript": "^5.7.3" - } -} diff --git a/packages/backend/src/__tests__/agents.e2e.test.ts b/packages/backend/src/__tests__/agents.e2e.test.ts deleted file mode 100644 index 9042c42e..00000000 --- a/packages/backend/src/__tests__/agents.e2e.test.ts +++ /dev/null @@ -1,286 +0,0 @@ -// Place all mocks at the top level, outside describe -jest.mock('@cairo-coder/agents/utils/index', () => ({ - logger: { - info: jest.fn(), - error: jest.fn(), - warn: jest.fn(), - debug: jest.fn(), - }, -})); - -jest.mock('@cairo-coder/agents/config/settings', () => ({ - getVectorDbConfig: jest.fn().mockReturnValue({ - host: 'localhost', - port: 5432, - user: 'test', - password: 'test', - database: 'test', - }), - getPort: jest.fn().mockReturnValue(3000), - getHostedModeConfig: jest.fn().mockReturnValue({ - DEFAULT_CHAT_PROVIDER: 'openai', - DEFAULT_CHAT_MODEL: 'gpt-4', - DEFAULT_FAST_CHAT_PROVIDER: 'openai', - DEFAULT_FAST_CHAT_MODEL: 'gpt-3.5-turbo', - DEFAULT_EMBEDDING_PROVIDER: 'openai', - DEFAULT_EMBEDDING_MODEL: 'text-embedding-ada-002', - }), -})); - -jest.mock('../config/provider', () => ({ - getAvailableChatModelProviders: jest.fn().mockResolvedValue({ - openai: { - 'gpt-4': { - invoke: jest.fn().mockResolvedValue({ content: 'Mocked response' }), - }, - 'gpt-3.5-turbo': { - invoke: jest.fn().mockResolvedValue({ content: 'Mocked response' }), - }, - }, - }), - getAvailableEmbeddingModelProviders: jest.fn().mockResolvedValue({ - openai: { - 'text-embedding-ada-002': { - embedQuery: jest.fn().mockResolvedValue([0.1, 0.2, 0.3]), - }, - }, - }), -})); - 
-jest.mock('@cairo-coder/agents/db/postgresVectorStore', () => ({ - VectorStore: { - getInstance: jest.fn().mockResolvedValue({ - similaritySearch: jest.fn().mockResolvedValue([]), - }), - }, -})); - -const EventEmitter = require('events'); -jest.mock('@cairo-coder/agents', () => ({ - logger: { - info: jest.fn(), - error: jest.fn(), - warn: jest.fn(), - debug: jest.fn(), - }, - getVectorDbConfig: jest.fn().mockReturnValue({ - host: 'localhost', - port: 5432, - user: 'test', - password: 'test', - database: 'test', - }), - RagAgentFactory: { - createAgent: jest.fn().mockImplementation(() => { - const emitter = new EventEmitter(); - process.nextTick(() => { - emitter.emit( - 'data', - JSON.stringify({ type: 'response', data: 'Test response' }), - ); - emitter.emit('end'); - }); - return emitter; - }), - createAgentById: jest.fn().mockImplementation(() => { - const emitter = new EventEmitter(); - process.nextTick(() => { - emitter.emit( - 'data', - JSON.stringify({ type: 'response', data: 'Agent response' }), - ); - emitter.emit('end'); - }); - return emitter; - }), - }, - TokenTracker: Object.assign( - jest.fn().mockImplementation(() => ({ - getTotalTokens: jest.fn().mockReturnValue(100), - reset: jest.fn(), - })), - { - getSessionTokenUsage: jest.fn().mockReturnValue({ - totalTokens: 100, - promptTokens: 50, - completionTokens: 50, - }), - }, - ), - getAgent: jest.fn().mockImplementation((id) => { - if (id === 'cairo-coder') { - return { - id: 'cairo-coder', - name: 'Cairo Coder', - description: 'Cairo assistant', - }; - } - return null; - }), - listAgents: jest.fn().mockReturnValue([ - { - id: 'cairo-coder', - name: 'Cairo Coder', - description: 'Cairo assistant', - sources: ['cairo-docs'], - }, - ]), - getAgents: jest.fn().mockReturnValue([ - { - id: 'cairo-coder', - name: 'Cairo Coder', - description: 'Cairo assistant', - sources: ['cairo-docs'], - }, - ]), -})); - -import request from 'supertest'; -import { Express } from 'express'; -import { createApplication } from '../server'; - -describe('Agents E2E Tests (Mocked)', () => { - let app: Express; - - beforeAll(async () => { - const { server, container } = await createApplication(); - app = container.getContext().app; - }); - - describe('Basic functionality', () => { - it('should list agents', async () => { - const response = await request(app).get('/v1/agents').expect(200); - - expect(response.body).toHaveProperty('agents'); - expect(response.body.agents).toHaveLength(1); - expect(response.body.agents[0].id).toBe('cairo-coder'); - }); - - it('should handle chat completions', async () => { - const response = await request(app) - .post('/v1/chat/completions') - .send({ - messages: [{ role: 'user', content: 'Hello' }], - stream: false, - }) - .expect(200); - - expect(response.body).toHaveProperty('choices'); - expect(response.body.choices[0].message.content).toBe('Test response'); - expect(response.body).toHaveProperty('usage'); - expect(response.headers['x-total-tokens']).toBe('100'); - }); - - it('should handle agent-specific completions', async () => { - const response = await request(app) - .post('/v1/agents/cairo-coder/chat/completions') - .send({ - messages: [{ role: 'user', content: 'Hello' }], - stream: false, - }) - .expect(200); - - expect(response.body.choices[0].message.content).toBe('Agent response'); - }); - - it('should return 404 for unknown agent', async () => { - const response = await request(app) - .post('/v1/agents/unknown/chat/completions') - .send({ - messages: [{ role: 'user', content: 'Hello' }], - }) - 
.expect(404); - - expect(response.body.error.code).toBe('agent_not_found'); - }); - - it('should validate empty messages', async () => { - const response = await request(app) - .post('/v1/chat/completions') - .send({ - messages: [], - }) - .expect(400); - - expect(response.body.error.code).toBe('invalid_messages'); - }); - - it('should validate last message is from user', async () => { - const response = await request(app) - .post('/v1/chat/completions') - .send({ - messages: [ - { role: 'user', content: 'Hello' }, - { role: 'assistant', content: 'Hi' }, - ], - }) - .expect(400); - - expect(response.body.error.code).toBe('invalid_last_message'); - }); - }); - - describe('Error handling', () => { - it('should handle invalid message roles', async () => { - const response = await request(app) - .post('/v1/chat/completions') - .send({ - messages: [{ role: 'invalid-role', content: 'Hello' }], - }) - .expect(500); - - expect(response.body).toHaveProperty('error'); - }); - - it('should handle missing content in messages', async () => { - const response = await request(app) - .post('/v1/chat/completions') - .send({ - messages: [{ role: 'user' }], - }) - .expect(500); - - expect(response.body).toHaveProperty('error'); - }); - }); - - describe('MCP Mode', () => { - it('should handle MCP mode requests', async () => { - // Update mock for MCP mode - const { RagAgentFactory } = require('@cairo-coder/agents'); - RagAgentFactory.createAgentById.mockImplementationOnce(() => { - const emitter = new EventEmitter(); - process.nextTick(() => { - emitter.emit( - 'data', - JSON.stringify({ type: 'response', data: 'MCP response' }), - ); - emitter.emit( - 'data', - JSON.stringify({ - type: 'sources', - data: [ - { pageContent: 'Source content', metadata: { source: 'test' } }, - ], - }), - ); - emitter.emit('end'); - }); - return emitter; - }); - - const response = await request(app) - .post('/v1/agents/cairo-coder/chat/completions') - .set('x-mcp-mode', 'true') - .send({ - messages: [{ role: 'user', content: 'Test MCP' }], - stream: false, - }) - .expect(200); - - expect(response.body).toHaveProperty('sources'); - expect(Array.isArray(response.body.sources)).toBe(true); - expect(response.body.sources[0]).toHaveProperty('pageContent'); - }); - }); -}); diff --git a/packages/backend/src/app.ts b/packages/backend/src/app.ts deleted file mode 100644 index 65a2aa1c..00000000 --- a/packages/backend/src/app.ts +++ /dev/null @@ -1,31 +0,0 @@ -import { createApplication } from './server'; -import { logger } from '@cairo-coder/agents/utils/index'; -import dotenv from 'dotenv'; - -dotenv.config(); - -// Error handling for uncaught exceptions -process.on('uncaughtException', (err, origin) => { - logger.error(`Uncaught Exception at ${origin}: ${err}`); -}); - -process.on('unhandledRejection', (reason, promise) => { - logger.error(`Unhandled Rejection at: ${promise}, reason: ${reason}`); -}); - -// Start the application -async function startApplication() { - try { - const { server, container } = await createApplication(); - const { port } = container.getContext().config; - - server.listen(port, () => { - logger.info(`Server is running on port ${port}`); - }); - } catch (error) { - logger.error('Failed to start application:', error); - process.exit(1); - } -} - -startApplication(); diff --git a/packages/backend/src/config/context.ts b/packages/backend/src/config/context.ts deleted file mode 100644 index 26a515a7..00000000 --- a/packages/backend/src/config/context.ts +++ /dev/null @@ -1,25 +0,0 @@ -import { ServerContext } from 
'../types'; - -export class Container { - private static instance: Container; - private context: ServerContext; - - private constructor() { - this.context = {} as ServerContext; - } - - public static getInstance(): Container { - if (!Container.instance) { - Container.instance = new Container(); - } - return Container.instance; - } - - public setContext(context: Partial<ServerContext>) { - this.context = { ...this.context, ...context }; - } - - public getContext(): ServerContext { - return this.context; - } -} diff --git a/packages/backend/src/config/http.ts b/packages/backend/src/config/http.ts deleted file mode 100644 index 677407ab..00000000 --- a/packages/backend/src/config/http.ts +++ /dev/null @@ -1,39 +0,0 @@ -import express from 'express'; -import routes from '../routes'; -import { logger } from '@cairo-coder/agents/utils/index'; -import { Container } from './context'; - -export function initializeHTTP(app: express.Application, container: Container) { - const context = container.getContext(); - - // Store models in app.locals for backward compatibility - app.locals.defaultLLM = context.config.models.defaultLLM; - app.locals.fastLLM = context.config.models.fastLLM; - app.locals.embeddings = context.config.models.embeddings; - - // Mount routes - app.use('/', routes); - - // Health check endpoint - app.get('/', (_, res) => { - res.status(200).json({ status: 'ok' }); - }); - - // Error handling middleware - app.use( - ( - err: any, - req: express.Request, - res: express.Response, - next: express.NextFunction, - ) => { - logger.error('Express error handler:', err); - res.status(500).json({ - error: { - message: 'Internal Server Error', - type: 'server_error', - }, - }); - }, - ); -} diff --git a/packages/backend/src/config/llm.ts b/packages/backend/src/config/llm.ts deleted file mode 100644 index 4f6a820f..00000000 --- a/packages/backend/src/config/llm.ts +++ /dev/null @@ -1,66 +0,0 @@ -import { - getAvailableEmbeddingModelProviders, - getAvailableChatModelProviders, -} from './provider'; -import { getHostedModeConfig } from '@cairo-coder/agents/config/settings'; -import { logger } from '@cairo-coder/agents/utils/index'; -import { ModelConfig } from '../types'; -let modelConfig: ModelConfig | null = null; - -export async function initializeLLMConfig(): Promise<ModelConfig> { - // If already initialized, return the existing config - if (modelConfig) { - return modelConfig; - } - - try { - const [chatModelProviders, embeddingModelProviders] = await Promise.all([ - getAvailableChatModelProviders(), - getAvailableEmbeddingModelProviders(), - ]); - - const hostedModeConfig = getHostedModeConfig(); - - // Default LLM setup - const defaultLLM = - chatModelProviders[hostedModeConfig.DEFAULT_CHAT_PROVIDER][ - hostedModeConfig.DEFAULT_CHAT_MODEL - ]; - - // Fast LLM setup - const fastLLM = - chatModelProviders[hostedModeConfig.DEFAULT_FAST_CHAT_PROVIDER][ - hostedModeConfig.DEFAULT_FAST_CHAT_MODEL - ]; - - // Embedding model setup - const embeddingModelProvider = - embeddingModelProviders[hostedModeConfig.DEFAULT_EMBEDDING_PROVIDER]; - const embeddings = - embeddingModelProvider[hostedModeConfig.DEFAULT_EMBEDDING_MODEL]; - - if (!defaultLLM || !fastLLM || !embeddings) { - throw new Error( - 'Failed to initialize one or more required models (default LLM, fast LLM, or embeddings)', - ); - } - - modelConfig = { - defaultLLM, - fastLLM, - embeddings, - }; - - return modelConfig; - } catch (error) { - logger.error('Failed to initialize model configuration:', error); - throw error; - } -} - -export function getModelConfig():
ModelConfig { - if (!modelConfig) { - throw new Error('Model configuration not initialized'); - } - return modelConfig; -} diff --git a/packages/backend/src/config/provider/anthropic.ts b/packages/backend/src/config/provider/anthropic.ts deleted file mode 100644 index 07e28129..00000000 --- a/packages/backend/src/config/provider/anthropic.ts +++ /dev/null @@ -1,39 +0,0 @@ -import { ChatAnthropic } from '@langchain/anthropic'; -import { getAnthropicApiKey } from '@cairo-coder/agents/config/settings'; -import { logger } from '@cairo-coder/agents/utils/index'; - -export const loadAnthropicChatModels = async () => { - const anthropicApiKey = getAnthropicApiKey(); - - if (!anthropicApiKey) return {}; - - try { - const chatModels = { - 'Claude 3.5 Sonnet': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-5-sonnet-20240620', - }), - 'Claude 3 Opus': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-opus-20240229', - }), - 'Claude 3 Sonnet': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-sonnet-20240229', - }), - 'Claude 3 Haiku': new ChatAnthropic({ - temperature: 0.7, - anthropicApiKey: anthropicApiKey, - model: 'claude-3-haiku-20240307', - }), - }; - - return chatModels; - } catch (err) { - logger.error(`Error loading Anthropic models: ${err}`); - return {}; - } -}; diff --git a/packages/backend/src/config/provider/deepseek.ts b/packages/backend/src/config/provider/deepseek.ts deleted file mode 100644 index 8adc7270..00000000 --- a/packages/backend/src/config/provider/deepseek.ts +++ /dev/null @@ -1,28 +0,0 @@ -import { getDeepseekApiKey } from '@cairo-coder/agents/config/settings'; -import { logger } from '@cairo-coder/agents/utils/index'; -import { ChatOpenAI, OpenAI } from '@langchain/openai'; - -export const loadDeepseekChatModels = async () => { - const deepseekApiKey = getDeepseekApiKey(); - - if (!deepseekApiKey) return {}; - - try { - const chatModels = { - 'DeepSeek Chat': new ChatOpenAI({ - temperature: 0.7, - openAIApiKey: deepseekApiKey, - modelName: 'deepseek-chat', - configuration: { - apiKey: deepseekApiKey, - baseURL: 'https://api.deepseek.com/v1', - }, - }), - }; - - return chatModels; - } catch (err) { - logger.error(`Error loading DeepSeek models: ${err}`); - return {}; - } -}; diff --git a/packages/backend/src/config/provider/gemini.ts b/packages/backend/src/config/provider/gemini.ts deleted file mode 100644 index 61c3a98b..00000000 --- a/packages/backend/src/config/provider/gemini.ts +++ /dev/null @@ -1,29 +0,0 @@ -import { getGeminiApiKey } from '@cairo-coder/agents/config/settings'; -import { logger } from '@cairo-coder/agents/utils/index'; -import { ChatGoogleGenerativeAI } from '@langchain/google-genai'; - -export const loadGeminiChatModels = async () => { - const geminiApiKey = getGeminiApiKey(); - - if (!geminiApiKey) return {}; - - try { - const chatModels = { - 'Gemini Flash': new ChatGoogleGenerativeAI({ - temperature: 0.7, - apiKey: geminiApiKey, - modelName: 'gemini-2.0-flash', - }), - 'Gemini Flash 2.5': new ChatGoogleGenerativeAI({ - temperature: 0.7, - apiKey: geminiApiKey, - modelName: 'gemini-2.5-flash', - }), - }; - - return chatModels; - } catch (err) { - logger.error(`Error loading Gemini models: ${err}`); - return {}; - } -}; diff --git a/packages/backend/src/config/provider/groq.ts b/packages/backend/src/config/provider/groq.ts deleted file mode 100644 index f46827da..00000000 --- 
a/packages/backend/src/config/provider/groq.ts +++ /dev/null @@ -1,89 +0,0 @@ -import { ChatOpenAI } from '@langchain/openai'; -import { getGroqApiKey } from '@cairo-coder/agents/config/settings'; -import { logger } from '@cairo-coder/agents/utils/index'; - -export const loadGroqChatModels = async () => { - const groqApiKey = getGroqApiKey(); - - if (!groqApiKey) return {}; - - try { - const chatModels = { - 'Llama 3.1 70B': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama-3.1-70b-versatile', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Llama 3.1 8B': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama-3.1-8b-instant', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'LLaMA3 8b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama3-8b-8192', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'LLaMA3 70b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'llama3-70b-8192', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Mixtral 8x7b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'mixtral-8x7b-32768', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Gemma 7b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'gemma-7b-it', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - 'Gemma2 9b': new ChatOpenAI( - { - openAIApiKey: groqApiKey, - modelName: 'gemma2-9b-it', - temperature: 0.7, - }, - { - baseURL: 'https://api.groq.com/openai/v1', - }, - ), - }; - - return chatModels; - } catch (err) { - logger.error(`Error loading Groq models: ${err}`); - return {}; - } -}; diff --git a/packages/backend/src/config/provider/index.ts b/packages/backend/src/config/provider/index.ts deleted file mode 100644 index cdf5b529..00000000 --- a/packages/backend/src/config/provider/index.ts +++ /dev/null @@ -1,67 +0,0 @@ -import { loadGroqChatModels } from './groq'; -import { loadOpenAIChatModels, loadOpenAIEmbeddingsModels } from './openai'; -import { loadAnthropicChatModels } from './anthropic'; -import { loadDeepseekChatModels } from './deepseek'; -import { getHostedModeConfig } from '@cairo-coder/agents/config/settings'; -import { loadGeminiChatModels } from './gemini'; - -const chatModelProviders = { - openai: loadOpenAIChatModels, - groq: loadGroqChatModels, - anthropic: loadAnthropicChatModels, - deepseek: loadDeepseekChatModels, - gemini: loadGeminiChatModels, -}; - -const embeddingModelProviders = { - openai: loadOpenAIEmbeddingsModels, -}; - -export const getAvailableChatModelProviders = async () => { - const models = {}; - - for (const provider in chatModelProviders) { - const providerModels = await chatModelProviders[provider](); - if (Object.keys(providerModels).length > 0) { - models[provider] = providerModels; - } - } - - const hostedModeConfig = getHostedModeConfig(); - return { - [hostedModeConfig.DEFAULT_CHAT_PROVIDER]: { - [hostedModeConfig.DEFAULT_CHAT_MODEL]: - models[hostedModeConfig.DEFAULT_CHAT_PROVIDER][ - hostedModeConfig.DEFAULT_CHAT_MODEL - ], - }, - [hostedModeConfig.DEFAULT_FAST_CHAT_PROVIDER]: { - [hostedModeConfig.DEFAULT_FAST_CHAT_MODEL]: - models[hostedModeConfig.DEFAULT_FAST_CHAT_PROVIDER][ - hostedModeConfig.DEFAULT_FAST_CHAT_MODEL - ], - }, - }; -}; - -export const getAvailableEmbeddingModelProviders = async () => { - const models = {}; - - for (const provider in 
embeddingModelProviders) { - const providerModels = await embeddingModelProviders[provider](); - if (Object.keys(providerModels).length > 0) { - models[provider] = providerModels; - } - } - - const hostedModeConfig = getHostedModeConfig(); - const PROVIDERSl = - models[hostedModeConfig.DEFAULT_EMBEDDING_PROVIDER][ - hostedModeConfig.DEFAULT_EMBEDDING_MODEL - ]; - return { - [hostedModeConfig.DEFAULT_EMBEDDING_PROVIDER]: { - [hostedModeConfig.DEFAULT_EMBEDDING_MODEL]: PROVIDERSl, - }, - }; -}; diff --git a/packages/backend/src/config/provider/openai.ts b/packages/backend/src/config/provider/openai.ts deleted file mode 100644 index de6a49a1..00000000 --- a/packages/backend/src/config/provider/openai.ts +++ /dev/null @@ -1,72 +0,0 @@ -import { ChatOpenAI, OpenAIEmbeddings } from '@langchain/openai'; -import { getOpenaiApiKey } from '@cairo-coder/agents/config/settings'; -import { logger } from '@cairo-coder/agents/utils/index'; - -export const loadOpenAIChatModels = async () => { - const openAIApiKey = getOpenaiApiKey(); - - if (!openAIApiKey) return {}; - - try { - const chatModels = { - 'GPT-3.5 turbo': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-3.5-turbo', - temperature: 0.7, - }), - 'GPT-4': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4', - temperature: 0.7, - }), - 'GPT-4 turbo': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4-turbo', - temperature: 0.7, - }), - 'GPT-4 omni': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4o', - temperature: 0.7, - }), - 'GPT-4 omni mini': new ChatOpenAI({ - openAIApiKey, - modelName: 'gpt-4o-mini', - temperature: 0.7, - }), - }; - - return chatModels; - } catch (err) { - logger.error(`Error loading OpenAI models: ${err}`); - return {}; - } -}; - -export const loadOpenAIEmbeddingsModels = async () => { - const openAIApiKey = getOpenaiApiKey(); - - if (!openAIApiKey) return {}; - - try { - const embeddingModels = { - 'Text embedding 3 small': new OpenAIEmbeddings({ - openAIApiKey, - modelName: 'text-embedding-3-small', - batchSize: 512, - dimensions: 1536, - }), - 'Text embedding 3 large': new OpenAIEmbeddings({ - openAIApiKey, - modelName: 'text-embedding-3-large', - batchSize: 512, - dimensions: 1536, - }), - }; - - return embeddingModels; - } catch (err) { - logger.error(`Error loading OpenAI embeddings model: ${err}`); - return {}; - } -}; diff --git a/packages/backend/src/config/validateConfig.ts b/packages/backend/src/config/validateConfig.ts deleted file mode 100644 index f7e74349..00000000 --- a/packages/backend/src/config/validateConfig.ts +++ /dev/null @@ -1,16 +0,0 @@ -import { ServerConfig } from '../types'; - -export function validateConfig(config: ServerConfig): void { - if (!config.port) { - throw new Error('Port is required'); - } - if (!config.models) { - throw new Error('Models configuration is required'); - } - if (!config.models.defaultLLM) { - throw new Error('Default LLM is required'); - } - if (!config.models.embeddings) { - throw new Error('Embeddings model is required'); - } -} diff --git a/packages/backend/src/routes/agents.ts b/packages/backend/src/routes/agents.ts deleted file mode 100644 index 6811a5fd..00000000 --- a/packages/backend/src/routes/agents.ts +++ /dev/null @@ -1,68 +0,0 @@ -import { Router } from 'express'; -import { handleChatCompletion } from './chatCompletionHandler'; -import { listAgents, getAgent } from '@cairo-coder/agents'; -import { ChatCompletionRequest } from '../types'; - -const router = Router(); - -// GET /v1/agents - List available agents -router.get('/', async (req, res) => { 
- try { - const agents = listAgents().map((agent) => ({ - id: agent.id, - name: agent.name, - description: agent.description, - sources: agent.sources, - })); - - res.json({ - agents, - total: agents.length, - }); - } catch (error) { - console.error('Error listing agents:', error); - res.status(500).json({ - error: { - message: 'Failed to list agents', - type: 'internal_error', - }, - }); - } -}); - -// POST /v1/agents/:agentId/chat/completions - Agent-specific chat completions -router.post('/:agentId/chat/completions', async (req, res) => { - const { agentId } = req.params; - const request = req.body as ChatCompletionRequest; - - try { - // Validate agent exists - const agent = getAgent(agentId); - if (!agent) { - return res.status(404).json({ - error: { - message: `Agent not found: ${agentId}`, - type: 'invalid_request_error', - param: 'agentId', - code: 'agent_not_found', - }, - }); - } - - // Use the shared handler with the specific agent ID - await handleChatCompletion(req, res, { agentId }); - } catch (error) { - console.error( - `Error handling chat completion for agent ${agentId}:`, - error, - ); - res.status(500).json({ - error: { - message: 'Internal server error', - type: 'internal_error', - }, - }); - } -}); - -export default router; diff --git a/packages/backend/src/routes/cairocoder.ts b/packages/backend/src/routes/cairocoder.ts deleted file mode 100644 index 9781640b..00000000 --- a/packages/backend/src/routes/cairocoder.ts +++ /dev/null @@ -1,28 +0,0 @@ -import express, { Router } from 'express'; -import { logger } from '@cairo-coder/agents'; -import { handleChatCompletion } from './chatCompletionHandler'; - -const router: Router = express.Router(); - -router.post('/', async (req, res) => { - try { - // Use the shared handler without agentId (uses default agent) - await handleChatCompletion(req, res); - } catch (error) { - logger.error('Error in /chat/completions:', error); - - // Error handling is done in the shared handler - // This is just a safety net for unexpected errors - if (!res.headersSent) { - res.status(500).json({ - error: { - message: 'Internal Server Error', - type: 'server_error', - code: 'internal_error', - }, - }); - } - } -}); - -export default router; diff --git a/packages/backend/src/routes/chatCompletionHandler.ts b/packages/backend/src/routes/chatCompletionHandler.ts deleted file mode 100644 index dc00c3be..00000000 --- a/packages/backend/src/routes/chatCompletionHandler.ts +++ /dev/null @@ -1,324 +0,0 @@ -import { Request, Response } from 'express'; -import { - BaseMessage, - HumanMessage, - AIMessage, - SystemMessage, -} from '@langchain/core/messages'; -import { - RagAgentFactory, - TokenTracker, - LLMConfig, - getVectorDbConfig, -} from '@cairo-coder/agents'; -import { VectorStore } from '@cairo-coder/agents/db/postgresVectorStore'; -import { ChatCompletionRequest } from '../types'; -import { v4 as uuidv4 } from 'uuid'; - -type MessageType = { - role: string; - content: string; -}; - -interface ChatCompletionOptions { - agentId?: string; -} - -export async function handleChatCompletion( - req: Request, - res: Response, - options: ChatCompletionOptions = {}, -) { - const request = req.body as ChatCompletionRequest; - const { messages, stream = false, model = 'cairo-coder' } = request; - const { agentId } = options; - - // Basic validation - if (!messages || !Array.isArray(messages) || messages.length === 0) { - return res.status(400).json({ - error: { - message: 'Invalid request: messages array is missing.', - type: 'invalid_request_error', - 
param: 'messages', - code: 'invalid_messages', - }, - }); - } - - // Get dependencies from app locals (backward compatibility) - const chatModel = req.app.locals.defaultLLM; - const fastChatModel = req.app.locals.fastLLM; - const embeddings = req.app.locals.embeddings; - - if (!chatModel || !fastChatModel || !embeddings) { - return res.status(500).json({ - error: { - message: 'Internal Server Error: Models not initialized', - type: 'server_error', - code: 'models_not_initialized', - }, - }); - } - - // Convert messages to BaseMessage format - const history: BaseMessage[] = messages.map((msg: MessageType) => { - switch (msg.role) { - case 'system': - return new SystemMessage(msg.content); - case 'user': - return new HumanMessage(msg.content); - case 'assistant': - return new AIMessage(msg.content); - default: - throw new Error(`Unsupported message role: ${msg.role}`); - } - }); - - // Get the latest user message - const latestMessage = messages[messages.length - 1]; - if (!latestMessage || latestMessage.role !== 'user') { - return res.status(400).json({ - error: { - message: 'Last message must be from user', - type: 'invalid_request_error', - param: 'messages', - code: 'invalid_last_message', - }, - }); - } - - const query = latestMessage.content; - const mcp = - req.headers['mcp'] === 'true' || req.headers['x-mcp-mode'] === 'true'; - - // Set up LLM config - const llmConfig: LLMConfig = { - defaultLLM: chatModel, - fastLLM: fastChatModel, - }; - - // Get vector store - const dbConfig = getVectorDbConfig(); - const vectorStore = await VectorStore.getInstance(dbConfig, embeddings); - - try { - // Create agent based on whether agentId is provided - const agent = agentId - ? await RagAgentFactory.createAgentById( - query, - history, - agentId, - llmConfig, - embeddings, - vectorStore, - mcp, - ) - : RagAgentFactory.createAgent( - query, - history, - llmConfig, - embeddings, - vectorStore, - mcp, - ); - - if (stream) { - // Set up SSE headers - res.setHeader('Content-Type', 'text/event-stream'); - res.setHeader('Cache-Control', 'no-cache'); - res.setHeader('Connection', 'keep-alive'); - res.setHeader('Transfer-Encoding', 'chunked'); - - let responseContent = ''; - - agent.on('data', (data: any) => { - // Check if response is already finished - if (res.destroyed || res.writableEnded) { - return; - } - - const parsed = JSON.parse(data); - - if (parsed.type === 'response') { - responseContent += parsed.data; - - // If we have content to send - if (parsed.data) { - const chunk = { - id: uuidv4(), - object: 'chat.completion.chunk', - created: Date.now(), - model: model, - choices: [ - { - index: 0, - delta: { - role: 'assistant', - content: parsed.data, - }, - finish_reason: null, - }, - ], - }; - res.write(`data: ${JSON.stringify(chunk)}\n\n`); - } - } - }); - - agent.on('error', (error: any) => { - console.error('Agent error:', error); - if (!res.destroyed && !res.writableEnded) { - res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`); - res.end(); - } - }); - - agent.on('end', () => { - // Check if response is already finished - if (res.destroyed || res.writableEnded) { - return; - } - - const tokenUsage = TokenTracker.getSessionTokenUsage(); - - // Check if headers haven't been sent yet - if (!res.headersSent) { - res.setHeader('x-total-tokens', tokenUsage.totalTokens.toString()); - } - - const finalChunk = { - id: uuidv4(), - object: 'chat.completion.chunk', - created: Date.now(), - model: model, - choices: [ - { - index: 0, - delta: {}, - finish_reason: 'stop', - }, - ], - usage: 
{ - prompt_tokens: tokenUsage.promptTokens, - completion_tokens: tokenUsage.responseTokens, - total_tokens: tokenUsage.totalTokens, - }, - }; - res.write(`data: ${JSON.stringify(finalChunk)}\n\n`); - res.write('data: [DONE]\n\n'); - res.end(); - }); - } else { - // Non-streaming response - let responseContent = ''; - let sources: any[] = []; - - agent.on('data', (data: any) => { - const parsed = JSON.parse(data); - - if (parsed.type === 'response') { - responseContent += parsed.data; - } else if (parsed.type === 'sources') { - sources = parsed.data; - } - }); - - agent.on('error', (error: any) => { - console.error('Agent error:', error); - - // Check if headers haven't been sent yet - if (!res.headersSent) { - res.status(500).json({ - error: { - message: error.message, - type: 'server_error', - code: 'internal_error', - }, - }); - } - }); - - agent.on('end', () => { - // Check if headers haven't been sent yet - if (res.headersSent) { - return; - } - - const tokenUsage = TokenTracker.getSessionTokenUsage(); - - res.setHeader('x-total-tokens', tokenUsage.totalTokens.toString()); - - const responsePayload = { - id: uuidv4(), - object: 'chat.completion', - created: Date.now(), - model: model, - choices: [ - { - index: 0, - message: { - role: 'assistant', - content: responseContent, - }, - logprobs: null, - finish_reason: 'stop', - }, - ], - usage: { - prompt_tokens: tokenUsage.promptTokens, - completion_tokens: tokenUsage.responseTokens, - total_tokens: tokenUsage.totalTokens, - }, - }; - - // Add sources if in MCP mode - if (mcp && sources.length > 0) { - (responsePayload as any).sources = sources; - } - - res.json(responsePayload); - }); - } - } catch (error) { - console.error('Error in chat completion:', error); - - // Check if headers haven't been sent yet - if (res.headersSent) { - return; - } - - // Map common errors to OpenAI error format - if (error instanceof Error) { - const errorResponse: any = { - error: { - message: error.message, - type: 'server_error', - code: 'internal_error', - }, - }; - - // Map specific error types - if (error.message.includes('rate limit')) { - errorResponse.error.type = 'rate_limit_error'; - errorResponse.error.code = 'rate_limit_exceeded'; - return res.status(429).json(errorResponse); - } - - if (error.message.includes('invalid')) { - errorResponse.error.type = 'invalid_request_error'; - return res.status(400).json(errorResponse); - } - - return res.status(500).json(errorResponse); - } - - // Generic error - res.status(500).json({ - error: { - message: 'Internal Server Error', - type: 'server_error', - code: 'internal_error', - }, - }); - } -} diff --git a/packages/backend/src/routes/index.ts b/packages/backend/src/routes/index.ts deleted file mode 100644 index 00fcbef8..00000000 --- a/packages/backend/src/routes/index.ts +++ /dev/null @@ -1,8 +0,0 @@ -import express, { Router } from 'express'; -import v1Router from './version'; - -const router: Router = express.Router(); - -router.use('/v1', v1Router); - -export default router; diff --git a/packages/backend/src/routes/version.ts b/packages/backend/src/routes/version.ts deleted file mode 100644 index fcd1f84e..00000000 --- a/packages/backend/src/routes/version.ts +++ /dev/null @@ -1,13 +0,0 @@ -import express, { Router } from 'express'; -import cairocoderRouter from './cairocoder'; -import agentsRouter from './agents'; - -const router: Router = express.Router(); - -// Legacy endpoint for backward compatibility -router.use('/chat/completions', cairocoderRouter); - -// New agent-specific endpoints 
-router.use('/agents', agentsRouter); - -export default router; diff --git a/packages/backend/src/server.ts b/packages/backend/src/server.ts deleted file mode 100644 index daff2b66..00000000 --- a/packages/backend/src/server.ts +++ /dev/null @@ -1,53 +0,0 @@ -import express from 'express'; -import http from 'http'; -import cors from 'cors'; -import { initializeLLMConfig } from './config/llm'; -import { getPort } from '@cairo-coder/agents/config/settings'; -import { logger } from '@cairo-coder/agents/utils/index'; -import { initializeHTTP } from './config/http'; -import { Container } from './config/context'; -import { validateConfig } from './config/validateConfig'; - -export async function createApplication() { - try { - // Initialize container - const container = Container.getInstance(); - - // Initialize LLM models - const models = await initializeLLMConfig(); - - // Create config - const config = { - port: getPort(), - models, - cors: { - origin: '*', - }, - }; - - // Validate config - validateConfig(config); - - // Create initial context - container.setContext({ config }); - - // Create Express app and HTTP server - const app = express(); - const server = http.createServer(app); - - // Apply basic middleware - app.use(cors(config.cors)); - app.use(express.json({ limit: '50mb' })); - - // Initialize both HTTP with container - initializeHTTP(app, container); - - // Update container with initialized services - container.setContext({ app }); - - return { server, container }; - } catch (error) { - logger.error('Failed to create application:', error); - throw error; - } -} diff --git a/packages/backend/src/types/index.ts b/packages/backend/src/types/index.ts deleted file mode 100644 index 98431770..00000000 --- a/packages/backend/src/types/index.ts +++ /dev/null @@ -1,76 +0,0 @@ -import eventEmitter from 'events'; -import { BaseMessage } from '@langchain/core/messages'; -import { Embeddings } from '@langchain/core/embeddings'; -import { LLMConfig } from '@cairo-coder/agents/types/index'; -import { VectorStore } from '@cairo-coder/agents/db/postgresVectorStore'; -import { BaseChatModel } from '@langchain/core/language_models/chat_models'; -import { CorsOptions } from 'cors'; -import { Express } from 'express'; - -export interface ServerConfig { - port: number; - models: ModelConfig; - cors: CorsOptions; -} - -export interface ModelConfig { - defaultLLM: BaseChatModel; - fastLLM: BaseChatModel; - embeddings: Embeddings; -} - -export interface ServerContext { - config: ServerConfig; - app?: Express; -} - -export interface HandlerOptions { - vectorStore?: VectorStore; -} - -export type SearchHandler = ( - content: string, - history: BaseMessage[], - llm: LLMConfig, - embeddings: Embeddings, - options: HandlerOptions, -) => eventEmitter; - -export interface ChatCompletionRequest { - model: string; - messages: Array<{ - role: string; - content: string; - name?: string; - function_call?: { - name: string; - arguments: string; - }; - }>; - functions?: Array<{ - name: string; - description?: string; - parameters: Record; - }>; - function_call?: string | { name: string }; - tools?: Array<{ - type: string; - function: { - name: string; - description?: string; - parameters: Record; - }; - }>; - tool_choice?: string | { type: string; function: { name: string } }; - temperature?: number; - top_p?: number; - n?: number; - stream?: boolean; - stop?: string | string[]; - max_tokens?: number; - presence_penalty?: number; - frequency_penalty?: number; - logit_bias?: Record; - user?: string; - response_format?: 
{ type: 'text' | 'json_object' }; -} diff --git a/packages/backend/tsconfig.json b/packages/backend/tsconfig.json deleted file mode 100644 index 71370409..00000000 --- a/packages/backend/tsconfig.json +++ /dev/null @@ -1,16 +0,0 @@ -{ - "extends": "@cairo-coder/typescript-config/base.json", - "compilerOptions": { - "outDir": "dist", - "sourceMap": true, - "esModuleInterop": true, - "experimentalDecorators": true, - "emitDecoratorMetadata": true, - "allowSyntheticDefaultImports": true, - "skipLibCheck": true, - "skipDefaultLibCheck": true, - "types": ["node", "jest"] - }, - "include": ["src", "**/*.test.ts"], - "exclude": ["node_modules", "**/*.spec.ts"] -} diff --git a/packages/backend/tsconfig.test.json b/packages/backend/tsconfig.test.json deleted file mode 100644 index 877ede0a..00000000 --- a/packages/backend/tsconfig.test.json +++ /dev/null @@ -1,9 +0,0 @@ -{ - "extends": "./tsconfig.json", - "compilerOptions": { - "types": ["node", "jest"], - "esModuleInterop": true - }, - "include": ["src", "__tests__"], - "exclude": ["node_modules"] -} diff --git a/packages/ingester/package.json b/packages/ingester/package.json index af408c28..68748e3c 100644 --- a/packages/ingester/package.json +++ b/packages/ingester/package.json @@ -10,10 +10,10 @@ "dependencies": { "@antora/lunr-extension": "1.0.0-alpha.10", "@asciidoctor/tabs": "1.0.0-beta.6", + "@cairo-coder/agents": "workspace:*", "@iarna/toml": "^2.2.5", "@langchain/core": "^0.2.36", - "@cairo-coder/agents": "workspace:*", - "@cairo-coder/backend": "workspace:*", + "@langchain/openai": "^0.0.25", "adm-zip": "^0.5.16", "asciidoctor": "^3.0.4", "axios": "^1.7.9", diff --git a/packages/ingester/src/generateEmbeddings.ts b/packages/ingester/src/generateEmbeddings.ts index 51547552..bc07284f 100644 --- a/packages/ingester/src/generateEmbeddings.ts +++ b/packages/ingester/src/generateEmbeddings.ts @@ -1,10 +1,41 @@ import { createInterface } from 'readline'; import { logger } from '@cairo-coder/agents/utils/index'; import { VectorStore } from '@cairo-coder/agents/db/postgresVectorStore'; -import { getVectorDbConfig } from '@cairo-coder/agents/config/settings'; -import { loadOpenAIEmbeddingsModels } from '@cairo-coder/backend/config/provider/openai'; +import { + getOpenaiApiKey, + getVectorDbConfig, +} from '@cairo-coder/agents/config/settings'; import { DocumentSource } from '@cairo-coder/agents/types/index'; import { IngesterFactory } from './IngesterFactory'; +import { OpenAIEmbeddings } from '@langchain/openai'; + +export const loadOpenAIEmbeddingsModels = async () => { + const openAIApiKey = getOpenaiApiKey(); + + if (!openAIApiKey) return {}; + + try { + const embeddingModels = { + 'Text embedding 3 small': new OpenAIEmbeddings({ + openAIApiKey, + modelName: 'text-embedding-3-small', + batchSize: 512, + dimensions: 1536, + }), + 'Text embedding 3 large': new OpenAIEmbeddings({ + openAIApiKey, + modelName: 'text-embedding-3-large', + batchSize: 512, + dimensions: 1536, + }), + }; + + return embeddingModels; + } catch (err) { + logger.error(`Error loading OpenAI embeddings model: ${err}`); + return {}; + } +}; /** * Global vector store instance diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 592f46cd..eee04858 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -25,19 +25,19 @@ importers: version: 2.2.5 '@langchain/anthropic': specifier: ^0.2.18 - version: 0.2.18(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) + version: 0.2.18(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) '@langchain/community': specifier: ^0.3.32 - version: 
0.3.55(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(zod@3.25.76))(@ibm-cloud/watsonx-ai@1.6.12)(@langchain/anthropic@0.2.18(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76))(axios@1.11.0)(fast-xml-parser@4.5.3)(handlebars@4.7.8)(ibm-cloud-sdk-core@5.4.2)(jsonwebtoken@9.0.2)(mongodb@6.19.0)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(pg@8.16.3)(playwright@1.55.0)(weaviate-client@3.8.1)(ws@8.18.3) + version: 0.3.55(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(zod@3.25.76))(@ibm-cloud/watsonx-ai@1.6.12)(@langchain/anthropic@0.2.18(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76))(axios@1.11.0)(fast-xml-parser@4.5.3)(handlebars@4.7.8)(ibm-cloud-sdk-core@5.4.2)(jsonwebtoken@9.0.2)(mongodb@6.19.0)(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(pg@8.16.3)(playwright@1.55.0)(weaviate-client@3.8.1)(ws@8.18.3) '@langchain/core': specifier: ^0.2.36 - version: 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) + version: 0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) '@langchain/google-genai': specifier: ^0.1.8 - version: 0.1.12(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76) + version: 0.1.12(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76) '@langchain/mongodb': specifier: ^0.0.5 - version: 0.0.5(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) + version: 0.0.5(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) '@langchain/openai': specifier: ^0.0.25 version: 0.0.25(ws@8.18.3) @@ -103,94 +103,6 @@ importers: specifier: ^5.7.3 version: 5.9.2 - packages/backend: - dependencies: - '@cairo-coder/agents': - specifier: workspace:* - version: link:../agents - '@iarna/toml': - specifier: ^2.2.5 - version: 2.2.5 - '@langchain/anthropic': - specifier: ^0.2.18 - version: 0.2.18(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - '@langchain/community': - specifier: ^0.3.32 - version: 0.3.55(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(zod@3.25.76))(@ibm-cloud/watsonx-ai@1.6.12)(@langchain/anthropic@0.2.18(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76))(axios@1.11.0)(fast-xml-parser@4.5.3)(handlebars@4.7.8)(ibm-cloud-sdk-core@5.4.2)(jsonwebtoken@9.0.2)(mongodb@6.19.0)(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(pg@8.16.3)(playwright@1.55.0)(weaviate-client@3.8.1)(ws@8.18.3) - '@langchain/core': - specifier: ^0.2.36 - version: 0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) - '@langchain/google-genai': - specifier: ^0.1.8 - version: 0.1.12(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76) - '@langchain/openai': - specifier: ^0.0.25 - version: 0.0.25(ws@8.18.3) - '@types/node': - specifier: ^20 - version: 20.19.13 - cors: - specifier: ^2.8.5 - version: 2.8.5 - dotenv: - specifier: ^16.4.7 - version: 16.6.1 - express: - specifier: ^4.21.2 - version: 4.21.2 - node-fetch: - 
specifier: 2.7.0 - version: 2.7.0 - uuid: - specifier: ^11.1.0 - version: 11.1.0 - winston: - specifier: ^3.17.0 - version: 3.17.0 - ws: - specifier: ^8.18.1 - version: 8.18.3 - devDependencies: - '@cairo-coder/typescript-config': - specifier: workspace:* - version: link:../typescript-config - '@types/cors': - specifier: ^2.8.17 - version: 2.8.19 - '@types/express': - specifier: ^4.17.21 - version: 4.17.23 - '@types/jest': - specifier: ^29.5.14 - version: 29.5.14 - '@types/supertest': - specifier: ^6.0.2 - version: 6.0.3 - jest: - specifier: ^29.7.0 - version: 29.7.0(@types/node@20.19.13)(ts-node@10.9.2(@types/node@20.19.13)(typescript@5.9.2)) - jest-mock-extended: - specifier: 4.0.0-beta1 - version: 4.0.0-beta1(@jest/globals@29.7.0)(jest@29.7.0(@types/node@20.19.13)(ts-node@10.9.2(@types/node@20.19.13)(typescript@5.9.2)))(typescript@5.9.2) - nodemon: - specifier: ^3.1.9 - version: 3.1.10 - prettier: - specifier: ^3.5.2 - version: 3.6.2 - supertest: - specifier: ^7.0.0 - version: 7.1.4 - ts-jest: - specifier: ^29.2.5 - version: 29.4.1(@babel/core@7.28.4)(@jest/transform@29.7.0)(@jest/types@29.6.3)(babel-jest@29.7.0(@babel/core@7.28.4))(jest-util@29.7.0)(jest@29.7.0(@types/node@20.19.13)(ts-node@10.9.2(@types/node@20.19.13)(typescript@5.9.2)))(typescript@5.9.2) - ts-node: - specifier: ^10.9.2 - version: 10.9.2(@types/node@20.19.13)(typescript@5.9.2) - typescript: - specifier: ^5.7.3 - version: 5.9.2 - packages/ingester: dependencies: '@antora/lunr-extension': @@ -202,15 +114,15 @@ importers: '@cairo-coder/agents': specifier: workspace:* version: link:../agents - '@cairo-coder/backend': - specifier: workspace:* - version: link:../backend '@iarna/toml': specifier: ^2.2.5 version: 2.2.5 '@langchain/core': specifier: ^0.2.36 - version: 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) + version: 0.2.36(openai@5.12.2(ws@8.18.3)) + '@langchain/openai': + specifier: ^0.0.25 + version: 0.0.25(ws@8.18.3) adm-zip: specifier: ^0.5.16 version: 0.5.16 @@ -1108,33 +1020,15 @@ packages: '@types/babel__traverse@7.28.0': resolution: {integrity: sha512-8PvcXf70gTDZBgt9ptxJ8elBeBjcLOAcOtoO/mPJjtji1+CdGbHgm77om1GrsPxsiE+uXIpNSK64UYaIwQXd4Q==} - '@types/body-parser@1.19.6': - resolution: {integrity: sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==} - - '@types/connect@3.4.38': - resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} - '@types/cookiejar@2.1.5': resolution: {integrity: sha512-he+DHOWReW0nghN24E1WUqM0efK4kI9oTqDm6XmK8ZPe2djZ90BSNdGnIyCLzCPw7/pogPlGbzI2wHGGmi4O/Q==} - '@types/cors@2.8.19': - resolution: {integrity: sha512-mFNylyeyqN93lfe/9CSxOGREz8cpzAhH+E93xJ4xWQf62V8sQ/24reV2nyzUWM6H6Xji+GGHpkbLe7pVoUEskg==} - '@types/debug@4.1.12': resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} - '@types/express-serve-static-core@4.19.6': - resolution: {integrity: sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==} - - '@types/express@4.17.23': - resolution: {integrity: sha512-Crp6WY9aTYP3qPi2wGDo9iUe/rceX01UMhnF1jmwDcKCFM6cx7YhGP/Mpr3y9AASpfHixIG0E6azCcL5OcDHsQ==} - '@types/graceful-fs@4.1.9': resolution: {integrity: sha512-olP3sd1qOEe5dXTSaFvQG+02VdRXcdytWLAZsAq1PecU8uqQAhkrnbli7DagjtXKW/Bl7YJbUsa8MPcuc8LHEQ==} - '@types/http-errors@2.0.5': - resolution: {integrity: sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==} - 
'@types/istanbul-lib-coverage@2.0.6': resolution: {integrity: sha512-2QF/t/auWm0lsy8XtKVPG19v3sSOQlJe/YHZgfjb/KBBHOGSV+J2q/S671rcq9uTBrLAXmZpqJiaQbMT+zNU1w==} @@ -1150,9 +1044,6 @@ packages: '@types/methods@1.1.4': resolution: {integrity: sha512-ymXWVrDiCxTBE3+RIrrP533E70eA+9qu7zdWoHuOmGujkYtzf4HQF96b8nwHLqhuf4ykX61IGRIB38CC6/sImQ==} - '@types/mime@1.3.5': - resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==} - '@types/ms@2.1.0': resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} @@ -1165,21 +1056,9 @@ packages: '@types/node@20.19.13': resolution: {integrity: sha512-yCAeZl7a0DxgNVteXFHt9+uyFbqXGy/ShC4BlcHkoE0AfGXYv/BUiplV72DjMYXHDBXFjhvr6DD1NiRVfB4j8g==} - '@types/qs@6.14.0': - resolution: {integrity: sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==} - - '@types/range-parser@1.2.7': - resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} - '@types/retry@0.12.0': resolution: {integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==} - '@types/send@0.17.5': - resolution: {integrity: sha512-z6F2D3cOStZvuk2SaP6YrwkNO65iTZcwA2ZkSABegdkAh/lf+Aa/YQndZVfmEXT5vgAp6zv06VQ3ejSVjAny4w==} - - '@types/serve-static@1.15.8': - resolution: {integrity: sha512-roei0UY3LhpOJvjbIP6ZZFngyLKl5dskOtDhxY5THRSpO+ZI+nzJ+m5yUMzGrp89YRa7lvknKkMYjqQFGwA7Sg==} - '@types/stack-utils@2.0.3': resolution: {integrity: sha512-9aEbYZ3TbYMznPdcdr3SmIrLXwC/AKZXQeCf9Pgao5CKb8CyHuEX5jzWPTkvregvhRJHcpRO6BFoGW9ycaOkYw==} @@ -1220,10 +1099,6 @@ packages: resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} engines: {node: '>=6.5'} - accepts@1.3.8: - resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} - engines: {node: '>= 0.6'} - acorn-walk@8.3.4: resolution: {integrity: sha512-ueEepnujpqee2o5aIYnvHU6C0A42MNdsIDeqy5BydrkuC5R1ZuUFnm27EeFJGoEHJQgn3uleRvmTXaJgfXbt4g==} engines: {node: '>=0.4.0'} @@ -1275,9 +1150,6 @@ packages: argparse@2.0.1: resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} - array-flatten@1.1.1: - resolution: {integrity: sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==} - asap@2.0.6: resolution: {integrity: sha512-BSHWgDSAiKs50o2Re8ppvp3seVHXSRM44cdSsT9FfNEUUZLOGWVCsiWaRPWM1Znn+mqZ1OfVZ3z3DWEzSp7hRA==} @@ -1340,10 +1212,6 @@ packages: binary-search@1.3.6: resolution: {integrity: sha512-nbE1WxOTTrUWIfsfZ4aHGYu5DOuNkbxGokjV6Z2kxfJK3uaAb8zNK1muzOeipoLHZjInT4Br88BHpzevc681xA==} - body-parser@1.20.3: - resolution: {integrity: sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==} - engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} - brace-expansion@1.1.12: resolution: {integrity: sha512-9T9UjW3r0UW5c1Q7GTwllptXwhvYmEzFhzMfZ9H7FQWt+uZePjZPjBP/W1ZEyZ1twGWom5/56TF4lPcqjnDHcg==} @@ -1379,10 +1247,6 @@ packages: buffer@6.0.3: resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} - bytes@3.1.2: - resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} - engines: {node: '>= 0.8'} - 
call-bind-apply-helpers@1.0.2: resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} engines: {node: '>= 0.4'} @@ -1501,31 +1365,12 @@ packages: constantinople@4.0.1: resolution: {integrity: sha512-vCrqcSIq4//Gx74TXXCGnHpulY1dskqLTFGDmhrGxzeXL8lF8kvXv6mpNWlJj1uD4DW23D4ljAqbY4RRaaUZIw==} - content-disposition@0.5.4: - resolution: {integrity: sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==} - engines: {node: '>= 0.6'} - - content-type@1.0.5: - resolution: {integrity: sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} - engines: {node: '>= 0.6'} - convert-source-map@2.0.0: resolution: {integrity: sha512-Kvp459HrV2FEJ1CAsi1Ku+MY3kasH19TFykTz2xWmMeq6bk2NU3XXvfJ+Q61m0xktWwt+1HSYf3JZsTms3aRJg==} - cookie-signature@1.0.6: - resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} - - cookie@0.7.1: - resolution: {integrity: sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==} - engines: {node: '>= 0.6'} - cookiejar@2.1.4: resolution: {integrity: sha512-LDx6oHrK+PhzLKJU9j5S7/Y3jM/mUHvD/DeI1WQmJn652iPC5Y4TBzC9l+5OMOXlyTTA+SmVUPm0HQUwpD5Jqw==} - cors@2.8.5: - resolution: {integrity: sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==} - engines: {node: '>= 0.10'} - create-jest@29.7.0: resolution: {integrity: sha512-Adz2bdH0Vq3F53KEMJOoftQFutWCukm6J24wbPWRO4k1kMY7gS7ds/uoJkNuV8wDCtWWnuwGcJwpWcih+zEW1Q==} engines: {node: ^14.15.0 || ^16.10.0 || >=18.0.0} @@ -1545,14 +1390,6 @@ packages: resolution: {integrity: sha512-0R9ikRb668HB7QDxT1vkpuUBtqc53YyAwMwGeUFKRojY/NWKvdZ+9UYtRfGmhqNbRkTSVpMbmyhXipFFv2cb/A==} engines: {node: '>= 12'} - debug@2.6.9: - resolution: {integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==} - peerDependencies: - supports-color: '*' - peerDependenciesMeta: - supports-color: - optional: true - debug@4.4.1: resolution: {integrity: sha512-KcKCqiftBJcZr++7ykoDIEwSa3XWowTfNPo92BYxjXiyYEVrUQh2aLyhxBCwww+heortUFxEJYcRzosstTEBYQ==} engines: {node: '>=6.0'} @@ -1582,14 +1419,6 @@ packages: resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} engines: {node: '>=0.4.0'} - depd@2.0.0: - resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} - engines: {node: '>= 0.8'} - - destroy@1.2.0: - resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==} - engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} - detect-newline@3.1.0: resolution: {integrity: sha512-TLz+x/vEXm/Y7P7wn1EJFNLxYpUD4TgMosxY6fAVJUnJMbupHBOncxyWUG9OpTaH9EBD7uFI5LfEgmMOc54DsA==} engines: {node: '>=8'} @@ -1637,9 +1466,6 @@ packages: ecdsa-sig-formatter@1.0.11: resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} - ee-first@1.1.1: - resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} - ejs@3.1.10: resolution: {integrity: sha512-UeJmFfOrAQS8OJWPZ4qtgHyWExa088/MtK5UEyoJGFH67cDEXkZSviOiKRCZ4Xij0zxI3JECgYs3oKx+AizQBA==} engines: {node: '>=0.10.0'} @@ -1658,14 +1484,6 @@ packages: enabled@2.0.0: resolution: {integrity: 
sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==} - encodeurl@1.0.2: - resolution: {integrity: sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==} - engines: {node: '>= 0.8'} - - encodeurl@2.0.0: - resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} - engines: {node: '>= 0.8'} - entities@4.5.0: resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} engines: {node: '>=0.12'} @@ -1693,9 +1511,6 @@ packages: resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} engines: {node: '>=6'} - escape-html@1.0.3: - resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} - escape-string-regexp@2.0.0: resolution: {integrity: sha512-UpzcLCXolUWcNu5HtVMHYdXJjArjsF9C0aNnquZYY4uW/Vu0miy5YoWvbV345HauVvcAUnpRuhMMcqTcGOY2+w==} engines: {node: '>=8'} @@ -1705,10 +1520,6 @@ packages: engines: {node: '>=4'} hasBin: true - etag@1.8.1: - resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} - engines: {node: '>= 0.6'} - event-target-shim@5.0.1: resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} engines: {node: '>=6'} @@ -1735,10 +1546,6 @@ packages: expr-eval@2.0.2: resolution: {integrity: sha512-4EMSHGOPSwAfBiibw3ndnP0AvjDWLsMvGOvWEZ2F96IGk0bIVdjQisOHxReSkE13mHcfbuCiXw+G4y0zv6N8Eg==} - express@4.21.2: - resolution: {integrity: sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==} - engines: {node: '>= 0.10.0'} - extend@3.0.2: resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} @@ -1773,10 +1580,6 @@ packages: resolution: {integrity: sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==} engines: {node: '>=8'} - finalhandler@1.3.1: - resolution: {integrity: sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==} - engines: {node: '>= 0.8'} - find-up@4.1.0: resolution: {integrity: sha512-PpOwAdQ/YlXQ2vj8a3h8IipDuYRi3wceVQQGYWxNINccq40Anw7BlsEXCMbt1Zt+OLA6Fq9suIpIWD0OsnISlw==} engines: {node: '>=8'} @@ -1816,14 +1619,6 @@ packages: resolution: {integrity: sha512-YikH+7CUTOtP44ZTnUhR7Ic2UASBPOqmaRkRKxRbywPTe5VxF7RRCck4af9wutiZ/QKM5nME9Bie2fFaPz5Gug==} engines: {node: '>=14.0.0'} - forwarded@0.2.0: - resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} - engines: {node: '>= 0.6'} - - fresh@0.5.2: - resolution: {integrity: sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==} - engines: {node: '>= 0.6'} - fs.realpath@1.0.0: resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==} @@ -1924,10 +1719,6 @@ packages: htmlparser2@9.1.0: resolution: {integrity: sha512-5zfg6mHUoaer/97TxnGpxmbR7zJtPwIYFMZ/H5ucTlPZhKvtum05yiPK3Mgai3a0DyVxv7qYqoweaEd2nrYQzQ==} - http-errors@2.0.0: - resolution: {integrity: sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==} - engines: {node: '>= 0.8'} - human-signals@2.1.0: resolution: {integrity: 
sha512-B4FFZ6q/T2jhhksgkbEW3HBvWIfDW85snkQgawt07S7J5QXTk6BkNV+0yAeZrM5QpMAdYlocGoljn0sJ/WQkFw==} engines: {node: '>=10.17.0'} @@ -1939,10 +1730,6 @@ packages: resolution: {integrity: sha512-5VFkKYU/vSIWFJTVt392XEdPmiEwUJqhxjn1MRO3lfELyU2FB+yYi8brbmXUgq+D1acHR1fpS7tIJ6IlnrR9Cg==} engines: {node: '>=18'} - iconv-lite@0.4.24: - resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==} - engines: {node: '>=0.10.0'} - ieee754@1.2.1: resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} @@ -1965,10 +1752,6 @@ packages: inherits@2.0.4: resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} - ipaddr.js@1.9.1: - resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} - engines: {node: '>= 0.10'} - is-any-array@2.0.1: resolution: {integrity: sha512-UtilS7hLRu++wb/WBAw9bNuP1Eg04Ivn1vERJck8zJthEvXCBEBpGR/33u/xLKWEQf95803oalHrVDptcAvFdQ==} @@ -2396,16 +2179,9 @@ packages: resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} engines: {node: '>= 0.4'} - media-typer@0.3.0: - resolution: {integrity: sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==} - engines: {node: '>= 0.6'} - memory-pager@1.5.0: resolution: {integrity: sha512-ZS4Bp4r/Zoeq6+NLJpP+0Zzm0pR8whtGPf1XExKLJBAczGMnSi3It14OiNCStjQjM6NU1okjQGSxgEZN8eBYKg==} - merge-descriptors@1.0.3: - resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} - merge-stream@2.0.0: resolution: {integrity: sha512-abv/qOcuPfk3URPfDzmZU1LKmuw8kT+0nIHvKrKgFrwifol/doWcdA4ZqsWQ8ENrFKkd67Mfpo/LovbIUsbt3w==} @@ -2425,11 +2201,6 @@ packages: resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} engines: {node: '>= 0.6'} - mime@1.6.0: - resolution: {integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==} - engines: {node: '>=4'} - hasBin: true - mime@2.6.0: resolution: {integrity: sha512-USPkMeET31rOMiarsBNIHZKLGgvKc/LrjofAnBlOttf5ajRvqiRA8QsenbcooctK6d6Ts6aqZXBA+XbkKthiQg==} engines: {node: '>=4.0.0'} @@ -2494,9 +2265,6 @@ packages: socks: optional: true - ms@2.0.0: - resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==} - ms@2.1.3: resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} @@ -2507,10 +2275,6 @@ packages: natural-compare@1.4.0: resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==} - negotiator@0.6.3: - resolution: {integrity: sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==} - engines: {node: '>= 0.6'} - neo-async@2.6.2: resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} @@ -2582,10 +2346,6 @@ packages: resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} engines: {node: '>= 0.4'} - on-finished@2.4.1: - resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} - engines: 
{node: '>= 0.8'} - once@1.4.0: resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} @@ -2659,10 +2419,6 @@ packages: resolution: {integrity: sha512-ayCKvm/phCGxOkYRSCM82iDwct8/EonSEgCSxWxD7ve6jHggsFl4fZVQBPRNgQoKiuV/odhFrGzQXZwbifC8Rg==} engines: {node: '>=8'} - parseurl@1.3.3: - resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} - engines: {node: '>= 0.8'} - path-exists@4.0.0: resolution: {integrity: sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==} engines: {node: '>=8'} @@ -2678,9 +2434,6 @@ packages: path-parse@1.0.7: resolution: {integrity: sha512-LDJzPVEEEPR+y48z93A0Ed0yXb8pAByGWo/k5YYdYgpY2/2EsOsksJrq7lOHxryrVOn1ejG6oAp8ahvOIQD8sw==} - path-to-regexp@0.1.12: - resolution: {integrity: sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==} - peek-readable@4.1.0: resolution: {integrity: sha512-ZI3LnwUv5nOGbQzD9c2iDG6toheuXSZP5esSHBjopsXH4dg19soufvpUGA3uohi5anFtGb2lhAVdHzH6R/Evvg==} engines: {node: '>=8'} @@ -2784,10 +2537,6 @@ packages: resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} engines: {node: '>=12.0.0'} - proxy-addr@2.0.7: - resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} - engines: {node: '>= 0.10'} - proxy-from-env@1.1.0: resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} @@ -2840,10 +2589,6 @@ packages: pure-rand@6.1.0: resolution: {integrity: sha512-bVWawvoZoBYpp6yIoQtQXHZjmz35RSVHnUOTefl8Vcjr8snTPY1wnpSPMWekcFwbxI6gtmT7rSYPFvz71ldiOA==} - qs@6.13.0: - resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} - engines: {node: '>=0.6'} - qs@6.14.0: resolution: {integrity: sha512-YWWTjgABSKcvs/nWBi9PycY/JiPJqOD4JA6o9Sej2AtvSGarXxKC3OQSk4pAarbdQlKAh5D4FCQkJNkW+GAn3w==} engines: {node: '>=0.6'} @@ -2851,14 +2596,6 @@ packages: querystringify@2.2.0: resolution: {integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} - range-parser@1.2.1: - resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} - engines: {node: '>= 0.6'} - - raw-body@2.5.2: - resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} - engines: {node: '>= 0.8'} - react-is@18.3.1: resolution: {integrity: sha512-/LLMVyas0ljjAtoYiPqYiL8VWXzUUdThrmU5+n20DZv+a+ClRoevUzw5JxU+Ieh5/c87ytoTBV9G1FiKfNJdmg==} @@ -2919,9 +2656,6 @@ packages: resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==} engines: {node: '>=10'} - safer-buffer@2.1.2: - resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} - semver@6.3.1: resolution: {integrity: sha512-BR7VvDCVHO+q2xBEWskxS6DJE1qRnb7DxzUrogb71CWoSficBxYsiAGd+Kl0mmq/MprG9yArRkyrQxTO6XjMzA==} hasBin: true @@ -2931,17 +2665,6 @@ packages: engines: {node: '>=10'} hasBin: true - send@0.19.0: - resolution: {integrity: sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==} - engines: {node: '>= 0.8.0'} - - serve-static@1.16.2: - resolution: 
{integrity: sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==} - engines: {node: '>= 0.8.0'} - - setprototypeof@1.2.0: - resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} - shebang-command@2.0.0: resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} engines: {node: '>=8'} @@ -3010,10 +2733,6 @@ packages: resolution: {integrity: sha512-XlkWvfIm6RmsWtNJx+uqtKLS8eqFbxUg0ZzLXqY0caEy9l7hruX8IpiDnjsLavoBgqCCR71TqWO8MaXYheJ3RQ==} engines: {node: '>=10'} - statuses@2.0.1: - resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} - engines: {node: '>= 0.8'} - string-length@4.0.2: resolution: {integrity: sha512-+l6rNN5fYHNhZZy41RXsYptCjA2Igmq4EG7kZAYFQI1E1VTXarr6ZPXBg6eq7Y6eK4FEhY6AJlyuFIb/v/S0VQ==} engines: {node: '>=10'} @@ -3086,10 +2805,6 @@ packages: resolution: {integrity: sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==} engines: {node: '>=8.0'} - toidentifier@1.0.1: - resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} - engines: {node: '>=0.6'} - token-stream@1.0.0: resolution: {integrity: sha512-VSsyNPPW74RpHwR8Fc21uubwHY7wMDeJLys2IX5zJNih+OnAnaifKHo+1LHT7DAdloQ7apeaaWg8l7qnf/TnEg==} @@ -3214,10 +2929,6 @@ packages: resolution: {integrity: sha512-TeTSQ6H5YHvpqVwBRcnLDCBnDOHWYu7IvGbHT6N8AOymcr9PJGjc1GTtiWZTYg0NCgYwvnYWEkVChQAr9bjfwA==} engines: {node: '>=16'} - type-is@1.6.18: - resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} - engines: {node: '>= 0.6'} - typescript@5.9.2: resolution: {integrity: sha512-CWBzXQrc/qOkhidw1OzBTQuYRbfyxDXJMVJ1XNwUHGROVmuaeiEm3OslpZ1RV96d7SKKjZKrSJu3+t/xlw3R9A==} engines: {node: '>=14.17'} @@ -3241,10 +2952,6 @@ packages: resolution: {integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} engines: {node: '>= 4.0.0'} - unpipe@1.0.0: - resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} - engines: {node: '>= 0.8'} - unxhr@1.2.0: resolution: {integrity: sha512-6cGpm8NFXPD9QbSNx0cD2giy7teZ6xOkCUH3U89WKVkL9N9rBrWjlCwhR94Re18ZlAop4MOc3WU1M3Hv/bgpIw==} engines: {node: '>=8.11'} @@ -3261,18 +2968,10 @@ packages: util-deprecate@1.0.2: resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} - utils-merge@1.0.1: - resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} - engines: {node: '>= 0.4.0'} - uuid@10.0.0: resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} hasBin: true - uuid@11.1.0: - resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} - hasBin: true - uuid@9.0.1: resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} hasBin: true @@ -3290,10 +2989,6 @@ packages: validate.io-function@1.0.2: resolution: {integrity: sha512-LlFybRJEriSuBnUhQyG5bwglhh50EpTL2ul23MPIuR1odjO7XaMLFV8vHGwp7AZciFxtYOeiSCT5st+XSPONiQ==} - vary@1.1.2: - resolution: {integrity: 
sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} - engines: {node: '>= 0.8'} - void-elements@3.1.0: resolution: {integrity: sha512-Dhxzh5HZuiHQhbvTW9AMetFfBHDMYpo23Uo9btPXgdYP+3T5S+p+jgNy7spra+veYhBP2dCSgxR/i2Y02h5/6w==} engines: {node: '>=0.10.0'} @@ -3680,22 +3375,6 @@ snapshots: - encoding - utf-8-validate - '@browserbasehq/stagehand@1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(zod@3.25.76)': - dependencies: - '@anthropic-ai/sdk': 0.27.3 - '@browserbasehq/sdk': 2.6.0 - '@playwright/test': 1.55.0 - deepmerge: 4.3.1 - dotenv: 16.6.1 - openai: 5.12.2(ws@8.18.3)(zod@3.25.76) - ws: 8.18.3 - zod: 3.25.76 - zod-to-json-schema: 3.24.6(zod@3.25.76) - transitivePeerDependencies: - - bufferutil - - encoding - - utf-8-validate - '@colors/colors@1.6.0': {} '@cspotcode/source-map-support@0.8.1': @@ -3946,17 +3625,6 @@ snapshots: - encoding - openai - '@langchain/anthropic@0.2.18(openai@5.12.2(ws@8.18.3)(zod@3.25.76))': - dependencies: - '@anthropic-ai/sdk': 0.25.2 - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - fast-xml-parser: 4.5.3 - zod: 3.25.76 - zod-to-json-schema: 3.24.6(zod@3.25.76) - transitivePeerDependencies: - - encoding - - openai - '@langchain/community@0.3.55(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(zod@3.25.76))(@ibm-cloud/watsonx-ai@1.6.12)(@langchain/anthropic@0.2.18(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76))(axios@1.11.0)(fast-xml-parser@4.5.3)(handlebars@4.7.8)(ibm-cloud-sdk-core@5.4.2)(jsonwebtoken@9.0.2)(mongodb@6.19.0)(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(pg@8.16.3)(playwright@1.55.0)(weaviate-client@3.8.1)(ws@8.18.3)': dependencies: '@browserbasehq/stagehand': 1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@4.104.0(ws@8.18.3)(zod@3.25.76))(zod@3.25.76) @@ -4004,53 +3672,6 @@ snapshots: - handlebars - peggy - '@langchain/community@0.3.55(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(zod@3.25.76))(@ibm-cloud/watsonx-ai@1.6.12)(@langchain/anthropic@0.2.18(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76))(axios@1.11.0)(fast-xml-parser@4.5.3)(handlebars@4.7.8)(ibm-cloud-sdk-core@5.4.2)(jsonwebtoken@9.0.2)(mongodb@6.19.0)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(pg@8.16.3)(playwright@1.55.0)(weaviate-client@3.8.1)(ws@8.18.3)': - dependencies: - '@browserbasehq/stagehand': 1.14.0(@playwright/test@1.55.0)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(zod@3.25.76) - '@ibm-cloud/watsonx-ai': 1.6.12 - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - '@langchain/openai': 0.6.11(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3) - '@langchain/weaviate': 0.2.2(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76))) - binary-extensions: 2.3.0 - expr-eval: 2.0.2 - flat: 5.0.2 - ibm-cloud-sdk-core: 5.4.2 - js-yaml: 4.1.0 - langchain: 
0.3.33(@langchain/anthropic@0.2.18(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76))(axios@1.11.0)(handlebars@4.7.8)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) - langsmith: 0.3.67(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - openai: 5.12.2(ws@8.18.3)(zod@3.25.76) - uuid: 10.0.0 - zod: 3.25.76 - optionalDependencies: - '@browserbasehq/sdk': 2.6.0 - fast-xml-parser: 4.5.3 - jsonwebtoken: 9.0.2 - mongodb: 6.19.0 - pg: 8.16.3 - playwright: 1.55.0 - weaviate-client: 3.8.1 - ws: 8.18.3 - transitivePeerDependencies: - - '@langchain/anthropic' - - '@langchain/aws' - - '@langchain/cerebras' - - '@langchain/cohere' - - '@langchain/deepseek' - - '@langchain/google-genai' - - '@langchain/google-vertexai' - - '@langchain/google-vertexai-web' - - '@langchain/groq' - - '@langchain/mistralai' - - '@langchain/ollama' - - '@langchain/xai' - - '@opentelemetry/api' - - '@opentelemetry/exporter-trace-otlp-proto' - - '@opentelemetry/sdk-trace-base' - - axios - - encoding - - handlebars - - peggy - '@langchain/core@0.1.63(openai@4.104.0(ws@8.18.3)(zod@3.25.76))': dependencies: ansi-styles: 5.2.0 @@ -4084,13 +3705,13 @@ snapshots: transitivePeerDependencies: - openai - '@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76))': + '@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3))': dependencies: ansi-styles: 5.2.0 camelcase: 6.3.0 decamelize: 1.2.0 js-tiktoken: 1.0.21 - langsmith: 0.1.68(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) + langsmith: 0.1.68(openai@5.12.2(ws@8.18.3)) mustache: 4.2.0 p-queue: 6.6.2 p-retry: 4.6.2 @@ -4108,17 +3729,9 @@ snapshots: transitivePeerDependencies: - zod - '@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76)': - dependencies: - '@google/generative-ai': 0.24.1 - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - zod-to-json-schema: 3.24.6(zod@3.25.76) - transitivePeerDependencies: - - zod - - '@langchain/mongodb@0.0.5(openai@5.12.2(ws@8.18.3)(zod@3.25.76))': + '@langchain/mongodb@0.0.5(openai@4.104.0(ws@8.18.3)(zod@3.25.76))': dependencies: - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) + '@langchain/core': 0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) mongodb: 6.19.0 transitivePeerDependencies: - '@aws-sdk/credential-providers' @@ -4150,25 +3763,11 @@ snapshots: transitivePeerDependencies: - ws - '@langchain/openai@0.6.11(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3)': - dependencies: - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - js-tiktoken: 1.0.21 - openai: 5.12.2(ws@8.18.3)(zod@3.25.76) - zod: 3.25.76 - transitivePeerDependencies: - - ws - '@langchain/textsplitters@0.1.0(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))': dependencies: '@langchain/core': 0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) js-tiktoken: 1.0.21 - '@langchain/textsplitters@0.1.0(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))': - dependencies: - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - js-tiktoken: 1.0.21 - '@langchain/weaviate@0.2.2(@langchain/core@0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)))': dependencies: '@langchain/core': 0.2.36(openai@4.104.0(ws@8.18.3)(zod@3.25.76)) @@ -4177,14 +3776,6 @@ snapshots: transitivePeerDependencies: - encoding - '@langchain/weaviate@0.2.2(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))': - 
dependencies: - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - uuid: 10.0.0 - weaviate-client: 3.8.1 - transitivePeerDependencies: - - encoding - '@mongodb-js/saslprep@1.3.0': dependencies: sparse-bitfield: 3.0.3 @@ -4263,45 +3854,16 @@ snapshots: dependencies: '@babel/types': 7.28.4 - '@types/body-parser@1.19.6': - dependencies: - '@types/connect': 3.4.38 - '@types/node': 20.19.13 - - '@types/connect@3.4.38': - dependencies: - '@types/node': 20.19.13 - '@types/cookiejar@2.1.5': {} - '@types/cors@2.8.19': - dependencies: - '@types/node': 20.19.13 - '@types/debug@4.1.12': dependencies: '@types/ms': 2.1.0 - '@types/express-serve-static-core@4.19.6': - dependencies: - '@types/node': 20.19.13 - '@types/qs': 6.14.0 - '@types/range-parser': 1.2.7 - '@types/send': 0.17.5 - - '@types/express@4.17.23': - dependencies: - '@types/body-parser': 1.19.6 - '@types/express-serve-static-core': 4.19.6 - '@types/qs': 6.14.0 - '@types/serve-static': 1.15.8 - '@types/graceful-fs@4.1.9': dependencies: '@types/node': 20.19.13 - '@types/http-errors@2.0.5': {} - '@types/istanbul-lib-coverage@2.0.6': {} '@types/istanbul-lib-report@3.0.3': @@ -4319,8 +3881,6 @@ snapshots: '@types/methods@1.1.4': {} - '@types/mime@1.3.5': {} - '@types/ms@2.1.0': {} '@types/node-fetch@2.6.13': @@ -4336,23 +3896,8 @@ snapshots: dependencies: undici-types: 6.21.0 - '@types/qs@6.14.0': {} - - '@types/range-parser@1.2.7': {} - '@types/retry@0.12.0': {} - '@types/send@0.17.5': - dependencies: - '@types/mime': 1.3.5 - '@types/node': 20.19.13 - - '@types/serve-static@1.15.8': - dependencies: - '@types/http-errors': 2.0.5 - '@types/node': 20.19.13 - '@types/send': 0.17.5 - '@types/stack-utils@2.0.3': {} '@types/superagent@8.1.9': @@ -4393,11 +3938,6 @@ snapshots: dependencies: event-target-shim: 5.0.1 - accepts@1.3.8: - dependencies: - mime-types: 2.1.35 - negotiator: 0.6.3 - acorn-walk@8.3.4: dependencies: acorn: 8.15.0 @@ -4437,8 +3977,6 @@ snapshots: argparse@2.0.1: {} - array-flatten@1.1.1: {} - asap@2.0.6: {} asciidoctor@3.0.4(chokidar@3.6.0): @@ -4533,23 +4071,6 @@ snapshots: binary-search@1.3.6: {} - body-parser@1.20.3: - dependencies: - bytes: 3.1.2 - content-type: 1.0.5 - debug: 2.6.9 - depd: 2.0.0 - destroy: 1.2.0 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - on-finished: 2.4.1 - qs: 6.13.0 - raw-body: 2.5.2 - type-is: 1.6.18 - unpipe: 1.0.0 - transitivePeerDependencies: - - supports-color - brace-expansion@1.1.12: dependencies: balanced-match: 1.0.2 @@ -4589,8 +4110,6 @@ snapshots: base64-js: 1.5.1 ieee754: 1.2.1 - bytes@3.1.2: {} - call-bind-apply-helpers@1.0.2: dependencies: es-errors: 1.3.0 @@ -4719,25 +4238,10 @@ snapshots: '@babel/parser': 7.28.4 '@babel/types': 7.28.4 - content-disposition@0.5.4: - dependencies: - safe-buffer: 5.2.1 - - content-type@1.0.5: {} - convert-source-map@2.0.0: {} - cookie-signature@1.0.6: {} - - cookie@0.7.1: {} - cookiejar@2.1.4: {} - cors@2.8.5: - dependencies: - object-assign: 4.1.1 - vary: 1.1.2 - create-jest@29.7.0(@types/node@20.19.13)(ts-node@10.9.2(@types/node@20.19.13)(typescript@5.9.2)): dependencies: '@jest/types': 29.6.3 @@ -4769,10 +4273,6 @@ snapshots: data-uri-to-buffer@4.0.1: {} - debug@2.6.9: - dependencies: - ms: 2.0.0 - debug@4.4.1(supports-color@5.5.0): dependencies: ms: 2.1.3 @@ -4787,10 +4287,6 @@ snapshots: delayed-stream@1.0.0: {} - depd@2.0.0: {} - - destroy@1.2.0: {} - detect-newline@3.1.0: {} dezalgo@1.0.4: @@ -4836,8 +4332,6 @@ snapshots: dependencies: safe-buffer: 5.2.1 - ee-first@1.1.1: {} - ejs@3.1.10: dependencies: jake: 10.9.4 @@ -4850,10 +4344,6 @@ 
snapshots: enabled@2.0.0: {} - encodeurl@1.0.2: {} - - encodeurl@2.0.0: {} - entities@4.5.0: {} error-ex@1.3.2: @@ -4877,14 +4367,10 @@ snapshots: escalade@3.2.0: {} - escape-html@1.0.3: {} - escape-string-regexp@2.0.0: {} esprima@4.0.1: {} - etag@1.8.1: {} - event-target-shim@5.0.1: {} eventemitter3@4.0.7: {} @@ -4915,42 +4401,6 @@ snapshots: expr-eval@2.0.2: {} - express@4.21.2: - dependencies: - accepts: 1.3.8 - array-flatten: 1.1.1 - body-parser: 1.20.3 - content-disposition: 0.5.4 - content-type: 1.0.5 - cookie: 0.7.1 - cookie-signature: 1.0.6 - debug: 2.6.9 - depd: 2.0.0 - encodeurl: 2.0.0 - escape-html: 1.0.3 - etag: 1.8.1 - finalhandler: 1.3.1 - fresh: 0.5.2 - http-errors: 2.0.0 - merge-descriptors: 1.0.3 - methods: 1.1.2 - on-finished: 2.4.1 - parseurl: 1.3.3 - path-to-regexp: 0.1.12 - proxy-addr: 2.0.7 - qs: 6.13.0 - range-parser: 1.2.1 - safe-buffer: 5.2.1 - send: 0.19.0 - serve-static: 1.16.2 - setprototypeof: 1.2.0 - statuses: 2.0.1 - type-is: 1.6.18 - utils-merge: 1.0.1 - vary: 1.1.2 - transitivePeerDependencies: - - supports-color - extend@3.0.2: {} fast-json-stable-stringify@2.1.0: {} @@ -4986,18 +4436,6 @@ snapshots: dependencies: to-regex-range: 5.0.1 - finalhandler@1.3.1: - dependencies: - debug: 2.6.9 - encodeurl: 2.0.0 - escape-html: 1.0.3 - on-finished: 2.4.1 - parseurl: 1.3.3 - statuses: 2.0.1 - unpipe: 1.0.0 - transitivePeerDependencies: - - supports-color - find-up@4.1.0: dependencies: locate-path: 5.0.0 @@ -5036,10 +4474,6 @@ snapshots: dezalgo: 1.0.4 once: 1.4.0 - forwarded@0.2.0: {} - - fresh@0.5.2: {} - fs.realpath@1.0.0: {} fsevents@2.3.2: @@ -5143,14 +4577,6 @@ snapshots: domutils: 3.2.2 entities: 4.5.0 - http-errors@2.0.0: - dependencies: - depd: 2.0.0 - inherits: 2.0.4 - setprototypeof: 1.2.0 - statuses: 2.0.1 - toidentifier: 1.0.1 - human-signals@2.1.0: {} humanize-ms@1.2.1: @@ -5172,15 +4598,11 @@ snapshots: isstream: 0.1.2 jsonwebtoken: 9.0.2 mime-types: 2.1.35 - retry-axios: 2.6.0(axios@1.11.0) + retry-axios: 2.6.0(axios@1.11.0(debug@4.4.1)) tough-cookie: 4.1.4 transitivePeerDependencies: - supports-color - iconv-lite@0.4.24: - dependencies: - safer-buffer: 2.1.2 - ieee754@1.2.1: {} ignore-by-default@1.0.1: {} @@ -5199,8 +4621,6 @@ snapshots: inherits@2.0.4: {} - ipaddr.js@1.9.1: {} - is-any-array@2.0.1: {} is-arrayish@0.2.1: {} @@ -5694,32 +5114,6 @@ snapshots: - openai - ws - langchain@0.3.33(@langchain/anthropic@0.2.18(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(@langchain/google-genai@0.1.12(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76))(axios@1.11.0)(handlebars@4.7.8)(openai@5.12.2(ws@8.18.3)(zod@3.25.76))(ws@8.18.3): - dependencies: - '@langchain/core': 0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - '@langchain/openai': 0.6.11(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3) - '@langchain/textsplitters': 0.1.0(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76))) - js-tiktoken: 1.0.21 - js-yaml: 4.1.0 - jsonpointer: 5.0.1 - langsmith: 0.3.67(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - openapi-types: 12.1.3 - p-retry: 4.6.2 - uuid: 10.0.0 - yaml: 2.8.1 - zod: 3.25.76 - optionalDependencies: - '@langchain/anthropic': 0.2.18(openai@5.12.2(ws@8.18.3)(zod@3.25.76)) - '@langchain/google-genai': 0.1.12(@langchain/core@0.2.36(openai@5.12.2(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76) - axios: 1.11.0(debug@4.4.1) - handlebars: 4.7.8 - transitivePeerDependencies: - - '@opentelemetry/api' - - '@opentelemetry/exporter-trace-otlp-proto' - - 
'@opentelemetry/sdk-trace-base' - - openai - - ws - langsmith@0.1.68(openai@4.104.0(ws@8.18.3)(zod@3.25.76)): dependencies: '@types/uuid': 10.0.0 @@ -5731,7 +5125,7 @@ snapshots: optionalDependencies: openai: 4.104.0(ws@8.18.3)(zod@3.25.76) - langsmith@0.1.68(openai@5.12.2(ws@8.18.3)(zod@3.25.76)): + langsmith@0.1.68(openai@5.12.2(ws@8.18.3)): dependencies: '@types/uuid': 10.0.0 commander: 10.0.1 @@ -5754,18 +5148,6 @@ snapshots: optionalDependencies: openai: 4.104.0(ws@8.18.3)(zod@3.25.76) - langsmith@0.3.67(openai@5.12.2(ws@8.18.3)(zod@3.25.76)): - dependencies: - '@types/uuid': 10.0.0 - chalk: 4.1.2 - console-table-printer: 2.14.6 - p-queue: 6.6.2 - p-retry: 4.6.2 - semver: 7.7.2 - uuid: 10.0.0 - optionalDependencies: - openai: 5.12.2(ws@8.18.3)(zod@3.25.76) - leven@3.1.0: {} lines-and-columns@1.2.4: {} @@ -5823,12 +5205,8 @@ snapshots: math-intrinsics@1.1.0: {} - media-typer@0.3.0: {} - memory-pager@1.5.0: {} - merge-descriptors@1.0.3: {} - merge-stream@2.0.0: {} methods@1.1.2: {} @@ -5844,8 +5222,6 @@ snapshots: dependencies: mime-db: 1.52.0 - mime@1.6.0: {} - mime@2.6.0: {} mimic-fn@2.1.0: {} @@ -5892,16 +5268,12 @@ snapshots: bson: 6.10.4 mongodb-connection-string-url: 3.0.2 - ms@2.0.0: {} - ms@2.1.3: {} mustache@4.2.0: {} natural-compare@1.4.0: {} - negotiator@0.6.3: {} - neo-async@2.6.2: {} nice-grpc-client-middleware-retry@3.1.11: @@ -5968,10 +5340,6 @@ snapshots: object-inspect@1.13.4: {} - on-finished@2.4.1: - dependencies: - ee-first: 1.1.1 - once@1.4.0: dependencies: wrappy: 1.0.2 @@ -6043,8 +5411,6 @@ snapshots: json-parse-even-better-errors: 2.3.1 lines-and-columns: 1.2.4 - parseurl@1.3.3: {} - path-exists@4.0.0: {} path-is-absolute@1.0.1: {} @@ -6053,8 +5419,6 @@ snapshots: path-parse@1.0.7: {} - path-to-regexp@0.1.12: {} - peek-readable@4.1.0: {} pg-cloudflare@1.2.7: @@ -6154,11 +5518,6 @@ snapshots: '@types/node': 20.19.13 long: 5.3.2 - proxy-addr@2.0.7: - dependencies: - forwarded: 0.2.0 - ipaddr.js: 1.9.1 - proxy-from-env@1.1.0: {} psl@1.15.0: @@ -6238,25 +5597,12 @@ snapshots: pure-rand@6.1.0: {} - qs@6.13.0: - dependencies: - side-channel: 1.1.0 - qs@6.14.0: dependencies: side-channel: 1.1.0 querystringify@2.2.0: {} - range-parser@1.2.1: {} - - raw-body@2.5.2: - dependencies: - bytes: 3.1.2 - http-errors: 2.0.0 - iconv-lite: 0.4.24 - unpipe: 1.0.0 - react-is@18.3.1: {} readable-stream@3.6.2: @@ -6299,7 +5645,7 @@ snapshots: path-parse: 1.0.7 supports-preserve-symlinks-flag: 1.0.0 - retry-axios@2.6.0(axios@1.11.0): + retry-axios@2.6.0(axios@1.11.0(debug@4.4.1)): dependencies: axios: 1.11.0(debug@4.4.1) @@ -6309,41 +5655,10 @@ snapshots: safe-stable-stringify@2.5.0: {} - safer-buffer@2.1.2: {} - semver@6.3.1: {} semver@7.7.2: {} - send@0.19.0: - dependencies: - debug: 2.6.9 - depd: 2.0.0 - destroy: 1.2.0 - encodeurl: 1.0.2 - escape-html: 1.0.3 - etag: 1.8.1 - fresh: 0.5.2 - http-errors: 2.0.0 - mime: 1.6.0 - ms: 2.1.3 - on-finished: 2.4.1 - range-parser: 1.2.1 - statuses: 2.0.1 - transitivePeerDependencies: - - supports-color - - serve-static@1.16.2: - dependencies: - encodeurl: 2.0.0 - escape-html: 1.0.3 - parseurl: 1.3.3 - send: 0.19.0 - transitivePeerDependencies: - - supports-color - - setprototypeof@1.2.0: {} - shebang-command@2.0.0: dependencies: shebang-regex: 3.0.0 @@ -6415,8 +5730,6 @@ snapshots: dependencies: escape-string-regexp: 2.0.0 - statuses@2.0.1: {} - string-length@4.0.2: dependencies: char-regex: 1.0.2 @@ -6498,8 +5811,6 @@ snapshots: dependencies: is-number: 7.0.0 - toidentifier@1.0.1: {} - token-stream@1.0.0: {} token-types@4.2.1: @@ -6601,11 
+5912,6 @@ snapshots: type-fest@4.41.0: {} - type-is@1.6.18: - dependencies: - media-typer: 0.3.0 - mime-types: 2.1.35 - typescript@5.9.2: {} uglify-js@3.19.3: @@ -6619,8 +5925,6 @@ snapshots: universalify@0.2.0: {} - unpipe@1.0.0: {} - unxhr@1.2.0: {} update-browserslist-db@1.1.3(browserslist@4.25.4): @@ -6636,12 +5940,8 @@ snapshots: util-deprecate@1.0.2: {} - utils-merge@1.0.1: {} - uuid@10.0.0: {} - uuid@11.1.0: {} - uuid@9.0.1: {} v8-compile-cache-lib@3.0.1: {} @@ -6656,8 +5956,6 @@ snapshots: validate.io-function@1.0.2: {} - vary@1.1.2: {} - void-elements@3.1.0: {} walker@1.0.8: diff --git a/turbo.json b/turbo.json index ebb52092..b427c07c 100644 --- a/turbo.json +++ b/turbo.json @@ -12,67 +12,11 @@ "dependsOn": ["^build"], "outputs": ["dist/**", "build/**", "out/**", ".next/**"] }, - "dev": { - "cache": false, - "persistent": true, - "env": [ - "POSTGRES_HOST", - "POSTGRES_PORT", - "POSTGRES_USER", - "POSTGRES_PASSWORD", - "POSTGRES_DB", - "POSTGRES_TABLE_NAME", - "OPENAI_API_KEY", - "ANTHROPIC_API_KEY", - "GEMINI_API_KEY", - "DEEPSEEK_API_KEY", - "GROQ_API_KEY", - "DEFAULT_CHAT_PROVIDER", - "DEFAULT_CHAT_MODEL", - "DEFAULT_EMBEDDING_PROVIDER", - "DEFAULT_EMBEDDING_MODEL", - "LANGSMITH_API_KEY", - "LANGSMITH_TRACING", - "LANGSMITH_ENDPOINT", - "HOST", - "PORT", - "DEBUG" - ] - }, "lint": {}, "test": { "dependsOn": ["^build"], "outputs": [] }, - "start": { - "dependsOn": ["build"], - "cache": false, - "persistent": true, - "inputs": ["dist/**", "build/**", "out/**", ".next/**", ".env"], - "env": [ - "POSTGRES_HOST", - "POSTGRES_PORT", - "POSTGRES_USER", - "POSTGRES_PASSWORD", - "POSTGRES_DB", - "POSTGRES_TABLE_NAME", - "OPENAI_API_KEY", - "ANTHROPIC_API_KEY", - "GEMINI_API_KEY", - "DEEPSEEK_API_KEY", - "GROQ_API_KEY", - "DEFAULT_CHAT_PROVIDER", - "DEFAULT_CHAT_MODEL", - "DEFAULT_EMBEDDING_PROVIDER", - "DEFAULT_EMBEDDING_MODEL", - "LANGSMITH_API_KEY", - "LANGSMITH_TRACING", - "LANGSMITH_ENDPOINT", - "HOST", - "PORT", - "DEBUG" - ] - }, "generate-embeddings": { "dependsOn": ["build"], "cache": false,