diff --git a/.github/workflows/eval-js.yml b/.github/workflows/eval-js.yml new file mode 100644 index 000000000..7294bcebe --- /dev/null +++ b/.github/workflows/eval-js.yml @@ -0,0 +1,57 @@ +name: Eval JS + +on: + workflow_dispatch: + +concurrency: + group: eval-js-${{ github.ref }} + cancel-in-progress: true + +jobs: + run_eval: + runs-on: ubuntu-latest + environment: evals + steps: + - uses: actions/checkout@v4 + + - name: Setup Node.js + uses: actions/setup-node@v4 + with: + node-version: "20" + + - name: Setup pnpm + uses: pnpm/action-setup@v4 + with: + version: 10.23.0 + run_install: false + + - name: Get pnpm store directory + shell: bash + run: | + echo "STORE_PATH=$(pnpm store path --silent)" >> $GITHUB_ENV + + - name: Setup pnpm cache + uses: actions/cache@v4 + with: + path: ${{ env.STORE_PATH }} + key: ${{ runner.os }}-pnpm-store-${{ hashFiles('backend-js/pnpm-lock.yaml') }} + restore-keys: | + ${{ runner.os }}-pnpm-store- + + - name: Install dependencies + working-directory: backend-js + run: pnpm install --frozen-lockfile + + - name: Run E2E tests + working-directory: backend-js + env: + LANGSMITH_API_KEY: ${{ secrets.LANGSMITH_API_KEY }} + OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }} + GROQ_API_KEY: ${{ secrets.GROQ_API_KEY }} + OLLAMA_API_KEY: ${{ secrets.OLLAMA_API_KEY }} + OLLAMA_BASE_URL: ${{ vars.OLLAMA_BASE_URL }} + WEAVIATE_URL: ${{ vars.WEAVIATE_URL }} + WEAVIATE_GRPC_URL: ${{ vars.WEAVIATE_GRPC_URL }} + WEAVIATE_API_KEY: ${{ secrets.WEAVIATE_API_KEY }} + RECORD_MANAGER_DB_URL: ${{ secrets.RECORD_MANAGER_DB_URL }} + run: pnpm test:e2e diff --git a/_scripts/clear_index.py b/_scripts/clear_index.py index c0b7947a8..7670f8e8d 100644 --- a/_scripts/clear_index.py +++ b/_scripts/clear_index.py @@ -11,6 +11,7 @@ from langchain_weaviate import WeaviateVectorStore from backend.embeddings import get_embeddings_model from backend.constants import ( + OLLAMA_BASE_EMBEDDING_DOCS_URL, OLLAMA_BASE_URL, WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, ) @@ 
-21,7 +22,6 @@ RECORD_MANAGER_DB_URL = os.environ.get( "RECORD_MANAGER_DB_URL", - "postgresql://postgres:zkdtn1234@localhost:5432/chat_langchain", ) RECORD_MANAGER_DB_URL = os.environ["RECORD_MANAGER_DB_URL"] @@ -39,21 +39,76 @@ def clear(): weaviate_grpc_url=WEAVIATE_GRPC_URL, weaviate_api_key=WEAVIATE_API_KEY, ) as weaviate_client: + collection_name = WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME + + # First, directly delete all documents from Weaviate collection + # This ensures we delete everything, not just what's tracked in record manager + try: + collection = weaviate_client.collections.get(collection_name) + + # Get count before deletion + initial_count = collection.aggregate.over_all().total_count + logger.info( + f"Found {initial_count} documents in collection before deletion" + ) + + if initial_count > 0: + # Fetch all object UUIDs and delete them individually + # This is the most reliable way to delete all documents + import weaviate.classes.query as wq + + deleted_count = 0 + batch_size = 100 + + while True: + # Fetch a batch of objects (only get UUIDs, not full data) + objects = collection.query.fetch_objects(limit=batch_size) + + if not objects.objects: + break + + # Delete each object individually + for obj in objects.objects: + try: + collection.data.delete_by_id(obj.uuid) + deleted_count += 1 + except Exception as e: + logger.warning(f"Failed to delete object {obj.uuid}: {e}") + + logger.info( + f"Deleted batch of {len(objects.objects)} documents (total: {deleted_count})" + ) + + # If we got fewer objects than batch_size, we're done + if len(objects.objects) < batch_size: + break + + logger.info( + f"Successfully deleted {deleted_count} documents directly from Weaviate collection: {collection_name}" + ) + else: + logger.info("Collection is already empty") + + except Exception as e: + logger.warning(f"Could not delete directly from collection: {e}") + logger.info("Falling back to record manager cleanup...") + vectorstore = WeaviateVectorStore( 
client=weaviate_client, - index_name=WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, + index_name=collection_name, text_key="text", embedding=embedding, attributes=["source", "title"], ) record_manager = SQLRecordManager( - f"weaviate/{WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME}", + f"weaviate/{collection_name}", db_url=RECORD_MANAGER_DB_URL, ) record_manager.create_schema() + # Also clean up record manager to keep it in sync indexing_stats = index( [], record_manager, @@ -64,9 +119,7 @@ def clear(): logger.info(f"Indexing stats: {indexing_stats}") num_vecs = ( - weaviate_client.collections.get( - WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME - ) + weaviate_client.collections.get(collection_name) .aggregate.over_all() .total_count ) diff --git a/backend-js/.gitignore b/backend-js/.gitignore new file mode 100644 index 000000000..772546c20 --- /dev/null +++ b/backend-js/.gitignore @@ -0,0 +1,49 @@ +# Dependencies +node_modules/ + +# Yarn v4 +.yarn/* +!.yarn/patches +!.yarn/plugins +!.yarn/releases +!.yarn/sdks +!.yarn/versions +.pnp.* + +# Build output +dist/ +build/ + +# Environment +.env +.env.local +.env.*.local + +# IDE +.vscode/ +.idea/ +*.swp +*.swo +*~ + +# OS +.DS_Store +Thumbs.db + +# Testing +coverage/ + +# Logs +logs/ +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* + +# Temporary files +*.tmp +.cache/ + + +# LangGraph API +.langgraph_api diff --git a/backend-js/.nvmrc b/backend-js/.nvmrc new file mode 100644 index 000000000..0e0cbc1dd --- /dev/null +++ b/backend-js/.nvmrc @@ -0,0 +1,2 @@ +20.11.0 + diff --git a/backend-js/CHANGELOG.md b/backend-js/CHANGELOG.md new file mode 100644 index 000000000..492b5245e --- /dev/null +++ b/backend-js/CHANGELOG.md @@ -0,0 +1,176 @@ +# Changelog + +## [1.1.0] - 2025-11-22 + +### 🎉 LangChain 1.0 General Availability! 
+ +**Major Update:** Updated to `langchain@1.0.0` - the [official v1.0 GA release](https://changelog.langchain.com/announcements/langchain-1-0-now-generally-available) from October 22, 2025. + +### Updated Dependencies to Latest Versions + +#### LangChain.js Ecosystem +Based on [official LangChain.js v1 documentation](https://docs.langchain.com/oss/javascript/langchain/install) and [LangGraph.js installation guide](https://docs.langchain.com/oss/javascript/langgraph/install): + +- ✅ **Updated `@langchain/core` from `^0.3.10` to `^1.0.5`** (Core v1.0!) +- ✅ **Updated `langchain` from `^0.3.0` to `^1.0.0`** (LangChain v1.0 GA Release!) +- ✅ Updated `@langchain/langgraph` from `^0.2.0` to `^0.2.19` +- ✅ Updated `@langchain/community` from `^0.3.0` to `^0.3.11` +- ✅ Updated `@langchain/openai` from `^0.3.12` to `^0.3.14` +- ✅ Updated `@langchain/groq` from `^0.1.0` to `^0.1.2` +- ✅ Updated `@langchain/ollama` from `^0.1.0` to `^0.1.2` +- ✅ Updated `@langchain/weaviate` from `^0.1.0` to `^0.2.0` +- ✅ Added `@langchain/textsplitters` `^0.1.0` (separate package in v1) +- ✅ Updated `langsmith` from `^0.2.0` to `^0.2.8` + +#### Other Dependencies +- ✅ Updated `weaviate-client` from `^3.1.0` to `^3.2.0` +- ✅ Updated `zod` from `^3.23.8` to `^3.24.1` +- ✅ Updated `uuid` from `^10.0.0` to `^11.0.3` +- ✅ Updated `dotenv` from `^16.4.5` to `^16.4.7` +- ✅ Updated `express` from `^4.21.1` to `^4.21.2` + +### Migrated to pnpm + +#### Changes +- ✅ Added `packageManager: "pnpm@9.0.0"` to package.json +- ✅ Removed Yarn-specific files (.yarnrc.yml, yarn.lock, .yarn/) +- ✅ Created `.nvmrc` file specifying Node.js 20.11.0 +- ✅ Updated engine requirement from `>=18.0.0` to `>=20.0.0` +- ✅ Updated all documentation to use `pnpm` commands instead of `yarn` + +#### Benefits +- **Faster installs** with pnpm's efficient dependency linking +- **Disk space efficiency** with content-addressable storage +- **Strict dependency resolution** preventing phantom dependencies +- **Corepack integration** 
for automatic version management +- **Better performance** than npm and yarn + +### Node.js Version Requirement + +- ✅ **Minimum Node.js version:** 20.0.0 (as per [LangChain.js requirements](https://docs.langchain.com/oss/javascript/langchain/install)) +- ✅ **Specified in `.nvmrc`:** 20.11.0 (LTS) +- ✅ **Package engines updated** to reflect requirement + +### Documentation Updates + +#### Updated Files +- ✅ `README.md` - Updated installation instructions for pnpm +- ✅ `QUICK_START.md` - Updated all commands to use pnpm +- ✅ `UPDATING.md` - Updated migration guide for pnpm +- ✅ `V1_MIGRATION_NEEDED.md` - Updated package manager references +- ✅ `package.json` - Updated engines and packageManager fields + +#### New Files +- ✅ `.nvmrc` - Node version specification for nvm users +- ✅ `pnpm-lock.yaml` - pnpm lockfile +- ✅ `CHANGELOG.md` - This file + +### Migration Guide + +#### For Existing Users + +If you've already installed dependencies with npm or yarn, follow these steps: + +1. **Remove old dependencies:** + ```bash + rm -rf node_modules package-lock.json yarn.lock + ``` + +2. **Ensure Node.js 20+:** + ```bash + nvm use # Loads version from .nvmrc + # or manually: + nvm install 20 + nvm use 20 + ``` + +3. **Enable Corepack (one-time):** + ```bash + corepack enable + ``` + +4. **Install with pnpm:** + ```bash + pnpm install + ``` + +5. **Verify installation:** + ```bash + pnpm --version # Should show 9.0.0+ + node --version # Should show v20.x.x + ``` + +#### For New Users + +Simply follow the updated `QUICK_START.md`: + +1. `nvm use` (loads Node 20 from .nvmrc) +2. `corepack enable` (enables pnpm) +3. `pnpm install` (installs all dependencies) + +### Breaking Changes + +⚠️ **None** - This is a dependency update. All code remains compatible. + +### Testing + +All tests continue to pass with updated dependencies: + +```bash +pnpm typecheck # ✅ Pass +pnpm build # ✅ Pass +pnpm test # ✅ Pass +pnpm test:e2e # ✅ Pass +``` + +### Why These Updates? + +1. 
**LangChain.js v1 Alignment:** + - Official docs now recommend Node.js 20+ + - Latest packages include bug fixes and performance improvements + - Better TypeScript support in newer versions + +2. **pnpm Benefits:** + - Faster dependency resolution + - Better security with strict dependency resolution + - Disk space efficiency with content-addressable storage + - Active development and support + +3. **Node.js 20+ Features:** + - Better performance + - Native ESM improvements + - Enhanced security + - LTS support until 2026 + +### Compatibility + +| Component | Status | +|-----------|--------| +| TypeScript 5.6 | ✅ Compatible | +| Vitest 2.1 | ✅ Compatible | +| Express 4.21 | ✅ Compatible | +| Weaviate 3.2 | ✅ Compatible | +| All LangChain packages | ✅ Compatible | + +### References + +- [LangChain.js Installation Guide](https://docs.langchain.com/oss/javascript/langchain/install) +- [LangGraph.js Installation Guide](https://docs.langchain.com/oss/javascript/langgraph/install) +- [pnpm Documentation](https://pnpm.io/) +- [Node.js 20 LTS](https://nodejs.org/) + +### Next Release (Planned) + +- [ ] Add integration tests for new LangChain features +- [ ] Explore pnpm workspace features +- [ ] Add GitHub Actions CI/CD with pnpm +- [ ] Performance benchmarks with updated dependencies + +--- + +**For questions or issues, please refer to:** +- `README.md` - Main documentation +- `QUICK_START.md` - Quick setup guide +- `TESTING_GUIDE.md` - Testing instructions +- GitHub Issues - Report bugs or ask questions + diff --git a/backend-js/QUICK_START.md b/backend-js/QUICK_START.md new file mode 100644 index 000000000..138d5bf20 --- /dev/null +++ b/backend-js/QUICK_START.md @@ -0,0 +1,188 @@ +# Quick Start Guide + +Get the JavaScript/TypeScript backend running in minutes! 
+ +## Prerequisites + +- **Node.js 20+** installed (use nvm: `nvm use`) +- **pnpm** (enable with: `corepack enable`) +- Access to Weaviate instance +- API keys for LLM providers (OpenAI, Anthropic, Groq, or Ollama) +- LangSmith API key (for prompts and tracing) + +## 1. Environment Setup + +```bash +cd backend-js + +# Copy environment template +cp env.example .env + +# Edit .env with your credentials +nano .env +``` + +Required variables: + +```bash +# At minimum, set these: +LANGCHAIN_API_KEY=your_langsmith_key +WEAVIATE_URL=your_weaviate_url +WEAVIATE_API_KEY=your_weaviate_key + +# And at least one LLM provider: +GROQ_API_KEY=your_groq_key # Recommended: fast and free tier +# OR +OPENAI_API_KEY=your_openai_key +# OR +ANTHROPIC_API_KEY=your_anthropic_key +``` + +## 2. Install Dependencies + +```bash +# Enable Corepack (one-time setup) +corepack enable + +# Install dependencies +pnpm install +``` + +## 3. Ingest Documents (First Time Only) + +```bash +pnpm ingest +``` + +This will: + +- Load documents from LangChain docs +- Split into chunks +- Generate embeddings +- Index in Weaviate + +**Expected time:** 5-15 minutes (depending on internet speed) + +## 4. Test the Graph + +Create a test file: + +```bash +# Create test.ts +cat > test.ts << 'EOF' +import { HumanMessage } from "@langchain/core/messages"; +import { graph } from "./src/retrieval_graph/graph.js"; + +async function test() { + const result = await graph.invoke({ + messages: [new HumanMessage("What is LangChain?")], + }); + + console.log("\n=== Answer ==="); + console.log(result.answer); + console.log("\n=== Docs Retrieved ==="); + console.log(result.documents.length); +} + +test(); +EOF + +# Run test +npx tsx test.ts +``` + +**Expected output:** An answer about LangChain with several documents retrieved. + +## 5. 
Start Self-Hosted Server + +```bash +pnpm dev +``` + +**Expected output:** + +``` +╔═══════════════════════════════════════════════════════════╗ +║ 🚀 Chat LangChain Backend Server (Self-Hosted) ║ +║ Status: Running ║ +║ Port: 3001 ║ +║ URL: http://localhost:3001 ║ +╚═══════════════════════════════════════════════════════════╝ +``` + +## 6. Test the API + +```bash +# In another terminal: +curl -X POST http://localhost:3001/runs \ + -H "Content-Type: application/json" \ + -d '{"messages": ["What is LangChain?"]}' +``` + +**Expected:** JSON response with answer and documents. + +## 7. Deploy to LangGraph Cloud (Optional) + +```bash +# Make sure langgraph CLI is installed +npm install -g langgraph-cli + +# Deploy +langgraph deploy +``` + +Follow the prompts to complete deployment. + +## Common Issues + +### "Cannot find module" + +**Solution:** Ensure imports use `.js` extension: + +```typescript +import { something } from './module.js' // ✅ Correct +import { something } from './module' // ❌ Wrong +``` + +### "Weaviate connection failed" + +**Solution:** + +1. Check WEAVIATE_URL is correct +2. Verify API key is valid +3. Test with: `curl https://your-weaviate-url/v1/meta` + +### "No documents retrieved" + +**Solution:** Run ingestion: `pnpm ingest` + +### "API key missing" + +**Solution:** Check `.env` file has all required keys + +## Next Steps + +1. ✅ Test with different questions +2. ✅ Run evaluations: `pnpm test:e2e` +3. ✅ Read `TESTING_GUIDE.md` for comprehensive testing +4. ✅ Read `FRONTEND_INTEGRATION.md` for frontend setup +5. ✅ Read `MIGRATION_SUMMARY.md` for detailed information + +## Need Help? 
+ +- Check `README.md` for detailed documentation +- Review `TESTING_GUIDE.md` for debugging tips +- See `MIGRATION_SUMMARY.md` for architecture details +- Create an issue on GitHub with full error details + +## Success Checklist + +- [ ] Environment variables configured +- [ ] Dependencies installed +- [ ] Documents ingested +- [ ] Test graph runs successfully +- [ ] Server starts without errors +- [ ] API responds to requests +- [ ] (Optional) Deployed to LangGraph Cloud + +**Congratulations!** Your JavaScript backend is now running. 🎉 diff --git a/backend-js/README.md b/backend-js/README.md new file mode 100644 index 000000000..6ab7cc389 --- /dev/null +++ b/backend-js/README.md @@ -0,0 +1,248 @@ +# Chat LangChain - JavaScript/TypeScript Backend + +This is the JavaScript/TypeScript implementation of the chat-langchain backend, migrated from Python to leverage LangChain.js v1 and modern JavaScript tooling. + +## Architecture + +Built with: + +- **LangChain.js v1.0** - Modern LangChain with standard content blocks +- **@langchain/core v1.0.5** - Core abstractions and utilities +- **LangGraph.js v0.2** - StateGraph pattern for complex agent workflows +- **TypeScript 5.6** - Full type safety and excellent DX +- **Weaviate** - Vector store for document retrieval +- **Vitest** - Fast testing framework +- **pnpm** - Fast, disk space efficient package manager + +## Project Structure + +``` +backend-js/ +├── src/ +│ ├── retrieval_graph/ # Main agent graph +│ │ ├── graph.ts # Main retrieval graph +│ │ ├── state.ts # State management +│ │ ├── configuration.ts # Configuration schemas +│ │ ├── prompts.ts # LangSmith prompts +│ │ └── researcher_graph/ # Researcher subgraph +│ ├── retrieval.ts # Retrieval logic +│ ├── embeddings.ts # Embedding models +│ ├── utils.ts # Utility functions +│ ├── constants.ts # Constants +│ ├── ingest.ts # Document ingestion +│ ├── parser.ts # HTML parsing +│ └── server.ts # Self-hosted Express server +├── tests/ +│ └── evals/ # Evaluation tests 
+├── scripts/ # Helper scripts +└── package.json +``` + +## Setup + +### 1. Prerequisites + +This project requires: + +- **Node.js 20+** (use nvm: `nvm use` to load from `.nvmrc`) +- **pnpm** (specified in `package.json`) + +### 2. Install Dependencies + +```bash +cd backend-js + +# Enable Corepack for pnpm (one-time setup) +corepack enable + +# Install dependencies +pnpm install +``` + +### 2. Configure Environment + +Copy `env.example` to `.env` and fill in your API keys: + +```bash +cp env.example .env +``` + +Required environment variables: + +- `OPENAI_API_KEY` or `ANTHROPIC_API_KEY` or `GROQ_API_KEY` - For LLM providers +- `LANGCHAIN_API_KEY` - For LangSmith tracing and prompts +- `WEAVIATE_URL` - Your Weaviate instance URL +- `WEAVIATE_API_KEY` - Weaviate authentication + +### 3. Ingest Documents (First Time) + +```bash +pnpm ingest +``` + +This will: + +- Load documents from LangChain docs +- Split into chunks +- Generate embeddings +- Index in Weaviate + +## Development + +### Run Self-Hosted Server + +```bash +pnpm dev +``` + +Server will be available at `http://localhost:3001` + +### API Endpoints + +- `POST /runs` - Invoke graph with new input +- `POST /runs/stream` - Stream graph execution (SSE) +- `GET /runs/:run_id` - Get run status +- `POST /threads/:thread_id/runs` - Continue conversation thread +- `GET /threads/:thread_id/state` - Get thread state + +### Run Tests + +```bash +# All tests +pnpm test + +# E2E evaluation tests +pnpm test:e2e +``` + +### Type Checking + +```bash +pnpm typecheck +``` + +### Build for Production + +```bash +pnpm build +pnpm start +``` + +## Deployment + +### Option 1: LangGraph Cloud (Recommended) + +1. Ensure `langgraph.json` is configured: + +```json +{ + "$schema": "https://langgra.ph/schema.json", + "dependencies": ["."], + "graphs": { + "chat": "./src/retrieval_graph/graph.ts:graph" + }, + "env": ".env" +} +``` + +2. 
Deploy: + +```bash +langgraph deploy +``` + +Benefits: + +- Managed checkpointing and state persistence +- Automatic streaming endpoints +- Thread management UI +- Automatic scaling +- Zero infrastructure maintenance + +### Option 2: Self-Hosted + +1. Set up PostgreSQL database for checkpointing +2. Configure `DATABASE_URL` in `.env` +3. Build and run: + +```bash +pnpm build +pnpm start +``` + +Benefits: + +- Full control over infrastructure +- Custom middleware and integrations +- Cost optimization +- No vendor lock-in + +## Key Features + +### LangGraph StateGraph Pattern + +Uses the StateGraph pattern for complex multi-step workflows: + +1. **Research Planning** - Generate research steps +2. **Query Generation** - Create diverse search queries +3. **Parallel Retrieval** - Fetch documents in parallel +4. **Response Generation** - Synthesize answer from retrieved docs + +### Evaluation System + +Comprehensive evaluation pipeline with: + +- Retrieval recall metrics +- Answer correctness (vs reference) +- Context faithfulness +- LangSmith integration for tracking + +Run evaluations: + +```bash +pnpm test:e2e +``` + +### Multi-Provider Support + +Supports multiple LLM and embedding providers: + +- OpenAI (GPT-4, GPT-3.5, text-embedding-3) +- Anthropic (Claude 3.5 Sonnet, Claude 3 Haiku) +- Groq (Llama, Mixtral) +- Ollama (Local models) + +## Comparison with Python Version + +Both implementations coexist and share: + +- Same Weaviate vector store +- Same evaluation datasets +- Same LangSmith prompts +- Same document corpus + +This allows for: + +- Performance comparison +- Feature parity validation +- Gradual migration +- A/B testing + +## Migration Notes + +This implementation maintains the same architecture as the Python version: + +- StateGraph pattern (not using new `createAgent` API) +- Same node structure and conditional edges +- Same prompts and system messages +- Compatible API surface for frontend + +See `../docs/` for detailed migration documentation. 
+ +## Contributing + +This is a learning project. The Python implementation remains in `../backend/` directory. + +## License + +MIT diff --git a/backend-js/env.example b/backend-js/env.example new file mode 100644 index 000000000..d81b4cd83 --- /dev/null +++ b/backend-js/env.example @@ -0,0 +1,30 @@ +# API Keys +OPENAI_API_KEY=your_openai_api_key_here +ANTHROPIC_API_KEY=your_anthropic_api_key_here +GROQ_API_KEY=your_groq_api_key_here +OLLAMA_API_KEY= + +# LangSmith Configuration +LANGCHAIN_API_KEY=your_langsmith_api_key_here +LANGCHAIN_TRACING_V2=true +LANGCHAIN_PROJECT=chat-langchain-js +LANGCHAIN_PROMPT_API_KEY=your_langsmith_api_key_here +LANGCHAIN_PROMPT_API_URL=https://api.smith.langchain.com + +# Weaviate Configuration +WEAVIATE_URL=your_weaviate_url_here +WEAVIATE_GRPC_URL=your_weaviate_grpc_url_here +WEAVIATE_API_KEY=your_weaviate_api_key_here + +# Database Configuration (for self-hosted) +DATABASE_URL=postgresql://user:password@localhost:5432/langgraph +RECORD_MANAGER_DB_URL=postgresql://user:password@localhost:5432/langgraph + +# Model Configuration +EMBEDDING_MODEL=ollama/nomic-embed-text +OLLAMA_BASE_URL=http://localhost:11434 + +# Server Configuration (for self-hosted) +PORT=3001 +NODE_ENV=development + diff --git a/backend-js/langgraph.json b/backend-js/langgraph.json new file mode 100644 index 000000000..50efc654f --- /dev/null +++ b/backend-js/langgraph.json @@ -0,0 +1,17 @@ +{ + "$schema": "https://langgra.ph/schema.json", + "dependencies": ["."], + "graphs": { + "chat": "./src/retrieval_graph/graph.ts:graph" + }, + "env": ".env", + "image_distro": "wolfi", + "checkpointer": { + "ttl": { + "strategy": "delete", + "sweep_interval_minutes": 60, + "default_ttl": 43200 + } + } +} + diff --git a/backend-js/package.json b/backend-js/package.json new file mode 100644 index 000000000..a4b4282e4 --- /dev/null +++ b/backend-js/package.json @@ -0,0 +1,69 @@ +{ + "name": "chat-langchain-backend-js", + "version": "1.0.0", + "description": 
"JavaScript/TypeScript implementation of chat-langchain backend with LangGraph", + "type": "module", + "main": "dist/index.js", + "scripts": { + "build": "tsc", + "dev": "tsx watch src/server.ts", + "langgraph:dev": "langgraphjs dev", + "start": "node dist/server.js", + "ingest": "tsx src/ingest/index.ts", + "gr": "tsx src/main.ts", + "test": "vitest", + "test:e2e": "vitest run tests/evals", + "typecheck": "tsc --noEmit", + "clean": "rm -rf dist" + }, + "keywords": [ + "langchain", + "langgraph", + "rag", + "typescript", + "weaviate" + ], + "author": "", + "license": "MIT", + "dependencies": { + "@langchain/anthropic": "^1.1.1", + "@langchain/community": "^1.0.4", + "@langchain/core": "^1.0.6", + "@langchain/groq": "^1.0.1", + "@langchain/langgraph": "^1.0.2", + "@langchain/ollama": "^1.0.1", + "@langchain/openai": "^1.1.2", + "@langchain/textsplitters": "^1.0.0", + "@langchain/weaviate": "^1.0.0", + "cheerio": "^1.0.0", + "cors": "^2.8.5", + "dotenv": "^16.4.7", + "express": "^4.21.2", + "jsdom": "^27.2.0", + "langchain": "^1.0.6", + "langsmith": "^0.3.81", + "lodash": "^4.17.21", + "openai": "^6.9.1", + "pg": "^8.16.3", + "uuid": "^11.0.3", + "weaviate-client": "^3.2.0", + "zod": "^3.24.1" + }, + "devDependencies": { + "@langchain/langgraph-cli": "^1.0.4", + "@types/cors": "^2.8.17", + "@types/express": "^5.0.0", + "@types/jsdom": "^27", + "@types/lodash": "^4.17.21", + "@types/node": "^22.0.0", + "@types/pg": "^8.15.6", + "@types/uuid": "^10.0.0", + "tsx": "^4.19.0", + "typescript": "^5.6.0", + "vitest": "^2.1.0" + }, + "engines": { + "node": ">=20.0.0" + }, + "packageManager": "pnpm@10.23.0" +} \ No newline at end of file diff --git a/backend-js/pnpm-lock.yaml b/backend-js/pnpm-lock.yaml new file mode 100644 index 000000000..f78cf2905 --- /dev/null +++ b/backend-js/pnpm-lock.yaml @@ -0,0 +1,5823 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: false + +importers: + + .: + dependencies: + '@langchain/anthropic': + 
specifier: ^1.1.1 + version: 1.1.1(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76) + '@langchain/community': + specifier: ^1.0.4 + version: 1.0.4(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@1.14.0(@playwright/test@1.56.1)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(zod@3.25.76))(@ibm-cloud/watsonx-ai@1.7.4)(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(cheerio@1.1.2)(ibm-cloud-sdk-core@5.4.4)(jsdom@27.2.0)(jsonwebtoken@9.0.2)(lodash@4.17.21)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(pg@8.16.3)(playwright@1.56.1)(weaviate-client@3.9.0)(ws@8.18.3) + '@langchain/core': + specifier: ^1.0.6 + version: 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + '@langchain/groq': + specifier: ^1.0.1 + version: 1.0.1(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + '@langchain/langgraph': + specifier: ^1.0.2 + version: 1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76) + '@langchain/ollama': + specifier: ^1.0.1 + version: 1.0.1(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + '@langchain/openai': + specifier: ^1.1.2 + version: 1.1.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3) + '@langchain/textsplitters': + specifier: ^1.0.0 + version: 1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + '@langchain/weaviate': + specifier: ^1.0.0 + version: 1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + cheerio: + specifier: ^1.0.0 + version: 1.1.2 + cors: + specifier: ^2.8.5 + version: 2.8.5 + dotenv: + specifier: ^16.4.7 + version: 16.6.1 + express: + specifier: ^4.21.2 + version: 4.21.2 + jsdom: + specifier: ^27.2.0 + version: 27.2.0 + langchain: + specifier: ^1.0.6 + version: 1.0.6(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(zod-to-json-schema@3.25.0(zod@3.25.76)) + langsmith: + specifier: ^0.3.81 + version: 
0.3.81(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + lodash: + specifier: ^4.17.21 + version: 4.17.21 + openai: + specifier: ^6.9.1 + version: 6.9.1(ws@8.18.3)(zod@3.25.76) + pg: + specifier: ^8.16.3 + version: 8.16.3 + uuid: + specifier: ^11.0.3 + version: 11.1.0 + weaviate-client: + specifier: ^3.2.0 + version: 3.9.0 + zod: + specifier: ^3.24.1 + version: 3.25.76 + devDependencies: + '@langchain/langgraph-cli': + specifier: ^1.0.4 + version: 1.0.4(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph-sdk@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76))(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(typescript@5.9.3) + '@types/cors': + specifier: ^2.8.17 + version: 2.8.19 + '@types/express': + specifier: ^5.0.0 + version: 5.0.5 + '@types/jsdom': + specifier: ^27 + version: 27.0.0 + '@types/lodash': + specifier: ^4.17.21 + version: 4.17.21 + '@types/node': + specifier: ^22.0.0 + version: 22.19.1 + '@types/pg': + specifier: ^8.15.6 + version: 8.15.6 + '@types/uuid': + specifier: ^10.0.0 + version: 10.0.0 + tsx: + specifier: ^4.19.0 + version: 4.20.6 + typescript: + specifier: ^5.6.0 + version: 5.9.3 + vitest: + specifier: ^2.1.0 + version: 2.1.9(@types/node@22.19.1)(jsdom@27.2.0)(lightningcss@1.30.2) + +packages: + + '@acemir/cssom@0.9.24': + resolution: {integrity: sha512-5YjgMmAiT2rjJZU7XK1SNI7iqTy92DpaYVgG6x63FxkJ11UpYfLndHJATtinWJClAXiOlW9XWaUyAQf8pMrQPg==} + + '@alloc/quick-lru@5.2.0': + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@anthropic-ai/sdk@0.27.3': + resolution: {integrity: sha512-IjLt0gd3L4jlOfilxVXTifn42FnVffMgDC04RJK1KDZpmkBWLv0XC92MVVmkxrFZNS/7l3xWgP/I3nqtX1sQHw==} + + 
'@anthropic-ai/sdk@0.69.0': + resolution: {integrity: sha512-L92d2q47BSq+7slUqHBL1d2DwloulZotYGCTDt9AYRtPmYF+iK6rnwq9JaZwPPJgk+LenbcbQ/nj6gfaDFsl9w==} + hasBin: true + peerDependencies: + zod: ^3.25.0 || ^4.0.0 + peerDependenciesMeta: + zod: + optional: true + + '@asamuzakjp/css-color@4.1.0': + resolution: {integrity: sha512-9xiBAtLn4aNsa4mDnpovJvBn72tNEIACyvlqaNJ+ADemR+yeMJWnBudOi2qGDviJa7SwcDOU/TRh5dnET7qk0w==} + + '@asamuzakjp/dom-selector@6.7.4': + resolution: {integrity: sha512-buQDjkm+wDPXd6c13534URWZqbz0RP5PAhXZ+LIoa5LgwInT9HVJvGIJivg75vi8I13CxDGdTnz+aY5YUJlIAA==} + + '@asamuzakjp/nwsapi@2.3.9': + resolution: {integrity: sha512-n8GuYSrI9bF7FFZ/SjhwevlHc8xaVlb/7HmHelnc/PZXBD2ZR49NnN9sMMuDdEGPeeRQ5d0hqlSlEpgCX3Wl0Q==} + + '@babel/code-frame@7.27.1': + resolution: {integrity: sha512-cjQ7ZlQ0Mv3b47hABuTevyTuYN4i+loJKGeV9flcCgIK37cCXRh+L1bd3iBHlynerhQ7BhCkn2BPbQUL+rGqFg==} + engines: {node: '>=6.9.0'} + + '@babel/helper-validator-identifier@7.28.5': + resolution: {integrity: sha512-qSs4ifwzKJSV39ucNjsvc6WVHs6b7S03sOh2OcHF9UHfVPqWWALUsNUVzhSBiItjRZoLHx7nIarVjqKVusUZ1Q==} + engines: {node: '>=6.9.0'} + + '@babel/runtime@7.28.4': + resolution: {integrity: sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==} + engines: {node: '>=6.9.0'} + + '@browserbasehq/sdk@2.6.0': + resolution: {integrity: sha512-83iXP5D7xMm8Wyn66TUaUrgoByCmAJuoMoZQI3sGg3JAiMlTfnCIMqyVBoNSaItaPIkaCnrsj6LiusmXV2X9YA==} + + '@browserbasehq/stagehand@1.14.0': + resolution: {integrity: sha512-Hi/EzgMFWz+FKyepxHTrqfTPjpsuBS4zRy3e9sbMpBgLPv+9c0R+YZEvS7Bw4mTS66QtvvURRT6zgDGFotthVQ==} + peerDependencies: + '@playwright/test': ^1.42.1 + deepmerge: ^4.3.1 + dotenv: ^16.4.5 + openai: ^4.62.1 + zod: ^3.23.8 + + '@cfworker/json-schema@4.1.1': + resolution: {integrity: sha512-gAmrUZSGtKc3AiBL71iNWxDsyUC5uMaKKGdvzYsBoTW/xi42JQHl7eKV2OYzCUqvc+D2RCcf7EXY2iCyFIk6og==} + + '@clack/core@0.4.1': + resolution: {integrity: 
sha512-Pxhij4UXg8KSr7rPek6Zowm+5M22rbd2g1nfojHJkxp5YkFqiZ2+YLEM/XGVIzvGOcM0nqjIFxrpDwWRZYWYjA==} + + '@clack/prompts@0.9.1': + resolution: {integrity: sha512-JIpyaboYZeWYlyP0H+OoPPxd6nqueG/CmN6ixBiNFsIDHREevjIf0n0Ohh5gr5C8pEDknzgvz+pIJ8dMhzWIeg==} + + '@colors/colors@1.6.0': + resolution: {integrity: sha512-Ir+AOibqzrIsL6ajt3Rz3LskB7OiMVHqltZmspbW/TJuTVuyOMirVqAkjfY6JISiLHgyNqicAC8AyHHGzNd/dA==} + engines: {node: '>=0.1.90'} + + '@commander-js/extra-typings@13.1.0': + resolution: {integrity: sha512-q5P52BYb1hwVWE6dtID7VvuJWrlfbCv4klj7BjUUOqMz4jbSZD4C9fJ9lRjL2jnBGTg+gDDlaXN51rkWcLk4fg==} + peerDependencies: + commander: ~13.1.0 + + '@csstools/color-helpers@5.1.0': + resolution: {integrity: sha512-S11EXWJyy0Mz5SYvRmY8nJYTFFd1LCNV+7cXyAgQtOOuzb4EsgfqDufL+9esx72/eLhsRdGZwaldu/h+E4t4BA==} + engines: {node: '>=18'} + + '@csstools/css-calc@2.1.4': + resolution: {integrity: sha512-3N8oaj+0juUw/1H3YwmDDJXCgTB1gKU6Hc/bB502u9zR0q2vd786XJH9QfrKIEgFlZmhZiq6epXl4rHqhzsIgQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-color-parser@3.1.0': + resolution: {integrity: sha512-nbtKwh3a6xNVIp/VRuXV64yTKnb1IjTAEEh3irzS+HkKjAOYLTGNb9pmVNntZ8iVBHcWDA2Dof0QtPgFI1BaTA==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-parser-algorithms': ^3.0.5 + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-parser-algorithms@3.0.5': + resolution: {integrity: sha512-DaDeUkXZKjdGhgYaHNJTV9pV7Y9B3b644jCLs9Upc3VeNGg6LWARAT6O+Q+/COo+2gg/bM5rhpMAtf70WqfBdQ==} + engines: {node: '>=18'} + peerDependencies: + '@csstools/css-tokenizer': ^3.0.4 + + '@csstools/css-syntax-patches-for-csstree@1.0.17': + resolution: {integrity: sha512-LCC++2h8pLUSPY+EsZmrrJ1EOUu+5iClpEiDhhdw3zRJpPbABML/N5lmRuBHjxtKm9VnRcsUzioyD0sekFMF0A==} + engines: {node: '>=18'} + + '@csstools/css-tokenizer@3.0.4': + resolution: {integrity: 
sha512-Vd/9EVDiu6PPJt9yAh6roZP6El1xHrdvIVGjyBsHR0RYwNHgL7FJPyIIW4fANJNG6FtyZfvlRPpFI4ZM/lubvw==} + engines: {node: '>=18'} + + '@dabh/diagnostics@2.0.8': + resolution: {integrity: sha512-R4MSXTVnuMzGD7bzHdW2ZhhdPC/igELENcq5IjEverBvq5hn1SXCWcsi6eSsdWP0/Ur+SItRRjAktmdoX/8R/Q==} + + '@esbuild/aix-ppc64@0.21.5': + resolution: {integrity: sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [aix] + + '@esbuild/aix-ppc64@0.25.12': + resolution: {integrity: sha512-Hhmwd6CInZ3dwpuGTF8fJG6yoWmsToE+vYgD4nytZVxcu1ulHpUQRAB1UJ8+N1Am3Mz4+xOByoQoSZf4D+CpkA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [aix] + + '@esbuild/android-arm64@0.21.5': + resolution: {integrity: sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm64@0.25.12': + resolution: {integrity: sha512-6AAmLG7zwD1Z159jCKPvAxZd4y/VTO0VkprYy+3N2FtJ8+BQWFXU+OxARIwA46c5tdD9SsKGZ/1ocqBS/gAKHg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [android] + + '@esbuild/android-arm@0.21.5': + resolution: {integrity: sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==} + engines: {node: '>=12'} + cpu: [arm] + os: [android] + + '@esbuild/android-arm@0.25.12': + resolution: {integrity: sha512-VJ+sKvNA/GE7Ccacc9Cha7bpS8nyzVv0jdVgwNDaR4gDMC/2TTRc33Ip8qrNYUcpkOHUT5OZ0bUcNNVZQ9RLlg==} + engines: {node: '>=18'} + cpu: [arm] + os: [android] + + '@esbuild/android-x64@0.21.5': + resolution: {integrity: sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==} + engines: {node: '>=12'} + cpu: [x64] + os: [android] + + '@esbuild/android-x64@0.25.12': + resolution: {integrity: sha512-5jbb+2hhDHx5phYR2By8GTWEzn6I9UqR11Kwf22iKbNpYrsmRB18aX/9ivc5cabcUiAT/wM+YIZ6SG9QO6a8kg==} + engines: {node: '>=18'} + cpu: [x64] + os: [android] + + 
'@esbuild/darwin-arm64@0.21.5': + resolution: {integrity: sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==} + engines: {node: '>=12'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-arm64@0.25.12': + resolution: {integrity: sha512-N3zl+lxHCifgIlcMUP5016ESkeQjLj/959RxxNYIthIg+CQHInujFuXeWbWMgnTo4cp5XVHqFPmpyu9J65C1Yg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [darwin] + + '@esbuild/darwin-x64@0.21.5': + resolution: {integrity: sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==} + engines: {node: '>=12'} + cpu: [x64] + os: [darwin] + + '@esbuild/darwin-x64@0.25.12': + resolution: {integrity: sha512-HQ9ka4Kx21qHXwtlTUVbKJOAnmG1ipXhdWTmNXiPzPfWKpXqASVcWdnf2bnL73wgjNrFXAa3yYvBSd9pzfEIpA==} + engines: {node: '>=18'} + cpu: [x64] + os: [darwin] + + '@esbuild/freebsd-arm64@0.21.5': + resolution: {integrity: sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==} + engines: {node: '>=12'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-arm64@0.25.12': + resolution: {integrity: sha512-gA0Bx759+7Jve03K1S0vkOu5Lg/85dou3EseOGUes8flVOGxbhDDh/iZaoek11Y8mtyKPGF3vP8XhnkDEAmzeg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.21.5': + resolution: {integrity: sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [freebsd] + + '@esbuild/freebsd-x64@0.25.12': + resolution: {integrity: sha512-TGbO26Yw2xsHzxtbVFGEXBFH0FRAP7gtcPE7P5yP7wGy7cXK2oO7RyOhL5NLiqTlBh47XhmIUXuGciXEqYFfBQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [freebsd] + + '@esbuild/linux-arm64@0.21.5': + resolution: {integrity: sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==} + engines: {node: '>=12'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm64@0.25.12': + resolution: {integrity: 
sha512-8bwX7a8FghIgrupcxb4aUmYDLp8pX06rGh5HqDT7bB+8Rdells6mHvrFHHW2JAOPZUbnjUpKTLg6ECyzvas2AQ==} + engines: {node: '>=18'} + cpu: [arm64] + os: [linux] + + '@esbuild/linux-arm@0.21.5': + resolution: {integrity: sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==} + engines: {node: '>=12'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-arm@0.25.12': + resolution: {integrity: sha512-lPDGyC1JPDou8kGcywY0YILzWlhhnRjdof3UlcoqYmS9El818LLfJJc3PXXgZHrHCAKs/Z2SeZtDJr5MrkxtOw==} + engines: {node: '>=18'} + cpu: [arm] + os: [linux] + + '@esbuild/linux-ia32@0.21.5': + resolution: {integrity: sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==} + engines: {node: '>=12'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-ia32@0.25.12': + resolution: {integrity: sha512-0y9KrdVnbMM2/vG8KfU0byhUN+EFCny9+8g202gYqSSVMonbsCfLjUO+rCci7pM0WBEtz+oK/PIwHkzxkyharA==} + engines: {node: '>=18'} + cpu: [ia32] + os: [linux] + + '@esbuild/linux-loong64@0.21.5': + resolution: {integrity: sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==} + engines: {node: '>=12'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-loong64@0.25.12': + resolution: {integrity: sha512-h///Lr5a9rib/v1GGqXVGzjL4TMvVTv+s1DPoxQdz7l/AYv6LDSxdIwzxkrPW438oUXiDtwM10o9PmwS/6Z0Ng==} + engines: {node: '>=18'} + cpu: [loong64] + os: [linux] + + '@esbuild/linux-mips64el@0.21.5': + resolution: {integrity: sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==} + engines: {node: '>=12'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-mips64el@0.25.12': + resolution: {integrity: sha512-iyRrM1Pzy9GFMDLsXn1iHUm18nhKnNMWscjmp4+hpafcZjrr2WbT//d20xaGljXDBYHqRcl8HnxbX6uaA/eGVw==} + engines: {node: '>=18'} + cpu: [mips64el] + os: [linux] + + '@esbuild/linux-ppc64@0.21.5': + resolution: {integrity: 
sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==} + engines: {node: '>=12'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-ppc64@0.25.12': + resolution: {integrity: sha512-9meM/lRXxMi5PSUqEXRCtVjEZBGwB7P/D4yT8UG/mwIdze2aV4Vo6U5gD3+RsoHXKkHCfSxZKzmDssVlRj1QQA==} + engines: {node: '>=18'} + cpu: [ppc64] + os: [linux] + + '@esbuild/linux-riscv64@0.21.5': + resolution: {integrity: sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==} + engines: {node: '>=12'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-riscv64@0.25.12': + resolution: {integrity: sha512-Zr7KR4hgKUpWAwb1f3o5ygT04MzqVrGEGXGLnj15YQDJErYu/BGg+wmFlIDOdJp0PmB0lLvxFIOXZgFRrdjR0w==} + engines: {node: '>=18'} + cpu: [riscv64] + os: [linux] + + '@esbuild/linux-s390x@0.21.5': + resolution: {integrity: sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==} + engines: {node: '>=12'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-s390x@0.25.12': + resolution: {integrity: sha512-MsKncOcgTNvdtiISc/jZs/Zf8d0cl/t3gYWX8J9ubBnVOwlk65UIEEvgBORTiljloIWnBzLs4qhzPkJcitIzIg==} + engines: {node: '>=18'} + cpu: [s390x] + os: [linux] + + '@esbuild/linux-x64@0.21.5': + resolution: {integrity: sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==} + engines: {node: '>=12'} + cpu: [x64] + os: [linux] + + '@esbuild/linux-x64@0.25.12': + resolution: {integrity: sha512-uqZMTLr/zR/ed4jIGnwSLkaHmPjOjJvnm6TVVitAa08SLS9Z0VM8wIRx7gWbJB5/J54YuIMInDquWyYvQLZkgw==} + engines: {node: '>=18'} + cpu: [x64] + os: [linux] + + '@esbuild/netbsd-arm64@0.25.12': + resolution: {integrity: sha512-xXwcTq4GhRM7J9A8Gv5boanHhRa/Q9KLVmcyXHCTaM4wKfIpWkdXiMog/KsnxzJ0A1+nD+zoecuzqPmCRyBGjg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.21.5': + resolution: {integrity: 
sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==} + engines: {node: '>=12'} + cpu: [x64] + os: [netbsd] + + '@esbuild/netbsd-x64@0.25.12': + resolution: {integrity: sha512-Ld5pTlzPy3YwGec4OuHh1aCVCRvOXdH8DgRjfDy/oumVovmuSzWfnSJg+VtakB9Cm0gxNO9BzWkj6mtO1FMXkQ==} + engines: {node: '>=18'} + cpu: [x64] + os: [netbsd] + + '@esbuild/openbsd-arm64@0.25.12': + resolution: {integrity: sha512-fF96T6KsBo/pkQI950FARU9apGNTSlZGsv1jZBAlcLL1MLjLNIWPBkj5NlSz8aAzYKg+eNqknrUJ24QBybeR5A==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.21.5': + resolution: {integrity: sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==} + engines: {node: '>=12'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openbsd-x64@0.25.12': + resolution: {integrity: sha512-MZyXUkZHjQxUvzK7rN8DJ3SRmrVrke8ZyRusHlP+kuwqTcfWLyqMOE3sScPPyeIXN/mDJIfGXvcMqCgYKekoQw==} + engines: {node: '>=18'} + cpu: [x64] + os: [openbsd] + + '@esbuild/openharmony-arm64@0.25.12': + resolution: {integrity: sha512-rm0YWsqUSRrjncSXGA7Zv78Nbnw4XL6/dzr20cyrQf7ZmRcsovpcRBdhD43Nuk3y7XIoW2OxMVvwuRvk9XdASg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [openharmony] + + '@esbuild/sunos-x64@0.21.5': + resolution: {integrity: sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==} + engines: {node: '>=12'} + cpu: [x64] + os: [sunos] + + '@esbuild/sunos-x64@0.25.12': + resolution: {integrity: sha512-3wGSCDyuTHQUzt0nV7bocDy72r2lI33QL3gkDNGkod22EsYl04sMf0qLb8luNKTOmgF/eDEDP5BFNwoBKH441w==} + engines: {node: '>=18'} + cpu: [x64] + os: [sunos] + + '@esbuild/win32-arm64@0.21.5': + resolution: {integrity: sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==} + engines: {node: '>=12'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-arm64@0.25.12': + resolution: {integrity: 
sha512-rMmLrur64A7+DKlnSuwqUdRKyd3UE7oPJZmnljqEptesKM8wx9J8gx5u0+9Pq0fQQW8vqeKebwNXdfOyP+8Bsg==} + engines: {node: '>=18'} + cpu: [arm64] + os: [win32] + + '@esbuild/win32-ia32@0.21.5': + resolution: {integrity: sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==} + engines: {node: '>=12'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-ia32@0.25.12': + resolution: {integrity: sha512-HkqnmmBoCbCwxUKKNPBixiWDGCpQGVsrQfJoVGYLPT41XWF8lHuE5N6WhVia2n4o5QK5M4tYr21827fNhi4byQ==} + engines: {node: '>=18'} + cpu: [ia32] + os: [win32] + + '@esbuild/win32-x64@0.21.5': + resolution: {integrity: sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==} + engines: {node: '>=12'} + cpu: [x64] + os: [win32] + + '@esbuild/win32-x64@0.25.12': + resolution: {integrity: sha512-alJC0uCZpTFrSL0CCDjcgleBXPnCrEAhTBILpeAp7M/OFgoqtAetfBzX0xM00MUsVVPpVjlPuMbREqnZCXaTnA==} + engines: {node: '>=18'} + cpu: [x64] + os: [win32] + + '@graphql-typed-document-node/core@3.2.0': + resolution: {integrity: sha512-mB9oAsNCm9aM3/SOv4YtBMqZbYj10R7dkq8byBqxGY/ncFwhf2oQzMV+LCRlWoDSEBJ3COiR1yeDvMtsoOsuFQ==} + peerDependencies: + graphql: ^0.8.0 || ^0.9.0 || ^0.10.0 || ^0.11.0 || ^0.12.0 || ^0.13.0 || ^14.0.0 || ^15.0.0 || ^16.0.0 || ^17.0.0 + + '@grpc/grpc-js@1.14.1': + resolution: {integrity: sha512-sPxgEWtPUR3EnRJCEtbGZG2iX8LQDUls2wUS3o27jg07KqJFMq6YDeWvMo1wfpmy3rqRdS0rivpLwhqQtEyCuQ==} + engines: {node: '>=12.10.0'} + + '@grpc/proto-loader@0.8.0': + resolution: {integrity: sha512-rc1hOQtjIWGxcxpb9aHAfLpIctjEnsDehj0DAiVfBlmT84uvR0uUtN2hEi/ecvWVjXUGf5qPF4qEgiLOx1YIMQ==} + engines: {node: '>=6'} + hasBin: true + + '@hono/node-server@1.19.6': + resolution: {integrity: sha512-Shz/KjlIeAhfiuE93NDKVdZ7HdBVLQAfdbaXEaoAVO3ic9ibRSLGIQGkcBbFyuLr+7/1D5ZCINM8B+6IvXeMtw==} + engines: {node: '>=18.14.1'} + peerDependencies: + hono: ^4 + + '@hono/zod-validator@0.2.2': + resolution: {integrity: 
sha512-dSDxaPV70Py8wuIU2QNpoVEIOSzSXZ/6/B/h4xA7eOMz7+AarKTSGV8E6QwrdcCbBLkpqfJ4Q2TmBO0eP1tCBQ==} + peerDependencies: + hono: '>=3.9.0' + zod: ^3.19.1 + + '@ibm-cloud/watsonx-ai@1.7.4': + resolution: {integrity: sha512-5cVSUToeZBDEG6q3OfjuYO3yTOjI8dMsi3jgp1PGZ83hBbMeNrTA+hjT6gXPlxpTIgQeov3rSie5w7B3qzFOgg==} + engines: {node: '>=18.0.0'} + + '@isaacs/fs-minipass@4.0.1': + resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==} + engines: {node: '>=18.0.0'} + + '@jridgewell/gen-mapping@0.3.13': + resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} + + '@jridgewell/remapping@2.3.5': + resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + + '@js-sdsl/ordered-map@4.4.2': + resolution: {integrity: sha512-iUKgm52T8HOE/makSxjqoWhe95ZJA1/G1sYsGev2JDKUSS14KAgg1LHb+Ba+IPow0xflbnSkOsZcO08C7w1gYw==} + + '@langchain/anthropic@1.1.1': + resolution: {integrity: sha512-TCjeZPQSKWAGcvyGAYg3EDrfumKTdJ0Z7NiW8eC7+SFfVhdH92ynfvt8CYk5PqpsuIcdzJf1jOzZK6WrWagTYw==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.0 + + '@langchain/classic@1.0.4': + resolution: {integrity: sha512-Jxu75fsOPcwgDTPnnn8+XT5oCl9NxXUHs0zSw//WdovfHvnDCYp+gqAVE5JxigCroNmK0lqDsHqZFoG/gczTKg==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.0 + cheerio: '*' + peggy: 
^3.0.2 + typeorm: '*' + peerDependenciesMeta: + cheerio: + optional: true + peggy: + optional: true + typeorm: + optional: true + + '@langchain/community@1.0.4': + resolution: {integrity: sha512-T3wOexFYS6PjhnNA67u97adDWYQETxkqu+kmR3EBhKAkH4ma+s2iMI6HbbNicgHORBlT3lT3nwi5G6MiszraZA==} + engines: {node: '>=20'} + peerDependencies: + '@arcjet/redact': ^v1.0.0-alpha.23 + '@aws-crypto/sha256-js': ^5.0.0 + '@aws-sdk/client-dynamodb': ^3.749.0 + '@aws-sdk/client-lambda': ^3.749.0 + '@aws-sdk/client-s3': ^3.749.0 + '@aws-sdk/client-sagemaker-runtime': ^3.749.0 + '@aws-sdk/client-sfn': ^3.749.0 + '@aws-sdk/credential-provider-node': ^3.388.0 + '@aws-sdk/dsql-signer': '*' + '@azure/search-documents': ^12.0.0 + '@azure/storage-blob': ^12.15.0 + '@browserbasehq/sdk': '*' + '@browserbasehq/stagehand': ^1.0.0 + '@clickhouse/client': ^0.2.5 + '@datastax/astra-db-ts': ^1.0.0 + '@elastic/elasticsearch': ^8.4.0 + '@getmetal/metal-sdk': '*' + '@getzep/zep-cloud': ^1.0.6 + '@getzep/zep-js': ^0.9.0 + '@gomomento/sdk-core': ^1.51.1 + '@google-cloud/storage': ^6.10.1 || ^7.7.0 + '@gradientai/nodejs-sdk': ^1.2.0 + '@huggingface/inference': ^4.0.5 + '@huggingface/transformers': ^3.5.2 + '@ibm-cloud/watsonx-ai': '*' + '@lancedb/lancedb': ^0.19.1 + '@langchain/core': ^1.0.0 + '@layerup/layerup-security': ^1.5.12 + '@libsql/client': ^0.14.0 + '@mendable/firecrawl-js': ^1.4.3 + '@mlc-ai/web-llm': '*' + '@mozilla/readability': '*' + '@neondatabase/serverless': '*' + '@notionhq/client': ^2.2.10 + '@opensearch-project/opensearch': '*' + '@pinecone-database/pinecone': '*' + '@planetscale/database': ^1.8.0 + '@premai/prem-sdk': ^0.3.25 + '@qdrant/js-client-rest': '*' + '@raycast/api': ^1.55.2 + '@rockset/client': ^0.9.1 + '@smithy/eventstream-codec': ^2.0.5 + '@smithy/protocol-http': ^3.0.6 + '@smithy/signature-v4': ^2.0.10 + '@smithy/util-utf8': ^2.0.0 + '@spider-cloud/spider-client': ^0.0.21 + '@supabase/supabase-js': ^2.45.0 + '@tensorflow-models/universal-sentence-encoder': '*' + 
'@tensorflow/tfjs-core': '*' + '@upstash/ratelimit': ^1.1.3 || ^2.0.3 + '@upstash/redis': ^1.20.6 + '@upstash/vector': ^1.1.1 + '@vercel/kv': '*' + '@vercel/postgres': '*' + '@writerai/writer-sdk': ^0.40.2 + '@xata.io/client': ^0.28.0 + '@xenova/transformers': '*' + '@zilliz/milvus2-sdk-node': '>=2.3.5' + apify-client: ^2.7.1 + assemblyai: ^4.6.0 + azion: ^1.11.1 + better-sqlite3: '>=9.4.0 <12.0.0' + cassandra-driver: ^4.7.2 + cborg: ^4.1.1 + cheerio: ^1.0.0-rc.12 + chromadb: '*' + closevector-common: 0.1.3 + closevector-node: 0.1.6 + closevector-web: 0.1.6 + cohere-ai: '*' + convex: ^1.3.1 + crypto-js: ^4.2.0 + d3-dsv: ^2.0.0 + discord.js: ^14.14.1 + duck-duck-scrape: ^2.2.5 + epub2: ^3.0.1 + faiss-node: '*' + fast-xml-parser: '*' + firebase-admin: ^11.9.0 || ^12.0.0 || ^13.0.0 + google-auth-library: '*' + googleapis: '*' + hnswlib-node: ^3.0.0 + html-to-text: ^9.0.5 + ibm-cloud-sdk-core: '*' + ignore: ^5.2.0 + interface-datastore: ^8.2.11 + ioredis: ^5.3.2 + it-all: ^3.0.4 + jsdom: '*' + jsonwebtoken: ^9.0.2 + lodash: ^4.17.21 + lunary: ^0.7.10 + mammoth: ^1.11.0 + mariadb: ^3.4.0 + mem0ai: ^2.1.8 + mongodb: '*' + mysql2: ^3.9.8 + neo4j-driver: '*' + node-llama-cpp: '>=3.0.0' + notion-to-md: ^3.1.0 + officeparser: ^4.0.4 + openai: '*' + pdf-parse: 1.1.1 + pg: ^8.11.0 + pg-copy-streams: ^6.0.5 + pickleparser: ^0.2.1 + playwright: ^1.32.1 + portkey-ai: ^0.1.11 + puppeteer: '*' + pyodide: '>=0.24.1 <0.27.0' + redis: '*' + replicate: '*' + sonix-speech-recognition: ^2.1.1 + srt-parser-2: ^1.2.3 + typeorm: ^0.3.26 + typesense: ^1.5.3 + usearch: ^1.1.1 + voy-search: 0.6.2 + weaviate-client: '*' + word-extractor: '*' + ws: ^8.14.2 + youtubei.js: '*' + peerDependenciesMeta: + '@arcjet/redact': + optional: true + '@aws-crypto/sha256-js': + optional: true + '@aws-sdk/client-dynamodb': + optional: true + '@aws-sdk/client-lambda': + optional: true + '@aws-sdk/client-s3': + optional: true + '@aws-sdk/client-sagemaker-runtime': + optional: true + '@aws-sdk/client-sfn': + 
optional: true + '@aws-sdk/credential-provider-node': + optional: true + '@aws-sdk/dsql-signer': + optional: true + '@azure/search-documents': + optional: true + '@azure/storage-blob': + optional: true + '@browserbasehq/sdk': + optional: true + '@clickhouse/client': + optional: true + '@datastax/astra-db-ts': + optional: true + '@elastic/elasticsearch': + optional: true + '@getmetal/metal-sdk': + optional: true + '@getzep/zep-cloud': + optional: true + '@getzep/zep-js': + optional: true + '@gomomento/sdk-core': + optional: true + '@google-cloud/storage': + optional: true + '@gradientai/nodejs-sdk': + optional: true + '@huggingface/inference': + optional: true + '@huggingface/transformers': + optional: true + '@lancedb/lancedb': + optional: true + '@layerup/layerup-security': + optional: true + '@libsql/client': + optional: true + '@mendable/firecrawl-js': + optional: true + '@mlc-ai/web-llm': + optional: true + '@mozilla/readability': + optional: true + '@neondatabase/serverless': + optional: true + '@notionhq/client': + optional: true + '@opensearch-project/opensearch': + optional: true + '@pinecone-database/pinecone': + optional: true + '@planetscale/database': + optional: true + '@premai/prem-sdk': + optional: true + '@qdrant/js-client-rest': + optional: true + '@raycast/api': + optional: true + '@rockset/client': + optional: true + '@smithy/eventstream-codec': + optional: true + '@smithy/protocol-http': + optional: true + '@smithy/signature-v4': + optional: true + '@smithy/util-utf8': + optional: true + '@spider-cloud/spider-client': + optional: true + '@supabase/supabase-js': + optional: true + '@tensorflow-models/universal-sentence-encoder': + optional: true + '@tensorflow/tfjs-core': + optional: true + '@upstash/ratelimit': + optional: true + '@upstash/redis': + optional: true + '@upstash/vector': + optional: true + '@vercel/kv': + optional: true + '@vercel/postgres': + optional: true + '@writerai/writer-sdk': + optional: true + '@xata.io/client': + 
optional: true + '@xenova/transformers': + optional: true + '@zilliz/milvus2-sdk-node': + optional: true + apify-client: + optional: true + assemblyai: + optional: true + azion: + optional: true + better-sqlite3: + optional: true + cassandra-driver: + optional: true + cborg: + optional: true + cheerio: + optional: true + chromadb: + optional: true + closevector-common: + optional: true + closevector-node: + optional: true + closevector-web: + optional: true + cohere-ai: + optional: true + convex: + optional: true + crypto-js: + optional: true + d3-dsv: + optional: true + discord.js: + optional: true + duck-duck-scrape: + optional: true + epub2: + optional: true + faiss-node: + optional: true + fast-xml-parser: + optional: true + firebase-admin: + optional: true + google-auth-library: + optional: true + googleapis: + optional: true + hnswlib-node: + optional: true + html-to-text: + optional: true + ignore: + optional: true + interface-datastore: + optional: true + ioredis: + optional: true + it-all: + optional: true + jsdom: + optional: true + jsonwebtoken: + optional: true + lodash: + optional: true + lunary: + optional: true + mammoth: + optional: true + mariadb: + optional: true + mem0ai: + optional: true + mongodb: + optional: true + mysql2: + optional: true + neo4j-driver: + optional: true + node-llama-cpp: + optional: true + notion-to-md: + optional: true + officeparser: + optional: true + pdf-parse: + optional: true + pg: + optional: true + pg-copy-streams: + optional: true + pickleparser: + optional: true + playwright: + optional: true + portkey-ai: + optional: true + puppeteer: + optional: true + pyodide: + optional: true + redis: + optional: true + replicate: + optional: true + sonix-speech-recognition: + optional: true + srt-parser-2: + optional: true + typeorm: + optional: true + typesense: + optional: true + usearch: + optional: true + voy-search: + optional: true + weaviate-client: + optional: true + word-extractor: + optional: true + ws: + optional: 
true + youtubei.js: + optional: true + + '@langchain/core@1.0.6': + resolution: {integrity: sha512-rDSjXATujCdJlL+OJFfyZhEca8kLmqGr4W2ebJvSHiUgXEDqu/IOWC+ZWgoKKHkGOGFdVTqQ7Qi0j2RnYS9Qlg==} + engines: {node: '>=20'} + + '@langchain/groq@1.0.1': + resolution: {integrity: sha512-vDQzv6A3mjG0/W/7vL4Iq+dnmhSbMHln+b7Rna810trjZzfNPZhAP6omqZyzCKIqjsQYUH4ODLnSUCNiarfYsQ==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.0 + + '@langchain/langgraph-api@1.0.4': + resolution: {integrity: sha512-5oJZSerKCybGtdyKe6+5S/nUl1saqGRn4+qLcIM+cEaBzHHBIxSUOlXpGTJeqB1JlZJ8Wmp8jhoMZejoqsRaZA==} + engines: {node: ^18.19.0 || >=20.16.0} + peerDependencies: + '@langchain/core': ^0.3.59 || ^1.0.1 + '@langchain/langgraph': ^0.2.57 || ^0.3.0 || ^0.4.0 || ^1.0.0-alpha || ^1.0.0 + '@langchain/langgraph-checkpoint': ~0.0.16 || ^0.1.0 || ~1.0.0 + '@langchain/langgraph-sdk': ~0.0.16 || ^0.1.0 || ~1.0.0 + typescript: ^5.5.4 + peerDependenciesMeta: + '@langchain/langgraph-sdk': + optional: true + + '@langchain/langgraph-checkpoint@1.0.0': + resolution: {integrity: sha512-xrclBGvNCXDmi0Nz28t3vjpxSH6UYx6w5XAXSiiB1WEdc2xD2iY/a913I3x3a31XpInUW/GGfXXfePfaghV54A==} + engines: {node: '>=18'} + peerDependencies: + '@langchain/core': ^1.0.1 + + '@langchain/langgraph-cli@1.0.4': + resolution: {integrity: sha512-KBnopC5RauH07amspVZDEP438lilgU1Dkk5b8JLOz/SbJsG9lD5CHNRCWTMzncdxBgeqvih/40qPchIYywXjHg==} + engines: {node: ^18.19.0 || >=20.16.0} + hasBin: true + + '@langchain/langgraph-sdk@1.0.2': + resolution: {integrity: sha512-r3noE2KouUdfRCmHxZcS06Io8I3jplEXA+ORpYECa89VAdHWaknWRJMIBBwVJkQQJ9fvNBmOmyfTcbRkeTTakw==} + peerDependencies: + '@langchain/core': ^1.0.1 + react: ^18 || ^19 + react-dom: ^18 || ^19 + peerDependenciesMeta: + '@langchain/core': + optional: true + react: + optional: true + react-dom: + optional: true + + '@langchain/langgraph-ui@1.0.4': + resolution: {integrity: sha512-ieX5Rgd7g/sn2mHan+L5EPTfBNIFgCOz1BjlVeoRhbCFw4Hv0cA8XGsOf24kv9i/wTmYdiei3+G0hjnflMOHLg==} + engines: 
{node: ^18.19.0 || >=20.16.0} + hasBin: true + + '@langchain/langgraph@1.0.2': + resolution: {integrity: sha512-syxzzWTnmpCL+RhUEvalUeOXFoZy/KkzHa2Da2gKf18zsf9Dkbh3rfnRDrTyUGS1XSTejq07s4rg1qntdEDs2A==} + engines: {node: '>=18'} + peerDependencies: + '@langchain/core': ^1.0.1 + zod: ^3.25.32 || ^4.1.0 + zod-to-json-schema: ^3.x + peerDependenciesMeta: + zod-to-json-schema: + optional: true + + '@langchain/ollama@1.0.1': + resolution: {integrity: sha512-Pe32hhTpMvnRlNFJxkdu6r1QzsONGz5uvoLiMU1TpgAUu7EyKr2osymlgjBLqDe2vMKUmqHb+yWRH0IppDBUOg==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.0 + + '@langchain/openai@1.1.2': + resolution: {integrity: sha512-o642toyaRfx7Cej10jK6eK561gkIGTCQrN42fqAU9OhmTBkUflmRNKhqbcHj/RU+NOJfFM//hgwNU2gHespEkw==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.0 + + '@langchain/textsplitters@1.0.0': + resolution: {integrity: sha512-L1gOwOJXeM+6MKzrj9shSsDyH32j898jgqvVArOjdge2zLyY+Mv4aOuyAAxbPyaFdQXlxKfa9xjqIUyv8TzrqA==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.0 + + '@langchain/weaviate@1.0.0': + resolution: {integrity: sha512-aVZZs5peS+QAacwfGeOjJivnwrnjIgo0Q96GqVZMJAMQARgy0Vm8F6l1QKKpKTLXq5MF5cV2INwo76FJ88Ap/A==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.0 + + '@playwright/test@1.56.1': + resolution: {integrity: sha512-vSMYtL/zOcFpvJCW71Q/OEGQb7KYBPAdKh35WNSkaZA75JlAO8ED8UN6GUNTm3drWomcbcqRPFqQbLae8yBTdg==} + engines: {node: '>=18'} + hasBin: true + + '@protobufjs/aspromise@1.1.2': + resolution: {integrity: sha512-j+gKExEuLmKwvz3OgROXtrJ2UG2x8Ch2YZUxahh+s1F2HZ+wAceUNLkvy6zKCPVRkU++ZWQrdxsUeQXmcg4uoQ==} + + '@protobufjs/base64@1.1.2': + resolution: {integrity: sha512-AZkcAA5vnN/v4PDqKyMR5lx7hZttPDgClv83E//FMNhR2TMcLUhfRUBHCmSl0oi9zMgDDqRUJkSxO3wm85+XLg==} + + '@protobufjs/codegen@2.0.4': + resolution: {integrity: sha512-YyFaikqM5sH0ziFZCN3xDC7zeGaB/d0IUb9CATugHWbd1FRFwWwt4ld4OYMPWu5a3Xe01mGAULCdqhMlPl29Jg==} + + 
'@protobufjs/eventemitter@1.1.0': + resolution: {integrity: sha512-j9ednRT81vYJ9OfVuXG6ERSTdEL1xVsNgqpkxMsbIabzSo3goCjDIveeGv5d03om39ML71RdmrGNjG5SReBP/Q==} + + '@protobufjs/fetch@1.1.0': + resolution: {integrity: sha512-lljVXpqXebpsijW71PZaCYeIcE5on1w5DlQy5WH6GLbFryLUrBD4932W/E2BSpfRJWseIL4v/KPgBFxDOIdKpQ==} + + '@protobufjs/float@1.0.2': + resolution: {integrity: sha512-Ddb+kVXlXst9d+R9PfTIxh1EdNkgoRe5tOX6t01f1lYWOvJnSPDBlG241QLzcyPdoNTsblLUdujGSE4RzrTZGQ==} + + '@protobufjs/inquire@1.1.0': + resolution: {integrity: sha512-kdSefcPdruJiFMVSbn801t4vFK7KB/5gd2fYvrxhuJYg8ILrmn9SKSX2tZdV6V+ksulWqS7aXjBcRXl3wHoD9Q==} + + '@protobufjs/path@1.1.2': + resolution: {integrity: sha512-6JOcJ5Tm08dOHAbdR3GrvP+yUUfkjG5ePsHYczMFLq3ZmMkAD98cDgcT2iA1lJ9NVwFd4tH/iSSoe44YWkltEA==} + + '@protobufjs/pool@1.1.0': + resolution: {integrity: sha512-0kELaGSIDBKvcgS4zkjz1PeddatrjYcmMWOlAuAPwAeccUrPHdUqo/J6LiymHHEiJT5NrF1UVwxY14f+fy4WQw==} + + '@protobufjs/utf8@1.1.0': + resolution: {integrity: sha512-Vvn3zZrhQZkkBE8LSuW3em98c0FwgO4nxzv6OdSxPKJIEKY2bGbHn+mhGIPerzI4twdxaP8/0+06HBpwf345Lw==} + + '@rollup/rollup-android-arm-eabi@4.53.3': + resolution: {integrity: sha512-mRSi+4cBjrRLoaal2PnqH82Wqyb+d3HsPUN/W+WslCXsZsyHa9ZeQQX/pQsZaVIWDkPcpV6jJ+3KLbTbgnwv8w==} + cpu: [arm] + os: [android] + + '@rollup/rollup-android-arm64@4.53.3': + resolution: {integrity: sha512-CbDGaMpdE9sh7sCmTrTUyllhrg65t6SwhjlMJsLr+J8YjFuPmCEjbBSx4Z/e4SmDyH3aB5hGaJUP2ltV/vcs4w==} + cpu: [arm64] + os: [android] + + '@rollup/rollup-darwin-arm64@4.53.3': + resolution: {integrity: sha512-Nr7SlQeqIBpOV6BHHGZgYBuSdanCXuw09hon14MGOLGmXAFYjx1wNvquVPmpZnl0tLjg25dEdr4IQ6GgyToCUA==} + cpu: [arm64] + os: [darwin] + + '@rollup/rollup-darwin-x64@4.53.3': + resolution: {integrity: sha512-DZ8N4CSNfl965CmPktJ8oBnfYr3F8dTTNBQkRlffnUarJ2ohudQD17sZBa097J8xhQ26AwhHJ5mvUyQW8ddTsQ==} + cpu: [x64] + os: [darwin] + + '@rollup/rollup-freebsd-arm64@4.53.3': + resolution: {integrity: 
sha512-yMTrCrK92aGyi7GuDNtGn2sNW+Gdb4vErx4t3Gv/Tr+1zRb8ax4z8GWVRfr3Jw8zJWvpGHNpss3vVlbF58DZ4w==} + cpu: [arm64] + os: [freebsd] + + '@rollup/rollup-freebsd-x64@4.53.3': + resolution: {integrity: sha512-lMfF8X7QhdQzseM6XaX0vbno2m3hlyZFhwcndRMw8fbAGUGL3WFMBdK0hbUBIUYcEcMhVLr1SIamDeuLBnXS+Q==} + cpu: [x64] + os: [freebsd] + + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': + resolution: {integrity: sha512-k9oD15soC/Ln6d2Wv/JOFPzZXIAIFLp6B+i14KhxAfnq76ajt0EhYc5YPeX6W1xJkAdItcVT+JhKl1QZh44/qw==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm-musleabihf@4.53.3': + resolution: {integrity: sha512-vTNlKq+N6CK/8UktsrFuc+/7NlEYVxgaEgRXVUVK258Z5ymho29skzW1sutgYjqNnquGwVUObAaxae8rZ6YMhg==} + cpu: [arm] + os: [linux] + + '@rollup/rollup-linux-arm64-gnu@4.53.3': + resolution: {integrity: sha512-RGrFLWgMhSxRs/EWJMIFM1O5Mzuz3Xy3/mnxJp/5cVhZ2XoCAxJnmNsEyeMJtpK+wu0FJFWz+QF4mjCA7AUQ3w==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-arm64-musl@4.53.3': + resolution: {integrity: sha512-kASyvfBEWYPEwe0Qv4nfu6pNkITLTb32p4yTgzFCocHnJLAHs+9LjUu9ONIhvfT/5lv4YS5muBHyuV84epBo/A==} + cpu: [arm64] + os: [linux] + + '@rollup/rollup-linux-loong64-gnu@4.53.3': + resolution: {integrity: sha512-JiuKcp2teLJwQ7vkJ95EwESWkNRFJD7TQgYmCnrPtlu50b4XvT5MOmurWNrCj3IFdyjBQ5p9vnrX4JM6I8OE7g==} + cpu: [loong64] + os: [linux] + + '@rollup/rollup-linux-ppc64-gnu@4.53.3': + resolution: {integrity: sha512-EoGSa8nd6d3T7zLuqdojxC20oBfNT8nexBbB/rkxgKj5T5vhpAQKKnD+h3UkoMuTyXkP5jTjK/ccNRmQrPNDuw==} + cpu: [ppc64] + os: [linux] + + '@rollup/rollup-linux-riscv64-gnu@4.53.3': + resolution: {integrity: sha512-4s+Wped2IHXHPnAEbIB0YWBv7SDohqxobiiPA1FIWZpX+w9o2i4LezzH/NkFUl8LRci/8udci6cLq+jJQlh+0g==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-riscv64-musl@4.53.3': + resolution: {integrity: sha512-68k2g7+0vs2u9CxDt5ktXTngsxOQkSEV/xBbwlqYcUrAVh6P9EgMZvFsnHy4SEiUl46Xf0IObWVbMvPrr2gw8A==} + cpu: [riscv64] + os: [linux] + + '@rollup/rollup-linux-s390x-gnu@4.53.3': + resolution: {integrity: 
sha512-VYsFMpULAz87ZW6BVYw3I6sWesGpsP9OPcyKe8ofdg9LHxSbRMd7zrVrr5xi/3kMZtpWL/wC+UIJWJYVX5uTKg==} + cpu: [s390x] + os: [linux] + + '@rollup/rollup-linux-x64-gnu@4.53.3': + resolution: {integrity: sha512-3EhFi1FU6YL8HTUJZ51imGJWEX//ajQPfqWLI3BQq4TlvHy4X0MOr5q3D2Zof/ka0d5FNdPwZXm3Yyib/UEd+w==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-linux-x64-musl@4.53.3': + resolution: {integrity: sha512-eoROhjcc6HbZCJr+tvVT8X4fW3/5g/WkGvvmwz/88sDtSJzO7r/blvoBDgISDiCjDRZmHpwud7h+6Q9JxFwq1Q==} + cpu: [x64] + os: [linux] + + '@rollup/rollup-openharmony-arm64@4.53.3': + resolution: {integrity: sha512-OueLAWgrNSPGAdUdIjSWXw+u/02BRTcnfw9PN41D2vq/JSEPnJnVuBgw18VkN8wcd4fjUs+jFHVM4t9+kBSNLw==} + cpu: [arm64] + os: [openharmony] + + '@rollup/rollup-win32-arm64-msvc@4.53.3': + resolution: {integrity: sha512-GOFuKpsxR/whszbF/bzydebLiXIHSgsEUp6M0JI8dWvi+fFa1TD6YQa4aSZHtpmh2/uAlj/Dy+nmby3TJ3pkTw==} + cpu: [arm64] + os: [win32] + + '@rollup/rollup-win32-ia32-msvc@4.53.3': + resolution: {integrity: sha512-iah+THLcBJdpfZ1TstDFbKNznlzoxa8fmnFYK4V67HvmuNYkVdAywJSoteUszvBQ9/HqN2+9AZghbajMsFT+oA==} + cpu: [ia32] + os: [win32] + + '@rollup/rollup-win32-x64-gnu@4.53.3': + resolution: {integrity: sha512-J9QDiOIZlZLdcot5NXEepDkstocktoVjkaKUtqzgzpt2yWjGlbYiKyp05rWwk4nypbYUNoFAztEgixoLaSETkg==} + cpu: [x64] + os: [win32] + + '@rollup/rollup-win32-x64-msvc@4.53.3': + resolution: {integrity: sha512-UhTd8u31dXadv0MopwGgNOBpUVROFKWVQgAg5N1ESyCz8AuBcMqm4AuTjrwgQKGDfoFuz02EuMRHQIw/frmYKQ==} + cpu: [x64] + os: [win32] + + '@sec-ant/readable-stream@0.4.1': + resolution: {integrity: sha512-831qok9r2t8AlxLko40y2ebgSDhenenCatLVeW/uBtnHPyhHOvG0C7TvfgecV+wHzIm5KUICgzmVpWS+IMEAeg==} + + '@sindresorhus/merge-streams@4.0.0': + resolution: {integrity: sha512-tlqY9xq5ukxTUZBmoOp+m61cqwQD5pHJtFY3Mn8CA8ps6yghLH/Hw8UPdqg4OLmFW3IFlcXnQNmo/dh8HzXYIQ==} + engines: {node: '>=18'} + + '@so-ric/colorspace@1.1.6': + resolution: {integrity: 
sha512-/KiKkpHNOBgkFJwu9sh48LkHSMYGyuTcSFK/qMBdnOAlrRJzRSXAOFB5qwzaVQuDl8wAvHVMkaASQDReTahxuw==} + + '@tailwindcss/node@4.1.17': + resolution: {integrity: sha512-csIkHIgLb3JisEFQ0vxr2Y57GUNYh447C8xzwj89U/8fdW8LhProdxvnVH6U8M2Y73QKiTIH+LWbK3V2BBZsAg==} + + '@tailwindcss/oxide-android-arm64@4.1.17': + resolution: {integrity: sha512-BMqpkJHgOZ5z78qqiGE6ZIRExyaHyuxjgrJ6eBO5+hfrfGkuya0lYfw8fRHG77gdTjWkNWEEm+qeG2cDMxArLQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@tailwindcss/oxide-darwin-arm64@4.1.17': + resolution: {integrity: sha512-EquyumkQweUBNk1zGEU/wfZo2qkp/nQKRZM8bUYO0J+Lums5+wl2CcG1f9BgAjn/u9pJzdYddHWBiFXJTcxmOg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@tailwindcss/oxide-darwin-x64@4.1.17': + resolution: {integrity: sha512-gdhEPLzke2Pog8s12oADwYu0IAw04Y2tlmgVzIN0+046ytcgx8uZmCzEg4VcQh+AHKiS7xaL8kGo/QTiNEGRog==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@tailwindcss/oxide-freebsd-x64@4.1.17': + resolution: {integrity: sha512-hxGS81KskMxML9DXsaXT1H0DyA+ZBIbyG/sSAjWNe2EDl7TkPOBI42GBV3u38itzGUOmFfCzk1iAjDXds8Oh0g==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.17': + resolution: {integrity: sha512-k7jWk5E3ldAdw0cNglhjSgv501u7yrMf8oeZ0cElhxU6Y2o7f8yqelOp3fhf7evjIS6ujTI3U8pKUXV2I4iXHQ==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.17': + resolution: {integrity: sha512-HVDOm/mxK6+TbARwdW17WrgDYEGzmoYayrCgmLEw7FxTPLcp/glBisuyWkFz/jb7ZfiAXAXUACfyItn+nTgsdQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-musl@4.1.17': + resolution: {integrity: sha512-HvZLfGr42i5anKtIeQzxdkw/wPqIbpeZqe7vd3V9vI3RQxe3xU1fLjss0TjyhxWcBaipk7NYwSrwTwK1hJARMg==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-gnu@4.1.17': + resolution: {integrity: 
sha512-M3XZuORCGB7VPOEDH+nzpJ21XPvK5PyjlkSFkFziNHGLc5d6g3di2McAAblmaSUNl8IOmzYwLx9NsE7bplNkwQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-musl@4.1.17': + resolution: {integrity: sha512-k7f+pf9eXLEey4pBlw+8dgfJHY4PZ5qOUFDyNf7SI6lHjQ9Zt7+NcscjpwdCEbYi6FI5c2KDTDWyf2iHcCSyyQ==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-wasm32-wasi@4.1.17': + resolution: {integrity: sha512-cEytGqSSoy7zK4JRWiTCx43FsKP/zGr0CsuMawhH67ONlH+T79VteQeJQRO/X7L0juEUA8ZyuYikcRBf0vsxhg==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + bundledDependencies: + - '@napi-rs/wasm-runtime' + - '@emnapi/core' + - '@emnapi/runtime' + - '@tybys/wasm-util' + - '@emnapi/wasi-threads' + - tslib + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.17': + resolution: {integrity: sha512-JU5AHr7gKbZlOGvMdb4722/0aYbU+tN6lv1kONx0JK2cGsh7g148zVWLM0IKR3NeKLv+L90chBVYcJ8uJWbC9A==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@tailwindcss/oxide-win32-x64-msvc@4.1.17': + resolution: {integrity: sha512-SKWM4waLuqx0IH+FMDUw6R66Hu4OuTALFgnleKbqhgGU30DY20NORZMZUKgLRjQXNN2TLzKvh48QXTig4h4bGw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@tailwindcss/oxide@4.1.17': + resolution: {integrity: sha512-F0F7d01fmkQhsTjXezGBLdrl1KresJTcI3DB8EkScCldyKp3Msz4hub4uyYaVnk88BAS1g5DQjjF6F5qczheLA==} + engines: {node: '>= 10'} + + '@tailwindcss/postcss@4.1.17': + resolution: {integrity: sha512-+nKl9N9mN5uJ+M7dBOOCzINw94MPstNR/GtIhz1fpZysxL/4a+No64jCBD6CPN+bIHWFx3KWuu8XJRrj/572Dw==} + + '@tokenizer/token@0.3.0': + resolution: {integrity: sha512-OvjF+z51L3ov0OyAU0duzsYuvO01PH7x4t6DJx+guahgTnBHkhJdG7soQeTSFLWN3efnHyibZ4Z8l2EuWwJN3A==} + + '@types/body-parser@1.19.6': + resolution: {integrity: sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==} + + '@types/connect@3.4.38': + resolution: {integrity: sha512-K6uROf1LD88uDQqJCktA4yzL1YYAK6NgfsI0v/mTgyPKWsX1CnJ0XPSDhViejru1GcRkLWb8RlzFYJRqGUbaug==} + 
+ '@types/cors@2.8.19': + resolution: {integrity: sha512-mFNylyeyqN93lfe/9CSxOGREz8cpzAhH+E93xJ4xWQf62V8sQ/24reV2nyzUWM6H6Xji+GGHpkbLe7pVoUEskg==} + + '@types/debug@4.1.12': + resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/express-serve-static-core@5.1.0': + resolution: {integrity: sha512-jnHMsrd0Mwa9Cf4IdOzbz543y4XJepXrbia2T4b6+spXC2We3t1y6K44D3mR8XMFSXMCf3/l7rCgddfx7UNVBA==} + + '@types/express@5.0.5': + resolution: {integrity: sha512-LuIQOcb6UmnF7C1PCFmEU1u2hmiHL43fgFQX67sN3H4Z+0Yk0Neo++mFsBjhOAuLzvlQeqAAkeDOZrJs9rzumQ==} + + '@types/http-errors@2.0.5': + resolution: {integrity: sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==} + + '@types/jsdom@27.0.0': + resolution: {integrity: sha512-NZyFl/PViwKzdEkQg96gtnB8wm+1ljhdDay9ahn4hgb+SfVtPCbm3TlmDUFXTA+MGN3CijicnMhG18SI5H3rFw==} + + '@types/json-schema@7.0.15': + resolution: {integrity: sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==} + + '@types/lodash@4.17.21': + resolution: {integrity: sha512-FOvQ0YPD5NOfPgMzJihoT+Za5pdkDJWcbpuj1DjaKZIr/gxodQjY/uWEFlTNqW2ugXHUiL8lRQgw63dzKHZdeQ==} + + '@types/mime@1.3.5': + resolution: {integrity: sha512-/pyBZWSLD2n0dcHE3hq8s8ZvcETHtEuF+3E7XVt0Ig2nvsVQXdghHVcEkIWjy9A0wKfTn97a/PSDYohKIlnP/w==} + + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + + '@types/node-fetch@2.6.13': + resolution: {integrity: sha512-QGpRVpzSaUs30JBSGPjOg4Uveu384erbHBoT1zeONvyCfwQxIkUshLAOqN/k9EjGviPRmWTTe6aH2qySWKTVSw==} + + '@types/node@18.19.130': + resolution: {integrity: sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==} + + 
'@types/node@22.19.1': + resolution: {integrity: sha512-LCCV0HdSZZZb34qifBsyWlUmok6W7ouER+oQIGBScS8EsZsQbrtFTUrDX4hOl+CS6p7cnNC4td+qrSVGSCTUfQ==} + + '@types/pg@8.15.6': + resolution: {integrity: sha512-NoaMtzhxOrubeL/7UZuNTrejB4MPAJ0RpxZqXQf2qXuVlTPuG6Y8p4u9dKRaue4yjmC7ZhzVO2/Yyyn25znrPQ==} + + '@types/qs@6.14.0': + resolution: {integrity: sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==} + + '@types/range-parser@1.2.7': + resolution: {integrity: sha512-hKormJbkJqzQGhziax5PItDUTMAM9uE2XXQmM37dyd4hVM+5aVl7oVxMVUiVQn2oCQFN/LKCZdvSM0pFRqbSmQ==} + + '@types/retry@0.12.0': + resolution: {integrity: sha512-wWKOClTTiizcZhXnPY4wikVAwmdYHp8q6DmC+EJUzAMsycb7HB32Kh9RN4+0gExjmPmZSAQjgURXIGATPegAvA==} + + '@types/send@0.17.6': + resolution: {integrity: sha512-Uqt8rPBE8SY0RK8JB1EzVOIZ32uqy8HwdxCnoCOsYrvnswqmFZ/k+9Ikidlk/ImhsdvBsloHbAlewb2IEBV/Og==} + + '@types/send@1.2.1': + resolution: {integrity: sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==} + + '@types/serve-static@1.15.10': + resolution: {integrity: sha512-tRs1dB+g8Itk72rlSI2ZrW6vZg0YrLI81iQSTkMmOqnqCaNr/8Ek4VwWcN5vZgCYWbg/JJSGBlUaYGAOP73qBw==} + + '@types/tough-cookie@4.0.5': + resolution: {integrity: sha512-/Ad8+nIOV7Rl++6f1BdKxFSMgmoqEoYbHRpPcx3JEfv8VRsQe9Z4mCXeJBzxs7mbHY/XOZZuXlRNfhpVPbs6ZA==} + + '@types/triple-beam@1.3.5': + resolution: {integrity: sha512-6WaYesThRMCl19iryMYP7/x2OVgCtbIVflDGFpWnb9irXI3UjYE4AzmYuiUKY1AJstGijoY+MgUszMgRxIYTYw==} + + '@types/uuid@10.0.0': + resolution: {integrity: sha512-7gqG38EyHgyP1S+7+xomFtL+ZNHcKv6DwNaCZmJmo1vgMugyF3TCnXVg4t1uk89mLNwnLtnY3TpOpCOyp1/xHQ==} + + '@types/yauzl@2.10.3': + resolution: {integrity: sha512-oJoftv0LSuaDZE3Le4DbKX+KS9G36NzOeSap90UIK0yMA/NhKJhqlSGtNDORNRaIbQfzjXDrQa0ytJ6mNRGz/Q==} + + '@typescript/vfs@1.6.2': + resolution: {integrity: sha512-hoBwJwcbKHmvd2QVebiytN1aELvpk9B74B4L1mFm/XT1Q/VOYAWl2vQ9AWRFtQq8zmz6enTpfTV8WRc4ATjW/g==} + peerDependencies: + typescript: 
'*' + + '@vitest/expect@2.1.9': + resolution: {integrity: sha512-UJCIkTBenHeKT1TTlKMJWy1laZewsRIzYighyYiJKZreqtdxSos/S1t+ktRMQWu2CKqaarrkeszJx1cgC5tGZw==} + + '@vitest/mocker@2.1.9': + resolution: {integrity: sha512-tVL6uJgoUdi6icpxmdrn5YNo3g3Dxv+IHJBr0GXHaEdTcw3F+cPKnsXFhli6nO+f/6SDKPHEK1UN+k+TQv0Ehg==} + peerDependencies: + msw: ^2.4.9 + vite: ^5.0.0 + peerDependenciesMeta: + msw: + optional: true + vite: + optional: true + + '@vitest/pretty-format@2.1.9': + resolution: {integrity: sha512-KhRIdGV2U9HOUzxfiHmY8IFHTdqtOhIzCpd8WRdJiE7D/HUcZVD0EgQCVjm+Q9gkUXWgBvMmTtZgIG48wq7sOQ==} + + '@vitest/runner@2.1.9': + resolution: {integrity: sha512-ZXSSqTFIrzduD63btIfEyOmNcBmQvgOVsPNPe0jYtESiXkhd8u2erDLnMxmGrDCwHCCHE7hxwRDCT3pt0esT4g==} + + '@vitest/snapshot@2.1.9': + resolution: {integrity: sha512-oBO82rEjsxLNJincVhLhaxxZdEtV0EFHMK5Kmx5sJ6H9L183dHECjiefOAdnqpIgT5eZwT04PoggUnW88vOBNQ==} + + '@vitest/spy@2.1.9': + resolution: {integrity: sha512-E1B35FwzXXTs9FHNK6bDszs7mtydNi5MIfUWpceJ8Xbfb1gBMscAnwLbEu+B44ed6W3XjL9/ehLPHR1fkf1KLQ==} + + '@vitest/utils@2.1.9': + resolution: {integrity: sha512-v0psaMSkNJ3A2NMrUEHFRzJtDPFn+/VWZ5WxImB21T9fjucJRmS7xCS3ppEnARb9y11OAzaD+P2Ps+b+BGX5iQ==} + + abort-controller-x@0.4.3: + resolution: {integrity: sha512-VtUwTNU8fpMwvWGn4xE93ywbogTYsuT+AUxAXOeelbXuQVIwNmC5YLeho9sH4vZ4ITW8414TTAOG1nW6uIVHCA==} + + abort-controller@3.0.0: + resolution: {integrity: sha512-h8lQ8tacZYnR3vNQTgibj+tODHI5/+l06Au2Pcriv/Gmet0eaj4TwWH41sO9wnHDiQsEj19q0drzdWdeAHtweg==} + engines: {node: '>=6.5'} + + accepts@1.3.8: + resolution: {integrity: sha512-PYAthTa2m2VKxuvSD3DPC/Gy+U+sOA1LAuT8mkmRuvw+NACSaeXEQ+NHcVF7rONl6qcaxV3Uuemwawk+7+SJLw==} + engines: {node: '>= 0.6'} + + agent-base@7.1.4: + resolution: {integrity: sha512-MnA+YT8fwfJPgBx3m60MNqakm30XOkyIoH1y6huTQvC0PwZG7ki8NacLBcrPbNoo8vEZy7Jpuk7+jMO+CUovTQ==} + engines: {node: '>= 14'} + + agentkeepalive@4.6.0: + resolution: {integrity: 
sha512-kja8j7PjmncONqaTsB8fQ+wE2mSU2DJ9D4XKoJ5PFWIdRMa6SLSN1ff4mOr4jCbfRSsxR4keIiySJU0N9T5hIQ==} + engines: {node: '>= 8.0.0'} + + ansi-regex@5.0.1: + resolution: {integrity: sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==} + engines: {node: '>=8'} + + ansi-styles@4.3.0: + resolution: {integrity: sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==} + engines: {node: '>=8'} + + ansi-styles@5.2.0: + resolution: {integrity: sha512-Cxwpt2SfTzTtXcfOlzGEee8O+c+MmUgGrNiBcXnuWxuFJHe6a5Hz7qwhwe5OgaSYI0IJvkLqWX1ASG+cJOkEiA==} + engines: {node: '>=10'} + + argparse@2.0.1: + resolution: {integrity: sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==} + + array-flatten@1.1.1: + resolution: {integrity: sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==} + + assertion-error@2.0.1: + resolution: {integrity: sha512-Izi8RQcffqCeNVgFigKli1ssklIbpHnCYc6AknXGYoB6grJqyeby7jv12JUQgmTAnIDnbck1uxksT4dzN3PWBA==} + engines: {node: '>=12'} + + async@3.2.6: + resolution: {integrity: sha512-htCUDlxyyCLMgaM3xXg0C0LW2xqfuQ6p05pCEIsXuyQ+a1koYKTuBMzRNwmybfLgvJDMd0r1LTn4+E0Ti6C2AA==} + + asynckit@0.4.0: + resolution: {integrity: sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==} + + autoprefixer@10.4.22: + resolution: {integrity: sha512-ARe0v/t9gO28Bznv6GgqARmVqcWOV3mfgUPn9becPHMiD3o9BwlRgaeccZnwTpZ7Zwqrm+c1sUSsMxIzQzc8Xg==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + axios@1.13.2: + resolution: {integrity: sha512-VPk9ebNqPcy5lRGuSlKx752IlDatOjT9paPlm8A7yOuW2Fbvp4X3JznJtT4f0GzGLLiWE9W8onz51SqLYwzGaA==} + + base64-js@1.5.1: + resolution: {integrity: sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==} + + baseline-browser-mapping@2.8.31: + resolution: {integrity: 
sha512-a28v2eWrrRWPpJSzxc+mKwm0ZtVx/G8SepdQZDArnXYU/XS+IF6mp8aB/4E+hH1tyGCoDo3KlUCdlSxGDsRkAw==} + hasBin: true + + bidi-js@1.0.3: + resolution: {integrity: sha512-RKshQI1R3YQ+n9YJz2QQ147P66ELpa1FQEg20Dk8oW9t2KgLbpDLLp9aGZ7y8WHSshDknG0bknqGw5/tyCs5tw==} + + binary-extensions@2.3.0: + resolution: {integrity: sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==} + engines: {node: '>=8'} + + body-parser@1.20.3: + resolution: {integrity: sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==} + engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} + + boolbase@1.0.0: + resolution: {integrity: sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==} + + browserslist@4.28.0: + resolution: {integrity: sha512-tbydkR/CxfMwelN0vwdP/pLkDwyAASZ+VfWm4EOwlB6SWhx1sYnWLqo8N5j0rAzPfzfRaxt0mM/4wPU/Su84RQ==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + buffer-crc32@0.2.13: + resolution: {integrity: sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==} + + buffer-equal-constant-time@1.0.1: + resolution: {integrity: sha512-zRpUiDwd/xk6ADqPMATG8vc9VPrkck7T07OIx0gnjmJAnHnTVXNQG3vfvWNuiZIkwu9KrKdA1iJKfsfTVxE6NA==} + + buffer@6.0.3: + resolution: {integrity: sha512-FTiCpNxtwiZZHEZbcbTIcZjERVICn9yq/pDFkTl95/AxzD1naBctN7YO68riM/gLSDY7sdrMby8hofADYuuqOA==} + + bundle-name@4.1.0: + resolution: {integrity: sha512-tjwM5exMg6BGRI+kNmTntNsvdZS1X8BFYS6tnJ2hdH0kVxM6/eVZ2xy+FqStSWvYmtfFMDLIxurorHwDKfDz5Q==} + engines: {node: '>=18'} + + bytes@3.1.2: + resolution: {integrity: sha512-/Nf7TyzTx6S3yRJObOAV7956r8cr2+Oj8AC5dt8wSP3BQAoeX58NoHyCU8P8zGkNXStjTSi6fzO6F0pBdcYbEg==} + engines: {node: '>= 0.8'} + + cac@6.7.14: + resolution: {integrity: sha512-b6Ilus+c3RrdDk+JhLKUAQfzzgLEPy6wcXqS7f/xe1EETvsDP6GORG7SFuOs6cID5YkqchW/LXZbX5bc8j7ZcQ==} + engines: {node: '>=8'} + + call-bind-apply-helpers@1.0.2: 
+ resolution: {integrity: sha512-Sp1ablJ0ivDkSzjcaJdxEunN5/XvksFJ2sMBFfq6x0ryhQV/2b/KwFe21cMpmHtPOSij8K99/wSfoEuTObmuMQ==} + engines: {node: '>= 0.4'} + + call-bound@1.0.4: + resolution: {integrity: sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==} + engines: {node: '>= 0.4'} + + camelcase@6.3.0: + resolution: {integrity: sha512-Gmy6FhYlCY7uOElZUSbxo2UCDH8owEk996gkbrpsgGtrJLM3J7jGxl9Ic7Qwwj4ivOE5AWZWRMecDdF7hqGjFA==} + engines: {node: '>=10'} + + caniuse-lite@1.0.30001756: + resolution: {integrity: sha512-4HnCNKbMLkLdhJz3TToeVWHSnfJvPaq6vu/eRP0Ahub/07n484XHhBF5AJoSGHdVrS8tKFauUQz8Bp9P7LVx7A==} + + chai@5.3.3: + resolution: {integrity: sha512-4zNhdJD/iOjSH0A05ea+Ke6MU5mmpQcbQsSOkgdaUMJ9zTlDTD/GYlwohmIE2u0gaxHYiVHEn1Fw9mZ/ktJWgw==} + engines: {node: '>=18'} + + chalk@4.1.2: + resolution: {integrity: sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==} + engines: {node: '>=10'} + + check-error@2.1.1: + resolution: {integrity: sha512-OAlb+T7V4Op9OwdkjmguYRqncdlx5JiofwOAUkmTF+jNdHwzTaTs4sRAGpzLF3oOz5xAyDGrPgeIDFQmDOTiJw==} + engines: {node: '>= 16'} + + cheerio-select@2.1.0: + resolution: {integrity: sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==} + + cheerio@1.1.2: + resolution: {integrity: sha512-IkxPpb5rS/d1IiLbHMgfPuS0FgiWTtFIm/Nj+2woXDLTZ7fOT2eqzgYbdMlLweqlHbsZjxEChoVK+7iph7jyQg==} + engines: {node: '>=20.18.1'} + + chokidar@4.0.3: + resolution: {integrity: sha512-Qgzu8kfBvo+cA4962jnP1KkS6Dop5NS6g7R5LFYJr4b8Ub94PPQXUksCw9PvXoeXPRRddRNC5C1JQUR2SMGtnA==} + engines: {node: '>= 14.16.0'} + + chownr@3.0.0: + resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} + engines: {node: '>=18'} + + cliui@8.0.1: + resolution: {integrity: sha512-BSeNnyus75C4//NQ9gQt1/csTXyo/8Sb+afLAkzAptFuMsod9HFokGNudZpi/oQV73hnVK+sR+5PVRMd+Dr7YQ==} + engines: {node: '>=12'} + + 
color-convert@2.0.1: + resolution: {integrity: sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==} + engines: {node: '>=7.0.0'} + + color-convert@3.1.3: + resolution: {integrity: sha512-fasDH2ont2GqF5HpyO4w0+BcewlhHEZOFn9c1ckZdHpJ56Qb7MHhH/IcJZbBGgvdtwdwNbLvxiBEdg336iA9Sg==} + engines: {node: '>=14.6'} + + color-name@1.1.4: + resolution: {integrity: sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==} + + color-name@2.1.0: + resolution: {integrity: sha512-1bPaDNFm0axzE4MEAzKPuqKWeRaT43U/hyxKPBdqTfmPF+d6n7FSoTFxLVULUJOmiLp01KjhIPPH+HrXZJN4Rg==} + engines: {node: '>=12.20'} + + color-string@2.1.4: + resolution: {integrity: sha512-Bb6Cq8oq0IjDOe8wJmi4JeNn763Xs9cfrBcaylK1tPypWzyoy2G3l90v9k64kjphl/ZJjPIShFztenRomi8WTg==} + engines: {node: '>=18'} + + color@5.0.3: + resolution: {integrity: sha512-ezmVcLR3xAVp8kYOm4GS45ZLLgIE6SPAFoduLr6hTDajwb3KZ2F46gulK3XpcwRFb5KKGCSezCBAY4Dw4HsyXA==} + engines: {node: '>=18'} + + colors@1.4.0: + resolution: {integrity: sha512-a+UqTh4kgZg/SlGvfbzDHpgRu7AAQOmmqRHJnxhRZICKFUT91brVhNNt58CMWU9PsBbv3PDCZUHbVxuDiH2mtA==} + engines: {node: '>=0.1.90'} + + combined-stream@1.0.8: + resolution: {integrity: sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==} + engines: {node: '>= 0.8'} + + commander@13.1.0: + resolution: {integrity: sha512-/rFeCpNJQbhSZjGVwO9RFV3xPqbnERS8MmIQzCtD/zl6gpJuV/bMLuN92oG3F7d8oDEHHRrujSXNUr8fpjntKw==} + engines: {node: '>=18'} + + console-table-printer@2.15.0: + resolution: {integrity: sha512-SrhBq4hYVjLCkBVOWaTzceJalvn5K1Zq5aQA6wXC/cYjI3frKWNPEMK3sZsJfNNQApvCQmgBcc13ZKmFj8qExw==} + + content-disposition@0.5.4: + resolution: {integrity: sha512-FveZTNuGw04cxlAiWbzi6zTAL/lhehaWbTtgluJh4/E95DqMwTmha3KZN1aAWA8cFIhHzMZUvLevkw5Rqk+tSQ==} + engines: {node: '>= 0.6'} + + content-type@1.0.5: + resolution: {integrity: 
sha512-nTjqfcBFEipKdXCv4YDQWCfmcLZKm81ldF0pAopTvyrFGVbcR6P/VAAd5G7N+0tTr8QqiU0tFadD6FK4NtJwOA==} + engines: {node: '>= 0.6'} + + cookie-signature@1.0.6: + resolution: {integrity: sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==} + + cookie@0.7.1: + resolution: {integrity: sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==} + engines: {node: '>= 0.6'} + + copy-anything@4.0.5: + resolution: {integrity: sha512-7Vv6asjS4gMOuILabD3l739tsaxFQmC+a7pLZm02zyvs8p977bL3zEgq3yDk5rn9B0PbYgIv++jmHcuUab4RhA==} + engines: {node: '>=18'} + + cors@2.8.5: + resolution: {integrity: sha512-KIHbLJqu73RGr/hnbrO9uBeixNGuvSQjul/jdFvS/KFSIH1hWVd1ng7zOHx+YrEfInLG7q4n6GHQ9cDtxv/P6g==} + engines: {node: '>= 0.10'} + + create-langgraph@1.0.0: + resolution: {integrity: sha512-zS8t4p9XUezZFAZ8jJH5zjrv586bqNp/DBgsOfpwHO6MswB9B+4SoSI3JFiIYwx1bRsWwQKg+nU2y16zcJa+Jg==} + hasBin: true + + cross-fetch@3.2.0: + resolution: {integrity: sha512-Q+xVJLoGOeIMXZmbUK4HYk+69cQH6LudR0Vu/pRm2YlU/hDV9CiS0gKUMaWY5f2NeUH9C1nV3bsTlCo0FsTV1Q==} + + cross-spawn@7.0.6: + resolution: {integrity: sha512-uV2QOWP2nWzsy2aMp8aRibhi9dlzF5Hgh5SHaB9OiTGEyDTiJJyx0uy51QXdyWbtAHNua4XJzUKca3OzKUd3vA==} + engines: {node: '>= 8'} + + css-select@5.2.2: + resolution: {integrity: sha512-TizTzUddG/xYLA3NXodFM0fSbNizXjOKhqiQQwvhlspadZokn1KDy0NZFS0wuEubIYAV5/c1/lAr0TaaFXEXzw==} + + css-tree@3.1.0: + resolution: {integrity: sha512-0eW44TGN5SQXU1mWSkKwFstI/22X2bG1nYzZTYMAWjylYURhse752YgbE4Cx46AC+bAvI+/dYTPRk1LqSUnu6w==} + engines: {node: ^10 || ^12.20.0 || ^14.13.0 || >=15.0.0} + + css-what@6.2.2: + resolution: {integrity: sha512-u/O3vwbptzhMs3L1fQE82ZSLHQQfto5gyZzwteVIEyeaY5Fc7R4dapF/BvRoSYFeqfBk4m0V1Vafq5Pjv25wvA==} + engines: {node: '>= 6'} + + cssesc@3.0.0: + resolution: {integrity: sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==} + engines: {node: '>=4'} + hasBin: true + + cssstyle@5.3.3: + 
resolution: {integrity: sha512-OytmFH+13/QXONJcC75QNdMtKpceNk3u8ThBjyyYjkEcy/ekBwR1mMAuNvi3gdBPW3N5TlCzQ0WZw8H0lN/bDw==} + engines: {node: '>=20'} + + data-urls@6.0.0: + resolution: {integrity: sha512-BnBS08aLUM+DKamupXs3w2tJJoqU+AkaE/+6vQxi/G/DPmIZFJJp9Dkb1kM03AZx8ADehDUZgsNxju3mPXZYIA==} + engines: {node: '>=20'} + + debug@2.6.9: + resolution: {integrity: sha512-bC7ElrdJaJnPbAP+1EotYvqZsb3ecl5wi6Bfi6BJTUcNowp6cvspg0jXznRTKDjm/E7AdgFBVeAPVMNcKGsHMA==} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decamelize@1.2.0: + resolution: {integrity: sha512-z2S+W9X73hAUUki+N+9Za2lBlun89zigOyGrsax+KUQ6wKW4ZoWpEYBkGhQjwAjjDCkWxhY0VKEhk8wzY7F5cA==} + engines: {node: '>=0.10.0'} + + decimal.js@10.6.0: + resolution: {integrity: sha512-YpgQiITW3JXGntzdUmyUR1V812Hn8T1YVXhCu+wO3OpS4eU9l4YdD3qjyiKdV6mvV29zapkMeD390UVEf2lkUg==} + + dedent@1.7.0: + resolution: {integrity: sha512-HGFtf8yhuhGhqO07SV79tRp+br4MnbdjeVxotpn1QBl30pcLLCQjX5b2295ll0fv8RKDKsmWYrl05usHM9CewQ==} + peerDependencies: + babel-plugin-macros: ^3.1.0 + peerDependenciesMeta: + babel-plugin-macros: + optional: true + + deep-eql@5.0.2: + resolution: {integrity: sha512-h5k/5U50IJJFpzfL6nO9jaaumfjO/f2NjK/oYB2Djzm4p9L+3T9qWpZqZ2hAbLPuuYq9wrU08WQyBTL5GbPk5Q==} + engines: {node: '>=6'} + + deepmerge@4.3.1: + resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==} + engines: {node: '>=0.10.0'} + + default-browser-id@5.0.1: + resolution: {integrity: sha512-x1VCxdX4t+8wVfd1so/9w+vQ4vx7lKd2Qp5tDRutErwmR85OgmfX7RlLRMWafRMY7hbEiXIbudNrjOAPa/hL8Q==} + engines: {node: '>=18'} + + default-browser@5.4.0: + resolution: {integrity: 
sha512-XDuvSq38Hr1MdN47EDvYtx3U0MTqpCEn+F6ft8z2vYDzMrvQhVp0ui9oQdqW3MvK3vqUETglt1tVGgjLuJ5izg==} + engines: {node: '>=18'} + + define-lazy-prop@3.0.0: + resolution: {integrity: sha512-N+MeXYoqr3pOgn8xfyRPREN7gHakLYjhsHhWGT3fWAiL4IkAt0iDw14QiiEm2bE30c5XX5q0FtAA3CK5f9/BUg==} + engines: {node: '>=12'} + + delayed-stream@1.0.0: + resolution: {integrity: sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==} + engines: {node: '>=0.4.0'} + + depd@2.0.0: + resolution: {integrity: sha512-g7nH6P6dyDioJogAAGprGpCtVImJhpPk/roCzdb3fIh61/s/nPsfR6onyMwkCAR/OlC3yBC0lESvUoQEAssIrw==} + engines: {node: '>= 0.8'} + + destroy@1.2.0: + resolution: {integrity: sha512-2sJGJTaXIIaR1w4iJSNoN0hnMY7Gpc/n8D4qSCJw8QqFWXf7cuAgnEHxBpweaVcPevC2l3KpjYCx3NypQQgaJg==} + engines: {node: '>= 0.8', npm: 1.2.8000 || >= 1.4.16} + + detect-libc@2.1.2: + resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} + engines: {node: '>=8'} + + dom-serializer@2.0.0: + resolution: {integrity: sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==} + + domelementtype@2.3.0: + resolution: {integrity: sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==} + + domhandler@5.0.3: + resolution: {integrity: sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==} + engines: {node: '>= 4'} + + domutils@3.2.2: + resolution: {integrity: sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==} + + dotenv@16.6.1: + resolution: {integrity: sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==} + engines: {node: '>=12'} + + dunder-proto@1.0.1: + resolution: {integrity: sha512-KIN/nDJBQRcXw0MLVhZE9iQHmG68qAVIBg9CqmUYjmQIhgij9U5MFvrqkUL5FbtyyzZuOeOt0zdeRe4UY7ct+A==} + engines: {node: '>= 0.4'} + + 
ecdsa-sig-formatter@1.0.11: + resolution: {integrity: sha512-nagl3RYrbNv6kQkeJIpt6NJZy8twLB/2vtz6yN9Z4vRKHN4/QZJIEbqohALSgwKdnksuY3k5Addp5lg8sVoVcQ==} + + ee-first@1.1.1: + resolution: {integrity: sha512-WMwm9LhRUo+WUaRN+vRuETqG89IgZphVSNkdFgeb6sS/E4OrDIN7t48CAewSHXc6C8lefD8KKfr5vY61brQlow==} + + electron-to-chromium@1.5.259: + resolution: {integrity: sha512-I+oLXgpEJzD6Cwuwt1gYjxsDmu/S/Kd41mmLA3O+/uH2pFRO/DvOjUyGozL8j3KeLV6WyZ7ssPwELMsXCcsJAQ==} + + emoji-regex@8.0.0: + resolution: {integrity: sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A==} + + enabled@2.0.0: + resolution: {integrity: sha512-AKrN98kuwOzMIdAizXGI86UFBoo26CL21UM763y1h/GMSJ4/OHU9k2YlsmBpyScFo/wbLzWQJBMCW4+IO3/+OQ==} + + encodeurl@1.0.2: + resolution: {integrity: sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==} + engines: {node: '>= 0.8'} + + encodeurl@2.0.0: + resolution: {integrity: sha512-Q0n9HRi4m6JuGIV1eFlmvJB7ZEVxu93IrMyiMsGC0lrMJMWzRgx6WGquyfQgZVb31vhGgXnfmPNNXmxnOkRBrg==} + engines: {node: '>= 0.8'} + + encoding-sniffer@0.2.1: + resolution: {integrity: sha512-5gvq20T6vfpekVtqrYQsSCFZ1wEg5+wW0/QaZMWkFr6BqD3NfKs0rLCx4rrVlSWJeZb5NBJgVLswK/w2MWU+Gw==} + + end-of-stream@1.4.5: + resolution: {integrity: sha512-ooEGc6HP26xXq/N+GCGOT0JKCLDGrq2bQUZrQ7gyrJiZANJ/8YDTxTpQBXGMn+WbIQXNVpyWymm7KYVICQnyOg==} + + enhanced-resolve@5.18.3: + resolution: {integrity: sha512-d4lC8xfavMeBjzGr2vECC3fsGXziXZQyJxD868h2M/mBI3PwAuODxAkLkq5HYuvrPYcUtiLzsTo8U3PgX3Ocww==} + engines: {node: '>=10.13.0'} + + entities@4.5.0: + resolution: {integrity: sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==} + engines: {node: '>=0.12'} + + entities@6.0.1: + resolution: {integrity: sha512-aN97NXWF6AWBTahfVOIrB/NShkzi5H7F9r1s9mD3cDj4Ko5f2qhhVoYMibXF7GlLveb/D2ioWay8lxI97Ven3g==} + engines: {node: '>=0.12'} + + es-define-property@1.0.1: + resolution: {integrity: 
sha512-e3nRfgfUZ4rNGL232gUgX06QNyyez04KdjFrF+LTRoOXmrOgFKDg4BCdsjW8EnT69eqdYGmRpJwiPVYNrCaW3g==} + engines: {node: '>= 0.4'} + + es-errors@1.3.0: + resolution: {integrity: sha512-Zf5H2Kxt2xjTvbJvP2ZWLEICxA6j+hAmMzIlypy4xcBg1vKVnx89Wy0GbS+kf5cwCVFFzdCFh2XSCFNULS6csw==} + engines: {node: '>= 0.4'} + + es-module-lexer@1.7.0: + resolution: {integrity: sha512-jEQoCwk8hyb2AZziIOLhDqpm5+2ww5uIE6lkO/6jcOCusfk6LhMHpXXfBLXTZ7Ydyt0j4VoUQv6uGNYbdW+kBA==} + + es-object-atoms@1.1.1: + resolution: {integrity: sha512-FGgH2h8zKNim9ljj7dankFPcICIK9Cp5bm+c2gQSYePhpaG5+esrLODihIorn+Pe6FGJzWhXQotPv73jTaldXA==} + engines: {node: '>= 0.4'} + + es-set-tostringtag@2.1.0: + resolution: {integrity: sha512-j6vWzfrGVfyXxge+O0x5sh6cvxAog0a/4Rdd2K36zCMV5eJ+/+tOAngRO8cODMNWbVRdVlmGZQL2YS3yR8bIUA==} + engines: {node: '>= 0.4'} + + esbuild-plugin-tailwindcss@2.1.0: + resolution: {integrity: sha512-Mqd9Dko8fHGOWVhN53wXefAZZ45i5sZVY3XvyFN5ZKhOPpxXrducAcDPE5iqFw0aJoEm6MG1V9x4Kz6TiQV9kQ==} + + esbuild@0.21.5: + resolution: {integrity: sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==} + engines: {node: '>=12'} + hasBin: true + + esbuild@0.25.12: + resolution: {integrity: sha512-bbPBYYrtZbkt6Os6FiTLCTFxvq4tt3JKall1vRwshA3fdVztsLAatFaZobhkBC8/BrPetoa0oksYoKXoG4ryJg==} + engines: {node: '>=18'} + hasBin: true + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-html@1.0.3: + resolution: {integrity: sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==} + + estree-walker@3.0.3: + resolution: {integrity: sha512-7RUKfXgSMMkzt6ZuXmqapOurLGPPfgj6l9uRZ7lRGolvk0y2yocc35LdcxKC5PQZdn2DMqioAQ2NoWcrTKmm6g==} + + etag@1.8.1: + resolution: {integrity: sha512-aIL5Fx7mawVa300al2BnEE4iNvo1qETxLrPI/o05L7z6go7fCw1J6EQmbK4FmJ2AS7kgVF/KEZWufBfdClMcPg==} + engines: {node: '>= 0.6'} + + event-target-shim@5.0.1: + 
resolution: {integrity: sha512-i/2XbnSz/uxRCU6+NdVJgKWDTM427+MqYbkQzD321DuCQJUqOuJKIA0IM2+W2xtYHdKOmZ4dR6fExsd4SXL+WQ==} + engines: {node: '>=6'} + + eventemitter3@4.0.7: + resolution: {integrity: sha512-8guHBZCwKnFhYdHr2ysuRWErTwhoN2X8XELRlrRwpmfeY2jjuUN4taQMsULKUVo1K4DvZl+0pgfyoysHxvmvEw==} + + events@3.3.0: + resolution: {integrity: sha512-mQw+2fkQbALzQ7V0MY0IqdnXNOeTtP4r0lN9z7AAawCXgqea7bDii20AYrIBrFd/Hx0M2Ocz6S111CaFkUcb0Q==} + engines: {node: '>=0.8.x'} + + execa@9.6.0: + resolution: {integrity: sha512-jpWzZ1ZhwUmeWRhS7Qv3mhpOhLfwI+uAX4e5fOcXqwMR7EcJ0pj2kV1CVzHVMX/LphnKWD3LObjZCoJ71lKpHw==} + engines: {node: ^18.19.0 || >=20.5.0} + + exit-hook@4.0.0: + resolution: {integrity: sha512-Fqs7ChZm72y40wKjOFXBKg7nJZvQJmewP5/7LtePDdnah/+FH9Hp5sgMujSCMPXlxOAW2//1jrW9pnsY7o20vQ==} + engines: {node: '>=18'} + + expect-type@1.2.2: + resolution: {integrity: sha512-JhFGDVJ7tmDJItKhYgJCGLOWjuK9vPxiXoUFLwLDc99NlmklilbiQJwoctZtt13+xMw91MCk/REan6MWHqDjyA==} + engines: {node: '>=12.0.0'} + + express@4.21.2: + resolution: {integrity: sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==} + engines: {node: '>= 0.10.0'} + + extend@3.0.2: + resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + + extract-zip@2.0.1: + resolution: {integrity: sha512-GDhU9ntwuKyGXdZBUgTIe+vXnWj0fppUEtMDL0+idd5Sta8TGpHssn/eusA9mrPr9qNDym6SxAYZjNvCn/9RBg==} + engines: {node: '>= 10.17.0'} + hasBin: true + + fd-slicer@1.1.0: + resolution: {integrity: sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==} + + fecha@4.2.3: + resolution: {integrity: sha512-OP2IUU6HeYKJi3i0z4A19kHMQoLVs4Hc+DPqqxI2h/DPZHTm/vjsfC6P0b4jCMy14XizLBqvndQ+UilD7707Jw==} + + figures@6.1.0: + resolution: {integrity: sha512-d+l3qxjSesT4V7v2fh+QnmFnUWv9lSpjarhShNTgBOfA0ttejbQUAlHLitbjkoRiDulW0OPoQPYIGhIC8ohejg==} + engines: {node: '>=18'} + + file-type@16.5.4: + resolution: {integrity: 
sha512-/yFHK0aGjFEgDJjEKP0pWCplsPFPhwyfwevf/pVxiN0tmE4L9LmwWxWukdJSHdoCli4VgQLehjJtwQBnqmsKcw==} + engines: {node: '>=10'} + + finalhandler@1.3.1: + resolution: {integrity: sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==} + engines: {node: '>= 0.8'} + + flat@5.0.2: + resolution: {integrity: sha512-b6suED+5/3rTpUBdG1gupIl8MPFCAMA0QXwmljLhvCUKcUvdE4gWky9zpuGCcXHOsz4J9wPGNWq6OKpmIzz3hQ==} + hasBin: true + + fn.name@1.1.0: + resolution: {integrity: sha512-GRnmB5gPyJpAhTQdSZTSp9uaPSvl09KoYcMQtsB9rQoOmzs9dH6ffeccH+Z+cv6P68Hu5bC6JjRh4Ah/mHSNRw==} + + follow-redirects@1.15.11: + resolution: {integrity: sha512-deG2P0JfjrTxl50XGCDyfI97ZGVCxIpfKYmfyrQ54n5FO/0gfIES8C/Psl6kWVDolizcaaxZJnTS0QSMxvnsBQ==} + engines: {node: '>=4.0'} + peerDependencies: + debug: '*' + peerDependenciesMeta: + debug: + optional: true + + form-data-encoder@1.7.2: + resolution: {integrity: sha512-qfqtYan3rxrnCk1VYaA4H+Ms9xdpPqvLZa6xmMgFvhO32x7/3J/ExcTd6qpxM0vH2GdMI+poehyBZvqfMTto8A==} + + form-data@4.0.5: + resolution: {integrity: sha512-8RipRLol37bNs2bhoV67fiTEvdTrbMUYcFTiy3+wuuOnUog2QBHCZWXDRijWQfAkhBj2Uf5UnVaiWwA5vdd82w==} + engines: {node: '>= 6'} + + formdata-node@4.4.1: + resolution: {integrity: sha512-0iirZp3uVDjVGt9p49aTaqjk84TrglENEDuqfdlZQ1roC9CWlPk6Avf8EEnZNcAqPonwkG35x4n3ww/1THYAeQ==} + engines: {node: '>= 12.20'} + + forwarded@0.2.0: + resolution: {integrity: sha512-buRG0fpBtRHSTCOASe6hD258tEubFoRLb4ZNA6NxMVHNw2gOcwHo9wyablzMzOA5z9xA9L1KNjk/Nt6MT9aYow==} + engines: {node: '>= 0.6'} + + fraction.js@5.3.4: + resolution: {integrity: sha512-1X1NTtiJphryn/uLQz3whtY6jK3fTqoE3ohKs0tT+Ujr1W59oopxmoEh7Lu5p6vBaPbgoM0bzveAW4Qi5RyWDQ==} + + fresh@0.5.2: + resolution: {integrity: sha512-zJ2mQYM18rEFOudeV4GShTGIQ7RbzA7ozbU9I/XBpm7kqgMywgmylMwXHxZJmkVoYkna9d2pVXVXPdYTP9ej8Q==} + engines: {node: '>= 0.6'} + + fsevents@2.3.2: + resolution: {integrity: sha512-xiqMQR4xAeHTuB9uWm+fFRcIOgKBMiOBP+eXiyT7jsgVCq1bkVygt00oASowB7EdtpOHaaPgKt812P9ab+DDKA==} + engines: 
{node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + fsevents@2.3.3: + resolution: {integrity: sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==} + engines: {node: ^8.16.0 || ^10.6.0 || >=11.0.0} + os: [darwin] + + function-bind@1.1.2: + resolution: {integrity: sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA==} + + generic-names@4.0.0: + resolution: {integrity: sha512-ySFolZQfw9FoDb3ed9d80Cm9f0+r7qj+HJkWjeD9RBfpxEVTlVhol+gvaQB/78WbwYfbnNh8nWHHBSlg072y6A==} + + get-caller-file@2.0.5: + resolution: {integrity: sha512-DyFP3BM/3YHTQOCUL/w0OZHR0lpKeGrxotcHWcqNEdnltqFwXVfhEBQ94eIo34AfQpo0rGki4cyIiftY06h2Fg==} + engines: {node: 6.* || 8.* || >= 10.*} + + get-intrinsic@1.3.0: + resolution: {integrity: sha512-9fSjSaos/fRIVIp+xSJlE6lfwhES7LNtKaCBIamHsjr2na1BiABJPo0mOjjz8GJDURarmCPGqaiVg5mfjb98CQ==} + engines: {node: '>= 0.4'} + + get-proto@1.0.1: + resolution: {integrity: sha512-sTSfBjoXBp89JvIKIefqw7U2CCebsc74kiY6awiGogKtoSGbgjYE/G/+l9sF3MWFPNc9IcoOC4ODfKHfxFmp0g==} + engines: {node: '>= 0.4'} + + get-stream@5.2.0: + resolution: {integrity: sha512-nBF+F1rAZVCu/p7rjzgA+Yb4lfYXrpl7a6VmJrU8wF9I1CKvP/QwPNZHnOlwbTkY6dvtFIzFMSyQXbLoTQPRpA==} + engines: {node: '>=8'} + + get-stream@9.0.1: + resolution: {integrity: sha512-kVCxPF3vQM/N0B1PmoqVUqgHP+EeVjmZSQn+1oCRPxd2P21P2F19lIgbR3HBosbB1PUhOAoctJnfEn2GbN2eZA==} + engines: {node: '>=18'} + + get-tsconfig@4.13.0: + resolution: {integrity: sha512-1VKTZJCwBrvbd+Wn3AOgQP/2Av+TfTCOlE4AcRJE72W1ksZXbAx8PPBR9RzgTeSPzlPMHrbANMH3LbltH73wxQ==} + + gopd@1.2.0: + resolution: {integrity: sha512-ZUKRh6/kUFoAiTAtTYPZJ3hw9wNxx+BIBOijnlG9PnrJsCcSjs1wyyD6vJpaYtgnzDrKYRSqf3OO6Rfa93xsRg==} + engines: {node: '>= 0.4'} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + graphql-request@6.1.0: + resolution: {integrity: 
sha512-p+XPfS4q7aIpKVcgmnZKhMNqhltk20hfXtkaIkTfjjmiKMJ5xrt5c743cL03y/K7y1rg3WrIC49xGiEQ4mxdNw==} + peerDependencies: + graphql: 14 - 16 + + graphql@16.12.0: + resolution: {integrity: sha512-DKKrynuQRne0PNpEbzuEdHlYOMksHSUI8Zc9Unei5gTsMNA2/vMpoMz/yKba50pejK56qj98qM0SjYxAKi13gQ==} + engines: {node: ^12.22.0 || ^14.16.0 || ^16.0.0 || >=17.0.0} + + groq-sdk@0.19.0: + resolution: {integrity: sha512-vdh5h7ORvwvOvutA80dKF81b0gPWHxu6K/GOJBOM0n6p6CSqAVLhFfeS79Ef0j/yCycDR09jqY7jkYz9dLiS6w==} + + handlebars@4.7.8: + resolution: {integrity: sha512-vafaFqs8MZkRrSX7sFVUdo3ap/eNiLnb4IakshzvP56X5Nr1iGKAIqdX6tMlm6HcNRIkr6AxO5jFEoJzzpT8aQ==} + engines: {node: '>=0.4.7'} + hasBin: true + + has-flag@4.0.0: + resolution: {integrity: sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==} + engines: {node: '>=8'} + + has-symbols@1.1.0: + resolution: {integrity: sha512-1cDNdwJ2Jaohmb3sg4OmKaMBwuC48sYni5HUw2DvsC8LjGTLK9h+eb1X6RyuOHe4hT0ULCW68iomhjUoKUqlPQ==} + engines: {node: '>= 0.4'} + + has-tostringtag@1.0.2: + resolution: {integrity: sha512-NqADB8VjPFLM2V0VvHUewwwsw0ZWBaIdgo+ieHtK3hasLz4qeCRjYcqfB6AQrBggRKppKF8L52/VqdVsO47Dlw==} + engines: {node: '>= 0.4'} + + hasown@2.0.2: + resolution: {integrity: sha512-0hJU9SCPvmMzIBdZFqNPXWa6dqh7WdH0cII9y+CyS8rG3nL48Bclra9HmKhVVUHyPWNH5Y7xDwAB7bfgSjkUMQ==} + engines: {node: '>= 0.4'} + + hono@4.10.6: + resolution: {integrity: sha512-BIdolzGpDO9MQ4nu3AUuDwHZZ+KViNm+EZ75Ae55eMXMqLVhDFqEMXxtUe9Qh8hjL+pIna/frs2j6Y2yD5Ua/g==} + engines: {node: '>=16.9.0'} + + html-encoding-sniffer@4.0.0: + resolution: {integrity: sha512-Y22oTqIU4uuPgEemfz7NDJz6OeKf12Lsu+QC+s3BVpda64lTiMYCyGwg5ki4vFxkMwQdeZDl2adZoqUgdFuTgQ==} + engines: {node: '>=18'} + + htmlparser2@10.0.0: + resolution: {integrity: sha512-TwAZM+zE5Tq3lrEHvOlvwgj1XLWQCtaaibSN11Q+gGBAS7Y1uZSWwXXRe4iF6OXnaq1riyQAPFOBtYc77Mxq0g==} + + http-errors@2.0.0: + resolution: {integrity: 
sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==} + engines: {node: '>= 0.8'} + + http-proxy-agent@7.0.2: + resolution: {integrity: sha512-T1gkAiYYDWYx3V5Bmyu7HcfcvL7mUrTWiM6yOfa3PIphViJ/gFPbvidQ+veqSOHci/PxBcDabeUNCzpOODJZig==} + engines: {node: '>= 14'} + + https-proxy-agent@7.0.6: + resolution: {integrity: sha512-vK9P5/iUfdl95AI+JVyUuIcVtd4ofvtrOr3HNtM2yxC9bnMbEdp3x01OhQNnjb8IJYi38VlTE3mBXwcfvywuSw==} + engines: {node: '>= 14'} + + human-signals@8.0.1: + resolution: {integrity: sha512-eKCa6bwnJhvxj14kZk5NCPc6Hb6BdsU9DZcOnmQKSnO1VKrfV0zCvtttPZUsBvjmNDn8rpcJfpwSYnHBjc95MQ==} + engines: {node: '>=18.18.0'} + + humanize-ms@1.2.1: + resolution: {integrity: sha512-Fl70vYtsAFb/C06PTS9dZBo7ihau+Tu/DNCk/OyHhea07S+aeMWpFFkUaXRa8fI+ScZbEI8dfSxwY7gxZ9SAVQ==} + + ibm-cloud-sdk-core@5.4.4: + resolution: {integrity: sha512-2zqgHp3W2meNJtommmgnZdouj2dPK4AbNQ4QN7BjNpfsQhWNO4eZbUYo2iD2V3I2k9mAsCjzsM87YuE+mu8gfA==} + engines: {node: '>=18'} + + iconv-lite@0.4.24: + resolution: {integrity: sha512-v3MXnZAcvnywkTUEZomIActle7RXXeedOR31wwl7VlyoXO4Qi9arvSenNQWne1TcRwhCL1HwLI21bEqdpj8/rA==} + engines: {node: '>=0.10.0'} + + iconv-lite@0.6.3: + resolution: {integrity: sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==} + engines: {node: '>=0.10.0'} + + icss-utils@5.1.0: + resolution: {integrity: sha512-soFhflCVWLfRNOPU3iv5Z9VUdT44xFRbzjLsEzSr5AQmgqPMTHdU3PMT1Cf1ssx8fLNJDA1juftYl+PUcv3MqA==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + + ieee754@1.2.1: + resolution: {integrity: sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==} + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + ipaddr.js@1.9.1: + resolution: {integrity: sha512-0KI/607xoxSToH7GjN1FfSbLoU0+btTicjsQSWQlh/hZykN8KpmMf7uYwPW3R+akZ6R/w18ZlXSHBYXiYUPO3g==} + engines: 
{node: '>= 0.10'} + + is-docker@3.0.0: + resolution: {integrity: sha512-eljcgEDlEns/7AXFosB5K/2nCM4P7FQPkGc/DWLy5rmFEWvZayGrik1d9/QIY5nJ4f9YsVvBkA6kJpHn9rISdQ==} + engines: {node: ^12.20.0 || ^14.13.1 || >=16.0.0} + hasBin: true + + is-fullwidth-code-point@3.0.0: + resolution: {integrity: sha512-zymm5+u+sCsSWyD9qNaejV3DFvhCKclKdizYaJUuHA83RLjb7nSuGnddCHGv0hk+KY7BMAlsWeK4Ueg6EV6XQg==} + engines: {node: '>=8'} + + is-inside-container@1.0.0: + resolution: {integrity: sha512-KIYLCCJghfHZxqjYBE7rEy0OBuTd5xCHS7tHVgvCLkx7StIoaxwNW3hCALgEUjFfeRk+MG/Qxmp/vtETEF3tRA==} + engines: {node: '>=14.16'} + hasBin: true + + is-plain-obj@4.1.0: + resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==} + engines: {node: '>=12'} + + is-potential-custom-element-name@1.0.1: + resolution: {integrity: sha512-bCYeRA2rVibKZd+s2625gGnGF/t7DSqDs4dP7CrLA1m7jKWz6pps0LpYLJN8Q64HtmPKJ1hrN3nzPNKFEKOUiQ==} + + is-stream@2.0.1: + resolution: {integrity: sha512-hFoiJiTl63nn+kstHGBtewWSKnQLpyb155KHheA1l39uvtO9nWIop1p3udqPcUd/xbF1VLMO4n7OI6p7RbngDg==} + engines: {node: '>=8'} + + is-stream@4.0.1: + resolution: {integrity: sha512-Dnz92NInDqYckGEUJv689RbRiTSEHCQ7wOVeALbkOz999YpqT46yMRIGtSNl2iCL1waAZSx40+h59NV/EwzV/A==} + engines: {node: '>=18'} + + is-unicode-supported@2.1.0: + resolution: {integrity: sha512-mE00Gnza5EEB3Ds0HfMyllZzbBrmLOX3vfWoj9A9PEnTfratQ/BcaJOuMhnkhjXvb2+FkY3VuHqtAGpTPmglFQ==} + engines: {node: '>=18'} + + is-what@5.5.0: + resolution: {integrity: sha512-oG7cgbmg5kLYae2N5IVd3jm2s+vldjxJzK1pcu9LfpGuQ93MQSzo0okvRna+7y5ifrD+20FE8FvjusyGaz14fw==} + engines: {node: '>=18'} + + is-wsl@3.1.0: + resolution: {integrity: sha512-UcVfVfaK4Sc4m7X3dUSoHoozQGBEFeDC+zVo06t98xe8CzHSZZBekNXH+tu0NalHolcJ/QAGqS46Hef7QXBIMw==} + engines: {node: '>=16'} + + isexe@2.0.0: + resolution: {integrity: sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==} + + isstream@0.1.2: + resolution: {integrity: 
sha512-Yljz7ffyPbrLpLngrMtZ7NduUgVvi6wG9RJ9IUcyCd59YQ911PBJphODUcbOVbqYfxe1wuYf/LJ8PauMRwsM/g==} + + jiti@2.6.1: + resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} + hasBin: true + + js-tiktoken@1.0.21: + resolution: {integrity: sha512-biOj/6M5qdgx5TKjDnFT1ymSpM5tbd3ylwDtrQvFQSu0Z7bBYko2dF+W/aUkXUPuk6IVpRxk/3Q2sHOzGlS36g==} + + js-tokens@4.0.0: + resolution: {integrity: sha512-RdJUflcE3cUzKiMqQgsCu06FPu9UdIJO0beYbPhHN4k6apgJtifcoCtT9bcxOpYBtpD2kCM6Sbzg4CausW/PKQ==} + + js-yaml@4.1.1: + resolution: {integrity: sha512-qQKT4zQxXl8lLwBtHMWwaTcGfFOZviOJet3Oy/xmGk2gZH677CJM9EvtfdSkgWcATZhj/55JZ0rmy3myCT5lsA==} + hasBin: true + + jsdom@27.2.0: + resolution: {integrity: sha512-454TI39PeRDW1LgpyLPyURtB4Zx1tklSr6+OFOipsxGUH1WMTvk6C65JQdrj455+DP2uJ1+veBEHTGFKWVLFoA==} + engines: {node: ^20.19.0 || ^22.12.0 || >=24.0.0} + peerDependencies: + canvas: ^3.0.0 + peerDependenciesMeta: + canvas: + optional: true + + json-schema-to-ts@3.1.1: + resolution: {integrity: sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g==} + engines: {node: '>=16'} + + jsonpointer@5.0.1: + resolution: {integrity: sha512-p/nXbhSEcu3pZRdkW1OfJhpsVtW1gd4Wa1fnQc9YLiTfAjn0312eMKimbdIQzuZl9aa9xUGaRlP9T/CJE/ditQ==} + engines: {node: '>=0.10.0'} + + jsonwebtoken@9.0.2: + resolution: {integrity: sha512-PRp66vJ865SSqOlgqS8hujT5U4AOgMfhrwYIuIhfKaoSCZcirrmASQr8CX7cUg+RMih+hgznrjp99o+W4pJLHQ==} + engines: {node: '>=12', npm: '>=6'} + + jwa@1.4.2: + resolution: {integrity: sha512-eeH5JO+21J78qMvTIDdBXidBd6nG2kZjg5Ohz/1fpa28Z4CcsWUzJ1ZZyFq/3z3N17aZy+ZuBoHljASbL1WfOw==} + + jws@3.2.2: + resolution: {integrity: sha512-YHlZCB6lMTllWDtSPHz/ZXTsi8S00usEV6v1tjq8tOUZzw7DpSDWVXjXDre6ed1w/pd495ODpHZYSdkRTsa0HA==} + + kuler@2.0.0: + resolution: {integrity: sha512-Xq9nH7KlWZmXAtodXDDRE7vs6DU1gTU8zYDHDiWLSip45Egwq3plLHzPn27NgvzL2r1LMPC1vdqh98sQxtqj4A==} + + langchain@1.0.6: + resolution: {integrity: 
sha512-JbxsTtWHiymV08H8jHISUj6dwbfB6/AaqsmJj0WKELkUCCCk64JfKEMmw56RSVjUkkvtwh+1Df+/+UpU7yRtlg==} + engines: {node: '>=20'} + peerDependencies: + '@langchain/core': ^1.0.5 + + langsmith@0.3.81: + resolution: {integrity: sha512-NFmp7TDrrbCE6TIfHqutN9xhdgvx0EOhULVo8bDW+ib5idprwjMTvmS0S1n9uVFwjN03zU2zVEWViXnwy5XPrw==} + peerDependencies: + '@opentelemetry/api': '*' + '@opentelemetry/exporter-trace-otlp-proto': '*' + '@opentelemetry/sdk-trace-base': '*' + openai: '*' + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@opentelemetry/exporter-trace-otlp-proto': + optional: true + '@opentelemetry/sdk-trace-base': + optional: true + openai: + optional: true + + lightningcss-android-arm64@1.30.2: + resolution: {integrity: sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [android] + + lightningcss-darwin-arm64@1.30.2: + resolution: {integrity: sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [darwin] + + lightningcss-darwin-x64@1.30.2: + resolution: {integrity: sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [darwin] + + lightningcss-freebsd-x64@1.30.2: + resolution: {integrity: sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [freebsd] + + lightningcss-linux-arm-gnueabihf@1.30.2: + resolution: {integrity: sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==} + engines: {node: '>= 12.0.0'} + cpu: [arm] + os: [linux] + + lightningcss-linux-arm64-gnu@1.30.2: + resolution: {integrity: sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: 
[linux] + + lightningcss-linux-arm64-musl@1.30.2: + resolution: {integrity: sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-x64-gnu@1.30.2: + resolution: {integrity: sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-linux-x64-musl@1.30.2: + resolution: {integrity: sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-win32-arm64-msvc@1.30.2: + resolution: {integrity: sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [win32] + + lightningcss-win32-x64-msvc@1.30.2: + resolution: {integrity: sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [win32] + + lightningcss@1.30.2: + resolution: {integrity: sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==} + engines: {node: '>= 12.0.0'} + + loader-utils@3.3.1: + resolution: {integrity: sha512-FMJTLMXfCLMLfJxcX9PFqX5qD88Z5MRGaZCVzfuqeZSPsyiBzs+pahDQjbIWz2QIzPZz0NX9Zy4FX3lmK6YHIg==} + engines: {node: '>= 12.13.0'} + + lodash.camelcase@4.3.0: + resolution: {integrity: sha512-TwuEnCnxbc3rAvhf/LbG7tJUDzhqXyFnv3dtzLOPgCG/hODL7WFnsbwktkD7yUV0RrreP/l1PALq/YSg6VvjlA==} + + lodash.includes@4.3.0: + resolution: {integrity: sha512-W3Bx6mdkRTGtlJISOvVD/lbqjTlPPUDTMnlXZFnVwi9NKJ6tiAk6LVdlhZMm17VZisqhKcgzpO5Wz91PCt5b0w==} + + lodash.isboolean@3.0.3: + resolution: {integrity: sha512-Bz5mupy2SVbPHURB98VAcw+aHh4vRV5IPNhILUCsOzRmsTmSQ17jIuqopAentWoehktxGd9e/hbIXq980/1QJg==} + + lodash.isinteger@4.0.4: + resolution: {integrity: 
sha512-DBwtEWN2caHQ9/imiNeEA5ys1JoRtRfY3d7V9wkqtbycnAmTvRRmbHKDV4a0EYc678/dia0jrte4tjYwVBaZUA==} + + lodash.isnumber@3.0.3: + resolution: {integrity: sha512-QYqzpfwO3/CWf3XP+Z+tkQsfaLL/EnUlXWVkIk5FUPc4sBdTehEqZONuyRt2P67PXAk+NXmTBcc97zw9t1FQrw==} + + lodash.isplainobject@4.0.6: + resolution: {integrity: sha512-oSXzaWypCMHkPC3NvBEaPHf0KsA5mvPrOPgQWDsbg8n7orZ290M0BmC/jgRZ4vcJ6DTAhjrsSYgdsW/F+MFOBA==} + + lodash.isstring@4.0.1: + resolution: {integrity: sha512-0wJxfxH1wgO3GrbuP+dTTk7op+6L41QCXbGINEmD+ny/G/eCqGzxyCsh7159S+mgDDcoarnBw6PC1PS5+wUGgw==} + + lodash.once@4.1.1: + resolution: {integrity: sha512-Sb487aTOCr9drQVL8pIxOzVhafOjZN9UU54hiN8PU3uAiSV7lx1yYNpbNmex2PK6dSJoNTSJUUswT651yww3Mg==} + + lodash@4.17.21: + resolution: {integrity: sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==} + + logform@2.7.0: + resolution: {integrity: sha512-TFYA4jnP7PVbmlBIfhlSe+WKxs9dklXMTEGcBCIvLhE/Tn3H6Gk1norupVW7m5Cnd4bLcr08AytbyV/xj7f/kQ==} + engines: {node: '>= 12.0.0'} + + long@5.3.2: + resolution: {integrity: sha512-mNAgZ1GmyNhD7AuqnTG3/VQ26o760+ZYBPKjPvugO8+nLbYfX6TVpJPseBvopbdY+qpZ/lKUnmEc1LeZYS3QAA==} + + loupe@3.2.1: + resolution: {integrity: sha512-CdzqowRJCeLU72bHvWqwRBBlLcMEtIvGrlvef74kMnV2AolS9Y8xUv1I0U/MNAWMhBlKIoyuEgoJ0t/bbwHbLQ==} + + lru-cache@11.2.2: + resolution: {integrity: sha512-F9ODfyqML2coTIsQpSkRHnLSZMtkU8Q+mSfcaIyKwy58u+8k5nvAYeiNhsyMARvzNcXJ9QfWVrcPsC9e9rAxtg==} + engines: {node: 20 || >=22} + + magic-string@0.30.21: + resolution: {integrity: sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==} + + math-expression-evaluator@2.0.7: + resolution: {integrity: sha512-uwliJZ6BPHRq4eiqNWxZBDzKUiS5RIynFFcgchqhBOloVLVBpZpNG8jRYkedLcBvhph8TnRyWEuxPqiQcwIdog==} + + math-intrinsics@1.1.0: + resolution: {integrity: sha512-/IXtbwEk5HTPyEwyKX6hGkYXxM9nbj64B+ilVJnC/R6B0pH5G4V3b0pVbL7DBj4tkhBAppbQUlf6F6Xl9LHu1g==} + engines: {node: '>= 0.4'} + + mdn-data@2.12.2: + resolution: 
{integrity: sha512-IEn+pegP1aManZuckezWCO+XZQDplx1366JoVhTpMpBB1sPey/SbveZQUosKiKiGYjg1wH4pMlNgXbCiYgihQA==} + + media-typer@0.3.0: + resolution: {integrity: sha512-dq+qelQ9akHpcOl/gUVRTxVIOkAJ1wR3QAvb4RsVjS8oVoFjDGTc679wJYmUmknUF5HwMLOgb5O+a3KxfWapPQ==} + engines: {node: '>= 0.6'} + + merge-descriptors@1.0.3: + resolution: {integrity: sha512-gaNvAS7TZ897/rVaZ0nMtAyxNyi/pdbjbAwUpFQpN70GqnVfOiXpeUUMKRBmzXaSQ8DdTX4/0ms62r2K+hE6mQ==} + + methods@1.1.2: + resolution: {integrity: sha512-iclAHeNqNm68zFtnZ0e+1L2yUIdvzNoauKU4WBA3VvH/vPFieF7qfRlwUZU+DA9P9bPXIS90ulxoUoCH23sV2w==} + engines: {node: '>= 0.6'} + + mime-db@1.52.0: + resolution: {integrity: sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==} + engines: {node: '>= 0.6'} + + mime-types@2.1.35: + resolution: {integrity: sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==} + engines: {node: '>= 0.6'} + + mime@1.6.0: + resolution: {integrity: sha512-x0Vn8spI+wuJ1O6S7gnbaQg8Pxh4NNHb7KSINmEWKiPE4RKOplvijn+NkmYmmRgP68mc70j2EbeTFRsrswaQeg==} + engines: {node: '>=4'} + hasBin: true + + minimist@1.2.8: + resolution: {integrity: sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==} + + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + + minizlib@3.1.0: + resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==} + engines: {node: '>= 18'} + + ms@2.0.0: + resolution: {integrity: sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + mustache@4.2.0: + resolution: {integrity: 
sha512-71ippSywq5Yb7/tVYyGbkBggbU8H3u5Rz56fH60jGFgr8uHwxs+aSKeqmluIVzM0m0kB7xQjKS6qPfd0b2ZoqQ==} + hasBin: true + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + negotiator@0.6.3: + resolution: {integrity: sha512-+EUsqGPLsM+j/zdChZjsnX51g4XrHFOIXwfnCVPGlQk/k5giakcKsuxCObBRu6DSm9opw/O6slWbJdghQM4bBg==} + engines: {node: '>= 0.6'} + + neo-async@2.6.2: + resolution: {integrity: sha512-Yd3UES5mWCSqR+qNT93S3UoYUkqAZ9lLg8a7g9rimsWmYGK8cVToA4/sF3RrshdyV3sAGMXVUmpMYOw+dLpOuw==} + + nice-grpc-client-middleware-retry@3.1.13: + resolution: {integrity: sha512-Q9I/wm5lYkDTveKFirrTHBkBY137yavXZ4xQDXTPIycUp7aLXD8xPTHFhqtAFWUw05aS91uffZZRgdv3HS0y/g==} + + nice-grpc-common@2.0.2: + resolution: {integrity: sha512-7RNWbls5kAL1QVUOXvBsv1uO0wPQK3lHv+cY1gwkTzirnG1Nop4cBJZubpgziNbaVc/bl9QJcyvsf/NQxa3rjQ==} + + nice-grpc@2.1.14: + resolution: {integrity: sha512-GK9pKNxlvnU5FAdaw7i2FFuR9CqBspcE+if2tqnKXBcE0R8525wj4BZvfcwj7FjvqbssqKxRHt2nwedalbJlww==} + + node-domexception@1.0.0: + resolution: {integrity: sha512-/jKZoMpw0F8GRwl4/eLROPA3cfcXtLApP0QzLmUT/HuPCZWyB7IY9ZrMeKw2O/nFIqPQB3PVM9aYm0F312AXDQ==} + engines: {node: '>=10.5.0'} + deprecated: Use your platform's native DOMException instead + + node-fetch@2.7.0: + resolution: {integrity: sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A==} + engines: {node: 4.x || >=6.0.0} + peerDependencies: + encoding: ^0.1.0 + peerDependenciesMeta: + encoding: + optional: true + + node-releases@2.0.27: + resolution: {integrity: sha512-nmh3lCkYZ3grZvqcCH+fjmQ7X+H0OeZgP40OierEaAptX4XofMh5kwNbWh7lBduUzCcV/8kZ+NDLCwm2iorIlA==} + + normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + + npm-run-path@6.0.0: + resolution: 
{integrity: sha512-9qny7Z9DsQU8Ou39ERsPU4OZQlSTP47ShQzuKZ6PRXpYLtIFgl/DEBYEXKlvcEa+9tHVcK8CF81Y2V72qaZhWA==} + engines: {node: '>=18'} + + nth-check@2.1.1: + resolution: {integrity: sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==} + + object-assign@4.1.1: + resolution: {integrity: sha512-rJgTQnkUnH1sFw8yT6VSU3zD3sWmu6sZhIseY8VX+GRu3P6F7Fu+JNDoXfklElbLJSnc3FUQHVe4cU5hj+BcUg==} + engines: {node: '>=0.10.0'} + + object-inspect@1.13.4: + resolution: {integrity: sha512-W67iLl4J2EXEGTbfeHCffrjDfitvLANg0UlX3wFUUSTx92KXRFegMHUVgSqE+wvhAbi4WqjGg9czysTV2Epbew==} + engines: {node: '>= 0.4'} + + ollama@0.5.18: + resolution: {integrity: sha512-lTFqTf9bo7Cd3hpF6CviBe/DEhewjoZYd9N/uCe7O20qYTvGqrNOFOBDj3lbZgFWHUgDv5EeyusYxsZSLS8nvg==} + + on-finished@2.4.1: + resolution: {integrity: sha512-oVlzkg3ENAhCk2zdv7IJwd/QUD4z2RxRwpkcGY8psCVcCYZNq4wYnVWALHM+brtuJjePWiYF/ClmuDr8Ch5+kg==} + engines: {node: '>= 0.8'} + + once@1.4.0: + resolution: {integrity: sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==} + + one-time@1.0.0: + resolution: {integrity: sha512-5DXOiRKwuSEcQ/l0kGCF6Q3jcADFv5tSmRaJck/OqkVFcOzutB134KRSfF0xDrL39MNnqxbHBbUUcjZIhTgb2g==} + + open@10.2.0: + resolution: {integrity: sha512-YgBpdJHPyQ2UE5x+hlSXcnejzAvD0b22U2OuAP+8OnlJT+PjWPxtgmGqKKc+RgTM63U9gN0YzrYc71R2WT/hTA==} + engines: {node: '>=18'} + + openai@6.9.1: + resolution: {integrity: sha512-vQ5Rlt0ZgB3/BNmTa7bIijYFhz3YBceAA3Z4JuoMSBftBF9YqFHIEhZakSs+O/Ad7EaoEimZvHxD5ylRjN11Lg==} + hasBin: true + peerDependencies: + ws: ^8.18.0 + zod: ^3.25 || ^4.0 + peerDependenciesMeta: + ws: + optional: true + zod: + optional: true + + openapi-types@12.1.3: + resolution: {integrity: sha512-N4YtSYJqghVu4iek2ZUvcN/0aqH1kRDuNqzcycDxhOUpg7GdvLa2F3DgS6yBNhInhv2r/6I0Flkn7CqL8+nIcw==} + + p-finally@1.0.0: + resolution: {integrity: sha512-LICb2p9CB7FS+0eR1oqWnHhp0FljGLZCWBE9aix0Uye9W8LTQPwMTYVGWQWIw9RdQiDg4+epXQODwIYJtSJaow==} + engines: {node: '>=4'} 
+ + p-queue@6.6.2: + resolution: {integrity: sha512-RwFpb72c/BhQLEXIZ5K2e+AhgNVmIejGlTgiB9MzZ0e93GRvqZ7uSi0dvRF7/XIXDeNkra2fNHBxTyPDGySpjQ==} + engines: {node: '>=8'} + + p-retry@4.6.2: + resolution: {integrity: sha512-312Id396EbJdvRONlngUx0NydfrIQ5lsYu0znKVUzVvArzEIt08V1qhtyESbGVd1FGX7UKtiFp5uwKZdM8wIuQ==} + engines: {node: '>=8'} + + p-timeout@3.2.0: + resolution: {integrity: sha512-rhIwUycgwwKcP9yTOOFK/AKsAopjjCakVqLHePO3CC6Mir1Z99xT+R63jZxAT5lFZLa2inS5h+ZS2GvR99/FBg==} + engines: {node: '>=8'} + + package-manager-detector@1.5.0: + resolution: {integrity: sha512-uBj69dVlYe/+wxj8JOpr97XfsxH/eumMt6HqjNTmJDf/6NO9s+0uxeOneIz3AsPt2m6y9PqzDzd3ATcU17MNfw==} + + parse-ms@4.0.0: + resolution: {integrity: sha512-TXfryirbmq34y8QBwgqCVLi+8oA3oWx2eAnSn62ITyEhEYaWRlVZ2DvMM9eZbMs/RfxPu/PK/aBLyGj4IrqMHw==} + engines: {node: '>=18'} + + parse5-htmlparser2-tree-adapter@7.1.0: + resolution: {integrity: sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g==} + + parse5-parser-stream@7.1.2: + resolution: {integrity: sha512-JyeQc9iwFLn5TbvvqACIF/VXG6abODeB3Fwmv/TGdLk2LfbWkaySGY72at4+Ty7EkPZj854u4CrICqNk2qIbow==} + + parse5@7.3.0: + resolution: {integrity: sha512-IInvU7fabl34qmi9gY8XOVxhYyMyuH2xUNpb2q8/Y+7552KlejkRvqvD19nMoUW/uQGGbqNpA6Tufu5FL5BZgw==} + + parse5@8.0.0: + resolution: {integrity: sha512-9m4m5GSgXjL4AjumKzq1Fgfp3Z8rsvjRNbnkVwfu2ImRqE5D0LnY2QfDen18FSY9C573YU5XxSapdHZTZ2WolA==} + + parseurl@1.3.3: + resolution: {integrity: sha512-CiyeOxFT/JZyN5m0z9PfXw4SCBJ6Sygz1Dpl0wqjlhDEGGBP1GnsUVEL0p63hoG1fcj3fHynXi9NYO4nWOL+qQ==} + engines: {node: '>= 0.8'} + + path-key@3.1.1: + resolution: {integrity: sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==} + engines: {node: '>=8'} + + path-key@4.0.0: + resolution: {integrity: sha512-haREypq7xkM7ErfgIyA0z+Bj4AGKlMSdlQE2jvJo6huWD1EdkKYV+G/T4nq0YEF2vgTT8kqMFKo1uHn950r4SQ==} + engines: {node: '>=12'} + + path-to-regexp@0.1.12: + resolution: {integrity: 
sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==} + + pathe@1.1.2: + resolution: {integrity: sha512-whLdWMYL2TwI08hn8/ZqAbrVemu0LNaNNJZX73O6qaIdCTfXutsLhMkjdENX0qhsQ9uIimo4/aQOmXkoon2nDQ==} + + pathval@2.0.1: + resolution: {integrity: sha512-//nshmD55c46FuFw26xV/xFAaB5HF9Xdap7HJBBnrKdAd6/GxDBaNA1870O79+9ueg61cZLSVc+OaFlfmObYVQ==} + engines: {node: '>= 14.16'} + + peek-readable@4.1.0: + resolution: {integrity: sha512-ZI3LnwUv5nOGbQzD9c2iDG6toheuXSZP5esSHBjopsXH4dg19soufvpUGA3uohi5anFtGb2lhAVdHzH6R/Evvg==} + engines: {node: '>=8'} + + pend@1.2.0: + resolution: {integrity: sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==} + + pg-cloudflare@1.2.7: + resolution: {integrity: sha512-YgCtzMH0ptvZJslLM1ffsY4EuGaU0cx4XSdXLRFae8bPP4dS5xL1tNB3k2o/N64cHJpwU7dxKli/nZ2lUa5fLg==} + + pg-connection-string@2.9.1: + resolution: {integrity: sha512-nkc6NpDcvPVpZXxrreI/FOtX3XemeLl8E0qFr6F2Lrm/I8WOnaWNhIPK2Z7OHpw7gh5XJThi6j6ppgNoaT1w4w==} + + pg-int8@1.0.1: + resolution: {integrity: sha512-WCtabS6t3c8SkpDBUlb1kjOs7l66xsGdKpIPZsg4wR+B3+u9UAum2odSsF9tnvxg80h4ZxLWMy4pRjOsFIqQpw==} + engines: {node: '>=4.0.0'} + + pg-pool@3.10.1: + resolution: {integrity: sha512-Tu8jMlcX+9d8+QVzKIvM/uJtp07PKr82IUOYEphaWcoBhIYkoHpLXN3qO59nAI11ripznDsEzEv8nUxBVWajGg==} + peerDependencies: + pg: '>=8.0' + + pg-protocol@1.10.3: + resolution: {integrity: sha512-6DIBgBQaTKDJyxnXaLiLR8wBpQQcGWuAESkRBX/t6OwA8YsqP+iVSiond2EDy6Y/dsGk8rh/jtax3js5NeV7JQ==} + + pg-types@2.2.0: + resolution: {integrity: sha512-qTAAlrEsl8s4OiEQY69wDvcMIdQN6wdz5ojQiOy6YRMuynxenON0O5oCpJI6lshc6scgAY8qvJ2On/p+CXY0GA==} + engines: {node: '>=4'} + + pg@8.16.3: + resolution: {integrity: sha512-enxc1h0jA/aq5oSDMvqyW3q89ra6XIIDZgCX9vkMrnz5DFTw/Ny3Li2lFQ+pt3L6MCgm/5o2o8HW9hiJji+xvw==} + engines: {node: '>= 16.0.0'} + peerDependencies: + pg-native: '>=3.0.1' + peerDependenciesMeta: + pg-native: + optional: true + + pgpass@1.0.5: + resolution: {integrity: 
sha512-FdW9r/jQZhSeohs1Z3sI1yxFQNFvMcnmfuj4WBMUTxOrAyLMaTcE1aAMBiTlbMNaXvBCQuVi0R7hd8udDSP7ug==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + playwright-core@1.56.1: + resolution: {integrity: sha512-hutraynyn31F+Bifme+Ps9Vq59hKuUCz7H1kDOcBs+2oGguKkWTU50bBWrtz34OUWmIwpBTWDxaRPXrIXkgvmQ==} + engines: {node: '>=18'} + hasBin: true + + playwright@1.56.1: + resolution: {integrity: sha512-aFi5B0WovBHTEvpM3DzXTUaeN6eN0qWnTkKx4NQaH4Wvcmc153PdaY2UBdSYKaGYw+UyWXSVyxDUg5DoPEttjw==} + engines: {node: '>=18'} + hasBin: true + + postcss-modules-extract-imports@3.1.0: + resolution: {integrity: sha512-k3kNe0aNFQDAZGbin48pL2VNidTF0w4/eASDsxlyspobzU3wZQLOGj7L9gfRe0Jo9/4uud09DsjFNH7winGv8Q==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + + postcss-modules-local-by-default@4.2.0: + resolution: {integrity: sha512-5kcJm/zk+GJDSfw+V/42fJ5fhjL5YbFDl8nVdXkJPLLW+Vf9mTD5Xe0wqIaDnLuL2U6cDNpTr+UQ+v2HWIBhzw==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + + postcss-modules-scope@3.2.1: + resolution: {integrity: sha512-m9jZstCVaqGjTAuny8MdgE88scJnCiQSlSrOWcTQgM2t32UBe+MUmFSO5t7VMSfAf/FJKImAxBav8ooCHJXCJA==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + + postcss-modules-values@4.0.0: + resolution: {integrity: sha512-RDxHkAiEGI78gS2ofyvCsu7iycRv7oqw5xMWn9iMoR0N/7mf9D50ecQqUo5BZ9Zh2vH4bCUR/ktCqbB9m8vJjQ==} + engines: {node: ^10 || ^12 || >= 14} + peerDependencies: + postcss: ^8.1.0 + + postcss-modules@6.0.1: + resolution: {integrity: sha512-zyo2sAkVvuZFFy0gc2+4O+xar5dYlaVy/ebO24KT0ftk/iJevSNyPyQellsBLlnccwh7f6V6Y4GvuKRYToNgpQ==} + peerDependencies: + postcss: ^8.0.0 + + postcss-selector-parser@7.1.0: + resolution: {integrity: sha512-8sLjZwK0R+JlxlYcTuVnyT2v+htpdrjDOKuMcOVdYjt52Lh8hWRYpxBPoKx/Zg+bcjc3wx6fmQevMmUztS/ccA==} + engines: {node: '>=4'} + + postcss-value-parser@4.2.0: + resolution: 
{integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + + postgres-array@2.0.0: + resolution: {integrity: sha512-VpZrUqU5A69eQyW2c5CA1jtLecCsN2U/bD6VilrFDWq5+5UIEVO7nazS3TEcHf1zuPYO/sqGvUvW62g86RXZuA==} + engines: {node: '>=4'} + + postgres-bytea@1.0.0: + resolution: {integrity: sha512-xy3pmLuQqRBZBXDULy7KbaitYqLcmxigw14Q5sj8QBVLqEwXfeybIKVWiqAXTlcvdvb0+xkOtDbfQMOf4lST1w==} + engines: {node: '>=0.10.0'} + + postgres-date@1.0.7: + resolution: {integrity: sha512-suDmjLVQg78nMK2UZ454hAG+OAW+HQPZ6n++TNDUX+L0+uUlLywnoxJKDou51Zm+zTCjrCl0Nq6J9C5hP9vK/Q==} + engines: {node: '>=0.10.0'} + + postgres-interval@1.2.0: + resolution: {integrity: sha512-9ZhXKM/rw350N1ovuWHbGxnGh/SNJ4cnxHiM0rxE4VN41wsg8P8zWn9hv/buK00RP4WvlOyr/RBDiptyxVbkZQ==} + engines: {node: '>=0.10.0'} + + pretty-ms@9.3.0: + resolution: {integrity: sha512-gjVS5hOP+M3wMm5nmNOucbIrqudzs9v/57bWRHQWLYklXqoXKrVfYW2W9+glfGsqtPgpiz5WwyEEB+ksXIx3gQ==} + engines: {node: '>=18'} + + process@0.11.10: + resolution: {integrity: sha512-cdGef/drWFoydD1JsMzuFf8100nZl+GT+yacc2bEced5f9Rjk4z+WtFUTBu9PhOi9j/jfmBPu0mMEY4wIdAF8A==} + engines: {node: '>= 0.6.0'} + + protobufjs@7.5.4: + resolution: {integrity: sha512-CvexbZtbov6jW2eXAvLukXjXUW1TzFaivC46BpWc/3BpcCysb5Vffu+B3XHMm8lVEuy2Mm4XGex8hBSg1yapPg==} + engines: {node: '>=12.0.0'} + + proxy-addr@2.0.7: + resolution: {integrity: sha512-llQsMLSUDUPT44jdrU/O37qlnifitDP+ZwrmmZcoSKyLKvtZxpyV0n2/bD/N4tBAAZ/gJEdZU7KMraoK1+XYAg==} + engines: {node: '>= 0.10'} + + proxy-from-env@1.1.0: + resolution: {integrity: sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==} + + psl@1.15.0: + resolution: {integrity: sha512-JZd3gMVBAVQkSs6HdNZo9Sdo0LNcQeMNP3CozBJb3JYC/QUYZTnKxP+f8oWRX4rHP5EurWxqAHTSwUCjlNKa1w==} + + pump@3.0.3: 
+ resolution: {integrity: sha512-todwxLMY7/heScKmntwQG8CXVkWUOdYxIvY2s0VWAAMh/nd8SoYiRaKjlr7+iCs984f2P8zvrfWcDDYVb73NfA==} + + punycode@2.3.1: + resolution: {integrity: sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==} + engines: {node: '>=6'} + + qs@6.13.0: + resolution: {integrity: sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==} + engines: {node: '>=0.6'} + + querystringify@2.2.0: + resolution: {integrity: sha512-FIqgj2EUvTa7R50u0rGsyTftzjYmv/a3hO345bZNrqabNqjtgiDMgmo4mkUjd+nzU5oF3dClKqFIPUKybUyqoQ==} + + range-parser@1.2.1: + resolution: {integrity: sha512-Hrgsx+orqoygnmhFbKaHE6c296J+HTAQXoxEF6gNupROmmGJRoyzfG3ccAveqCBrwr/2yxQ5BVd/GTl5agOwSg==} + engines: {node: '>= 0.6'} + + raw-body@2.5.2: + resolution: {integrity: sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==} + engines: {node: '>= 0.8'} + + readable-stream@3.6.2: + resolution: {integrity: sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==} + engines: {node: '>= 6'} + + readable-stream@4.7.0: + resolution: {integrity: sha512-oIGGmcpTLwPga8Bn6/Z75SVaH1z5dUut2ibSyAMVhmUggWpmDn2dapB0n7f8nwaSiRtepAsfJyfXIO5DCVAODg==} + engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0} + + readable-web-to-node-stream@3.0.4: + resolution: {integrity: sha512-9nX56alTf5bwXQ3ZDipHJhusu9NTQJ/CVPtb/XHAJCXihZeitfJvIRS4GqQ/mfIoOE3IelHMrpayVrosdHBuLw==} + engines: {node: '>=8'} + + readdirp@4.1.2: + resolution: {integrity: sha512-GDhwkLfywWL2s6vEjyhri+eXmfH6j1L7JE27WhqLeYzoh/A3DBaYGEj2H/HFZCn/kMfim73FXxEJTw06WtxQwg==} + engines: {node: '>= 14.18.0'} + + require-directory@2.1.1: + resolution: {integrity: sha512-fGxEI7+wsG9xrvdjsrlmL22OMTTiHRwAMroiEeMgq8gzoLC/PQr7RsRDSTLUg/bZAZtF+TVIkHc6/4RIKrui+Q==} + engines: {node: '>=0.10.0'} + + require-from-string@2.0.2: + resolution: {integrity: 
sha512-Xf0nWe6RseziFMu+Ap9biiUbmplq6S9/p+7w7YXP/JBHhrUDDUhwa+vANyubuqfZWTveU//DYVGsDG7RKL/vEw==} + engines: {node: '>=0.10.0'} + + requires-port@1.0.0: + resolution: {integrity: sha512-KigOCHcocU3XODJxsu8i/j8T9tzT4adHiecwORRQ0ZZFcp7ahwXuRU1m+yuO90C5ZUyGeGfocHDI14M3L3yDAQ==} + + resolve-pkg-maps@1.0.0: + resolution: {integrity: sha512-seS2Tj26TBVOC2NIc2rOe2y2ZO7efxITtLZcGSOnHHNOQ7CkiUBfw0Iw2ck6xkIhPwLhKNLS8BO+hEpngQlqzw==} + + retry-axios@2.6.0: + resolution: {integrity: sha512-pOLi+Gdll3JekwuFjXO3fTq+L9lzMQGcSq7M5gIjExcl3Gu1hd4XXuf5o3+LuSBsaULQH7DiNbsqPd1chVpQGQ==} + engines: {node: '>=10.7.0'} + peerDependencies: + axios: '*' + + retry@0.13.1: + resolution: {integrity: sha512-XQBQ3I8W1Cge0Seh+6gjj03LbmRFWuoszgK9ooCpwYIrhhoO80pfq4cUkU5DkknwfOfFteRwlZ56PYOGYyFWdg==} + engines: {node: '>= 4'} + + rollup@4.53.3: + resolution: {integrity: sha512-w8GmOxZfBmKknvdXU1sdM9NHcoQejwF/4mNgj2JuEEdRaHwwF12K7e9eXn1nLZ07ad+du76mkVsyeb2rKGllsA==} + engines: {node: '>=18.0.0', npm: '>=8.0.0'} + hasBin: true + + run-applescript@7.1.0: + resolution: {integrity: sha512-DPe5pVFaAsinSaV6QjQ6gdiedWDcRCbUuiQfQa2wmWV7+xC9bGulGI8+TdRmoFkAPaBXk8CrAbnlY2ISniJ47Q==} + engines: {node: '>=18'} + + safe-buffer@5.2.1: + resolution: {integrity: sha512-rp3So07KcdmmKbGvgaNxQSJr7bGVSVk5S9Eq1F+ppbRo70+YeaDxkw5Dd8NPN+GD6bjnYm2VuPuCXmpuYvmCXQ==} + + safe-stable-stringify@2.5.0: + resolution: {integrity: sha512-b3rppTKm9T+PsVCBEOUR46GWI7fdOs00VKZ1+9c1EWDaDMvjQc6tUwuFyIprgGgTcWoVHSKrU8H31ZHA2e0RHA==} + engines: {node: '>=10'} + + safer-buffer@2.1.2: + resolution: {integrity: sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==} + + saxes@6.0.0: + resolution: {integrity: sha512-xAg7SOnEhrm5zI3puOOKyy1OMcMlIJZYNJY7xLBwSze0UjhPLnWfj2GF2EpT0jmzaJKIWKHLsaSSajf35bcYnA==} + engines: {node: '>=v12.22.7'} + + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} + engines: {node: '>=10'} + 
hasBin: true + + send@0.19.0: + resolution: {integrity: sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==} + engines: {node: '>= 0.8.0'} + + serve-static@1.16.2: + resolution: {integrity: sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==} + engines: {node: '>= 0.8.0'} + + setprototypeof@1.2.0: + resolution: {integrity: sha512-E5LDX7Wrp85Kil5bhZv46j8jOeboKq5JMmYM3gVGdGH8xFpPWXUMsNrlODCrkoxMEeNi/XZIwuRvY4XNwYMJpw==} + + shebang-command@2.0.0: + resolution: {integrity: sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==} + engines: {node: '>=8'} + + shebang-regex@3.0.0: + resolution: {integrity: sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==} + engines: {node: '>=8'} + + side-channel-list@1.0.0: + resolution: {integrity: sha512-FCLHtRD/gnpCiCHEiJLOwdmFP+wzCmDEkc9y7NsYxeF4u7Btsn1ZuwgwJGxImImHicJArLP4R0yX4c2KCrMrTA==} + engines: {node: '>= 0.4'} + + side-channel-map@1.0.1: + resolution: {integrity: sha512-VCjCNfgMsby3tTdo02nbjtM/ewra6jPHmpThenkTYh8pG9ucZ/1P8So4u4FGBek/BjpOVsDCMoLA/iuBKIFXRA==} + engines: {node: '>= 0.4'} + + side-channel-weakmap@1.0.2: + resolution: {integrity: sha512-WPS/HvHQTYnHisLo9McqBHOJk2FkHO/tlpvldyrnem4aeQp4hai3gythswg6p01oSoTl58rcpiFAjF2br2Ak2A==} + engines: {node: '>= 0.4'} + + side-channel@1.1.0: + resolution: {integrity: sha512-ZX99e6tRweoUXqR+VBrslhda51Nh5MTQwou5tnUDgbtyM0dBgmhEDtWGP/xbKn6hqfPRHujUNwz5fy/wbbhnpw==} + engines: {node: '>= 0.4'} + + siginfo@2.0.0: + resolution: {integrity: sha512-ybx0WO1/8bSBLEWXZvEd7gMW3Sn3JFlW3TvX1nREbDLRNQNaeNN8WK0meBwPdAaOI7TtRRRJn/Es1zhrrCHu7g==} + + signal-exit@4.1.0: + resolution: {integrity: sha512-bzyZ1e88w9O1iNJbKnOlvYTrWPDl46O1bG0D3XInv+9tkPrxrN8jUUTiFlDkkmKWgn1M6CfIA13SuGqOa9Korw==} + engines: {node: '>=14'} + + simple-wcswidth@1.1.2: + resolution: {integrity: 
sha512-j7piyCjAeTDSjzTSQ7DokZtMNwNlEAyxqSZeCS+CXH7fJ4jx3FuJ/mTW3mE+6JLs4VJBbcll0Kjn+KXI5t21Iw==} + + sisteransi@1.0.5: + resolution: {integrity: sha512-bLGGlR1QxBcynn2d5YmDX4MGjlZvy2MRBDRNHLJ8VI6l6+9FUiyTFNJ0IveOSP0bcXgVDPRcfGqA0pjaqUpfVg==} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + source-map@0.6.1: + resolution: {integrity: sha512-UjgapumWlbMhkBgzT7Ykc5YXUT46F0iKu8SGXq0bcwP5dz/h0Plj6enJqjz1Zbq2l5WaqYnrVbwWOWMyF3F47g==} + engines: {node: '>=0.10.0'} + + split2@4.2.0: + resolution: {integrity: sha512-UcjcJOWknrNkF6PLX83qcHM6KHgVKNkV62Y8a5uYDVv9ydGQVwAHMKqHdJje1VTWpljG0WYpCDhrCdAOYH4TWg==} + engines: {node: '>= 10.x'} + + stack-trace@0.0.10: + resolution: {integrity: sha512-KGzahc7puUKkzyMt+IqAep+TVNbKP+k2Lmwhub39m1AsTSkaDutx56aDCo+HLDzf/D26BIHTJWNiTG1KAJiQCg==} + + stackback@0.0.2: + resolution: {integrity: sha512-1XMJE5fQo1jGH6Y/7ebnwPOBEkIEnT4QF32d5R1+VXdXveM0IBMJt8zfaxX1P3QhVwrYe+576+jkANtSS2mBbw==} + + stacktrace-parser@0.1.11: + resolution: {integrity: sha512-WjlahMgHmCJpqzU8bIBy4qtsZdU9lRlcZE3Lvyej6t4tuOuv1vk57OW3MBrj6hXBFx/nNoC9MPMTcr5YA7NQbg==} + engines: {node: '>=6'} + + statuses@2.0.1: + resolution: {integrity: sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==} + engines: {node: '>= 0.8'} + + std-env@3.10.0: + resolution: {integrity: sha512-5GS12FdOZNliM5mAOxFRg7Ir0pWz8MdpYm6AY6VPkGpbA7ZzmbzNcBJQ0GPvvyWgcY7QAhCgf9Uy89I03faLkg==} + + string-hash@1.1.3: + resolution: {integrity: sha512-kJUvRUFK49aub+a7T1nNE66EJbZBMnBgoC1UbCZ5n6bsZKBRga4KgBRTMn/pFkeCZSYtNeSyMxPDM0AXWELk2A==} + + string-width@4.2.3: + resolution: {integrity: sha512-wKyQRQpjJ0sIp62ErSZdGsjMJWsap5oRNihHhu6G7JVO/9jIB6UyevL+tXuOqrng8j/cxKTWyWUwvSTriiZz/g==} + engines: {node: '>=8'} + + string_decoder@1.3.0: + resolution: {integrity: 
sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==} + + strip-ansi@6.0.1: + resolution: {integrity: sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==} + engines: {node: '>=8'} + + strip-final-newline@4.0.0: + resolution: {integrity: sha512-aulFJcD6YK8V1G7iRB5tigAP4TsHBZZrOV8pjV++zdUwmeV8uzbY7yn6h9MswN62adStNZFuCIx4haBnRuMDaw==} + engines: {node: '>=18'} + + strtok3@6.3.0: + resolution: {integrity: sha512-fZtbhtvI9I48xDSywd/somNqgUHl2L2cstmXCCif0itOf96jeW18MBSyrLuNicYQVkvpOxkZtkzujiTJ9LW5Jw==} + engines: {node: '>=10'} + + superjson@2.2.5: + resolution: {integrity: sha512-zWPTX96LVsA/eVYnqOM2+ofcdPqdS1dAF1LN4TS2/MWuUpfitd9ctTa87wt4xrYnZnkLtS69xpBdSxVBP5Rm6w==} + engines: {node: '>=16'} + + supports-color@7.2.0: + resolution: {integrity: sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==} + engines: {node: '>=8'} + + symbol-tree@3.2.4: + resolution: {integrity: sha512-9QNk5KwDF+Bvz+PyObkmSYjI5ksVUYtjW7AU22r2NKcfLJcXp96hkDWU3+XndOsUb+AQ9QhfzfCT2O+CNWT5Tw==} + + tailwindcss@4.1.17: + resolution: {integrity: sha512-j9Ee2YjuQqYT9bbRTfTZht9W/ytp5H+jJpZKiYdP/bpnXARAuELt9ofP0lPnmHjbga7SNQIxdTAXCmtKVYjN+Q==} + + tapable@2.3.0: + resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} + engines: {node: '>=6'} + + tar@7.5.2: + resolution: {integrity: sha512-7NyxrTE4Anh8km8iEy7o0QYPs+0JKBTj5ZaqHg6B39erLg0qYXN3BijtShwbsNSvQ+LN75+KV+C4QR/f6Gwnpg==} + engines: {node: '>=18'} + + text-hex@1.0.0: + resolution: {integrity: sha512-uuVGNWzgJ4yhRaNSiubPY7OjISw4sw4E5Uv0wbjp+OzcbmVU/rsT8ujgcXJhn9ypzsgr5vlzpPqP+MBBKcGvbg==} + + tinybench@2.9.0: + resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + + tinyexec@0.3.2: + resolution: {integrity: 
sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} + + tinypool@1.1.1: + resolution: {integrity: sha512-Zba82s87IFq9A9XmjiX5uZA/ARWDrB03OHlq+Vw1fSdt0I+4/Kutwy8BP4Y/y/aORMo61FQ0vIb5j44vSo5Pkg==} + engines: {node: ^18.0.0 || >=20.0.0} + + tinyrainbow@1.2.0: + resolution: {integrity: sha512-weEDEq7Z5eTHPDh4xjX789+fHfF+P8boiFB+0vbWzpbnbsEr/GRaohi/uMKxg8RZMXnl1ItAi/IUHWMsjDV7kQ==} + engines: {node: '>=14.0.0'} + + tinyspy@3.0.2: + resolution: {integrity: sha512-n1cw8k1k0x4pgA2+9XrOkFydTerNcJ1zWCO5Nn9scWHTD+5tp8dghT2x1uduQePZTZgd3Tupf+x9BxJjeJi77Q==} + engines: {node: '>=14.0.0'} + + tldts-core@7.0.19: + resolution: {integrity: sha512-lJX2dEWx0SGH4O6p+7FPwYmJ/bu1JbcGJ8RLaG9b7liIgZ85itUVEPbMtWRVrde/0fnDPEPHW10ZsKW3kVsE9A==} + + tldts@7.0.19: + resolution: {integrity: sha512-8PWx8tvC4jDB39BQw1m4x8y5MH1BcQ5xHeL2n7UVFulMPH/3Q0uiamahFJ3lXA0zO2SUyRXuVVbWSDmstlt9YA==} + hasBin: true + + toidentifier@1.0.1: + resolution: {integrity: sha512-o5sSPKEkg/DIQNmH43V0/uerLrpzVedkUh8tGNvaeXpfpuwjKenlSox/2O/BTlZUtEe+JG7s5YhEz608PlAHRA==} + engines: {node: '>=0.6'} + + token-types@4.2.1: + resolution: {integrity: sha512-6udB24Q737UD/SDsKAHI9FCRP7Bqc9D/MQUV02ORQg5iskjtLJlZJNdN4kKtcdtwCeWIwIHDGaUsTsCCAa8sFQ==} + engines: {node: '>=10'} + + tough-cookie@4.1.4: + resolution: {integrity: sha512-Loo5UUvLD9ScZ6jh8beX1T6sO1w2/MpCRpEP7V280GKMVUQ0Jzar2U3UJPsrdbziLEMMhu3Ujnq//rhiFuIeag==} + engines: {node: '>=6'} + + tough-cookie@6.0.0: + resolution: {integrity: sha512-kXuRi1mtaKMrsLUxz3sQYvVl37B0Ns6MzfrtV5DvJceE9bPyspOqk9xxv7XbZWcfLWbFmm997vl83qUWVJA64w==} + engines: {node: '>=16'} + + tr46@0.0.3: + resolution: {integrity: sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw==} + + tr46@6.0.0: + resolution: {integrity: sha512-bLVMLPtstlZ4iMQHpFHTR7GAGj2jxi8Dg0s2h2MafAE4uSWF98FC/3MomU51iQAMf8/qDUbKWf5GxuvvVcXEhw==} + engines: {node: '>=20'} + + triple-beam@1.4.1: + resolution: {integrity: 
sha512-aZbgViZrg1QNcG+LULa7nhZpJTZSLm/mXnHXnbAbjmN5aSa0y7V+wvv6+4WaBtpISJzThKy+PIPxc1Nq1EJ9mg==} + engines: {node: '>= 14.0.0'} + + ts-algebra@2.0.0: + resolution: {integrity: sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw==} + + ts-error@1.0.6: + resolution: {integrity: sha512-tLJxacIQUM82IR7JO1UUkKlYuUTmoY9HBJAmNWFzheSlDS5SPMcNIepejHJa4BpPQLAcbRhRf3GDJzyj6rbKvA==} + + tsx@4.20.6: + resolution: {integrity: sha512-ytQKuwgmrrkDTFP4LjR0ToE2nqgy886GpvRSpU0JAnrdBYppuY5rLkRUYPU1yCryb24SsKBTL/hlDQAEFVwtZg==} + engines: {node: '>=18.0.0'} + hasBin: true + + type-fest@0.7.1: + resolution: {integrity: sha512-Ne2YiiGN8bmrmJJEuTWTLJR32nh/JdL1+PSicowtNb0WFpn59GK8/lfD61bVtzguz7b3PBt74nxpv/Pw5po5Rg==} + engines: {node: '>=8'} + + type-is@1.6.18: + resolution: {integrity: sha512-TkRKr9sUTxEH8MdfuCSP7VizJyzRNMjj2J2do2Jr3Kym598JVdEksuzPQCnlFPW4ky9Q+iA+ma9BGm06XQBy8g==} + engines: {node: '>= 0.6'} + + typescript@5.9.3: + resolution: {integrity: sha512-jl1vZzPDinLr9eUt3J/t7V6FgNEw9QjvBPdysz9KfQDD41fQrC2Y4vKQdiaUpFT4bXlb1RHhLpp8wtm6M5TgSw==} + engines: {node: '>=14.17'} + hasBin: true + + uglify-js@3.19.3: + resolution: {integrity: sha512-v3Xu+yuwBXisp6QYTcH4UbH+xYJXqnq2m/LtQVWKWzYc1iehYnLixoQDN9FH6/j9/oybfd6W9Ghwkl8+UMKTKQ==} + engines: {node: '>=0.8.0'} + hasBin: true + + undici-types@5.26.5: + resolution: {integrity: sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==} + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + undici@7.16.0: + resolution: {integrity: sha512-QEg3HPMll0o3t2ourKwOeUAZ159Kn9mx5pnzHRQO8+Wixmh88YdZRiIwat0iNzNNXn0yoEtXJqFpyW7eM8BV7g==} + engines: {node: '>=20.18.1'} + + unicorn-magic@0.3.0: + resolution: {integrity: sha512-+QBBXBCvifc56fsbuxZQ6Sic3wqqc3WWaqxs58gvJrcOuN83HGTCwz3oS5phzU9LthRNE9VrJCFCLUgHeeFnfA==} + engines: {node: '>=18'} + + universalify@0.2.0: + resolution: 
{integrity: sha512-CJ1QgKmNg3CwvAv/kOFmtnEN05f0D/cn9QntgNOQlQF9dgvVTHj3t+8JPdjqawCHk7V/KA+fbUqzZ9XWhcqPUg==} + engines: {node: '>= 4.0.0'} + + unpipe@1.0.0: + resolution: {integrity: sha512-pjy2bYhSsufwWlKwPc+l3cN7+wuJlK6uz0YdJEOlQDbl6jo/YlPi4mb8agUkVC8BF7V8NuzeyPNqRksA3hztKQ==} + engines: {node: '>= 0.8'} + + update-browserslist-db@1.1.4: + resolution: {integrity: sha512-q0SPT4xyU84saUX+tomz1WLkxUbuaJnR1xWt17M7fJtEJigJeWUNGUqrauFXsHnqev9y9JTRGwk13tFBuKby4A==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + url-parse@1.5.10: + resolution: {integrity: sha512-WypcfiRhfeUP9vvF0j6rw0J3hrWrw6iZv3+22h6iRMJ/8z1Tj6XfLP4DsUix5MhMPnXpiHDoKyoZ/bdCkwBCiQ==} + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + utils-merge@1.0.1: + resolution: {integrity: sha512-pMZTvIkT1d+TFGvDOqodOclx0QWkkgi6Tdoa8gC8ffGAAqz9pzPTZWAybbsHHoED/ztMtkv/VoYTYyShUn81hA==} + engines: {node: '>= 0.4.0'} + + uuid@10.0.0: + resolution: {integrity: sha512-8XkAphELsDnEGrDxUOHB3RGvXz6TeuYSGEZBOjtTtPm2lwhGBjLgOzLHB63IUWfBpNucQjND6d3AOudO+H3RWQ==} + hasBin: true + + uuid@11.1.0: + resolution: {integrity: sha512-0/A9rDy9P7cJ+8w1c9WD9V//9Wj15Ce2MPz8Ri6032usz+NfePxx5AcN3bN+r6ZL6jEo066/yNYB3tn4pQEx+A==} + hasBin: true + + uuid@9.0.1: + resolution: {integrity: sha512-b+1eJOlsR9K8HJpow9Ok3fiWOWSIcIzXodvv0rQjVoOVNpWMpxf1wZNpt4y9h10odCNrqnYp1OBzRktckBe3sA==} + hasBin: true + + vary@1.1.2: + resolution: {integrity: sha512-BNGbWLfd0eUPabhkXUVm0j8uuvREyTh5ovRa/dyow/BqAbZJyC+5fU+IzQOzmAKzYqYRAISoRhdQr3eIZ/PXqg==} + engines: {node: '>= 0.8'} + + vite-node@2.1.9: + resolution: {integrity: sha512-AM9aQ/IPrW/6ENLQg3AGY4K1N2TGZdR5e4gu/MmmR2xR3Ll1+dib+nook92g4TV3PXVyeyxdWwtaCAiUL0hMxA==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + + vite@5.4.21: + resolution: {integrity: sha512-o5a9xKjbtuhY6Bi5S3+HvbRERmouabWbyUcpXXUA1u+GNUKoROi9byOJ8M0nHbHYHkYICiMlqxkg1KkYmm25Sw==} + engines: {node: ^18.0.0 || 
>=20.0.0} + hasBin: true + peerDependencies: + '@types/node': ^18.0.0 || >=20.0.0 + less: '*' + lightningcss: ^1.21.0 + sass: '*' + sass-embedded: '*' + stylus: '*' + sugarss: '*' + terser: ^5.4.0 + peerDependenciesMeta: + '@types/node': + optional: true + less: + optional: true + lightningcss: + optional: true + sass: + optional: true + sass-embedded: + optional: true + stylus: + optional: true + sugarss: + optional: true + terser: + optional: true + + vitest@2.1.9: + resolution: {integrity: sha512-MSmPM9REYqDGBI8439mA4mWhV5sKmDlBKWIYbA3lRb2PTHACE0mgKwA8yQ2xq9vxDTuk4iPrECBAEW2aoFXY0Q==} + engines: {node: ^18.0.0 || >=20.0.0} + hasBin: true + peerDependencies: + '@edge-runtime/vm': '*' + '@types/node': ^18.0.0 || >=20.0.0 + '@vitest/browser': 2.1.9 + '@vitest/ui': 2.1.9 + happy-dom: '*' + jsdom: '*' + peerDependenciesMeta: + '@edge-runtime/vm': + optional: true + '@types/node': + optional: true + '@vitest/browser': + optional: true + '@vitest/ui': + optional: true + happy-dom: + optional: true + jsdom: + optional: true + + w3c-xmlserializer@5.0.0: + resolution: {integrity: sha512-o8qghlI8NZHU1lLPrpi2+Uq7abh4GGPpYANlalzWxyWteJOCsr/P+oPBA49TOLu5FTZO4d3F9MnWJfiMo4BkmA==} + engines: {node: '>=18'} + + weaviate-client@3.9.0: + resolution: {integrity: sha512-7qwg7YONAaT4zWnohLrFdzky+rZegVe76J+Tky/+7tuyvjFpdKgSrdqI/wPDh8aji0ZGZrL4DdGwGfFnZ+uV4w==} + engines: {node: '>=18.0.0'} + + web-streams-polyfill@4.0.0-beta.3: + resolution: {integrity: sha512-QW95TCTaHmsYfHDybGMwO5IJIM93I/6vTRk+daHTWFPhwh+C8Cg7j7XyKrwrj8Ib6vYXe0ocYNrmzY4xAAN6ug==} + engines: {node: '>= 14'} + + webidl-conversions@3.0.1: + resolution: {integrity: sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ==} + + webidl-conversions@8.0.0: + resolution: {integrity: sha512-n4W4YFyz5JzOfQeA8oN7dUYpR+MBP3PIUsn2jLjWXwK5ASUzt0Jc/A5sAUZoCYFJRGF0FBKJ+1JjN43rNdsQzA==} + engines: {node: '>=20'} + + whatwg-encoding@3.1.1: + resolution: {integrity: 
sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==} + engines: {node: '>=18'} + + whatwg-fetch@3.6.20: + resolution: {integrity: sha512-EqhiFU6daOA8kpjOWTL0olhVOF3i7OrFzSYiGsEMB8GcXS+RrzauAERX65xMeNWVqxA6HXH2m69Z9LaKKdisfg==} + + whatwg-mimetype@4.0.0: + resolution: {integrity: sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==} + engines: {node: '>=18'} + + whatwg-url@15.1.0: + resolution: {integrity: sha512-2ytDk0kiEj/yu90JOAp44PVPUkO9+jVhyf+SybKlRHSDlvOOZhdPIrr7xTH64l4WixO2cP+wQIcgujkGBPPz6g==} + engines: {node: '>=20'} + + whatwg-url@5.0.0: + resolution: {integrity: sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw==} + + which@2.0.2: + resolution: {integrity: sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==} + engines: {node: '>= 8'} + hasBin: true + + why-is-node-running@2.3.0: + resolution: {integrity: sha512-hUrmaWBdVDcxvYqnyh09zunKzROWjbZTiNy8dBEjkS7ehEDQibXJ7XvlmtbwuTclUiIyN+CyXQD4Vmko8fNm8w==} + engines: {node: '>=8'} + hasBin: true + + winston-console-format@1.0.8: + resolution: {integrity: sha512-dq7t/E0D0QRi4XIOwu6HM1+5e//WPqylH88GVjKEhQVrzGFg34MCz+G7pMJcXFBen9C0kBsu5GYgbYsE2LDwKw==} + + winston-transport@4.9.0: + resolution: {integrity: sha512-8drMJ4rkgaPo1Me4zD/3WLfI/zPdA9o2IipKODunnGDcuqbHwjsbB79ylv04LCGGzU0xQ6vTznOMpQGaLhhm6A==} + engines: {node: '>= 12.0.0'} + + winston@3.18.3: + resolution: {integrity: sha512-NoBZauFNNWENgsnC9YpgyYwOVrl2m58PpQ8lNHjV3kosGs7KJ7Npk9pCUE+WJlawVSe8mykWDKWFSVfs3QO9ww==} + engines: {node: '>= 12.0.0'} + + wordwrap@1.0.0: + resolution: {integrity: sha512-gvVzJFlPycKc5dZN4yPkP8w7Dc37BtP1yczEneOb4uq34pXZcvrtRTmWV8W+Ume+XCxKgbjM+nevkyFPMybd4Q==} + + wrap-ansi@7.0.0: + resolution: {integrity: sha512-YVGIj2kamLSTxw6NsZjoBxfSwsn0ycdesmc4p+Q21c5zPuZ1pl+NfxVdxPtdHvmNVOQ6XSYG4AUtyt/Fi7D16Q==} + engines: {node: '>=10'} + + wrappy@1.0.2: + resolution: 
{integrity: sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==} + + ws@8.18.3: + resolution: {integrity: sha512-PEIGCY5tSlUt50cqyMXfCzX+oOPqN0vuGqWzbcJ2xvnkzkq46oOpz7dQaTDBdfICb4N14+GARUDw2XV2N4tvzg==} + engines: {node: '>=10.0.0'} + peerDependencies: + bufferutil: ^4.0.1 + utf-8-validate: '>=5.0.2' + peerDependenciesMeta: + bufferutil: + optional: true + utf-8-validate: + optional: true + + wsl-utils@0.1.0: + resolution: {integrity: sha512-h3Fbisa2nKGPxCpm89Hk33lBLsnaGBvctQopaBSOW/uIs6FTe1ATyAnKFJrzVs9vpGdsTe73WF3V4lIsk4Gacw==} + engines: {node: '>=18'} + + xml-name-validator@5.0.0: + resolution: {integrity: sha512-EvGK8EJ3DhaHfbRlETOWAS5pO9MZITeauHKJyb8wyajUfQUenkIg2MvLDTZ4T/TgIcm3HU0TFBgWWboAZ30UHg==} + engines: {node: '>=18'} + + xmlchars@2.2.0: + resolution: {integrity: sha512-JZnDKK8B0RCDw84FNdDAIpZK+JuJw+s7Lz8nksI7SIuU3UXJJslUthsi+uWBUYOwPFwW7W7PRLRfUKpxjtjFCw==} + + xtend@4.0.2: + resolution: {integrity: sha512-LKYU1iAXJXUgAXn9URjiu+MWhyUXHsvfp7mcuYm9dSUKK0/CjtrUwFAxD82/mCWbtLsGjFIad0wIsod4zrTAEQ==} + engines: {node: '>=0.4'} + + y18n@5.0.8: + resolution: {integrity: sha512-0pfFzegeDWJHJIAmTLRP2DwHjdF5s7jo9tuztdQxAhINCdvS+3nGINqPd00AphqJR/0LhANUS6/+7SCb98YOfA==} + engines: {node: '>=10'} + + yallist@5.0.0: + resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} + engines: {node: '>=18'} + + yaml@2.8.1: + resolution: {integrity: sha512-lcYcMxX2PO9XMGvAJkJ3OsNMw+/7FKes7/hgerGUYWIoWu5j/+YQqcZr5JnPZWzOsEBgMbSbiSTn/dv/69Mkpw==} + engines: {node: '>= 14.6'} + hasBin: true + + yargs-parser@21.1.1: + resolution: {integrity: sha512-tVpsJW7DdjecAiFpbIB1e3qxIQsE6NoPc5/eTdrbbIC4h0LVsWhnoa3g+m2HclBIujHzsxZ4VJVA+GUuc2/LBw==} + engines: {node: '>=12'} + + yargs@17.7.2: + resolution: {integrity: sha512-7dSzzRQ++CKnNI/krKnYRV7JKKPUXMEh61soaHKg9mrWEhzFWhFnxPxGl+69cD1Ou63C13NUPCnmIcrvqCuM6w==} + engines: {node: '>=12'} + + yauzl@2.10.0: + resolution: {integrity: 
sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==} + + yoctocolors@2.1.2: + resolution: {integrity: sha512-CzhO+pFNo8ajLM2d2IW/R93ipy99LWjtwblvC1RsoSUMZgyLbYFr221TnSNT7GjGdYui6P459mw9JH/g/zW2ug==} + engines: {node: '>=18'} + + zod-to-json-schema@3.25.0: + resolution: {integrity: sha512-HvWtU2UG41LALjajJrML6uQejQhNJx+JBO9IflpSja4R03iNWfKXrj6W2h7ljuLyc1nKS+9yDyL/9tD1U/yBnQ==} + peerDependencies: + zod: ^3.25 || ^4 + + zod@3.25.76: + resolution: {integrity: sha512-gzUt/qt81nXsFGKIFcC3YnfEAx5NkunCfnDlvuBSSFS02bcXu4Lmea0AFIUwbLWxWPx3d9p8S5QoaujKcNQxcQ==} + +snapshots: + + '@acemir/cssom@0.9.24': {} + + '@alloc/quick-lru@5.2.0': {} + + '@anthropic-ai/sdk@0.27.3': + dependencies: + '@types/node': 18.19.130 + '@types/node-fetch': 2.6.13 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + + '@anthropic-ai/sdk@0.69.0(zod@3.25.76)': + dependencies: + json-schema-to-ts: 3.1.1 + optionalDependencies: + zod: 3.25.76 + + '@asamuzakjp/css-color@4.1.0': + dependencies: + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-color-parser': 3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + lru-cache: 11.2.2 + + '@asamuzakjp/dom-selector@6.7.4': + dependencies: + '@asamuzakjp/nwsapi': 2.3.9 + bidi-js: 1.0.3 + css-tree: 3.1.0 + is-potential-custom-element-name: 1.0.1 + lru-cache: 11.2.2 + + '@asamuzakjp/nwsapi@2.3.9': {} + + '@babel/code-frame@7.27.1': + dependencies: + '@babel/helper-validator-identifier': 7.28.5 + js-tokens: 4.0.0 + picocolors: 1.1.1 + + '@babel/helper-validator-identifier@7.28.5': {} + + '@babel/runtime@7.28.4': {} + + '@browserbasehq/sdk@2.6.0': + 
dependencies: + '@types/node': 18.19.130 + '@types/node-fetch': 2.6.13 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + + '@browserbasehq/stagehand@1.14.0(@playwright/test@1.56.1)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(zod@3.25.76)': + dependencies: + '@anthropic-ai/sdk': 0.27.3 + '@browserbasehq/sdk': 2.6.0 + '@playwright/test': 1.56.1 + deepmerge: 4.3.1 + dotenv: 16.6.1 + openai: 6.9.1(ws@8.18.3)(zod@3.25.76) + ws: 8.18.3 + zod: 3.25.76 + zod-to-json-schema: 3.25.0(zod@3.25.76) + transitivePeerDependencies: + - bufferutil + - encoding + - utf-8-validate + + '@cfworker/json-schema@4.1.1': {} + + '@clack/core@0.4.1': + dependencies: + picocolors: 1.1.1 + sisteransi: 1.0.5 + + '@clack/prompts@0.9.1': + dependencies: + '@clack/core': 0.4.1 + picocolors: 1.1.1 + sisteransi: 1.0.5 + + '@colors/colors@1.6.0': {} + + '@commander-js/extra-typings@13.1.0(commander@13.1.0)': + dependencies: + commander: 13.1.0 + + '@csstools/color-helpers@5.1.0': {} + + '@csstools/css-calc@2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-color-parser@3.1.0(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/color-helpers': 5.1.0 + '@csstools/css-calc': 2.1.4(@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4))(@csstools/css-tokenizer@3.0.4) + '@csstools/css-parser-algorithms': 3.0.5(@csstools/css-tokenizer@3.0.4) + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-parser-algorithms@3.0.5(@csstools/css-tokenizer@3.0.4)': + dependencies: + '@csstools/css-tokenizer': 3.0.4 + + '@csstools/css-syntax-patches-for-csstree@1.0.17': {} + + 
'@csstools/css-tokenizer@3.0.4': {} + + '@dabh/diagnostics@2.0.8': + dependencies: + '@so-ric/colorspace': 1.1.6 + enabled: 2.0.0 + kuler: 2.0.0 + + '@esbuild/aix-ppc64@0.21.5': + optional: true + + '@esbuild/aix-ppc64@0.25.12': + optional: true + + '@esbuild/android-arm64@0.21.5': + optional: true + + '@esbuild/android-arm64@0.25.12': + optional: true + + '@esbuild/android-arm@0.21.5': + optional: true + + '@esbuild/android-arm@0.25.12': + optional: true + + '@esbuild/android-x64@0.21.5': + optional: true + + '@esbuild/android-x64@0.25.12': + optional: true + + '@esbuild/darwin-arm64@0.21.5': + optional: true + + '@esbuild/darwin-arm64@0.25.12': + optional: true + + '@esbuild/darwin-x64@0.21.5': + optional: true + + '@esbuild/darwin-x64@0.25.12': + optional: true + + '@esbuild/freebsd-arm64@0.21.5': + optional: true + + '@esbuild/freebsd-arm64@0.25.12': + optional: true + + '@esbuild/freebsd-x64@0.21.5': + optional: true + + '@esbuild/freebsd-x64@0.25.12': + optional: true + + '@esbuild/linux-arm64@0.21.5': + optional: true + + '@esbuild/linux-arm64@0.25.12': + optional: true + + '@esbuild/linux-arm@0.21.5': + optional: true + + '@esbuild/linux-arm@0.25.12': + optional: true + + '@esbuild/linux-ia32@0.21.5': + optional: true + + '@esbuild/linux-ia32@0.25.12': + optional: true + + '@esbuild/linux-loong64@0.21.5': + optional: true + + '@esbuild/linux-loong64@0.25.12': + optional: true + + '@esbuild/linux-mips64el@0.21.5': + optional: true + + '@esbuild/linux-mips64el@0.25.12': + optional: true + + '@esbuild/linux-ppc64@0.21.5': + optional: true + + '@esbuild/linux-ppc64@0.25.12': + optional: true + + '@esbuild/linux-riscv64@0.21.5': + optional: true + + '@esbuild/linux-riscv64@0.25.12': + optional: true + + '@esbuild/linux-s390x@0.21.5': + optional: true + + '@esbuild/linux-s390x@0.25.12': + optional: true + + '@esbuild/linux-x64@0.21.5': + optional: true + + '@esbuild/linux-x64@0.25.12': + optional: true + + '@esbuild/netbsd-arm64@0.25.12': + optional: true + + 
'@esbuild/netbsd-x64@0.21.5': + optional: true + + '@esbuild/netbsd-x64@0.25.12': + optional: true + + '@esbuild/openbsd-arm64@0.25.12': + optional: true + + '@esbuild/openbsd-x64@0.21.5': + optional: true + + '@esbuild/openbsd-x64@0.25.12': + optional: true + + '@esbuild/openharmony-arm64@0.25.12': + optional: true + + '@esbuild/sunos-x64@0.21.5': + optional: true + + '@esbuild/sunos-x64@0.25.12': + optional: true + + '@esbuild/win32-arm64@0.21.5': + optional: true + + '@esbuild/win32-arm64@0.25.12': + optional: true + + '@esbuild/win32-ia32@0.21.5': + optional: true + + '@esbuild/win32-ia32@0.25.12': + optional: true + + '@esbuild/win32-x64@0.21.5': + optional: true + + '@esbuild/win32-x64@0.25.12': + optional: true + + '@graphql-typed-document-node/core@3.2.0(graphql@16.12.0)': + dependencies: + graphql: 16.12.0 + + '@grpc/grpc-js@1.14.1': + dependencies: + '@grpc/proto-loader': 0.8.0 + '@js-sdsl/ordered-map': 4.4.2 + + '@grpc/proto-loader@0.8.0': + dependencies: + lodash.camelcase: 4.3.0 + long: 5.3.2 + protobufjs: 7.5.4 + yargs: 17.7.2 + + '@hono/node-server@1.19.6(hono@4.10.6)': + dependencies: + hono: 4.10.6 + + '@hono/zod-validator@0.2.2(hono@4.10.6)(zod@3.25.76)': + dependencies: + hono: 4.10.6 + zod: 3.25.76 + + '@ibm-cloud/watsonx-ai@1.7.4': + dependencies: + '@types/node': 18.19.130 + extend: 3.0.2 + form-data: 4.0.5 + ibm-cloud-sdk-core: 5.4.4 + transitivePeerDependencies: + - supports-color + + '@isaacs/fs-minipass@4.0.1': + dependencies: + minipass: 7.1.2 + + '@jridgewell/gen-mapping@0.3.13': + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/remapping@2.3.5': + dependencies: + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@js-sdsl/ordered-map@4.4.2': 
{} + + '@langchain/anthropic@1.1.1(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod@3.25.76)': + dependencies: + '@anthropic-ai/sdk': 0.69.0(zod@3.25.76) + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + transitivePeerDependencies: + - zod + + '@langchain/classic@1.0.4(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(cheerio@1.1.2)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(ws@8.18.3)': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + '@langchain/openai': 1.1.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3) + '@langchain/textsplitters': 1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + handlebars: 4.7.8 + js-yaml: 4.1.1 + jsonpointer: 5.0.1 + openapi-types: 12.1.3 + p-retry: 4.6.2 + uuid: 10.0.0 + yaml: 2.8.1 + zod: 3.25.76 + optionalDependencies: + cheerio: 1.1.2 + langsmith: 0.3.81(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + - ws + + '@langchain/community@1.0.4(@browserbasehq/sdk@2.6.0)(@browserbasehq/stagehand@1.14.0(@playwright/test@1.56.1)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(zod@3.25.76))(@ibm-cloud/watsonx-ai@1.7.4)(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(cheerio@1.1.2)(ibm-cloud-sdk-core@5.4.4)(jsdom@27.2.0)(jsonwebtoken@9.0.2)(lodash@4.17.21)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(pg@8.16.3)(playwright@1.56.1)(weaviate-client@3.9.0)(ws@8.18.3)': + dependencies: + '@browserbasehq/stagehand': 1.14.0(@playwright/test@1.56.1)(deepmerge@4.3.1)(dotenv@16.6.1)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(zod@3.25.76) + '@ibm-cloud/watsonx-ai': 1.7.4 + '@langchain/classic': 1.0.4(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(cheerio@1.1.2)(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(ws@8.18.3) + '@langchain/core': 
1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + '@langchain/openai': 1.1.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3) + binary-extensions: 2.3.0 + flat: 5.0.2 + ibm-cloud-sdk-core: 5.4.4 + js-yaml: 4.1.1 + math-expression-evaluator: 2.0.7 + openai: 6.9.1(ws@8.18.3)(zod@3.25.76) + uuid: 10.0.0 + zod: 3.25.76 + optionalDependencies: + '@browserbasehq/sdk': 2.6.0 + cheerio: 1.1.2 + jsdom: 27.2.0 + jsonwebtoken: 9.0.2 + lodash: 4.17.21 + pg: 8.16.3 + playwright: 1.56.1 + weaviate-client: 3.9.0 + ws: 8.18.3 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - peggy + + '@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))': + dependencies: + '@cfworker/json-schema': 4.1.1 + ansi-styles: 5.2.0 + camelcase: 6.3.0 + decamelize: 1.2.0 + js-tiktoken: 1.0.21 + langsmith: 0.3.81(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + mustache: 4.2.0 + p-queue: 6.6.2 + p-retry: 4.6.2 + uuid: 10.0.0 + zod: 3.25.76 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - openai + + '@langchain/groq@1.0.1(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + groq-sdk: 0.19.0 + transitivePeerDependencies: + - encoding + + '@langchain/langgraph-api@1.0.4(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph-sdk@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76))(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(typescript@5.9.3)': + dependencies: + '@babel/code-frame': 7.27.1 + '@hono/node-server': 1.19.6(hono@4.10.6) + 
'@hono/zod-validator': 0.2.2(hono@4.10.6)(zod@3.25.76) + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + '@langchain/langgraph': 1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76) + '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + '@langchain/langgraph-ui': 1.0.4 + '@types/json-schema': 7.0.15 + '@typescript/vfs': 1.6.2(typescript@5.9.3) + dedent: 1.7.0 + dotenv: 16.6.1 + exit-hook: 4.0.0 + hono: 4.10.6 + langsmith: 0.3.81(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + open: 10.2.0 + semver: 7.7.3 + stacktrace-parser: 0.1.11 + superjson: 2.2.5 + tsx: 4.20.6 + typescript: 5.9.3 + uuid: 10.0.0 + winston: 3.18.3 + winston-console-format: 1.0.8 + zod: 3.25.76 + optionalDependencies: + '@langchain/langgraph-sdk': 1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - babel-plugin-macros + - openai + - supports-color + + '@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + uuid: 10.0.0 + + '@langchain/langgraph-cli@1.0.4(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph-sdk@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76))(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(typescript@5.9.3)': + dependencies: + '@babel/code-frame': 7.27.1 + '@commander-js/extra-typings': 13.1.0(commander@13.1.0) + '@langchain/langgraph-api': 
1.0.4(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(@langchain/langgraph-checkpoint@1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph-sdk@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))))(@langchain/langgraph@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76))(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(typescript@5.9.3) + chokidar: 4.0.3 + commander: 13.1.0 + create-langgraph: 1.0.0 + dedent: 1.7.0 + dotenv: 16.6.1 + execa: 9.6.0 + exit-hook: 4.0.0 + extract-zip: 2.0.1 + langsmith: 0.3.81(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + open: 10.2.0 + package-manager-detector: 1.5.0 + stacktrace-parser: 0.1.11 + tar: 7.5.2 + winston: 3.18.3 + winston-console-format: 1.0.8 + yaml: 2.8.1 + zod: 3.25.76 + transitivePeerDependencies: + - '@langchain/core' + - '@langchain/langgraph' + - '@langchain/langgraph-checkpoint' + - '@langchain/langgraph-sdk' + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + - '@opentelemetry/sdk-trace-base' + - babel-plugin-macros + - openai + - supports-color + - typescript + + '@langchain/langgraph-sdk@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))': + dependencies: + p-queue: 6.6.2 + p-retry: 4.6.2 + uuid: 9.0.1 + optionalDependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + + '@langchain/langgraph-ui@1.0.4': + dependencies: + '@commander-js/extra-typings': 13.1.0(commander@13.1.0) + commander: 13.1.0 + esbuild: 0.25.12 + esbuild-plugin-tailwindcss: 2.1.0 + zod: 3.25.76 + + '@langchain/langgraph@1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76)': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + '@langchain/langgraph-sdk': 
1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + uuid: 10.0.0 + zod: 3.25.76 + optionalDependencies: + zod-to-json-schema: 3.25.0(zod@3.25.76) + transitivePeerDependencies: + - react + - react-dom + + '@langchain/ollama@1.0.1(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + ollama: 0.5.18 + uuid: 10.0.0 + + '@langchain/openai@1.1.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(ws@8.18.3)': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + js-tiktoken: 1.0.21 + openai: 6.9.1(ws@8.18.3)(zod@3.25.76) + zod: 3.25.76 + transitivePeerDependencies: + - ws + + '@langchain/textsplitters@1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + js-tiktoken: 1.0.21 + + '@langchain/weaviate@1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))': + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + uuid: 10.0.0 + weaviate-client: 3.9.0 + transitivePeerDependencies: + - encoding + + '@playwright/test@1.56.1': + dependencies: + playwright: 1.56.1 + + '@protobufjs/aspromise@1.1.2': {} + + '@protobufjs/base64@1.1.2': {} + + '@protobufjs/codegen@2.0.4': {} + + '@protobufjs/eventemitter@1.1.0': {} + + '@protobufjs/fetch@1.1.0': + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/inquire': 1.1.0 + + '@protobufjs/float@1.0.2': {} + + '@protobufjs/inquire@1.1.0': {} + + '@protobufjs/path@1.1.2': {} + + '@protobufjs/pool@1.1.0': {} + + '@protobufjs/utf8@1.1.0': {} + + '@rollup/rollup-android-arm-eabi@4.53.3': + optional: true + + '@rollup/rollup-android-arm64@4.53.3': + optional: true + + '@rollup/rollup-darwin-arm64@4.53.3': + optional: true + + '@rollup/rollup-darwin-x64@4.53.3': + optional: true + + '@rollup/rollup-freebsd-arm64@4.53.3': + optional: true + + 
'@rollup/rollup-freebsd-x64@4.53.3': + optional: true + + '@rollup/rollup-linux-arm-gnueabihf@4.53.3': + optional: true + + '@rollup/rollup-linux-arm-musleabihf@4.53.3': + optional: true + + '@rollup/rollup-linux-arm64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-arm64-musl@4.53.3': + optional: true + + '@rollup/rollup-linux-loong64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-ppc64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-riscv64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-riscv64-musl@4.53.3': + optional: true + + '@rollup/rollup-linux-s390x-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-x64-gnu@4.53.3': + optional: true + + '@rollup/rollup-linux-x64-musl@4.53.3': + optional: true + + '@rollup/rollup-openharmony-arm64@4.53.3': + optional: true + + '@rollup/rollup-win32-arm64-msvc@4.53.3': + optional: true + + '@rollup/rollup-win32-ia32-msvc@4.53.3': + optional: true + + '@rollup/rollup-win32-x64-gnu@4.53.3': + optional: true + + '@rollup/rollup-win32-x64-msvc@4.53.3': + optional: true + + '@sec-ant/readable-stream@0.4.1': {} + + '@sindresorhus/merge-streams@4.0.0': {} + + '@so-ric/colorspace@1.1.6': + dependencies: + color: 5.0.3 + text-hex: 1.0.0 + + '@tailwindcss/node@4.1.17': + dependencies: + '@jridgewell/remapping': 2.3.5 + enhanced-resolve: 5.18.3 + jiti: 2.6.1 + lightningcss: 1.30.2 + magic-string: 0.30.21 + source-map-js: 1.2.1 + tailwindcss: 4.1.17 + + '@tailwindcss/oxide-android-arm64@4.1.17': + optional: true + + '@tailwindcss/oxide-darwin-arm64@4.1.17': + optional: true + + '@tailwindcss/oxide-darwin-x64@4.1.17': + optional: true + + '@tailwindcss/oxide-freebsd-x64@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-arm64-musl@4.1.17': + optional: true + + '@tailwindcss/oxide-linux-x64-gnu@4.1.17': + optional: true + + 
'@tailwindcss/oxide-linux-x64-musl@4.1.17': + optional: true + + '@tailwindcss/oxide-wasm32-wasi@4.1.17': + optional: true + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.17': + optional: true + + '@tailwindcss/oxide-win32-x64-msvc@4.1.17': + optional: true + + '@tailwindcss/oxide@4.1.17': + optionalDependencies: + '@tailwindcss/oxide-android-arm64': 4.1.17 + '@tailwindcss/oxide-darwin-arm64': 4.1.17 + '@tailwindcss/oxide-darwin-x64': 4.1.17 + '@tailwindcss/oxide-freebsd-x64': 4.1.17 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.1.17 + '@tailwindcss/oxide-linux-arm64-gnu': 4.1.17 + '@tailwindcss/oxide-linux-arm64-musl': 4.1.17 + '@tailwindcss/oxide-linux-x64-gnu': 4.1.17 + '@tailwindcss/oxide-linux-x64-musl': 4.1.17 + '@tailwindcss/oxide-wasm32-wasi': 4.1.17 + '@tailwindcss/oxide-win32-arm64-msvc': 4.1.17 + '@tailwindcss/oxide-win32-x64-msvc': 4.1.17 + + '@tailwindcss/postcss@4.1.17': + dependencies: + '@alloc/quick-lru': 5.2.0 + '@tailwindcss/node': 4.1.17 + '@tailwindcss/oxide': 4.1.17 + postcss: 8.5.6 + tailwindcss: 4.1.17 + + '@tokenizer/token@0.3.0': {} + + '@types/body-parser@1.19.6': + dependencies: + '@types/connect': 3.4.38 + '@types/node': 22.19.1 + + '@types/connect@3.4.38': + dependencies: + '@types/node': 22.19.1 + + '@types/cors@2.8.19': + dependencies: + '@types/node': 22.19.1 + + '@types/debug@4.1.12': + dependencies: + '@types/ms': 2.1.0 + + '@types/estree@1.0.8': {} + + '@types/express-serve-static-core@5.1.0': + dependencies: + '@types/node': 22.19.1 + '@types/qs': 6.14.0 + '@types/range-parser': 1.2.7 + '@types/send': 1.2.1 + + '@types/express@5.0.5': + dependencies: + '@types/body-parser': 1.19.6 + '@types/express-serve-static-core': 5.1.0 + '@types/serve-static': 1.15.10 + + '@types/http-errors@2.0.5': {} + + '@types/jsdom@27.0.0': + dependencies: + '@types/node': 22.19.1 + '@types/tough-cookie': 4.0.5 + parse5: 7.3.0 + + '@types/json-schema@7.0.15': {} + + '@types/lodash@4.17.21': {} + + '@types/mime@1.3.5': {} + + '@types/ms@2.1.0': {} + + 
'@types/node-fetch@2.6.13': + dependencies: + '@types/node': 22.19.1 + form-data: 4.0.5 + + '@types/node@18.19.130': + dependencies: + undici-types: 5.26.5 + + '@types/node@22.19.1': + dependencies: + undici-types: 6.21.0 + + '@types/pg@8.15.6': + dependencies: + '@types/node': 22.19.1 + pg-protocol: 1.10.3 + pg-types: 2.2.0 + + '@types/qs@6.14.0': {} + + '@types/range-parser@1.2.7': {} + + '@types/retry@0.12.0': {} + + '@types/send@0.17.6': + dependencies: + '@types/mime': 1.3.5 + '@types/node': 22.19.1 + + '@types/send@1.2.1': + dependencies: + '@types/node': 22.19.1 + + '@types/serve-static@1.15.10': + dependencies: + '@types/http-errors': 2.0.5 + '@types/node': 22.19.1 + '@types/send': 0.17.6 + + '@types/tough-cookie@4.0.5': {} + + '@types/triple-beam@1.3.5': {} + + '@types/uuid@10.0.0': {} + + '@types/yauzl@2.10.3': + dependencies: + '@types/node': 22.19.1 + optional: true + + '@typescript/vfs@1.6.2(typescript@5.9.3)': + dependencies: + debug: 4.4.3 + typescript: 5.9.3 + transitivePeerDependencies: + - supports-color + + '@vitest/expect@2.1.9': + dependencies: + '@vitest/spy': 2.1.9 + '@vitest/utils': 2.1.9 + chai: 5.3.3 + tinyrainbow: 1.2.0 + + '@vitest/mocker@2.1.9(vite@5.4.21(@types/node@22.19.1)(lightningcss@1.30.2))': + dependencies: + '@vitest/spy': 2.1.9 + estree-walker: 3.0.3 + magic-string: 0.30.21 + optionalDependencies: + vite: 5.4.21(@types/node@22.19.1)(lightningcss@1.30.2) + + '@vitest/pretty-format@2.1.9': + dependencies: + tinyrainbow: 1.2.0 + + '@vitest/runner@2.1.9': + dependencies: + '@vitest/utils': 2.1.9 + pathe: 1.1.2 + + '@vitest/snapshot@2.1.9': + dependencies: + '@vitest/pretty-format': 2.1.9 + magic-string: 0.30.21 + pathe: 1.1.2 + + '@vitest/spy@2.1.9': + dependencies: + tinyspy: 3.0.2 + + '@vitest/utils@2.1.9': + dependencies: + '@vitest/pretty-format': 2.1.9 + loupe: 3.2.1 + tinyrainbow: 1.2.0 + + abort-controller-x@0.4.3: {} + + abort-controller@3.0.0: + dependencies: + event-target-shim: 5.0.1 + + accepts@1.3.8: + dependencies: + 
mime-types: 2.1.35 + negotiator: 0.6.3 + + agent-base@7.1.4: {} + + agentkeepalive@4.6.0: + dependencies: + humanize-ms: 1.2.1 + + ansi-regex@5.0.1: {} + + ansi-styles@4.3.0: + dependencies: + color-convert: 2.0.1 + + ansi-styles@5.2.0: {} + + argparse@2.0.1: {} + + array-flatten@1.1.1: {} + + assertion-error@2.0.1: {} + + async@3.2.6: {} + + asynckit@0.4.0: {} + + autoprefixer@10.4.22(postcss@8.5.6): + dependencies: + browserslist: 4.28.0 + caniuse-lite: 1.0.30001756 + fraction.js: 5.3.4 + normalize-range: 0.1.2 + picocolors: 1.1.1 + postcss: 8.5.6 + postcss-value-parser: 4.2.0 + + axios@1.13.2(debug@4.4.3): + dependencies: + follow-redirects: 1.15.11(debug@4.4.3) + form-data: 4.0.5 + proxy-from-env: 1.1.0 + transitivePeerDependencies: + - debug + + base64-js@1.5.1: {} + + baseline-browser-mapping@2.8.31: {} + + bidi-js@1.0.3: + dependencies: + require-from-string: 2.0.2 + + binary-extensions@2.3.0: {} + + body-parser@1.20.3: + dependencies: + bytes: 3.1.2 + content-type: 1.0.5 + debug: 2.6.9 + depd: 2.0.0 + destroy: 1.2.0 + http-errors: 2.0.0 + iconv-lite: 0.4.24 + on-finished: 2.4.1 + qs: 6.13.0 + raw-body: 2.5.2 + type-is: 1.6.18 + unpipe: 1.0.0 + transitivePeerDependencies: + - supports-color + + boolbase@1.0.0: {} + + browserslist@4.28.0: + dependencies: + baseline-browser-mapping: 2.8.31 + caniuse-lite: 1.0.30001756 + electron-to-chromium: 1.5.259 + node-releases: 2.0.27 + update-browserslist-db: 1.1.4(browserslist@4.28.0) + + buffer-crc32@0.2.13: {} + + buffer-equal-constant-time@1.0.1: {} + + buffer@6.0.3: + dependencies: + base64-js: 1.5.1 + ieee754: 1.2.1 + + bundle-name@4.1.0: + dependencies: + run-applescript: 7.1.0 + + bytes@3.1.2: {} + + cac@6.7.14: {} + + call-bind-apply-helpers@1.0.2: + dependencies: + es-errors: 1.3.0 + function-bind: 1.1.2 + + call-bound@1.0.4: + dependencies: + call-bind-apply-helpers: 1.0.2 + get-intrinsic: 1.3.0 + + camelcase@6.3.0: {} + + caniuse-lite@1.0.30001756: {} + + chai@5.3.3: + dependencies: + assertion-error: 2.0.1 + 
check-error: 2.1.1 + deep-eql: 5.0.2 + loupe: 3.2.1 + pathval: 2.0.1 + + chalk@4.1.2: + dependencies: + ansi-styles: 4.3.0 + supports-color: 7.2.0 + + check-error@2.1.1: {} + + cheerio-select@2.1.0: + dependencies: + boolbase: 1.0.0 + css-select: 5.2.2 + css-what: 6.2.2 + domelementtype: 2.3.0 + domhandler: 5.0.3 + domutils: 3.2.2 + + cheerio@1.1.2: + dependencies: + cheerio-select: 2.1.0 + dom-serializer: 2.0.0 + domhandler: 5.0.3 + domutils: 3.2.2 + encoding-sniffer: 0.2.1 + htmlparser2: 10.0.0 + parse5: 7.3.0 + parse5-htmlparser2-tree-adapter: 7.1.0 + parse5-parser-stream: 7.1.2 + undici: 7.16.0 + whatwg-mimetype: 4.0.0 + + chokidar@4.0.3: + dependencies: + readdirp: 4.1.2 + + chownr@3.0.0: {} + + cliui@8.0.1: + dependencies: + string-width: 4.2.3 + strip-ansi: 6.0.1 + wrap-ansi: 7.0.0 + + color-convert@2.0.1: + dependencies: + color-name: 1.1.4 + + color-convert@3.1.3: + dependencies: + color-name: 2.1.0 + + color-name@1.1.4: {} + + color-name@2.1.0: {} + + color-string@2.1.4: + dependencies: + color-name: 2.1.0 + + color@5.0.3: + dependencies: + color-convert: 3.1.3 + color-string: 2.1.4 + + colors@1.4.0: {} + + combined-stream@1.0.8: + dependencies: + delayed-stream: 1.0.0 + + commander@13.1.0: {} + + console-table-printer@2.15.0: + dependencies: + simple-wcswidth: 1.1.2 + + content-disposition@0.5.4: + dependencies: + safe-buffer: 5.2.1 + + content-type@1.0.5: {} + + cookie-signature@1.0.6: {} + + cookie@0.7.1: {} + + copy-anything@4.0.5: + dependencies: + is-what: 5.5.0 + + cors@2.8.5: + dependencies: + object-assign: 4.1.1 + vary: 1.1.2 + + create-langgraph@1.0.0: + dependencies: + '@clack/prompts': 0.9.1 + '@commander-js/extra-typings': 13.1.0(commander@13.1.0) + commander: 13.1.0 + dedent: 1.7.0 + extract-zip: 2.0.1 + picocolors: 1.1.1 + transitivePeerDependencies: + - babel-plugin-macros + - supports-color + + cross-fetch@3.2.0: + dependencies: + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + + cross-spawn@7.0.6: + dependencies: + 
path-key: 3.1.1 + shebang-command: 2.0.0 + which: 2.0.2 + + css-select@5.2.2: + dependencies: + boolbase: 1.0.0 + css-what: 6.2.2 + domhandler: 5.0.3 + domutils: 3.2.2 + nth-check: 2.1.1 + + css-tree@3.1.0: + dependencies: + mdn-data: 2.12.2 + source-map-js: 1.2.1 + + css-what@6.2.2: {} + + cssesc@3.0.0: {} + + cssstyle@5.3.3: + dependencies: + '@asamuzakjp/css-color': 4.1.0 + '@csstools/css-syntax-patches-for-csstree': 1.0.17 + css-tree: 3.1.0 + + data-urls@6.0.0: + dependencies: + whatwg-mimetype: 4.0.0 + whatwg-url: 15.1.0 + + debug@2.6.9: + dependencies: + ms: 2.0.0 + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + decamelize@1.2.0: {} + + decimal.js@10.6.0: {} + + dedent@1.7.0: {} + + deep-eql@5.0.2: {} + + deepmerge@4.3.1: {} + + default-browser-id@5.0.1: {} + + default-browser@5.4.0: + dependencies: + bundle-name: 4.1.0 + default-browser-id: 5.0.1 + + define-lazy-prop@3.0.0: {} + + delayed-stream@1.0.0: {} + + depd@2.0.0: {} + + destroy@1.2.0: {} + + detect-libc@2.1.2: {} + + dom-serializer@2.0.0: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + entities: 4.5.0 + + domelementtype@2.3.0: {} + + domhandler@5.0.3: + dependencies: + domelementtype: 2.3.0 + + domutils@3.2.2: + dependencies: + dom-serializer: 2.0.0 + domelementtype: 2.3.0 + domhandler: 5.0.3 + + dotenv@16.6.1: {} + + dunder-proto@1.0.1: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-errors: 1.3.0 + gopd: 1.2.0 + + ecdsa-sig-formatter@1.0.11: + dependencies: + safe-buffer: 5.2.1 + + ee-first@1.1.1: {} + + electron-to-chromium@1.5.259: {} + + emoji-regex@8.0.0: {} + + enabled@2.0.0: {} + + encodeurl@1.0.2: {} + + encodeurl@2.0.0: {} + + encoding-sniffer@0.2.1: + dependencies: + iconv-lite: 0.6.3 + whatwg-encoding: 3.1.1 + + end-of-stream@1.4.5: + dependencies: + once: 1.4.0 + + enhanced-resolve@5.18.3: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.3.0 + + entities@4.5.0: {} + + entities@6.0.1: {} + + es-define-property@1.0.1: {} + + es-errors@1.3.0: {} + + 
es-module-lexer@1.7.0: {} + + es-object-atoms@1.1.1: + dependencies: + es-errors: 1.3.0 + + es-set-tostringtag@2.1.0: + dependencies: + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + has-tostringtag: 1.0.2 + hasown: 2.0.2 + + esbuild-plugin-tailwindcss@2.1.0: + dependencies: + '@tailwindcss/postcss': 4.1.17 + autoprefixer: 10.4.22(postcss@8.5.6) + postcss: 8.5.6 + postcss-modules: 6.0.1(postcss@8.5.6) + + esbuild@0.21.5: + optionalDependencies: + '@esbuild/aix-ppc64': 0.21.5 + '@esbuild/android-arm': 0.21.5 + '@esbuild/android-arm64': 0.21.5 + '@esbuild/android-x64': 0.21.5 + '@esbuild/darwin-arm64': 0.21.5 + '@esbuild/darwin-x64': 0.21.5 + '@esbuild/freebsd-arm64': 0.21.5 + '@esbuild/freebsd-x64': 0.21.5 + '@esbuild/linux-arm': 0.21.5 + '@esbuild/linux-arm64': 0.21.5 + '@esbuild/linux-ia32': 0.21.5 + '@esbuild/linux-loong64': 0.21.5 + '@esbuild/linux-mips64el': 0.21.5 + '@esbuild/linux-ppc64': 0.21.5 + '@esbuild/linux-riscv64': 0.21.5 + '@esbuild/linux-s390x': 0.21.5 + '@esbuild/linux-x64': 0.21.5 + '@esbuild/netbsd-x64': 0.21.5 + '@esbuild/openbsd-x64': 0.21.5 + '@esbuild/sunos-x64': 0.21.5 + '@esbuild/win32-arm64': 0.21.5 + '@esbuild/win32-ia32': 0.21.5 + '@esbuild/win32-x64': 0.21.5 + + esbuild@0.25.12: + optionalDependencies: + '@esbuild/aix-ppc64': 0.25.12 + '@esbuild/android-arm': 0.25.12 + '@esbuild/android-arm64': 0.25.12 + '@esbuild/android-x64': 0.25.12 + '@esbuild/darwin-arm64': 0.25.12 + '@esbuild/darwin-x64': 0.25.12 + '@esbuild/freebsd-arm64': 0.25.12 + '@esbuild/freebsd-x64': 0.25.12 + '@esbuild/linux-arm': 0.25.12 + '@esbuild/linux-arm64': 0.25.12 + '@esbuild/linux-ia32': 0.25.12 + '@esbuild/linux-loong64': 0.25.12 + '@esbuild/linux-mips64el': 0.25.12 + '@esbuild/linux-ppc64': 0.25.12 + '@esbuild/linux-riscv64': 0.25.12 + '@esbuild/linux-s390x': 0.25.12 + '@esbuild/linux-x64': 0.25.12 + '@esbuild/netbsd-arm64': 0.25.12 + '@esbuild/netbsd-x64': 0.25.12 + '@esbuild/openbsd-arm64': 0.25.12 + '@esbuild/openbsd-x64': 0.25.12 + '@esbuild/openharmony-arm64': 
0.25.12 + '@esbuild/sunos-x64': 0.25.12 + '@esbuild/win32-arm64': 0.25.12 + '@esbuild/win32-ia32': 0.25.12 + '@esbuild/win32-x64': 0.25.12 + + escalade@3.2.0: {} + + escape-html@1.0.3: {} + + estree-walker@3.0.3: + dependencies: + '@types/estree': 1.0.8 + + etag@1.8.1: {} + + event-target-shim@5.0.1: {} + + eventemitter3@4.0.7: {} + + events@3.3.0: {} + + execa@9.6.0: + dependencies: + '@sindresorhus/merge-streams': 4.0.0 + cross-spawn: 7.0.6 + figures: 6.1.0 + get-stream: 9.0.1 + human-signals: 8.0.1 + is-plain-obj: 4.1.0 + is-stream: 4.0.1 + npm-run-path: 6.0.0 + pretty-ms: 9.3.0 + signal-exit: 4.1.0 + strip-final-newline: 4.0.0 + yoctocolors: 2.1.2 + + exit-hook@4.0.0: {} + + expect-type@1.2.2: {} + + express@4.21.2: + dependencies: + accepts: 1.3.8 + array-flatten: 1.1.1 + body-parser: 1.20.3 + content-disposition: 0.5.4 + content-type: 1.0.5 + cookie: 0.7.1 + cookie-signature: 1.0.6 + debug: 2.6.9 + depd: 2.0.0 + encodeurl: 2.0.0 + escape-html: 1.0.3 + etag: 1.8.1 + finalhandler: 1.3.1 + fresh: 0.5.2 + http-errors: 2.0.0 + merge-descriptors: 1.0.3 + methods: 1.1.2 + on-finished: 2.4.1 + parseurl: 1.3.3 + path-to-regexp: 0.1.12 + proxy-addr: 2.0.7 + qs: 6.13.0 + range-parser: 1.2.1 + safe-buffer: 5.2.1 + send: 0.19.0 + serve-static: 1.16.2 + setprototypeof: 1.2.0 + statuses: 2.0.1 + type-is: 1.6.18 + utils-merge: 1.0.1 + vary: 1.1.2 + transitivePeerDependencies: + - supports-color + + extend@3.0.2: {} + + extract-zip@2.0.1: + dependencies: + debug: 4.4.3 + get-stream: 5.2.0 + yauzl: 2.10.0 + optionalDependencies: + '@types/yauzl': 2.10.3 + transitivePeerDependencies: + - supports-color + + fd-slicer@1.1.0: + dependencies: + pend: 1.2.0 + + fecha@4.2.3: {} + + figures@6.1.0: + dependencies: + is-unicode-supported: 2.1.0 + + file-type@16.5.4: + dependencies: + readable-web-to-node-stream: 3.0.4 + strtok3: 6.3.0 + token-types: 4.2.1 + + finalhandler@1.3.1: + dependencies: + debug: 2.6.9 + encodeurl: 2.0.0 + escape-html: 1.0.3 + on-finished: 2.4.1 + parseurl: 1.3.3 
+ statuses: 2.0.1 + unpipe: 1.0.0 + transitivePeerDependencies: + - supports-color + + flat@5.0.2: {} + + fn.name@1.1.0: {} + + follow-redirects@1.15.11(debug@4.4.3): + optionalDependencies: + debug: 4.4.3 + + form-data-encoder@1.7.2: {} + + form-data@4.0.5: + dependencies: + asynckit: 0.4.0 + combined-stream: 1.0.8 + es-set-tostringtag: 2.1.0 + hasown: 2.0.2 + mime-types: 2.1.35 + + formdata-node@4.4.1: + dependencies: + node-domexception: 1.0.0 + web-streams-polyfill: 4.0.0-beta.3 + + forwarded@0.2.0: {} + + fraction.js@5.3.4: {} + + fresh@0.5.2: {} + + fsevents@2.3.2: + optional: true + + fsevents@2.3.3: + optional: true + + function-bind@1.1.2: {} + + generic-names@4.0.0: + dependencies: + loader-utils: 3.3.1 + + get-caller-file@2.0.5: {} + + get-intrinsic@1.3.0: + dependencies: + call-bind-apply-helpers: 1.0.2 + es-define-property: 1.0.1 + es-errors: 1.3.0 + es-object-atoms: 1.1.1 + function-bind: 1.1.2 + get-proto: 1.0.1 + gopd: 1.2.0 + has-symbols: 1.1.0 + hasown: 2.0.2 + math-intrinsics: 1.1.0 + + get-proto@1.0.1: + dependencies: + dunder-proto: 1.0.1 + es-object-atoms: 1.1.1 + + get-stream@5.2.0: + dependencies: + pump: 3.0.3 + + get-stream@9.0.1: + dependencies: + '@sec-ant/readable-stream': 0.4.1 + is-stream: 4.0.1 + + get-tsconfig@4.13.0: + dependencies: + resolve-pkg-maps: 1.0.0 + + gopd@1.2.0: {} + + graceful-fs@4.2.11: {} + + graphql-request@6.1.0(graphql@16.12.0): + dependencies: + '@graphql-typed-document-node/core': 3.2.0(graphql@16.12.0) + cross-fetch: 3.2.0 + graphql: 16.12.0 + transitivePeerDependencies: + - encoding + + graphql@16.12.0: {} + + groq-sdk@0.19.0: + dependencies: + '@types/node': 18.19.130 + '@types/node-fetch': 2.6.13 + abort-controller: 3.0.0 + agentkeepalive: 4.6.0 + form-data-encoder: 1.7.2 + formdata-node: 4.4.1 + node-fetch: 2.7.0 + transitivePeerDependencies: + - encoding + + handlebars@4.7.8: + dependencies: + minimist: 1.2.8 + neo-async: 2.6.2 + source-map: 0.6.1 + wordwrap: 1.0.0 + optionalDependencies: + uglify-js: 
3.19.3 + + has-flag@4.0.0: {} + + has-symbols@1.1.0: {} + + has-tostringtag@1.0.2: + dependencies: + has-symbols: 1.1.0 + + hasown@2.0.2: + dependencies: + function-bind: 1.1.2 + + hono@4.10.6: {} + + html-encoding-sniffer@4.0.0: + dependencies: + whatwg-encoding: 3.1.1 + + htmlparser2@10.0.0: + dependencies: + domelementtype: 2.3.0 + domhandler: 5.0.3 + domutils: 3.2.2 + entities: 6.0.1 + + http-errors@2.0.0: + dependencies: + depd: 2.0.0 + inherits: 2.0.4 + setprototypeof: 1.2.0 + statuses: 2.0.1 + toidentifier: 1.0.1 + + http-proxy-agent@7.0.2: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + https-proxy-agent@7.0.6: + dependencies: + agent-base: 7.1.4 + debug: 4.4.3 + transitivePeerDependencies: + - supports-color + + human-signals@8.0.1: {} + + humanize-ms@1.2.1: + dependencies: + ms: 2.1.3 + + ibm-cloud-sdk-core@5.4.4: + dependencies: + '@types/debug': 4.1.12 + '@types/node': 18.19.130 + '@types/tough-cookie': 4.0.5 + axios: 1.13.2(debug@4.4.3) + camelcase: 6.3.0 + debug: 4.4.3 + dotenv: 16.6.1 + extend: 3.0.2 + file-type: 16.5.4 + form-data: 4.0.5 + isstream: 0.1.2 + jsonwebtoken: 9.0.2 + mime-types: 2.1.35 + retry-axios: 2.6.0(axios@1.13.2) + tough-cookie: 4.1.4 + transitivePeerDependencies: + - supports-color + + iconv-lite@0.4.24: + dependencies: + safer-buffer: 2.1.2 + + iconv-lite@0.6.3: + dependencies: + safer-buffer: 2.1.2 + + icss-utils@5.1.0(postcss@8.5.6): + dependencies: + postcss: 8.5.6 + + ieee754@1.2.1: {} + + inherits@2.0.4: {} + + ipaddr.js@1.9.1: {} + + is-docker@3.0.0: {} + + is-fullwidth-code-point@3.0.0: {} + + is-inside-container@1.0.0: + dependencies: + is-docker: 3.0.0 + + is-plain-obj@4.1.0: {} + + is-potential-custom-element-name@1.0.1: {} + + is-stream@2.0.1: {} + + is-stream@4.0.1: {} + + is-unicode-supported@2.1.0: {} + + is-what@5.5.0: {} + + is-wsl@3.1.0: + dependencies: + is-inside-container: 1.0.0 + + isexe@2.0.0: {} + + isstream@0.1.2: {} + + jiti@2.6.1: {} + + 
js-tiktoken@1.0.21: + dependencies: + base64-js: 1.5.1 + + js-tokens@4.0.0: {} + + js-yaml@4.1.1: + dependencies: + argparse: 2.0.1 + + jsdom@27.2.0: + dependencies: + '@acemir/cssom': 0.9.24 + '@asamuzakjp/dom-selector': 6.7.4 + cssstyle: 5.3.3 + data-urls: 6.0.0 + decimal.js: 10.6.0 + html-encoding-sniffer: 4.0.0 + http-proxy-agent: 7.0.2 + https-proxy-agent: 7.0.6 + is-potential-custom-element-name: 1.0.1 + parse5: 8.0.0 + saxes: 6.0.0 + symbol-tree: 3.2.4 + tough-cookie: 6.0.0 + w3c-xmlserializer: 5.0.0 + webidl-conversions: 8.0.0 + whatwg-encoding: 3.1.1 + whatwg-mimetype: 4.0.0 + whatwg-url: 15.1.0 + ws: 8.18.3 + xml-name-validator: 5.0.0 + transitivePeerDependencies: + - bufferutil + - supports-color + - utf-8-validate + + json-schema-to-ts@3.1.1: + dependencies: + '@babel/runtime': 7.28.4 + ts-algebra: 2.0.0 + + jsonpointer@5.0.1: {} + + jsonwebtoken@9.0.2: + dependencies: + jws: 3.2.2 + lodash.includes: 4.3.0 + lodash.isboolean: 3.0.3 + lodash.isinteger: 4.0.4 + lodash.isnumber: 3.0.3 + lodash.isplainobject: 4.0.6 + lodash.isstring: 4.0.1 + lodash.once: 4.1.1 + ms: 2.1.3 + semver: 7.7.3 + + jwa@1.4.2: + dependencies: + buffer-equal-constant-time: 1.0.1 + ecdsa-sig-formatter: 1.0.11 + safe-buffer: 5.2.1 + + jws@3.2.2: + dependencies: + jwa: 1.4.2 + safe-buffer: 5.2.1 + + kuler@2.0.0: {} + + langchain@1.0.6(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(openai@6.9.1(ws@8.18.3)(zod@3.25.76))(zod-to-json-schema@3.25.0(zod@3.25.76)): + dependencies: + '@langchain/core': 1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + '@langchain/langgraph': 1.0.2(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76)))(zod-to-json-schema@3.25.0(zod@3.25.76))(zod@3.25.76) + '@langchain/langgraph-checkpoint': 1.0.0(@langchain/core@1.0.6(openai@6.9.1(ws@8.18.3)(zod@3.25.76))) + langsmith: 0.3.81(openai@6.9.1(ws@8.18.3)(zod@3.25.76)) + uuid: 10.0.0 + zod: 3.25.76 + transitivePeerDependencies: + - '@opentelemetry/api' + - '@opentelemetry/exporter-trace-otlp-proto' + 
- '@opentelemetry/sdk-trace-base' + - openai + - react + - react-dom + - zod-to-json-schema + + langsmith@0.3.81(openai@6.9.1(ws@8.18.3)(zod@3.25.76)): + dependencies: + '@types/uuid': 10.0.0 + chalk: 4.1.2 + console-table-printer: 2.15.0 + p-queue: 6.6.2 + p-retry: 4.6.2 + semver: 7.7.3 + uuid: 10.0.0 + optionalDependencies: + openai: 6.9.1(ws@8.18.3)(zod@3.25.76) + + lightningcss-android-arm64@1.30.2: + optional: true + + lightningcss-darwin-arm64@1.30.2: + optional: true + + lightningcss-darwin-x64@1.30.2: + optional: true + + lightningcss-freebsd-x64@1.30.2: + optional: true + + lightningcss-linux-arm-gnueabihf@1.30.2: + optional: true + + lightningcss-linux-arm64-gnu@1.30.2: + optional: true + + lightningcss-linux-arm64-musl@1.30.2: + optional: true + + lightningcss-linux-x64-gnu@1.30.2: + optional: true + + lightningcss-linux-x64-musl@1.30.2: + optional: true + + lightningcss-win32-arm64-msvc@1.30.2: + optional: true + + lightningcss-win32-x64-msvc@1.30.2: + optional: true + + lightningcss@1.30.2: + dependencies: + detect-libc: 2.1.2 + optionalDependencies: + lightningcss-android-arm64: 1.30.2 + lightningcss-darwin-arm64: 1.30.2 + lightningcss-darwin-x64: 1.30.2 + lightningcss-freebsd-x64: 1.30.2 + lightningcss-linux-arm-gnueabihf: 1.30.2 + lightningcss-linux-arm64-gnu: 1.30.2 + lightningcss-linux-arm64-musl: 1.30.2 + lightningcss-linux-x64-gnu: 1.30.2 + lightningcss-linux-x64-musl: 1.30.2 + lightningcss-win32-arm64-msvc: 1.30.2 + lightningcss-win32-x64-msvc: 1.30.2 + + loader-utils@3.3.1: {} + + lodash.camelcase@4.3.0: {} + + lodash.includes@4.3.0: {} + + lodash.isboolean@3.0.3: {} + + lodash.isinteger@4.0.4: {} + + lodash.isnumber@3.0.3: {} + + lodash.isplainobject@4.0.6: {} + + lodash.isstring@4.0.1: {} + + lodash.once@4.1.1: {} + + lodash@4.17.21: {} + + logform@2.7.0: + dependencies: + '@colors/colors': 1.6.0 + '@types/triple-beam': 1.3.5 + fecha: 4.2.3 + ms: 2.1.3 + safe-stable-stringify: 2.5.0 + triple-beam: 1.4.1 + + long@5.3.2: {} + + loupe@3.2.1: {} 
+ + lru-cache@11.2.2: {} + + magic-string@0.30.21: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + math-expression-evaluator@2.0.7: {} + + math-intrinsics@1.1.0: {} + + mdn-data@2.12.2: {} + + media-typer@0.3.0: {} + + merge-descriptors@1.0.3: {} + + methods@1.1.2: {} + + mime-db@1.52.0: {} + + mime-types@2.1.35: + dependencies: + mime-db: 1.52.0 + + mime@1.6.0: {} + + minimist@1.2.8: {} + + minipass@7.1.2: {} + + minizlib@3.1.0: + dependencies: + minipass: 7.1.2 + + ms@2.0.0: {} + + ms@2.1.3: {} + + mustache@4.2.0: {} + + nanoid@3.3.11: {} + + negotiator@0.6.3: {} + + neo-async@2.6.2: {} + + nice-grpc-client-middleware-retry@3.1.13: + dependencies: + abort-controller-x: 0.4.3 + nice-grpc-common: 2.0.2 + + nice-grpc-common@2.0.2: + dependencies: + ts-error: 1.0.6 + + nice-grpc@2.1.14: + dependencies: + '@grpc/grpc-js': 1.14.1 + abort-controller-x: 0.4.3 + nice-grpc-common: 2.0.2 + + node-domexception@1.0.0: {} + + node-fetch@2.7.0: + dependencies: + whatwg-url: 5.0.0 + + node-releases@2.0.27: {} + + normalize-range@0.1.2: {} + + npm-run-path@6.0.0: + dependencies: + path-key: 4.0.0 + unicorn-magic: 0.3.0 + + nth-check@2.1.1: + dependencies: + boolbase: 1.0.0 + + object-assign@4.1.1: {} + + object-inspect@1.13.4: {} + + ollama@0.5.18: + dependencies: + whatwg-fetch: 3.6.20 + + on-finished@2.4.1: + dependencies: + ee-first: 1.1.1 + + once@1.4.0: + dependencies: + wrappy: 1.0.2 + + one-time@1.0.0: + dependencies: + fn.name: 1.1.0 + + open@10.2.0: + dependencies: + default-browser: 5.4.0 + define-lazy-prop: 3.0.0 + is-inside-container: 1.0.0 + wsl-utils: 0.1.0 + + openai@6.9.1(ws@8.18.3)(zod@3.25.76): + optionalDependencies: + ws: 8.18.3 + zod: 3.25.76 + + openapi-types@12.1.3: {} + + p-finally@1.0.0: {} + + p-queue@6.6.2: + dependencies: + eventemitter3: 4.0.7 + p-timeout: 3.2.0 + + p-retry@4.6.2: + dependencies: + '@types/retry': 0.12.0 + retry: 0.13.1 + + p-timeout@3.2.0: + dependencies: + p-finally: 1.0.0 + + package-manager-detector@1.5.0: {} + + 
parse-ms@4.0.0: {} + + parse5-htmlparser2-tree-adapter@7.1.0: + dependencies: + domhandler: 5.0.3 + parse5: 7.3.0 + + parse5-parser-stream@7.1.2: + dependencies: + parse5: 7.3.0 + + parse5@7.3.0: + dependencies: + entities: 6.0.1 + + parse5@8.0.0: + dependencies: + entities: 6.0.1 + + parseurl@1.3.3: {} + + path-key@3.1.1: {} + + path-key@4.0.0: {} + + path-to-regexp@0.1.12: {} + + pathe@1.1.2: {} + + pathval@2.0.1: {} + + peek-readable@4.1.0: {} + + pend@1.2.0: {} + + pg-cloudflare@1.2.7: + optional: true + + pg-connection-string@2.9.1: {} + + pg-int8@1.0.1: {} + + pg-pool@3.10.1(pg@8.16.3): + dependencies: + pg: 8.16.3 + + pg-protocol@1.10.3: {} + + pg-types@2.2.0: + dependencies: + pg-int8: 1.0.1 + postgres-array: 2.0.0 + postgres-bytea: 1.0.0 + postgres-date: 1.0.7 + postgres-interval: 1.2.0 + + pg@8.16.3: + dependencies: + pg-connection-string: 2.9.1 + pg-pool: 3.10.1(pg@8.16.3) + pg-protocol: 1.10.3 + pg-types: 2.2.0 + pgpass: 1.0.5 + optionalDependencies: + pg-cloudflare: 1.2.7 + + pgpass@1.0.5: + dependencies: + split2: 4.2.0 + + picocolors@1.1.1: {} + + playwright-core@1.56.1: {} + + playwright@1.56.1: + dependencies: + playwright-core: 1.56.1 + optionalDependencies: + fsevents: 2.3.2 + + postcss-modules-extract-imports@3.1.0(postcss@8.5.6): + dependencies: + postcss: 8.5.6 + + postcss-modules-local-by-default@4.2.0(postcss@8.5.6): + dependencies: + icss-utils: 5.1.0(postcss@8.5.6) + postcss: 8.5.6 + postcss-selector-parser: 7.1.0 + postcss-value-parser: 4.2.0 + + postcss-modules-scope@3.2.1(postcss@8.5.6): + dependencies: + postcss: 8.5.6 + postcss-selector-parser: 7.1.0 + + postcss-modules-values@4.0.0(postcss@8.5.6): + dependencies: + icss-utils: 5.1.0(postcss@8.5.6) + postcss: 8.5.6 + + postcss-modules@6.0.1(postcss@8.5.6): + dependencies: + generic-names: 4.0.0 + icss-utils: 5.1.0(postcss@8.5.6) + lodash.camelcase: 4.3.0 + postcss: 8.5.6 + postcss-modules-extract-imports: 3.1.0(postcss@8.5.6) + postcss-modules-local-by-default: 4.2.0(postcss@8.5.6) + 
postcss-modules-scope: 3.2.1(postcss@8.5.6) + postcss-modules-values: 4.0.0(postcss@8.5.6) + string-hash: 1.1.3 + + postcss-selector-parser@7.1.0: + dependencies: + cssesc: 3.0.0 + util-deprecate: 1.0.2 + + postcss-value-parser@4.2.0: {} + + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + postgres-array@2.0.0: {} + + postgres-bytea@1.0.0: {} + + postgres-date@1.0.7: {} + + postgres-interval@1.2.0: + dependencies: + xtend: 4.0.2 + + pretty-ms@9.3.0: + dependencies: + parse-ms: 4.0.0 + + process@0.11.10: {} + + protobufjs@7.5.4: + dependencies: + '@protobufjs/aspromise': 1.1.2 + '@protobufjs/base64': 1.1.2 + '@protobufjs/codegen': 2.0.4 + '@protobufjs/eventemitter': 1.1.0 + '@protobufjs/fetch': 1.1.0 + '@protobufjs/float': 1.0.2 + '@protobufjs/inquire': 1.1.0 + '@protobufjs/path': 1.1.2 + '@protobufjs/pool': 1.1.0 + '@protobufjs/utf8': 1.1.0 + '@types/node': 22.19.1 + long: 5.3.2 + + proxy-addr@2.0.7: + dependencies: + forwarded: 0.2.0 + ipaddr.js: 1.9.1 + + proxy-from-env@1.1.0: {} + + psl@1.15.0: + dependencies: + punycode: 2.3.1 + + pump@3.0.3: + dependencies: + end-of-stream: 1.4.5 + once: 1.4.0 + + punycode@2.3.1: {} + + qs@6.13.0: + dependencies: + side-channel: 1.1.0 + + querystringify@2.2.0: {} + + range-parser@1.2.1: {} + + raw-body@2.5.2: + dependencies: + bytes: 3.1.2 + http-errors: 2.0.0 + iconv-lite: 0.4.24 + unpipe: 1.0.0 + + readable-stream@3.6.2: + dependencies: + inherits: 2.0.4 + string_decoder: 1.3.0 + util-deprecate: 1.0.2 + + readable-stream@4.7.0: + dependencies: + abort-controller: 3.0.0 + buffer: 6.0.3 + events: 3.3.0 + process: 0.11.10 + string_decoder: 1.3.0 + + readable-web-to-node-stream@3.0.4: + dependencies: + readable-stream: 4.7.0 + + readdirp@4.1.2: {} + + require-directory@2.1.1: {} + + require-from-string@2.0.2: {} + + requires-port@1.0.0: {} + + resolve-pkg-maps@1.0.0: {} + + retry-axios@2.6.0(axios@1.13.2): + dependencies: + axios: 1.13.2(debug@4.4.3) + + retry@0.13.1: {} + + 
rollup@4.53.3: + dependencies: + '@types/estree': 1.0.8 + optionalDependencies: + '@rollup/rollup-android-arm-eabi': 4.53.3 + '@rollup/rollup-android-arm64': 4.53.3 + '@rollup/rollup-darwin-arm64': 4.53.3 + '@rollup/rollup-darwin-x64': 4.53.3 + '@rollup/rollup-freebsd-arm64': 4.53.3 + '@rollup/rollup-freebsd-x64': 4.53.3 + '@rollup/rollup-linux-arm-gnueabihf': 4.53.3 + '@rollup/rollup-linux-arm-musleabihf': 4.53.3 + '@rollup/rollup-linux-arm64-gnu': 4.53.3 + '@rollup/rollup-linux-arm64-musl': 4.53.3 + '@rollup/rollup-linux-loong64-gnu': 4.53.3 + '@rollup/rollup-linux-ppc64-gnu': 4.53.3 + '@rollup/rollup-linux-riscv64-gnu': 4.53.3 + '@rollup/rollup-linux-riscv64-musl': 4.53.3 + '@rollup/rollup-linux-s390x-gnu': 4.53.3 + '@rollup/rollup-linux-x64-gnu': 4.53.3 + '@rollup/rollup-linux-x64-musl': 4.53.3 + '@rollup/rollup-openharmony-arm64': 4.53.3 + '@rollup/rollup-win32-arm64-msvc': 4.53.3 + '@rollup/rollup-win32-ia32-msvc': 4.53.3 + '@rollup/rollup-win32-x64-gnu': 4.53.3 + '@rollup/rollup-win32-x64-msvc': 4.53.3 + fsevents: 2.3.3 + + run-applescript@7.1.0: {} + + safe-buffer@5.2.1: {} + + safe-stable-stringify@2.5.0: {} + + safer-buffer@2.1.2: {} + + saxes@6.0.0: + dependencies: + xmlchars: 2.2.0 + + semver@7.7.3: {} + + send@0.19.0: + dependencies: + debug: 2.6.9 + depd: 2.0.0 + destroy: 1.2.0 + encodeurl: 1.0.2 + escape-html: 1.0.3 + etag: 1.8.1 + fresh: 0.5.2 + http-errors: 2.0.0 + mime: 1.6.0 + ms: 2.1.3 + on-finished: 2.4.1 + range-parser: 1.2.1 + statuses: 2.0.1 + transitivePeerDependencies: + - supports-color + + serve-static@1.16.2: + dependencies: + encodeurl: 2.0.0 + escape-html: 1.0.3 + parseurl: 1.3.3 + send: 0.19.0 + transitivePeerDependencies: + - supports-color + + setprototypeof@1.2.0: {} + + shebang-command@2.0.0: + dependencies: + shebang-regex: 3.0.0 + + shebang-regex@3.0.0: {} + + side-channel-list@1.0.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + + side-channel-map@1.0.1: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + 
get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + + side-channel-weakmap@1.0.2: + dependencies: + call-bound: 1.0.4 + es-errors: 1.3.0 + get-intrinsic: 1.3.0 + object-inspect: 1.13.4 + side-channel-map: 1.0.1 + + side-channel@1.1.0: + dependencies: + es-errors: 1.3.0 + object-inspect: 1.13.4 + side-channel-list: 1.0.0 + side-channel-map: 1.0.1 + side-channel-weakmap: 1.0.2 + + siginfo@2.0.0: {} + + signal-exit@4.1.0: {} + + simple-wcswidth@1.1.2: {} + + sisteransi@1.0.5: {} + + source-map-js@1.2.1: {} + + source-map@0.6.1: {} + + split2@4.2.0: {} + + stack-trace@0.0.10: {} + + stackback@0.0.2: {} + + stacktrace-parser@0.1.11: + dependencies: + type-fest: 0.7.1 + + statuses@2.0.1: {} + + std-env@3.10.0: {} + + string-hash@1.1.3: {} + + string-width@4.2.3: + dependencies: + emoji-regex: 8.0.0 + is-fullwidth-code-point: 3.0.0 + strip-ansi: 6.0.1 + + string_decoder@1.3.0: + dependencies: + safe-buffer: 5.2.1 + + strip-ansi@6.0.1: + dependencies: + ansi-regex: 5.0.1 + + strip-final-newline@4.0.0: {} + + strtok3@6.3.0: + dependencies: + '@tokenizer/token': 0.3.0 + peek-readable: 4.1.0 + + superjson@2.2.5: + dependencies: + copy-anything: 4.0.5 + + supports-color@7.2.0: + dependencies: + has-flag: 4.0.0 + + symbol-tree@3.2.4: {} + + tailwindcss@4.1.17: {} + + tapable@2.3.0: {} + + tar@7.5.2: + dependencies: + '@isaacs/fs-minipass': 4.0.1 + chownr: 3.0.0 + minipass: 7.1.2 + minizlib: 3.1.0 + yallist: 5.0.0 + + text-hex@1.0.0: {} + + tinybench@2.9.0: {} + + tinyexec@0.3.2: {} + + tinypool@1.1.1: {} + + tinyrainbow@1.2.0: {} + + tinyspy@3.0.2: {} + + tldts-core@7.0.19: {} + + tldts@7.0.19: + dependencies: + tldts-core: 7.0.19 + + toidentifier@1.0.1: {} + + token-types@4.2.1: + dependencies: + '@tokenizer/token': 0.3.0 + ieee754: 1.2.1 + + tough-cookie@4.1.4: + dependencies: + psl: 1.15.0 + punycode: 2.3.1 + universalify: 0.2.0 + url-parse: 1.5.10 + + tough-cookie@6.0.0: + dependencies: + tldts: 7.0.19 + + tr46@0.0.3: {} + + tr46@6.0.0: + dependencies: + punycode: 2.3.1 + + 
triple-beam@1.4.1: {} + + ts-algebra@2.0.0: {} + + ts-error@1.0.6: {} + + tsx@4.20.6: + dependencies: + esbuild: 0.25.12 + get-tsconfig: 4.13.0 + optionalDependencies: + fsevents: 2.3.3 + + type-fest@0.7.1: {} + + type-is@1.6.18: + dependencies: + media-typer: 0.3.0 + mime-types: 2.1.35 + + typescript@5.9.3: {} + + uglify-js@3.19.3: + optional: true + + undici-types@5.26.5: {} + + undici-types@6.21.0: {} + + undici@7.16.0: {} + + unicorn-magic@0.3.0: {} + + universalify@0.2.0: {} + + unpipe@1.0.0: {} + + update-browserslist-db@1.1.4(browserslist@4.28.0): + dependencies: + browserslist: 4.28.0 + escalade: 3.2.0 + picocolors: 1.1.1 + + url-parse@1.5.10: + dependencies: + querystringify: 2.2.0 + requires-port: 1.0.0 + + util-deprecate@1.0.2: {} + + utils-merge@1.0.1: {} + + uuid@10.0.0: {} + + uuid@11.1.0: {} + + uuid@9.0.1: {} + + vary@1.1.2: {} + + vite-node@2.1.9(@types/node@22.19.1)(lightningcss@1.30.2): + dependencies: + cac: 6.7.14 + debug: 4.4.3 + es-module-lexer: 1.7.0 + pathe: 1.1.2 + vite: 5.4.21(@types/node@22.19.1)(lightningcss@1.30.2) + transitivePeerDependencies: + - '@types/node' + - less + - lightningcss + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + + vite@5.4.21(@types/node@22.19.1)(lightningcss@1.30.2): + dependencies: + esbuild: 0.21.5 + postcss: 8.5.6 + rollup: 4.53.3 + optionalDependencies: + '@types/node': 22.19.1 + fsevents: 2.3.3 + lightningcss: 1.30.2 + + vitest@2.1.9(@types/node@22.19.1)(jsdom@27.2.0)(lightningcss@1.30.2): + dependencies: + '@vitest/expect': 2.1.9 + '@vitest/mocker': 2.1.9(vite@5.4.21(@types/node@22.19.1)(lightningcss@1.30.2)) + '@vitest/pretty-format': 2.1.9 + '@vitest/runner': 2.1.9 + '@vitest/snapshot': 2.1.9 + '@vitest/spy': 2.1.9 + '@vitest/utils': 2.1.9 + chai: 5.3.3 + debug: 4.4.3 + expect-type: 1.2.2 + magic-string: 0.30.21 + pathe: 1.1.2 + std-env: 3.10.0 + tinybench: 2.9.0 + tinyexec: 0.3.2 + tinypool: 1.1.1 + tinyrainbow: 1.2.0 + vite: 
5.4.21(@types/node@22.19.1)(lightningcss@1.30.2) + vite-node: 2.1.9(@types/node@22.19.1)(lightningcss@1.30.2) + why-is-node-running: 2.3.0 + optionalDependencies: + '@types/node': 22.19.1 + jsdom: 27.2.0 + transitivePeerDependencies: + - less + - lightningcss + - msw + - sass + - sass-embedded + - stylus + - sugarss + - supports-color + - terser + + w3c-xmlserializer@5.0.0: + dependencies: + xml-name-validator: 5.0.0 + + weaviate-client@3.9.0: + dependencies: + abort-controller-x: 0.4.3 + graphql: 16.12.0 + graphql-request: 6.1.0(graphql@16.12.0) + long: 5.3.2 + nice-grpc: 2.1.14 + nice-grpc-client-middleware-retry: 3.1.13 + nice-grpc-common: 2.0.2 + uuid: 9.0.1 + transitivePeerDependencies: + - encoding + + web-streams-polyfill@4.0.0-beta.3: {} + + webidl-conversions@3.0.1: {} + + webidl-conversions@8.0.0: {} + + whatwg-encoding@3.1.1: + dependencies: + iconv-lite: 0.6.3 + + whatwg-fetch@3.6.20: {} + + whatwg-mimetype@4.0.0: {} + + whatwg-url@15.1.0: + dependencies: + tr46: 6.0.0 + webidl-conversions: 8.0.0 + + whatwg-url@5.0.0: + dependencies: + tr46: 0.0.3 + webidl-conversions: 3.0.1 + + which@2.0.2: + dependencies: + isexe: 2.0.0 + + why-is-node-running@2.3.0: + dependencies: + siginfo: 2.0.0 + stackback: 0.0.2 + + winston-console-format@1.0.8: + dependencies: + colors: 1.4.0 + logform: 2.7.0 + triple-beam: 1.4.1 + + winston-transport@4.9.0: + dependencies: + logform: 2.7.0 + readable-stream: 3.6.2 + triple-beam: 1.4.1 + + winston@3.18.3: + dependencies: + '@colors/colors': 1.6.0 + '@dabh/diagnostics': 2.0.8 + async: 3.2.6 + is-stream: 2.0.1 + logform: 2.7.0 + one-time: 1.0.0 + readable-stream: 3.6.2 + safe-stable-stringify: 2.5.0 + stack-trace: 0.0.10 + triple-beam: 1.4.1 + winston-transport: 4.9.0 + + wordwrap@1.0.0: {} + + wrap-ansi@7.0.0: + dependencies: + ansi-styles: 4.3.0 + string-width: 4.2.3 + strip-ansi: 6.0.1 + + wrappy@1.0.2: {} + + ws@8.18.3: {} + + wsl-utils@0.1.0: + dependencies: + is-wsl: 3.1.0 + + xml-name-validator@5.0.0: {} + + 
xmlchars@2.2.0: {} + + xtend@4.0.2: {} + + y18n@5.0.8: {} + + yallist@5.0.0: {} + + yaml@2.8.1: {} + + yargs-parser@21.1.1: {} + + yargs@17.7.2: + dependencies: + cliui: 8.0.1 + escalade: 3.2.0 + get-caller-file: 2.0.5 + require-directory: 2.1.1 + string-width: 4.2.3 + y18n: 5.0.8 + yargs-parser: 21.1.1 + + yauzl@2.10.0: + dependencies: + buffer-crc32: 0.2.13 + fd-slicer: 1.1.0 + + yoctocolors@2.1.2: {} + + zod-to-json-schema@3.25.0(zod@3.25.76): + dependencies: + zod: 3.25.76 + + zod@3.25.76: {} diff --git a/backend-js/src/configuration.ts b/backend-js/src/configuration.ts new file mode 100644 index 000000000..89f596e4c --- /dev/null +++ b/backend-js/src/configuration.ts @@ -0,0 +1,106 @@ +/** + * Base configuration for the agent. + * + * This module defines the base configuration parameters for indexing and retrieval operations. + */ + +import { RunnableConfig } from "@langchain/core/runnables"; +import { z } from "zod"; + +/** + * Schema for backwards-compatible model name mapping + */ +const MODEL_NAME_TO_RESPONSE_MODEL: Record = { + anthropic_claude_3_5_sonnet: "anthropic/claude-3-5-sonnet-20240620", +}; + +/** + * BaseConfiguration schema using Zod for validation + */ +export const BaseConfigurationSchema = z.object({ + /** + * Name of the embedding model to use. + * Must be a valid embedding model name. + * @default "ollama/nomic-embed-text" + */ + embeddingModel: z + .string() + .default(process.env.EMBEDDING_MODEL || "ollama/nomic-embed-text") + .describe("Name of the embedding model to use"), + + /** + * The vector store provider to use for retrieval. + * @default "weaviate" + */ + retrieverProvider: z + .enum(["weaviate"]) + .default("weaviate") + .describe("The vector store provider to use for retrieval"), + + /** + * Additional keyword arguments to pass to the search function of the retriever. 
+ */ + searchKwargs: z + .record(z.any()) + .default({}) + .describe("Additional keyword arguments for the retriever search function"), + + /** + * The number of documents to retrieve (backwards compatibility). + * Use searchKwargs instead. + * @default 6 + */ + k: z.number().default(6).describe("Number of documents to retrieve"), +}); + +export type BaseConfiguration = z.infer; + +/** + * Update configurable parameters for backwards compatibility + */ +function updateConfigurableForBackwardsCompatibility( + configurable: Record +): Record { + const update: Record = {}; + + if ("k" in configurable) { + update.searchKwargs = { k: configurable.k }; + } + + if ("model_name" in configurable) { + update.responseModel = + MODEL_NAME_TO_RESPONSE_MODEL[configurable.model_name] || configurable.model_name; + } + + if (Object.keys(update).length > 0) { + return { ...configurable, ...update }; + } + + return configurable; +} + +/** + * Extract configuration from RunnableConfig + */ +export function getBaseConfiguration(config?: RunnableConfig): BaseConfiguration { + const configurable = config?.configurable || {}; + const updated = updateConfigurableForBackwardsCompatibility(configurable); + + // Convert snake_case to camelCase for embedding_model + if ("embedding_model" in updated) { + updated.embeddingModel = updated.embedding_model; + delete updated.embedding_model; + } + if ("retriever_provider" in updated) { + updated.retrieverProvider = updated.retriever_provider; + delete updated.retriever_provider; + } + if ("search_kwargs" in updated) { + updated.searchKwargs = updated.search_kwargs; + delete updated.search_kwargs; + } + + // Parse and validate with defaults + return BaseConfigurationSchema.parse(updated); +} + diff --git a/backend-js/src/constants.ts b/backend-js/src/constants.ts new file mode 100644 index 000000000..68d6f189e --- /dev/null +++ b/backend-js/src/constants.ts @@ -0,0 +1,16 @@ +/** + * Application constants + */ + +// Weaviate index names +export const 
WEAVIATE_DOCS_INDEX_NAME = + 'LangChain_Combined_Docs_nomic_embed_text' +export const WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME = + 'LangChain_General_Guides_And_Tutorials_nomic_embed_text' + +// Ollama configuration +export const OLLAMA_BASE_URL = + process.env.OLLAMA_BASE_URL || 'https://ollama.hanu-nus.com' + +export const OLLAMA_BASE_EMBEDDING_DOCS_URL = + process.env.OLLAMA_BASE_EMBEDDING_DOCS_URL || 'http://localhost:11434' diff --git a/backend-js/src/embeddings.ts b/backend-js/src/embeddings.ts new file mode 100644 index 000000000..83cec8720 --- /dev/null +++ b/backend-js/src/embeddings.ts @@ -0,0 +1,71 @@ +/** + * Embeddings module for managing different embedding providers. + * + * Supports: + * - ollama/nomic-embed-text: Local Ollama embeddings with 2K context window (default) + * - openai/*: OpenAI embeddings + * - weaviate/*: Legacy Weaviate built-in vectorizer (deprecated) + */ + +import { Embeddings } from '@langchain/core/embeddings' +import { OpenAIEmbeddings } from '@langchain/openai' +import { OllamaEmbeddings } from '@langchain/ollama' +import { OLLAMA_BASE_URL } from './constants.js' + +/** + * Get embeddings model based on provider and model name. 
+ * + * @param model - Model specification in format "provider/model-name" + * @param baseUrl - Base URL for Ollama (optional, defaults to OLLAMA_BASE_URL) + * @returns Embeddings instance or null for Weaviate built-in vectorizer + * + * @example + * ```typescript + * // Ollama embeddings + * const embeddings = getEmbeddingsModel("ollama/nomic-embed-text"); + * + * // OpenAI embeddings + * const embeddings = getEmbeddingsModel("openai/text-embedding-3-small"); + * + * // Weaviate built-in (returns null) + * const embeddings = getEmbeddingsModel("weaviate/vectorizer"); + * ``` + */ +export function getEmbeddingsModel( + model?: string, + baseUrl: string = OLLAMA_BASE_URL, +): Embeddings | null { + const ollamaApiKey = process.env.OLLAMA_API_KEY || '' + + const modelSpec = + model || process.env.EMBEDDING_MODEL || 'ollama/nomic-embed-text' + + const [provider, modelName] = modelSpec.split('/', 2) + + switch (provider.toLowerCase()) { + case 'ollama': + // Ollama embeddings with nomic-embed-text (2K context, 768 dimensions) + return new OllamaEmbeddings({ + model: modelName, + baseUrl, + headers: { + 'X-API-Key': ollamaApiKey, + }, + }) + + case 'openai': + return new OpenAIEmbeddings({ + model: modelName, + // Chunk size for batching + batchSize: 200, + }) + + case 'weaviate': + // Weaviate's built-in vectorizer handles embeddings internally + // Return null to signal that no external embedding is needed + return null + + default: + throw new Error(`Unsupported embedding provider: ${provider}`) + } +} diff --git a/backend-js/src/ingest/FixedSitemapLoader.ts b/backend-js/src/ingest/FixedSitemapLoader.ts new file mode 100644 index 000000000..33c8af18a --- /dev/null +++ b/backend-js/src/ingest/FixedSitemapLoader.ts @@ -0,0 +1,115 @@ +/** + * Fixed version of SitemapLoader that correctly implements filterUrls + * and provides better content extraction and metadata. + * + * This fixes two bugs in @langchain/community's SitemapLoader: + * 1. 
The _checkUrlPatterns method has inverted logic + * 2. The _loadSitemapUrls method extracts too much content and wrong metadata + */ + +import { SitemapLoader } from '@langchain/community/document_loaders/web/sitemap' +import { CheerioWebBaseLoader } from '@langchain/community/document_loaders/web/cheerio' +import { Document } from '@langchain/core/documents' +import { simpleExtractor, extractMetadata } from './parser.js' +import type { CheerioAPI } from 'cheerio' + +interface SiteMapElement { + loc: string + changefreq?: string + lastmod?: string + priority?: string +} + +interface FixedSitemapLoaderOptions { + filterUrls?: (string | RegExp)[] + chunkSize?: number + extractor?: (html: string | CheerioAPI) => string +} + +export class FixedSitemapLoader extends SitemapLoader { + private customExtractor?: (html: string | CheerioAPI) => string + + constructor(webPath: string, options?: FixedSitemapLoaderOptions) { + const { extractor, ...loaderOptions } = options || {} + super(webPath, loaderOptions) + this.customExtractor = extractor + } + /** + * Check if a URL should be skipped based on allowUrlPatterns. + * + * Original buggy logic: + * - Returns true when URL matches patterns (causing it to be skipped) + * - Returns false when URL doesn't match (causing it to be included) + * + * Fixed logic: + * - Returns true when URL doesn't match any pattern (should be skipped) + * - Returns false when URL matches at least one pattern (should be included) + * + * @param url - The URL to check + * @returns true if URL should be skipped, false if it should be included + */ + _checkUrlPatterns(url: string): boolean { + // If no filter patterns are set, don't skip any URLs + if (!this.allowUrlPatterns) return false + + // Skip URL if it doesn't match ANY of the patterns + // (i.e., return true only if NONE of the patterns match) + return this.allowUrlPatterns.every( + (pattern) => !new RegExp(pattern).test(url), + ) + } + + /** + * Load and extract content from sitemap URLs. 
+ * + * This override fixes the original implementation which: + * - Uses default selector that captures too much content + * - Extracts metadata from wrong meta tags (og:title instead of title) + * + * New implementation: + * - Uses custom extractor (if provided) or simpleExtractor for clean content extraction + * - Extracts metadata properly (title from tag, description, language) + * + * @param elements - Array of sitemap elements to load + * @returns Array of Document objects with properly extracted content and metadata + */ + async _loadSitemapUrls(elements: SiteMapElement[]): Promise<Document[]> { + // Scrape all URLs from the sitemap + const all = await CheerioWebBaseLoader.scrapeAll( + elements.map((ele) => ele.loc), + this.caller, + this.timeout, + this.textDecoder, + ) + + // Use custom extractor if provided, otherwise use simpleExtractor + const extractor = this.customExtractor || simpleExtractor + + // Process each scraped page + const documents = all.map(($, i) => { + if (!elements[i]) { + throw new Error('Scraped docs and elements not in sync') + } + + // Use extractor to get clean text content (matches Python behavior) + const text = extractor($) + + // Extract metadata from the HTML (matches Python metadata_extractor) + const customMetadata = extractMetadata($) + + // Get sitemap-specific metadata + const { loc: source, ...sitemapMetadata } = elements[i] + + return new Document({ + pageContent: text, + metadata: { + ...sitemapMetadata, // changefreq, lastmod, priority from sitemap + source: source.trim(), + ...customMetadata, // title, description, language from HTML + }, + }) + }) + + return documents + } +} diff --git a/backend-js/src/ingest/index.ts b/backend-js/src/ingest/index.ts new file mode 100644 index 000000000..9ec69d0f0 --- /dev/null +++ b/backend-js/src/ingest/index.ts @@ -0,0 +1,495 @@ +/** + * Load HTML from files, clean up, split, ingest into Weaviate. 
+ * + * This module provides functions to load documents from sitemaps, + * process them, and index them in Weaviate for retrieval. + */ + +// Load environment variables from .env file +import 'dotenv/config' + +import { promises as fs } from 'fs' +import * as path from 'path' +import { RecursiveUrlLoader } from '@langchain/community/document_loaders/web/recursive_url' +import { RecursiveCharacterTextSplitter } from '@langchain/textsplitters' +import { Document } from '@langchain/core/documents' +import type { CheerioAPI } from 'cheerio' +import { WeaviateStore } from '@langchain/weaviate' +import { PostgresRecordManager } from '@langchain/community/indexes/postgres' +import { index } from '@langchain/core/indexing' +import { getWeaviateClient } from '../utils.js' +import { getEmbeddingsModel } from '../embeddings.js' +import { FixedSitemapLoader } from './FixedSitemapLoader.js' +import { + OLLAMA_BASE_EMBEDDING_DOCS_URL, + WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, +} from '../constants.js' +import { + langchainDocsExtractor, + simpleExtractor, + extractMetadata, +} from './parser.js' + +const WEAVIATE_URL = process.env.WEAVIATE_URL +const WEAVIATE_GRPC_URL = process.env.WEAVIATE_GRPC_URL +const WEAVIATE_API_KEY = process.env.WEAVIATE_API_KEY +const RECORD_MANAGER_DB_URL = process.env.RECORD_MANAGER_DB_URL + +/** + * Load documents from a sitemap and extract content. 
+ * + * @param sitemapUrl - URL of the sitemap to load + * @param filterUrls - Array of URL patterns to filter (optional) + * @param extractor - Function to extract content from HTML + * @returns Array of loaded documents + */ +async function loadFromSitemap( + sitemapUrl: string, + filterUrls?: string[], + extractor: (html: string | CheerioAPI) => string = simpleExtractor, +): Promise<Document[]> { + console.log(`Loading documents from sitemap: ${sitemapUrl}`) + + // Use FixedSitemapLoader to load documents from sitemap + // This fixes a bug in the original SitemapLoader where filterUrls has inverted logic + // Convert filter URLs to regex patterns that match the URL + // Escape special regex characters in URLs for literal matching + const filterRegexes = filterUrls?.map((url) => { + const escapedUrl = url.replace(/[.*+?^${}()|[\]\\]/g, '\\$&') + return new RegExp(escapedUrl) + }) + + const loader = new FixedSitemapLoader(sitemapUrl, { + filterUrls: filterRegexes, + chunkSize: 50, // Load in smaller chunks to avoid timeout + extractor, // Pass the custom extractor + }) + + console.log('Loading documents from sitemap...') + console.log( + `Filter patterns: ${filterUrls?.join(', ') || 'none (loading all)'}`, + ) + + try { + // FixedSitemapLoader now handles content extraction and metadata during load + // It uses the provided extractor (or simpleExtractor by default) and extractMetadata internally + const docs = await loader.load() + console.log(`Successfully loaded and processed ${docs.length} documents`) + return docs + } catch (error) { + console.error('Error loading from sitemap:', error) + throw error + } +} + +/** + * Load documents recursively from a base URL. 
+ * + * @param baseUrl - Starting URL for recursive crawling + * @param maxDepth - Maximum depth to crawl + * @param extractor - Function to extract content from HTML + * @param excludeDirs - Directories to exclude from crawling + * @returns Array of loaded documents + */ +async function loadFromRecursiveUrl( + baseUrl: string, + maxDepth: number = 2, + extractor: (html: string | CheerioAPI) => string = simpleExtractor, + excludeDirs?: string[], +): Promise<Document[]> { + console.log(`Loading documents recursively from: ${baseUrl}`) + + const loader = new RecursiveUrlLoader(baseUrl, { + maxDepth, + extractor, + excludeDirs, + preventOutside: true, + timeout: 10000, + }) + + console.log('Loading documents recursively...') + const docs = await loader.load() + console.log(`Loaded ${docs.length} documents recursively`) + + // Process documents to extract metadata + const documents: Document[] = [] + + for (const doc of docs) { + try { + // Extract additional metadata from the original HTML if needed + const metadata = extractMetadata(doc.pageContent) + + documents.push( + new Document({ + pageContent: doc.pageContent, + metadata: { + ...doc.metadata, + ...metadata, + }, + }), + ) + + console.log(`Processed: ${doc.metadata.source}`) + } catch (error) { + console.error(`Failed to process document:`, error) + } + } + + return documents +} + +/** + * Load LangChain Python docs (to be deprecated once docs are migrated). + */ +async function loadLangchainPythonDocs(): Promise<Document[]> { + return loadFromSitemap( + 'https://python.langchain.com/sitemap.xml', + ['https://python.langchain.com/'], + langchainDocsExtractor, + ) +} + +/** + * Load LangChain JS docs (to be deprecated once docs are migrated). + */ +async function loadLangchainJsDocs(): Promise<Document[]> { + return loadFromSitemap( + 'https://js.langchain.com/sitemap.xml', + ['https://js.langchain.com/docs/concepts'], + simpleExtractor, + ) +} + +/** + * Load from aggregated docs site. 
+ */ +async function loadAggregatedDocsSite(): Promise<Document[]> { + console.log('Loading from aggregated docs site...') + const docs = await loadFromSitemap( + 'https://docs.langchain.com/sitemap.xml', + [ + // 'https://docs.langchain.com/oss/javascript', + 'https://docs.langchain.com/oss/javascript/langchain/mcp', + 'https://docs.langchain.com/oss/javascript/langchain/agents', + 'https://docs.langchain.com/oss/javascript/langchain/context-engineering', + 'https://docs.langchain.com/oss/javascript/concepts/context', + ], + simpleExtractor, + ) + + if (docs.length === 0) { + console.warn( + 'WARNING: No documents matched the filter criteria! Check your filter URLs.', + ) + } + + return docs +} + +/** + * Ingest general guides and tutorials. + */ +async function ingestGeneralGuidesAndTutorials(): Promise<Document[]> { + const aggregatedSiteDocs = await loadAggregatedDocsSite() + + if (aggregatedSiteDocs.length === 0) { + throw new Error( + 'No documents were loaded! Check your sitemap URL and filter patterns.', + ) + } + + return aggregatedSiteDocs +} + +/** + * Serialize a document to JSON format for file output. 
+ * @param doc - Document to serialize + * @returns Serialized document data + */ +function serializeDocumentForJson(doc: Document): Record<string, any> { + // Start with basic fields (matching Python format) + const docData: Record<string, any> = { + page_content: doc.pageContent, + metadata: doc.metadata, + type: 'Document', + } + + // Add any additional fields from the document object + // This matches the Python version which iterates through all attributes + for (const key of Object.keys(doc)) { + if (!['pageContent', 'metadata'].includes(key) && !key.startsWith('_')) { + const value = (doc as any)[key] + // Only include serializable values + if ( + value !== undefined && + typeof value !== 'function' && + typeof value !== 'symbol' + ) { + try { + JSON.stringify(value) + docData[key] = value + } catch { + // Skip non-serializable values + } + } + } + } + + return docData +} + +/** + * Write documents to a JSON file for inspection. + * + * @param documents - Documents to write + * @param filename - Name of the output file + * @param description - Description for logging + */ +async function writeDocumentsToJsonFile( + documents: Document[], + filename: string, + description: string, +): Promise<void> { + const serializedData = documents.map(serializeDocumentForJson) + const filePath = path.join(process.cwd(), '..', filename) + + console.log(`Writing to: ${filePath}`) + await fs.writeFile(filePath, JSON.stringify(serializedData, null, 2), 'utf-8') + console.log(`✓ Wrote ${serializedData.length} ${description} to ${filePath}`) +} + +/** + * Split documents into chunks and filter out short ones. 
+ * + * @param documents - Documents to split + * @param textSplitter - Text splitter instance + * @param minLength - Minimum content length to keep + * @returns Filtered chunks + */ +async function splitAndFilterDocuments( + documents: Document[], + textSplitter: RecursiveCharacterTextSplitter, + minLength: number = 10, +): Promise<Document[]> { + console.log('Step 3/5: Splitting documents into chunks...') + let chunks = await textSplitter.splitDocuments(documents) + console.log(`Created ${chunks.length} chunks (before filtering)`) + + // Filter out very short documents + const beforeFilter = chunks.length + chunks = chunks.filter((doc) => doc.pageContent.length > minLength) + console.log( + `✓ Filtered to ${chunks.length} chunks (removed ${ + beforeFilter - chunks.length + } short chunks)`, + ) + + return chunks +} + +/** + * Ensure required metadata fields exist in all documents. + * Weaviate will error at query time if required attributes are missing. + * + * @param documents - Documents to validate + */ +function ensureRequiredMetadata(documents: Document[]): void { + console.log('Step 5/5: Ensuring metadata fields...') + + for (const doc of documents) { + if (!doc.metadata.source) { + doc.metadata.source = '' + } + if (!doc.metadata.title) { + doc.metadata.title = '' + } + } + + console.log('✓ Metadata fields validated') +} + +/** + * Create a Weaviate vector store instance. + * + * @param weaviateClient - Weaviate client + * @param embedding - Embeddings model + * @returns Weaviate store instance + */ +function createWeaviateVectorStore( + weaviateClient: any, + embedding: any, +): WeaviateStore { + console.log('Indexing documents in Weaviate...') + + return new WeaviateStore(embedding, { + client: weaviateClient, + indexName: WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, + textKey: 'text', + metadataKeys: ['source', 'title'], + }) +} + +/** + * Create and initialize PostgreSQL record manager. 
+ * + * @returns Initialized record manager + */ +async function createRecordManager(): Promise<PostgresRecordManager> { + // Remove sslmode from connection string as pg client doesn't parse it properly + // and explicitly set ssl to false since the server doesn't support SSL + const dbUrl = RECORD_MANAGER_DB_URL?.split('?')[0] || RECORD_MANAGER_DB_URL + + const recordManager = new PostgresRecordManager( + `weaviate/${WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME}`, + { + postgresConnectionOptions: { + connectionString: dbUrl, + ssl: false, + }, + }, + ) + + await recordManager.createSchema() + console.log('Record manager schema created') + + return recordManager +} + +/** + * Index documents in the vector store with record manager tracking. + * + * @param documents - Documents to index + * @param vectorStore - Vector store instance + * @param recordManager - Record manager instance + * @returns Indexing statistics + */ +async function indexDocumentsInVectorStore( + documents: Document[], + vectorStore: WeaviateStore, + recordManager: PostgresRecordManager, +): Promise<any> { + const indexingStats = await index({ + docsSource: documents, + recordManager, + vectorStore, + options: { + cleanup: 'full', + sourceIdKey: 'source', + forceUpdate: + (process.env.FORCE_UPDATE || 'false').toLowerCase() === 'true', + }, + }) + + console.log(`Indexing stats:`, indexingStats) + return indexingStats +} + +/** + * Get and log total vector count from Weaviate collection. + * + * @param weaviateClient - Weaviate client + */ +async function logTotalVectorCount(weaviateClient: any): Promise<void> { + const collection = await weaviateClient.collections.get( + WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, + ) + const totalCount = await collection.aggregate.overAll() + console.log(`Total vectors in collection: ${totalCount.totalCount}`) +} + +/** + * Main ingestion function. + * Orchestrates the document ingestion pipeline: load, split, and index documents. 
+ */ +export async function ingestDocs(): Promise<void> { + console.log('Starting document ingestion...') + + // Initialize text splitter + // Chunks for nomic-embed-text (2K token context window) + // Reduce to 2000 chars ≈ 500-650 tokens to avoid context overflow + // TypeScript Ollama client seems more strict than Python version + const textSplitter = new RecursiveCharacterTextSplitter({ + chunkSize: 4000, + chunkOverlap: 200, + }) + + // Initialize embeddings model + const embedding = getEmbeddingsModel( + undefined, + OLLAMA_BASE_EMBEDDING_DOCS_URL, + ) + if (!embedding) { + throw new Error('Embeddings model is required for ingestion') + } + + // Initialize Weaviate client + const weaviateClient = await getWeaviateClient( + WEAVIATE_URL, + WEAVIATE_GRPC_URL, + WEAVIATE_API_KEY, + ) + + let recordManager: PostgresRecordManager | undefined + + try { + // Step 1: Load documents + console.log('Loading documents...') + console.log('Step 1/5: Fetching documents from sitemap...') + const rawDocuments = await ingestGeneralGuidesAndTutorials() + console.log(`✓ Loaded ${rawDocuments.length} documents successfully`) + + // Step 2: Write raw documents to file + console.log('Step 2/5: Writing raw documents to file...') + await writeDocumentsToJsonFile( + rawDocuments, + 'raw_docs_js.json', + 'raw documents', + ) + + // Step 3: Split and filter documents + const chunks = await splitAndFilterDocuments(rawDocuments, textSplitter) + + // Step 4: Write chunks to file + console.log('Step 4/5: Writing chunks to file...') + await writeDocumentsToJsonFile(chunks, 'chunks_js.json', 'chunks') + + // Step 5: Ensure required metadata + ensureRequiredMetadata(chunks) + + // Create vector store and record manager + const vectorStore = createWeaviateVectorStore(weaviateClient, embedding) + recordManager = await createRecordManager() + + // Index documents + await indexDocumentsInVectorStore(chunks, vectorStore, recordManager) + + // Log final count + await 
logTotalVectorCount(weaviateClient) + + console.log('Document ingestion completed successfully!') + } finally { + // Cleanup connections + if (recordManager) { + await recordManager.end() + console.log('Record manager connection closed') + } + await weaviateClient.close() + console.log('Weaviate client closed') + } +} + +/** + * Run ingestion if this file is executed directly + */ +if (import.meta.url === `file://${process.argv[1]}`) { + ingestDocs() + .then(() => { + console.log('Done!') + process.exit(0) + }) + .catch((error) => { + console.error('Ingestion failed:', error) + process.exit(1) + }) +} diff --git a/backend-js/src/ingest/parser.ts b/backend-js/src/ingest/parser.ts new file mode 100644 index 000000000..acca3478d --- /dev/null +++ b/backend-js/src/ingest/parser.ts @@ -0,0 +1,150 @@ +/** + * HTML parsing utilities for document ingestion. + * + * This module provides functions to extract text content from HTML documents, + * with specific handling for LangChain documentation structure. + */ + +import * as cheerio from 'cheerio' + +/** + * Extract text content from LangChain documentation HTML. + * + * This function extracts the main content from LangChain documentation pages, + * focusing on the article content and removing unnecessary elements. + * + * @param html - HTML string or cheerio instance + * @returns Extracted text content + */ +export function langchainDocsExtractor( + html: string | cheerio.CheerioAPI, +): string { + const $ = typeof html === 'string' ? 
cheerio.load(html) : html + + // Remove unwanted elements before extraction + // This matches Python's behavior which uses SoupStrainer to filter during parsing + // Remove navigation, menus, sidebars, and other non-content elements + $( + 'script, style, nav, header, footer, iframe, noscript, form, button, ' + + 'aside, [role="navigation"], [role="banner"], [role="complementary"], ' + + '[class*="sidebar"], [class*="menu"], [class*="nav"], ' + + '[class*="toc"], [class*="breadcrumb"]', + ).remove() + + // Try to find the main article content + let content = '' + + // Look for common content selectors + const articleSelector = $('article') + if (articleSelector.length > 0) { + // Remove any remaining navigation elements inside article + articleSelector + .find('nav, [role="navigation"], [class*="sidebar"], [class*="menu"]') + .remove() + content = articleSelector.text() + } else { + // Fallback to body if no article found + content = $('body').text() + } + + // Clean up the text + return cleanText(content) +} + +/** + * Simple HTML text extractor. + * + * This function extracts all text content from HTML, removing all tags + * and cleaning up whitespace. + * + * @param html - HTML string or cheerio instance + * @returns Extracted text content + */ +export function simpleExtractor(html: string | cheerio.CheerioAPI): string { + const $ = typeof html === 'string' ? 
cheerio.load(html) : html + + // Remove unwanted elements before extraction + // This matches Python's behavior which uses SoupStrainer to filter during parsing + // Remove navigation, menus, sidebars, and other non-content elements + $( + 'script, style, nav, header, footer, iframe, noscript, form, button, ' + + 'aside, [role="navigation"], [role="banner"], [role="complementary"], ' + + '[class*="sidebar"], [class*="menu"], [class*="nav"], ' + + '[class*="toc"], [class*="breadcrumb"]', + ).remove() + + // Extract only from article tag or main content area + // This matches Python's: SoupStrainer(name=("article", "title", "html", "lang", "content")) + let content = '' + const article = $('article') + if (article.length > 0) { + // Remove any remaining navigation elements inside article + article + .find('nav, [role="navigation"], [class*="sidebar"], [class*="menu"]') + .remove() + content = article.text() + } else { + // Fallback: try main content area + const main = $('main') + if (main.length > 0) { + main + .find('nav, [role="navigation"], [class*="sidebar"], [class*="menu"]') + .remove() + content = main.text() + } else { + content = $('body').text() + } + } + + return cleanText(content) +} + +/** + * Clean extracted text by normalizing whitespace. + * + * @param text - Raw text to clean + * @returns Cleaned text + */ +function cleanText(text: string): string { + return ( + text + // Replace multiple newlines with double newline + .replace(/\n\n+/g, '\n\n') + // Replace multiple spaces with single space + .replace(/[ \t]+/g, ' ') + // Trim whitespace from each line + .split('\n') + .map((line) => line.trim()) + .join('\n') + // Remove empty lines at start and end + .trim() + ) +} + +/** + * Extract metadata from HTML page. 
+ * + * @param html - HTML string or cheerio instance + * @param titleSuffix - Optional suffix to append to title + * @returns Metadata object with title, description, and language + */ +export function extractMetadata( + html: string | cheerio.CheerioAPI, + titleSuffix?: string, +): Record<string, string> { + const $ = typeof html === 'string' ? cheerio.load(html) : html + + let title = $('title').text() || '' + if (titleSuffix) { + title += titleSuffix + } + + const description = $('meta[name="description"]').attr('content') || '' + const language = $('html').attr('lang') || '' + + return { + title, + description, + language, + } +} diff --git a/backend-js/src/main.ts b/backend-js/src/main.ts new file mode 100644 index 000000000..0a0e4dd77 --- /dev/null +++ b/backend-js/src/main.ts @@ -0,0 +1,57 @@ +/** + * Main entrypoint for testing the retrieval graph. + * + * This script provides a simple way to test the retrieval graph locally, + * similar to the Python main.py file. + */ + +// Load environment variables from .env file +import 'dotenv/config' + +import { HumanMessage } from '@langchain/core/messages' +import { graph } from './retrieval_graph/graph.js' + +/** + * Test the graph with a sample query + */ +async function testGraph(): Promise<void> { + try { + console.log('🚀 Testing Retrieval Graph...\n') + + const result = await graph.invoke({ + messages: [new HumanMessage('How to connect LangChain to MCP?')], + }) + + console.log('✅ Graph execution completed!\n') + console.log(`📝 Answer: ${result.answer || 'N/A'}\n`) + console.log(`📚 Documents retrieved: ${result.documents?.length || 0}\n`) + + // Print a sample of documents if available + if (result.documents && result.documents.length > 0) { + console.log('📄 Sample documents:') + result.documents.slice(0, 3).forEach((doc, idx) => { + console.log(`\n Document ${idx + 1}:`) + console.log(` Source: ${doc.metadata?.source || 'N/A'}`) + console.log(` Preview: ${doc.pageContent.substring(0, 100)}...`) + }) + } + } 
catch (error) { + console.error('❌ Error executing graph:', error) + if (error instanceof Error) { + console.error('Error message:', error.message) + console.error('Error stack:', error.stack) + } + process.exit(1) + } +} + +// Run the test +testGraph() + .then(() => { + console.log('\n✨ Test completed successfully!') + process.exit(0) + }) + .catch((error) => { + console.error('\n💥 Test failed:', error) + process.exit(1) + }) diff --git a/backend-js/src/retrieval.ts b/backend-js/src/retrieval.ts new file mode 100644 index 000000000..466a14979 --- /dev/null +++ b/backend-js/src/retrieval.ts @@ -0,0 +1,117 @@ +/** + * Retrieval module for creating and managing retrievers. + * + * This module provides factory functions for creating retrievers + * based on the current configuration. + */ + +import { VectorStoreRetriever } from '@langchain/core/vectorstores' +import { RunnableConfig } from '@langchain/core/runnables' +import { WeaviateStore } from '@langchain/weaviate' +import { getWeaviateClient } from './utils.js' +import { getEmbeddingsModel } from './embeddings.js' +import { getBaseConfiguration } from './configuration.js' +import { + OLLAMA_BASE_URL, + WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, +} from './constants.js' + +const WEAVIATE_URL = process.env.WEAVIATE_URL +const WEAVIATE_GRPC_URL = process.env.WEAVIATE_GRPC_URL +const WEAVIATE_API_KEY = process.env.WEAVIATE_API_KEY + +/** + * Create a Weaviate retriever with the specified configuration. 
+ * + * @param embeddingModel - The embedding model specification + * @param searchKwargs - Additional search parameters + * @param baseUrl - Base URL for Ollama embeddings + * @returns A VectorStoreRetriever configured for Weaviate + */ +export async function makeWeaviateRetriever( + embeddingModel: string, + searchKwargs: Record<string, any> = {}, + baseUrl: string = OLLAMA_BASE_URL, +): Promise<VectorStoreRetriever> { + const client = await getWeaviateClient( + WEAVIATE_URL, + WEAVIATE_GRPC_URL, + WEAVIATE_API_KEY, + ) + + const embeddings = getEmbeddingsModel(embeddingModel, baseUrl) + + if (!embeddings) { + throw new Error( + 'Weaviate built-in vectorizer not yet supported in TypeScript version', + ) + } + + const store = new WeaviateStore(embeddings, { + client, + indexName: WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, + textKey: 'text', + metadataKeys: ['source', 'title'], + }) + + // Merge default search kwargs with provided ones + const finalSearchKwargs = { + k: 6, + ...searchKwargs, + } + + return store.asRetriever(finalSearchKwargs) +} + +/** + * Create a retriever based on the provided configuration. + * + * This is the main factory function that routes to the appropriate + * retriever implementation based on the configuration. 
+ * + * @param config - RunnableConfig containing retriever configuration + * @param baseUrl - Base URL for Ollama (optional) + * @returns A configured retriever + * + * @example + * ```typescript + * const retriever = await makeRetriever(config); + * const docs = await retriever.invoke("What is LangChain?"); + * ``` + */ +export async function makeRetriever( + config?: RunnableConfig, + baseUrl: string = OLLAMA_BASE_URL, +): Promise<VectorStoreRetriever> { + const configuration = getBaseConfiguration(config) + + switch (configuration.retrieverProvider) { + case 'weaviate': + return makeWeaviateRetriever( + configuration.embeddingModel, + configuration.searchKwargs, + baseUrl, + ) + + default: + throw new Error( + `Unrecognized retriever_provider in configuration. ` + + `Expected: weaviate, Got: ${configuration.retrieverProvider}`, + ) + } +} + +/** + * Helper function to clean up Weaviate client connection. + * Call this when done with retrieval operations. + */ +export async function closeWeaviateClient( + retriever: VectorStoreRetriever, +): Promise<void> { + // The retriever's vectorStore should have access to the client + const store = retriever.vectorStore as WeaviateStore + // Access client via type assertion since it's private but we need to close it + if (store && (store as any).client) { + await (store as any).client.close() + } +} diff --git a/backend-js/src/retrieval_graph/configuration.ts b/backend-js/src/retrieval_graph/configuration.ts new file mode 100644 index 000000000..bff1fc753 --- /dev/null +++ b/backend-js/src/retrieval_graph/configuration.ts @@ -0,0 +1,128 @@ +/** + * Agent configuration for the retrieval graph. + * + * This module defines the configurable parameters specific to the agent, + * including model selections and prompt templates. 
+ */ + +import { RunnableConfig } from '@langchain/core/runnables' +import { z } from 'zod' +import { BaseConfigurationSchema } from '../configuration.js' + +/** + * Note: Prompts will be loaded from LangSmith at runtime. + * These are placeholder defaults. + */ +const DEFAULT_PROMPTS = { + ROUTER_SYSTEM_PROMPT: + 'You are a helpful assistant that routes user questions.', + MORE_INFO_SYSTEM_PROMPT: + 'You are a helpful assistant that asks for clarification.', + GENERAL_SYSTEM_PROMPT: 'You are a helpful assistant.', + RESEARCH_PLAN_SYSTEM_PROMPT: + "You are a research planner. Create a step-by-step research plan to answer the user's question about LangChain.", + GENERATE_QUERIES_SYSTEM_PROMPT: + 'Generate diverse search queries to help answer the research question.', + RESPONSE_SYSTEM_PROMPT: + "You are an expert on LangChain. Answer the user's question based on the provided context.\n\nContext:\n{context}", +} + +/** + * AgentConfiguration extends BaseConfiguration with agent-specific settings + */ +export const AgentConfigurationSchema = BaseConfigurationSchema.extend({ + /** + * The language model used for processing and refining queries. + * Should be in the form: provider/model-name. + * @default "groq/openai/gpt-oss-20b" + */ + queryModel: z + .string() + .default('groq/openai/gpt-oss-20b') + .describe('The language model used for query processing'), + + /** + * The language model used for generating responses. + * Should be in the form: provider/model-name. 
+ * @default "groq/openai/gpt-oss-20b" + */ + responseModel: z + .string() + .default('groq/openai/gpt-oss-20b') + .describe('The language model used for generating responses'), + + /** + * System prompts for different stages of the agent + */ + routerSystemPrompt: z + .string() + .default(DEFAULT_PROMPTS.ROUTER_SYSTEM_PROMPT) + .describe('System prompt for routing user questions'), + + moreInfoSystemPrompt: z + .string() + .default(DEFAULT_PROMPTS.MORE_INFO_SYSTEM_PROMPT) + .describe('System prompt for asking for more information'), + + generalSystemPrompt: z + .string() + .default(DEFAULT_PROMPTS.GENERAL_SYSTEM_PROMPT) + .describe('System prompt for general questions'), + + researchPlanSystemPrompt: z + .string() + .default(DEFAULT_PROMPTS.RESEARCH_PLAN_SYSTEM_PROMPT) + .describe('System prompt for generating research plans'), + + generateQueriesSystemPrompt: z + .string() + .default(DEFAULT_PROMPTS.GENERATE_QUERIES_SYSTEM_PROMPT) + .describe('System prompt for generating search queries'), + + responseSystemPrompt: z + .string() + .default(DEFAULT_PROMPTS.RESPONSE_SYSTEM_PROMPT) + .describe('System prompt for generating final responses'), +}) + +export type AgentConfiguration = z.infer<typeof AgentConfigurationSchema> + +/** + * Extract agent configuration from RunnableConfig + * + * Reads from config.configurable to match Python's behavior + * This is the standard approach used by LangSmith + */ +export function getAgentConfiguration( + config?: RunnableConfig, +): AgentConfiguration { + // Use configurable (matches Python's from_runnable_config) + const configurable = config?.configurable || {} + + // Convert snake_case to camelCase for all fields + const camelCased: Record<string, any> = {} + + for (const [key, value] of Object.entries(configurable)) { + // Convert snake_case to camelCase + const camelKey = key.replace(/_([a-z])/g, (_, letter) => + letter.toUpperCase(), + ) + camelCased[camelKey] = value + } + + // Parse and validate with defaults + return 
AgentConfigurationSchema.parse(camelCased) +} + +/** + * Load prompts from LangSmith (to be implemented with actual LangSmith client) + * For now, returns empty object to use schema defaults + */ +export async function loadPromptsFromLangSmith(): Promise< + Partial<AgentConfiguration> +> { + // TODO: Implement actual LangSmith prompt loading + // This would use langsmith SDK to fetch prompts + // For now, return empty object and let schema defaults apply + return {} +} diff --git a/backend-js/src/retrieval_graph/graph.ts b/backend-js/src/retrieval_graph/graph.ts new file mode 100644 index 000000000..d5ce79312 --- /dev/null +++ b/backend-js/src/retrieval_graph/graph.ts @@ -0,0 +1,182 @@ +/** + * Main entrypoint for the conversational retrieval graph. + * + * This module defines the core structure and functionality of the conversational + * retrieval graph. It includes the main graph definition, state management, + * and key functions for processing & routing user queries, generating research plans to answer user questions, + * conducting research, and formulating responses. + */ + +import { RunnableConfig } from '@langchain/core/runnables' +import { StateGraph, START, END } from '@langchain/langgraph' +import { z } from 'zod' +import { AgentStateAnnotation, InputStateAnnotation } from './state.js' +import { + AgentConfigurationSchema, + getAgentConfiguration, +} from './configuration.js' +import { + getResearchPlanSystemPrompt, + getResponseSystemPrompt, +} from './prompts.js' +import { loadChatModel, formatDocs } from '../utils.js' +import { graph as researcherGraph } from './researcher_graph/graph.js' + +/** + * Schema for research plan + */ +const ResearchPlanSchema = z.object({ + steps: z.array(z.string()).describe('List of research steps to complete'), +}) + +/** + * Create a step-by-step research plan for answering a LangChain-related query. 
+ *
+ * @param state - The current state of the agent, including conversation history
+ * @param config - Configuration with the model used to generate the plan
+ * @returns Updated state with research steps
+ */
+async function createResearchPlan(
+  state: typeof AgentStateAnnotation.State,
+  config?: RunnableConfig,
+): Promise<
+  | Partial<typeof AgentStateAnnotation.State>
+  | { steps: string[]; documents: 'delete'; query: string }
+> {
+  const configuration = getAgentConfiguration(config)
+  const systemPrompt = await getResearchPlanSystemPrompt()
+
+  // Determine if we should use function calling method
+  const useFunctionCalling = configuration.queryModel.includes('openai')
+
+  const model = loadChatModel(configuration.queryModel)
+  const structuredModel = model.withStructuredOutput(ResearchPlanSchema, {
+    method: useFunctionCalling ? 'functionCalling' : 'jsonSchema',
+  })
+
+  const messages = [
+    { role: 'system' as const, content: systemPrompt },
+    ...state.messages,
+  ]
+
+  const response = await structuredModel.invoke(messages, {
+    ...config,
+    tags: ['langsmith:nostream'],
+  })
+
+  // Get the query from the last message
+  const lastMessage = state.messages[state.messages.length - 1]
+  const query = 'content' in lastMessage ? String(lastMessage.content) : ''
+
+  return {
+    steps: response.steps,
+    documents: 'delete' as 'delete',
+    query,
+  }
+}
+
+/**
+ * Execute the first step of the research plan.
+
+ * This function takes the first step from the research plan and uses it to conduct research.
+ * + * @param state - The current state of the agent, including the research plan steps + * @param config - Configuration for the research + * @returns Updated state with retrieved documents and remaining steps + */ +async function conductResearch( + state: typeof AgentStateAnnotation.State, + config?: RunnableConfig, +): Promise<Partial<typeof AgentStateAnnotation.State>> { + if (!state.steps || state.steps.length === 0) { + return { steps: [] } + } + + const result = await researcherGraph.invoke( + { question: state.steps[0] }, + config, + ) + + return { + documents: result.documents, + steps: state.steps.slice(1), + } +} + +/** + * Determine if the research process is complete or if more research is needed. + * + * This function checks if there are any remaining steps in the research plan. + * + * @param state - The current state of the agent, including the remaining research steps + * @returns The next step to take based on whether research is complete + */ +function checkFinished( + state: typeof AgentStateAnnotation.State, +): 'respond' | 'conduct_research' { + if (state.steps && state.steps.length > 0) { + return 'conduct_research' + } + return 'respond' +} + +/** + * Generate a final response to the user's query based on the conducted research. + * + * This function formulates a comprehensive answer using the conversation history and the documents retrieved by the researcher. 
+ * + * @param state - The current state of the agent, including retrieved documents and conversation history + * @param config - Configuration with the model used to respond + * @returns Updated state with the generated response + */ +async function respond( + state: typeof AgentStateAnnotation.State, + config?: RunnableConfig, +): Promise<Partial<typeof AgentStateAnnotation.State>> { + const configuration = getAgentConfiguration(config) + const model = loadChatModel(configuration.responseModel) + + // TODO: add a re-ranker here + const topK = 3 + const documents = state.documents || [] + const context = formatDocs(documents.slice(0, topK)) + + const systemPromptTemplate = await getResponseSystemPrompt() + const systemPrompt = systemPromptTemplate.replace('{context}', context) + + const messages = [ + { role: 'system' as const, content: systemPrompt }, + ...state.messages, + ] + + const response = await model.invoke(messages, config) + + // Extract answer content - match Python's response.content behavior + const answerContent = + typeof response.content === 'string' + ? 
response.content + : JSON.stringify(response.content) + + // Ensure answer is a non-empty string + const finalAnswer = answerContent || 'No answer generated' + + return { + messages: [response], + answer: finalAnswer, + } +} + +const builder = new StateGraph(AgentStateAnnotation, { + input: InputStateAnnotation, + context: AgentConfigurationSchema, +}) + .addNode('create_research_plan', createResearchPlan) + .addNode('conduct_research', conductResearch) + .addNode('respond', respond) + .addEdge(START, 'create_research_plan') + .addEdge('create_research_plan', 'conduct_research') + .addConditionalEdges('conduct_research', checkFinished) + .addEdge('respond', END) + +export const graph = builder.compile() +graph.name = 'RetrievalGraph' diff --git a/backend-js/src/retrieval_graph/prompts.ts b/backend-js/src/retrieval_graph/prompts.ts new file mode 100644 index 000000000..24943d775 --- /dev/null +++ b/backend-js/src/retrieval_graph/prompts.ts @@ -0,0 +1,171 @@ +/** + * Prompt templates for the retrieval graph. + * + * This module loads prompts from LangSmith for consistent prompt management. + * Falls back to default prompts if LangSmith is not available. + */ + +import { Client } from 'langsmith' + +// Initialize LangSmith client +const client = new Client({ + apiKey: process.env.LANGCHAIN_PROMPT_API_KEY || process.env.LANGCHAIN_API_KEY, + apiUrl: process.env.LANGCHAIN_PROMPT_API_URL, +}) + +/** + * Default prompts (fallback if LangSmith is unavailable) + */ +const DEFAULT_PROMPTS = { + ROUTER_SYSTEM_PROMPT: `You are an expert at routing user questions to the appropriate handler. +Given a user question, determine if it needs research or can be answered directly.`, + + MORE_INFO_SYSTEM_PROMPT: `You are a helpful assistant that asks for clarification when user questions are unclear or ambiguous. 
+Ask specific questions to better understand what the user needs.`, + + GENERAL_SYSTEM_PROMPT: `You are a helpful AI assistant with expertise in LangChain and related technologies. +Provide clear, accurate, and helpful responses to user questions.`, + + RESEARCH_PLAN_SYSTEM_PROMPT: `You are a research planner for LangChain-related queries. +Break down the user's question into a step-by-step research plan. +Each step should be a specific, focused research question that can be answered through document retrieval. +Generate 2-4 research steps.`, + + GENERATE_QUERIES_SYSTEM_PROMPT: `You are an expert at generating diverse search queries. +Given a research question about LangChain, generate 3-5 diverse search queries that would help find relevant information. +The queries should approach the topic from different angles to maximize coverage.`, + + RESPONSE_SYSTEM_PROMPT: `You are an expert on LangChain and related technologies. +Answer the user's question based on the provided context from the documentation. +If the context doesn't contain enough information, acknowledge this. +Be specific and cite the documentation when appropriate. 
+ +Context: +{context}`, +} + +/** + * Cached prompts to avoid repeated API calls + */ +let cachedPrompts: Record<string, string> | null = null + +/** + * Load a single prompt from LangSmith + */ +async function loadPromptFromLangSmith(promptName: string): Promise<string> { + try { + const prompt = await client._pullPrompt(promptName) + // Extract the template from the prompt + if (prompt && prompt.messages && prompt.messages.length > 0) { + const firstMessage = prompt.messages[0] + if (firstMessage && 'prompt' in firstMessage && firstMessage.prompt) { + return firstMessage.prompt.template || '' + } + } + return '' + } catch (error) { + console.warn(`Failed to load prompt ${promptName} from LangSmith:`, error) + return '' + } +} + +/** + * Load all prompts from LangSmith or use defaults + */ +async function loadAllPrompts(): Promise<Record<string, string>> { + if (cachedPrompts) { + return cachedPrompts + } + + const prompts: Record<string, string> = {} + + try { + // Try to load prompts from LangSmith + const [router, moreInfo, general, researchPlan, generateQueries, response] = + await Promise.allSettled([ + loadPromptFromLangSmith('langchain-ai/chat-langchain-router-prompt'), + loadPromptFromLangSmith('langchain-ai/chat-langchain-more-info-prompt'), + loadPromptFromLangSmith('langchain-ai/chat-langchain-general-prompt'), + loadPromptFromLangSmith( + 'langchain-ai/chat-langchain-research-plan-prompt', + ), + loadPromptFromLangSmith( + 'langchain-ai/chat-langchain-generate-queries-prompt', + ), + loadPromptFromLangSmith('langchain-ai/chat-langchain-response-prompt'), + ]) + + // Use LangSmith prompts if available, otherwise fall back to defaults + prompts.ROUTER_SYSTEM_PROMPT = + (router.status === 'fulfilled' && router.value) || + DEFAULT_PROMPTS.ROUTER_SYSTEM_PROMPT + prompts.MORE_INFO_SYSTEM_PROMPT = + (moreInfo.status === 'fulfilled' && moreInfo.value) || + DEFAULT_PROMPTS.MORE_INFO_SYSTEM_PROMPT + prompts.GENERAL_SYSTEM_PROMPT = + (general.status === 
'fulfilled' && general.value) || + DEFAULT_PROMPTS.GENERAL_SYSTEM_PROMPT + prompts.RESEARCH_PLAN_SYSTEM_PROMPT = + (researchPlan.status === 'fulfilled' && researchPlan.value) || + DEFAULT_PROMPTS.RESEARCH_PLAN_SYSTEM_PROMPT + prompts.GENERATE_QUERIES_SYSTEM_PROMPT = + (generateQueries.status === 'fulfilled' && generateQueries.value) || + DEFAULT_PROMPTS.GENERATE_QUERIES_SYSTEM_PROMPT + prompts.RESPONSE_SYSTEM_PROMPT = + (response.status === 'fulfilled' && response.value) || + DEFAULT_PROMPTS.RESPONSE_SYSTEM_PROMPT + } catch (error) { + console.warn( + 'Failed to load prompts from LangSmith, using defaults:', + error, + ) + Object.assign(prompts, DEFAULT_PROMPTS) + } + + cachedPrompts = prompts + return prompts +} + +// Load prompts on module initialization +let promptsPromise: Promise<Record<string, string>> | null = null + +/** + * Get all prompts (loads from LangSmith on first call, then caches) + */ +export async function getPrompts(): Promise<Record<string, string>> { + if (!promptsPromise) { + promptsPromise = loadAllPrompts() + } + return promptsPromise +} + +// Export individual prompt getters for convenience +export async function getRouterSystemPrompt(): Promise<string> { + const prompts = await getPrompts() + return prompts.ROUTER_SYSTEM_PROMPT +} + +export async function getMoreInfoSystemPrompt(): Promise<string> { + const prompts = await getPrompts() + return prompts.MORE_INFO_SYSTEM_PROMPT +} + +export async function getGeneralSystemPrompt(): Promise<string> { + const prompts = await getPrompts() + return prompts.GENERAL_SYSTEM_PROMPT +} + +export async function getResearchPlanSystemPrompt(): Promise<string> { + const prompts = await getPrompts() + return prompts.RESEARCH_PLAN_SYSTEM_PROMPT +} + +export async function getGenerateQueriesSystemPrompt(): Promise<string> { + const prompts = await getPrompts() + return prompts.GENERATE_QUERIES_SYSTEM_PROMPT +} + +export async function getResponseSystemPrompt(): Promise<string> { + const prompts = await 
getPrompts() + return prompts.RESPONSE_SYSTEM_PROMPT +} diff --git a/backend-js/src/retrieval_graph/researcher_graph/graph.ts b/backend-js/src/retrieval_graph/researcher_graph/graph.ts new file mode 100644 index 000000000..5c698affb --- /dev/null +++ b/backend-js/src/retrieval_graph/researcher_graph/graph.ts @@ -0,0 +1,123 @@ +/** + * Researcher graph used in the conversational retrieval system as a subgraph. + * + * This module defines the core structure and functionality of the researcher graph, + * which is responsible for generating search queries and retrieving relevant documents. + */ + +import { RunnableConfig } from '@langchain/core/runnables' +import { StateGraph, START, END, Send } from '@langchain/langgraph' +import { z } from 'zod' +import { + ResearcherStateAnnotation, + QueryStateAnnotation, + QueryState, +} from './state.js' +import { getAgentConfiguration } from '../configuration.js' +import { getGenerateQueriesSystemPrompt } from '../prompts.js' +import { loadChatModel } from '../../utils.js' +import { makeRetriever } from '../../retrieval.js' +/** + * Schema for query generation response + */ +const GenerateQueriesSchema = z.object({ + queries: z.array(z.string()).describe('List of search queries to execute'), +}) + +/** + * Generate search queries based on the question (a step in the research plan). + * + * This function uses a language model to generate diverse search queries to help answer the question. 
+ *
+ * @param state - The current state of the researcher, including the user's question
+ * @param config - Configuration with the model used to generate queries
+ * @returns Updated state with generated queries
+ */
+async function generateQueries(
+  state: typeof ResearcherStateAnnotation.State,
+  config?: RunnableConfig,
+): Promise<Partial<typeof ResearcherStateAnnotation.State>> {
+  const configuration = getAgentConfiguration(config)
+  const systemPrompt = await getGenerateQueriesSystemPrompt()
+
+  // Determine if we should use function calling method
+  const useFunctionCalling = configuration.queryModel.includes('openai')
+
+  const model = loadChatModel(configuration.queryModel)
+  const structuredModel = model.withStructuredOutput(GenerateQueriesSchema, {
+    method: useFunctionCalling ? 'functionCalling' : 'jsonSchema',
+  })
+
+  const messages = [
+    { role: 'system' as const, content: systemPrompt },
+    { role: 'human' as const, content: state.question },
+  ]
+
+  const response = await structuredModel.invoke(messages, {
+    ...config,
+    tags: ['langsmith:nostream'],
+  })
+
+  return {
+    queries: response.queries,
+  }
+}
+
+/**
+ * Retrieve documents based on a given query.
+ *
+ * This function uses a retriever to fetch relevant documents for a given query.
+ *
+ * @param state - The current state containing the query string
+ * @param config - Configuration with the retriever used to fetch documents
+ * @returns Updated state with retrieved documents
+ */
+async function retrieveDocuments(
+  state: typeof QueryStateAnnotation.State,
+  config?: RunnableConfig,
+): Promise<Partial<typeof QueryStateAnnotation.State>> {
+  const retriever = await makeRetriever(config)
+
+  const documents = await retriever.invoke(state.query, config)
+
+  return {
+    documents,
+    queryIndex: state.queryIndex,
+  }
+}
+
+/**
+ * Create parallel retrieval tasks for each generated query.
+ * + * This function prepares parallel document retrieval tasks for each query in the researcher's state. + * + * @param state - The current state of the researcher, including the generated queries + * @returns List of Send objects, each representing a document retrieval task + */ +function retrieveInParallel( + state: typeof ResearcherStateAnnotation.State, +): Send<string, QueryState>[] { + return state.queries.map( + (query, index) => + new Send<string, QueryState>('retrieve_documents', { + query, + queryIndex: index, + documents: [], + } as QueryState), + ) +} + +/** + * Build and compile the researcher graph + */ +const builder = new StateGraph(ResearcherStateAnnotation) + .addNode('generate_queries', generateQueries) + .addNode('retrieve_documents', retrieveDocuments) + .addEdge(START, 'generate_queries') + .addConditionalEdges('generate_queries', retrieveInParallel, [ + 'retrieve_documents', + ]) + .addEdge('retrieve_documents', END) + +export const graph = builder.compile() +graph.name = 'ResearcherGraph' diff --git a/backend-js/src/retrieval_graph/researcher_graph/state.ts b/backend-js/src/retrieval_graph/researcher_graph/state.ts new file mode 100644 index 000000000..b70fbb8fb --- /dev/null +++ b/backend-js/src/retrieval_graph/researcher_graph/state.ts @@ -0,0 +1,86 @@ +/** + * State management for the researcher subgraph. + * + * This module defines state structures for the researcher subgraph which + * generates queries and retrieves relevant documents. + */ + +import { Document } from '@langchain/core/documents' +import { Annotation } from '@langchain/langgraph' +import { reduceDocs } from '../../utils.js' + +/** + * ResearcherState manages the state for query generation and document retrieval. + */ +export const ResearcherStateAnnotation = Annotation.Root({ + /** + * The question or research step to investigate. 
+ */ + question: Annotation<string>({ + reducer: (existing, update) => update || existing || '', + default: () => '', + }), + + /** + * Generated search queries for this research step. + */ + queries: Annotation<string[]>({ + reducer: (existing, update) => { + if (update === null || update === undefined) { + return existing || [] + } + return update + }, + default: () => [], + }), + + /** + * Documents retrieved from all queries. + * Uses custom reducer to handle document deduplication. + */ + documents: Annotation<Document[]>({ + reducer: reduceDocs, + default: () => [], + }), + + /** + * Index of this query in the list of queries (for tracking). + */ + queryIndex: Annotation<number>({ + reducer: (existing, update) => update ?? existing ?? 0, + default: () => 0, + }), +}) + +/** + * QueryState represents the state for a single query retrieval task. + */ +export const QueryStateAnnotation = Annotation.Root({ + /** + * The search query string. + */ + query: Annotation<string>({ + reducer: (existing, update) => update || existing || '', + default: () => '', + }), + + /** + * Index of this query in the list of queries (for tracking). + */ + queryIndex: Annotation<number>({ + reducer: (existing, update) => update ?? existing ?? 0, + default: () => 0, + }), + + /** + * Documents retrieved for this specific query. + */ + documents: Annotation<Document[]>({ + reducer: reduceDocs, + default: () => [], + }), +}) + +// Type exports for use in other modules +export type ResearcherState = typeof ResearcherStateAnnotation.State +export type QueryState = typeof QueryStateAnnotation.State diff --git a/backend-js/src/retrieval_graph/state.ts b/backend-js/src/retrieval_graph/state.ts new file mode 100644 index 000000000..8f40d8a56 --- /dev/null +++ b/backend-js/src/retrieval_graph/state.ts @@ -0,0 +1,114 @@ +/** + * State management for the retrieval graph. + * + * This module defines the state structures used in the retrieval graph. 
It includes + * definitions for agent state, input state, and reducer functions. + */ + +import { BaseMessage } from '@langchain/core/messages' +import { Document } from '@langchain/core/documents' +import { Annotation, messagesStateReducer } from '@langchain/langgraph' +import { reduceDocs } from '../utils.js' + +/** + * Shared channel definitions to ensure all annotations use the same channel instances. + * This prevents "Channel already exists with a different type" errors. + */ +const messagesChannel = Annotation<BaseMessage[]>({ + reducer: messagesStateReducer, + default: () => [], +}) + +const documentsChannel = Annotation<Document[], Document[] | 'delete'>({ + reducer: reduceDocs, + default: () => [], +}) + +const answerChannel = Annotation<string>({ + reducer: (existing, update) => { + // Always return a string value for LangSmith compatibility + if (update !== undefined && update !== null) { + return String(update) + } + if (existing !== undefined && existing !== null) { + return String(existing) + } + return '' + }, + default: () => '', +}) + +const queryChannel = Annotation<string>({ + reducer: (existing, update) => { + // Always return a string value for LangSmith compatibility + if (update !== undefined && update !== null) { + return String(update) + } + if (existing !== undefined && existing !== null) { + return String(existing) + } + return '' + }, + default: () => '', +}) + +/** + * InputState represents the input to the agent. + * + * This is a restricted version of the State that is used to define + * a narrower interface to the outside world vs. what is maintained internally. + */ +export const InputStateAnnotation = Annotation.Root({ + /** + * Messages track the primary execution state of the agent. + * + * Typically accumulates a pattern of Human/AI/Human/AI messages. + * Uses messagesStateReducer to merge messages by ID. + */ + messages: messagesChannel, +}) + +/** + * AgentState is the primary state of the retrieval agent. 
+ * + * It extends InputState with additional internal state for research planning + * and document retrieval, matching Python's class AgentState(InputState) pattern. + */ +export const AgentStateAnnotation = Annotation.Root({ + /** + * Inherit messages from InputStateAnnotation + */ + ...InputStateAnnotation.spec, + + /** + * A list of steps in the research plan. + */ + steps: Annotation<string[]>({ + reducer: (existing, update) => { + if (update === null || update === undefined) { + return existing || [] + } + return update + }, + default: () => [], + }), + + /** + * Documents retrieved by the researcher. + */ + documents: documentsChannel, + + /** + * Final answer. Useful for evaluations. + */ + answer: answerChannel, + + /** + * The original query from the user. + */ + query: queryChannel, +}) + +// Type exports for use in other modules +export type InputState = typeof InputStateAnnotation.State +export type AgentState = typeof AgentStateAnnotation.State diff --git a/backend-js/src/server.ts b/backend-js/src/server.ts new file mode 100644 index 000000000..8d58f7720 --- /dev/null +++ b/backend-js/src/server.ts @@ -0,0 +1,321 @@ +/** + * Self-hosted Express server for the retrieval graph. + * + * This server provides REST API endpoints for invoking the graph, + * with support for streaming, checkpointing, and thread management. 
+ */ + +import express, { Request, Response } from "express"; +import cors from "cors"; +import { config as loadEnv } from "dotenv"; +import { HumanMessage } from "@langchain/core/messages"; +import { graph } from "./retrieval_graph/graph.js"; + +// Load environment variables +loadEnv(); + +const app = express(); +const PORT = process.env.PORT || 3001; + +// Middleware +app.use(cors()); +app.use(express.json()); + +// Request logging middleware +app.use((req, res, next) => { + console.log(`${new Date().toISOString()} ${req.method} ${req.path}`); + next(); +}); + +/** + * Health check endpoint + */ +app.get("/health", (req: Request, res: Response) => { + res.json({ status: "healthy", timestamp: new Date().toISOString() }); +}); + +/** + * POST /runs + * Invoke the graph with a new question + * + * Body: + * { + * "messages": ["What is LangChain?"], + * "thread_id": "optional-thread-id", + * "config": {} // optional configuration + * } + */ +app.post("/runs", async (req: Request, res: Response) => { + try { + const { messages, thread_id, config = {} } = req.body; + + if (!messages || !Array.isArray(messages) || messages.length === 0) { + return res.status(400).json({ error: "messages array is required" }); + } + + // Convert string messages to HumanMessage objects + const messageObjects = messages.map((msg: string | any) => { + if (typeof msg === "string") { + return new HumanMessage(msg); + } + return msg; + }); + + // Prepare config + const runnableConfig = { + ...config, + configurable: { + ...config.configurable, + thread_id: thread_id || `thread_${Date.now()}`, + }, + }; + + // Invoke the graph + const result = await graph.invoke( + { messages: messageObjects }, + runnableConfig + ); + + res.json({ + success: true, + thread_id: runnableConfig.configurable.thread_id, + result, + }); + } catch (error: any) { + console.error("Error in /runs:", error); + res.status(500).json({ + error: "Internal server error", + message: error.message, + }); + } +}); + +/** + * POST 
/runs/stream + * Stream the graph execution (Server-Sent Events) + * + * Body: + * { + * "messages": ["What is LangChain?"], + * "thread_id": "optional-thread-id", + * "config": {} // optional configuration + * } + */ +app.post("/runs/stream", async (req: Request, res: Response) => { + try { + const { messages, thread_id, config = {} } = req.body; + + if (!messages || !Array.isArray(messages) || messages.length === 0) { + return res.status(400).json({ error: "messages array is required" }); + } + + // Set headers for Server-Sent Events + res.setHeader("Content-Type", "text/event-stream"); + res.setHeader("Cache-Control", "no-cache"); + res.setHeader("Connection", "keep-alive"); + + // Convert string messages to HumanMessage objects + const messageObjects = messages.map((msg: string | any) => { + if (typeof msg === "string") { + return new HumanMessage(msg); + } + return msg; + }); + + // Prepare config + const runnableConfig = { + ...config, + configurable: { + ...config.configurable, + thread_id: thread_id || `thread_${Date.now()}`, + }, + }; + + // Stream the graph execution + const stream = await graph.stream( + { messages: messageObjects }, + runnableConfig + ); + + // Send events to client + for await (const event of stream) { + res.write(`data: ${JSON.stringify(event)}\n\n`); + } + + // End the stream + res.write("data: [DONE]\n\n"); + res.end(); + } catch (error: any) { + console.error("Error in /runs/stream:", error); + if (!res.headersSent) { + res.status(500).json({ + error: "Internal server error", + message: error.message, + }); + } else { + res.write(`data: ${JSON.stringify({ error: error.message })}\n\n`); + res.end(); + } + } +}); + +/** + * GET /runs/:run_id + * Get the status of a specific run + */ +app.get("/runs/:run_id", async (req: Request, res: Response) => { + try { + const { run_id } = req.params; + + // Note: This would require storing run information + // For now, return a placeholder + res.json({ + run_id, + status: "completed", + 
message: "Run status tracking not yet implemented in self-hosted mode", + }); + } catch (error: any) { + console.error("Error in /runs/:run_id:", error); + res.status(500).json({ + error: "Internal server error", + message: error.message, + }); + } +}); + +/** + * POST /threads/:thread_id/runs + * Continue a conversation in an existing thread + * + * Body: + * { + * "messages": ["Follow-up question"], + * "config": {} // optional configuration + * } + */ +app.post("/threads/:thread_id/runs", async (req: Request, res: Response) => { + try { + const { thread_id } = req.params; + const { messages, config = {} } = req.body; + + if (!messages || !Array.isArray(messages) || messages.length === 0) { + return res.status(400).json({ error: "messages array is required" }); + } + + // Convert string messages to HumanMessage objects + const messageObjects = messages.map((msg: string | any) => { + if (typeof msg === "string") { + return new HumanMessage(msg); + } + return msg; + }); + + // Prepare config with thread_id + const runnableConfig = { + ...config, + configurable: { + ...config.configurable, + thread_id, + }, + }; + + // Invoke the graph + const result = await graph.invoke( + { messages: messageObjects }, + runnableConfig + ); + + res.json({ + success: true, + thread_id, + result, + }); + } catch (error: any) { + console.error("Error in /threads/:thread_id/runs:", error); + res.status(500).json({ + error: "Internal server error", + message: error.message, + }); + } +}); + +/** + * GET /threads/:thread_id/state + * Get the current state of a thread + */ +app.get("/threads/:thread_id/state", async (req: Request, res: Response) => { + try { + const { thread_id } = req.params; + + // Note: This would require implementing checkpointing + // For now, return a placeholder + res.json({ + thread_id, + state: {}, + message: "Thread state retrieval not yet implemented in self-hosted mode", + note: "Implement checkpointing using @langchain/langgraph-checkpoint-postgres or 
similar", + }); + } catch (error: any) { + console.error("Error in /threads/:thread_id/state:", error); + res.status(500).json({ + error: "Internal server error", + message: error.message, + }); + } +}); + +/** + * Error handling middleware + */ +app.use((err: any, req: Request, res: Response, next: any) => { + console.error("Unhandled error:", err); + res.status(500).json({ + error: "Internal server error", + message: err.message, + }); +}); + +/** + * Start the server + */ +app.listen(PORT, () => { + console.log(` +╔═══════════════════════════════════════════════════════════╗ +║ ║ +║ 🚀 Chat LangChain Backend Server (Self-Hosted) ║ +║ ║ +║ Status: Running ║ +║ Port: ${PORT} ║ +║ URL: http://localhost:${PORT} ║ +║ ║ +║ Endpoints: ║ +║ - GET /health ║ +║ - POST /runs ║ +║ - POST /runs/stream ║ +║ - GET /runs/:run_id ║ +║ - POST /threads/:thread_id/runs ║ +║ - GET /threads/:thread_id/state ║ +║ ║ +╚═══════════════════════════════════════════════════════════╝ + `); + + console.log("\n📝 Note: For production deployment, consider:"); + console.log(" - Adding authentication/authorization"); + console.log(" - Implementing rate limiting"); + console.log(" - Setting up checkpointing with PostgreSQL"); + console.log(" - Adding request validation middleware"); + console.log(" - Configuring logging and monitoring"); + console.log("\n💡 Tip: Use LangGraph Cloud for managed deployment with these features built-in.\n"); +}); + +// Graceful shutdown +process.on("SIGTERM", () => { + console.log("\nReceived SIGTERM, shutting down gracefully..."); + process.exit(0); +}); + +process.on("SIGINT", () => { + console.log("\nReceived SIGINT, shutting down gracefully..."); + process.exit(0); +}); + diff --git a/backend-js/src/utils.ts b/backend-js/src/utils.ts new file mode 100644 index 000000000..fb3823ce8 --- /dev/null +++ b/backend-js/src/utils.ts @@ -0,0 +1,320 @@ +/** + * Shared utility functions used in the project. 
+ * + * Functions: + * - getWeaviateClient: Create a Weaviate client connection + * - formatDocs: Convert documents to an xml-formatted string + * - loadChatModel: Load a chat model from a model name + * - reduceDocs: Document reducer for state management + */ + +import weaviate, { WeaviateClient } from 'weaviate-client' +import { Document } from '@langchain/core/documents' +import { BaseChatModel } from '@langchain/core/language_models/chat_models' +import { ChatAnthropic } from '@langchain/anthropic' +import { ChatOpenAI } from '@langchain/openai' +import { ChatGroq } from '@langchain/groq' +import { ChatOllama } from '@langchain/ollama' +import { v4 as uuidv4 } from 'uuid' + +/** + * Create and connect to a Weaviate client. + * + * @param weaviateUrl - The Weaviate HTTP URL. If not provided, reads from WEAVIATE_URL env var. + * @param weaviateGrpcUrl - The Weaviate gRPC URL. If not provided, uses weaviateUrl. + * @param weaviateApiKey - The Weaviate API key. If not provided, reads from WEAVIATE_API_KEY env var. 
+ * @returns A connected Weaviate client + */ +export async function getWeaviateClient( + weaviateUrl?: string, + weaviateGrpcUrl?: string, + weaviateApiKey?: string, +): Promise<WeaviateClient> { + const url = weaviateUrl || process.env.WEAVIATE_URL || 'weaviate.hanu-nus.com' + const grpcUrl = + weaviateGrpcUrl || + process.env.WEAVIATE_GRPC_URL || + 'grpc-weaviate.hanu-nus.com' + const apiKey = weaviateApiKey || process.env.WEAVIATE_API_KEY || 'admin-key' + + // Extract hostname from URL (remove https:// or http://) + const httpHost = url.replace(/^https?:\/\//, '') + const grpcHost = grpcUrl.replace(/^https?:\/\//, '') + + const client = await weaviate.connectToCustom({ + httpHost, + httpPort: 443, + httpSecure: true, + grpcHost, + grpcPort: 443, + grpcSecure: true, + authCredentials: new weaviate.ApiKey(apiKey), + // Skip init checks to avoid gRPC health check failures with proxied/tunneled connections + skipInitChecks: true, + // Increase timeouts for slow/tunneled connections + timeout: { + init: 60_000, // 60 seconds for initialization + query: 60_000, // 60 seconds for queries + insert: 120_000, // 2 minutes for inserts + }, + }) + + return client +} + +/** + * Format a single document as XML. + * + * @param doc - The document to format + * @returns The formatted document as an XML string + */ +function formatDoc(doc: Document): string { + const metadata = doc.metadata || {} + const metaStr = Object.entries(metadata) + .map(([k, v]) => ` ${k}="${v}"`) + .join('') + + return `<document${metaStr}>\n${doc.pageContent}\n</document>` +} + +/** + * Format a list of documents as XML. + * + * This function takes a list of Document objects and formats them into a single XML string. 
+ * + * @param docs - A list of Document objects to format, or null + * @returns A string containing the formatted documents in XML format + * + * @example + * ```typescript + * const docs = [ + * new Document({ pageContent: "Hello" }), + * new Document({ pageContent: "World" }) + * ]; + * console.log(formatDocs(docs)); + * // Output: + * // <documents> + * // <document> + * // Hello + * // </document> + * // <document> + * // World + * // </document> + * // </documents> + * ``` + */ +export function formatDocs(docs: Document[] | null | undefined): string { + if (!docs || docs.length === 0) { + return '<documents></documents>' + } + const formatted = docs.map((doc) => formatDoc(doc)).join('\n') + return `<documents>\n${formatted}\n</documents>` +} + +/** + * Load a chat model from a fully specified name. + * + * @param fullySpecifiedName - String in the format 'provider/model' + * @returns A BaseChatModel instance + * + * @example + * ```typescript + * // Load Groq model + * const model = loadChatModel("groq/llama-3.3-70b-versatile"); + * // Load other models + * const model2 = loadChatModel("openai/gpt-4"); + * ``` + */ +export function loadChatModel(fullySpecifiedName: string): BaseChatModel { + let provider: string + let model: string + + if (fullySpecifiedName.includes('/')) { + // Split only on the first '/' to handle formats like "groq/openai/gpt-oss-20b" + // This matches Python's split("/", maxsplit=1) behavior + const parts = fullySpecifiedName.split('/') + provider = parts[0] + model = parts.slice(1).join('/') + } else { + provider = '' + model = fullySpecifiedName + } + + const baseConfig = { + temperature: 0, + } + + switch (provider.toLowerCase()) { + case 'groq': + return new ChatGroq({ + // model: 'llama-3.1-8b-instant', + // model, + model: 'llama-3.1-8b-instant', // TODO: change back to model + ...baseConfig, + }) + + case 'openai': + return new ChatOpenAI({ + model, + ...baseConfig, + streamUsage: true, + }) + + case 'anthropic': + return new 
ChatAnthropic({ + model, + ...baseConfig, + }) + + case 'ollama': + return new ChatOllama({ + model, + ...baseConfig, + baseUrl: process.env.OLLAMA_BASE_URL || 'http://localhost:11434', + }) + + case 'google_genai': + // Note: Google GenAI might need special handling for system messages + throw new Error('Google GenAI not yet implemented in TypeScript version') + + default: + // Default to OpenAI if no provider specified + return new ChatOpenAI({ + model: fullySpecifiedName, + ...baseConfig, + streamUsage: true, + }) + } +} + +/** + * Reduce and process documents based on the input type. + * + * This function handles various input types and converts them into a sequence of Document objects. + * It also combines existing documents with the new ones based on the document ID. + * + * @param existing - The existing docs in the state, if any + * @param newDocs - The new input to process. Can be a sequence of Documents, objects, strings, or "delete" + * @returns Combined list of documents + */ +/** + * Reduce and process documents based on the input type. + * + * This function handles various input types and converts them into a sequence of Document objects. + * It uses dual deduplication: UUID-based (primary) and content-based (secondary). + * The content-based check handles retrieved documents that have different UUIDs but identical content. + * + * @param existing - The existing docs in the state, if any + * @param newDocs - The new input to process. 
Can be a sequence of Documents, objects, strings, or "delete" + * @returns Combined list of documents + */ +export function reduceDocs( + existing: Document[] | undefined, + newDocs: Document[] | Record<string, any>[] | string[] | string | 'delete', +): Document[] { + if (newDocs === 'delete') { + return [] + } + + const existingList = existing || [] + + if (typeof newDocs === 'string') { + return [ + ...existingList, + new Document({ + pageContent: newDocs, + metadata: { uuid: uuidv4() }, + }), + ] + } + + if (!Array.isArray(newDocs)) { + return existingList + } + + const newList: Document[] = [] + // Primary deduplication: Track UUIDs (matches Python's behavior) + const existingIds = new Set( + existingList.map((doc) => doc.metadata?.uuid).filter(Boolean), + ) + + // Secondary deduplication: Track content+source signatures + // This catches retrieved documents with different UUIDs but identical content + const existingContentKeys = new Set( + existingList.map((doc) => { + const source = doc.metadata?.source || '' + const content = doc.pageContent.substring(0, 500) // Use first 500 chars as signature + return `${source}:::${content}` + }), + ) + + for (const item of newDocs) { + if (typeof item === 'string') { + const itemId = uuidv4() + newList.push( + new Document({ + pageContent: item, + metadata: { uuid: itemId }, + }), + ) + existingIds.add(itemId) + } else if (item instanceof Document) { + // Use existing id from Document (from vector DB) if available, otherwise check metadata.uuid, fallback to generating new UUID + let itemId = item.id || item.metadata?.uuid + if (!itemId) { + // Generate new UUID only if neither id nor metadata.uuid exists + itemId = uuidv4() + } + + // Primary check: UUID-based deduplication + if (existingIds.has(itemId)) { + continue + } + + // Secondary check: content-based deduplication (for retrieved docs with different UUIDs) + const source = item.metadata?.source || '' + const contentKey = 
`${source}:::${item.pageContent.substring(0, 500)}` + if (existingContentKeys.has(contentKey)) { + continue + } + + // Add the document if it's truly unique + // Ensure metadata.uuid is set for deduplication consistency, and preserve the id field + const newDoc = + itemId === item.metadata?.uuid && itemId === item.id + ? item + : new Document({ + pageContent: item.pageContent, + metadata: { ...item.metadata, uuid: itemId }, + id: itemId, // Preserve the id field from vector DB + }) + + newList.push(newDoc) + existingIds.add(itemId) + existingContentKeys.add(contentKey) + } else if (typeof item === 'object' && item !== null) { + // Plain object with pageContent + const metadata = item.metadata || {} + const pageContent = item.pageContent || '' + // Use existing id from object if available (from vector DB), otherwise check metadata.uuid, fallback to generating new UUID + let itemId = (item as any).id || metadata.uuid + + if (!itemId) { + itemId = uuidv4() + } + + if (!existingIds.has(itemId)) { + newList.push( + new Document({ + pageContent, + metadata: { ...metadata, uuid: itemId }, + id: itemId, // Preserve the id field from vector DB + }), + ) + existingIds.add(itemId) + } + } + } + + return [...existingList, ...newList] +} diff --git a/backend-js/tests/evals/test_e2e.test.ts b/backend-js/tests/evals/test_e2e.test.ts new file mode 100644 index 000000000..895a96bba --- /dev/null +++ b/backend-js/tests/evals/test_e2e.test.ts @@ -0,0 +1,296 @@ +/** + * End-to-end evaluation tests for the retrieval graph. + * + * This module tests the complete system against a dataset of questions + * and expected answers, measuring retrieval recall and answer correctness. 
+ */ + +import 'dotenv/config' + +import { describe, it, expect } from 'vitest' +import { HumanMessage, AIMessage } from '@langchain/core/messages' +import { Document } from '@langchain/core/documents' +import { ChatPromptTemplate } from '@langchain/core/prompts' +import { Client, Run, Example } from 'langsmith' +import { + evaluate as evaluateLangSmith, + EvaluationResult, + type EvaluateOptions, +} from 'langsmith/evaluation' +import { z } from 'zod' +import { graph } from '../../src/retrieval_graph/graph.js' +import { formatDocs, loadChatModel } from '../../src/utils.js' + +// Dataset and experiment configuration +const DATASET_NAME = 'small-chatlangchain-dataset' +const EXPERIMENT_PREFIX = 'chat-langchain-ci' + +// Score keys +const SCORE_RETRIEVAL_RECALL = 'retrieval_recall' +const SCORE_ANSWER_CORRECTNESS = 'answer_correctness_score' +const SCORE_ANSWER_VS_CONTEXT_CORRECTNESS = + 'answer_vs_context_correctness_score' + +// Judge model +const JUDGE_MODEL_NAME = 'openai/gpt-4o-mini' + +const judgeModel = loadChatModel(JUDGE_MODEL_NAME) + +// Initialize LangSmith client +const client = new Client() + +/** + * Schema for grading answers + */ +const GradeAnswerSchema = z.object({ + reason: z + .string() + .describe('1-2 short sentences with the reason why the score was assigned'), + score: z + .number() + .min(0.0) + .max(1.0) + .describe( + 'Score that shows how correct the answer is. 
Use 1.0 if completely correct and 0.0 if completely incorrect', + ), +}) + +type GradeAnswer = z.infer<typeof GradeAnswerSchema> + +/** + * Evaluate retrieval recall + * Matches Python signature: evaluate_retrieval_recall(run: Run, example: Example) -> dict + */ +function evaluateRetrievalRecall( + run: Run, + example?: Example, +): EvaluationResult { + if (!example) { + return { key: SCORE_RETRIEVAL_RECALL, score: 0.0 } + } + + const expectedSources = example.metadata?.metadata?.source + const retrievedSources = new Set<string>( + run.outputs?.documents?.map?.((doc: Document) => doc.metadata.source) || [], + ) + + // Calculate recall - at least one expected source should be in retrieved docs + const score = retrievedSources.has(expectedSources) ? 1.0 : 0.0 + + return { key: SCORE_RETRIEVAL_RECALL, score } +} + +/** + * QA evaluation system prompt + */ +const QA_SYSTEM_PROMPT = `You are an expert programmer and problem-solver, tasked with grading answers to questions about Langchain. +You are given a question, the student's answer, and the true answer, and are asked to score the student answer as either CORRECT or INCORRECT. + +Grade the student answers based ONLY on their factual accuracy. Ignore differences in punctuation and phrasing between the student answer and true answer. 
It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements.` + +const QA_PROMPT = ChatPromptTemplate.fromMessages([ + ['system', QA_SYSTEM_PROMPT], + [ + 'human', + 'QUESTION: \n\n {question} \n\n TRUE ANSWER: {true_answer} \n\n STUDENT ANSWER: {answer}', + ], +]) + +/** + * Evaluate answer correctness based on reference answer + * Matches Python signature: evaluate_qa(run: Run, example: Example) -> dict + */ +async function evaluateQA( + run: Run, + example?: Example, +): Promise<EvaluationResult> { + if (!example) { + return { key: SCORE_ANSWER_CORRECTNESS, score: 0.0 } + } + + const messages = (run.outputs?.messages as any[]) || [] + if (messages.length === 0) { + return { key: SCORE_ANSWER_CORRECTNESS, score: 0.0 } + } + + const lastMessage = messages[messages.length - 1] + if (!(lastMessage instanceof AIMessage)) { + return { key: SCORE_ANSWER_CORRECTNESS, score: 0.0 } + } + + const qaChain = QA_PROMPT.pipe( + judgeModel.withStructuredOutput(GradeAnswerSchema), + ) + + const result = (await qaChain.invoke({ + question: example.inputs?.question, + true_answer: example.outputs?.answer, + answer: lastMessage.content, + })) as GradeAnswer + + return { key: SCORE_ANSWER_CORRECTNESS, score: result.score } +} + +/** + * Context QA evaluation system prompt + */ +const CONTEXT_QA_SYSTEM_PROMPT = `You are an expert programmer and problem-solver, tasked with grading answers to questions about Langchain. +You are given a question, the context for answering the question, and the student's answer. You are asked to score the student's answer as either CORRECT or INCORRECT, based on the context. + +Grade the student answer BOTH based on its factual accuracy AND on whether it is supported by the context. Ignore differences in punctuation and phrasing between the student answer and true answer. 
It is OK if the student answer contains more information than the true answer, as long as it does not contain any conflicting statements.` + +const CONTEXT_QA_PROMPT = ChatPromptTemplate.fromMessages([ + ['system', CONTEXT_QA_SYSTEM_PROMPT], + [ + 'human', + 'QUESTION: \n\n {question} \n\n CONTEXT: {context} \n\n STUDENT ANSWER: {answer}', + ], +]) + +/** + * Evaluate answer correctness based on retrieved context + * Matches Python signature: evaluate_qa_context(run: Run, example: Example) -> dict + */ +async function evaluateQAContext( + run: Run, + example?: Example, +): Promise<EvaluationResult> { + if (!example) { + return { key: SCORE_ANSWER_VS_CONTEXT_CORRECTNESS, score: 0.0 } + } + + const messages = (run.outputs?.messages as any[]) || [] + if (messages.length === 0) { + return { key: SCORE_ANSWER_VS_CONTEXT_CORRECTNESS, score: 0.0 } + } + + const documents = (run.outputs?.documents as Document[]) || [] + if (documents.length === 0) { + return { key: SCORE_ANSWER_VS_CONTEXT_CORRECTNESS, score: 0.0 } + } + + const context = formatDocs(documents) + + const lastMessage = messages[messages.length - 1] + if (!(lastMessage instanceof AIMessage)) { + return { key: SCORE_ANSWER_VS_CONTEXT_CORRECTNESS, score: 0.0 } + } + + const contextQaChain = CONTEXT_QA_PROMPT.pipe( + judgeModel.withStructuredOutput(GradeAnswerSchema), + ) + + const result = (await contextQaChain.invoke({ + question: example.inputs?.question, + context, + answer: lastMessage.content, + })) as GradeAnswer + + return { key: SCORE_ANSWER_VS_CONTEXT_CORRECTNESS, score: result.score } +} + +/** + * Run the graph for evaluation + * Wrapper to match LangSmith evaluate target signature + */ +async function runGraph( + inputs: Record<string, any>, +): Promise<Record<string, any>> { + const results = await graph.invoke({ + messages: [new HumanMessage(inputs.question)], + }) + return results +} + +/** + * Main evaluation test + * Uses LangSmith evaluate function (equivalent to Python's aevaluate) + */ 
+describe('E2E Evaluation Tests', () => { + it('should pass regression test with minimum score thresholds', async () => { + console.log('Starting evaluation...') + + const options: EvaluateOptions = { + data: DATASET_NAME, + evaluators: [evaluateQA, evaluateQAContext, evaluateRetrievalRecall], + experimentPrefix: EXPERIMENT_PREFIX, + metadata: { judge_model_name: JUDGE_MODEL_NAME }, + maxConcurrency: 1, + client, + } + const { results: experimentResults } = await evaluateLangSmith( + runGraph, + options, + ) + + // Collect results and scores + const results: Array<{ + input: any + expectedOutput: any + actualOutput: any + scores: Record<string, number> + }> = [] + + // Process results as they become available + for await (const result of experimentResults) { + const scores: Record<string, number> = {} + for (const evalResult of result.evaluationResults.results) { + if (evalResult.score !== null && evalResult.score !== undefined) { + scores[evalResult.key] = evalResult.score as number + } + } + + results.push({ + input: result.example.inputs, + expectedOutput: result.example.outputs, + actualOutput: result.run.outputs, + scores, + }) + } + + // Log all results in a table format + const tableData = results.map((result, index) => ({ + Test: index + 1, + Question: (result.input.question as string).substring(0, 50) + '...', + [SCORE_RETRIEVAL_RECALL]: + result.scores[SCORE_RETRIEVAL_RECALL]?.toFixed(2) ?? 'N/A', + [SCORE_ANSWER_CORRECTNESS]: + result.scores[SCORE_ANSWER_CORRECTNESS]?.toFixed(2) ?? 'N/A', + [SCORE_ANSWER_VS_CONTEXT_CORRECTNESS]: + result.scores[SCORE_ANSWER_VS_CONTEXT_CORRECTNESS]?.toFixed(2) ?? 'N/A', + })) + console.log('Records:') + console.table(tableData) + + // Calculate average scores + const avgAnswerCorrectness = + results.reduce( + (sum, r) => sum + (r.scores[SCORE_ANSWER_CORRECTNESS] ?? 0), + 0, + ) / results.length + const avgContextCorrectness = + results.reduce( + (sum, r) => sum + (r.scores[SCORE_ANSWER_VS_CONTEXT_CORRECTNESS] ?? 
0), + 0, + ) / results.length + + const avgRetrievalRecall = + results.reduce( + (sum, r) => sum + (r.scores[SCORE_RETRIEVAL_RECALL] ?? 0), + 0, + ) / results.length + + // Print averages in a console.table for better visibility + console.log('Averages:') + console.table({ + [SCORE_ANSWER_CORRECTNESS]: avgAnswerCorrectness.toFixed(2), + [SCORE_ANSWER_VS_CONTEXT_CORRECTNESS]: avgContextCorrectness.toFixed(2), + [SCORE_RETRIEVAL_RECALL]: avgRetrievalRecall.toFixed(2), + }) + + // Assert minimum thresholds + expect(avgAnswerCorrectness).toBeGreaterThanOrEqual(0.9) + expect(avgContextCorrectness).toBeGreaterThanOrEqual(0.7) + expect(avgRetrievalRecall).toBeGreaterThanOrEqual(0.9) + }, 300000) // 5 minute timeout for full evaluation +}) diff --git a/backend-js/tsconfig.json b/backend-js/tsconfig.json new file mode 100644 index 000000000..012fd77d9 --- /dev/null +++ b/backend-js/tsconfig.json @@ -0,0 +1,23 @@ +{ + "compilerOptions": { + "target": "ES2022", + "module": "ES2022", + "lib": ["ES2022"], + "moduleResolution": "bundler", + "esModuleInterop": true, + "allowSyntheticDefaultImports": true, + "strict": true, + "skipLibCheck": true, + "forceConsistentCasingInFileNames": true, + "resolveJsonModule": true, + "declaration": true, + "declarationMap": true, + "sourceMap": true, + "outDir": "./dist", + "types": ["node", "vitest/globals"], + "allowImportingTsExtensions": false, + "noEmit": false + }, + "include": ["src/**/*", "tests/**/*", "scripts/**/*"], + "exclude": ["node_modules", "dist"] +} diff --git a/backend-js/vitest.config.ts b/backend-js/vitest.config.ts new file mode 100644 index 000000000..0c047dfd8 --- /dev/null +++ b/backend-js/vitest.config.ts @@ -0,0 +1,11 @@ +import { defineConfig } from 'vitest/config' + +export default defineConfig({ + test: { + globals: true, + environment: 'node', + testTimeout: 60000, // 60 seconds for LLM calls + hookTimeout: 30000, + include: ['tests/**/*.test.ts'], + }, +}) diff --git a/backend/constants.py b/backend/constants.py 
index ac72c28f8..92511f45f 100644 --- a/backend/constants.py +++ b/backend/constants.py @@ -8,3 +8,7 @@ # Ollama configuration OLLAMA_BASE_URL = os.getenv("OLLAMA_BASE_URL", "http://localhost:11434") + +OLLAMA_BASE_EMBEDDING_DOCS_URL = os.getenv( + "OLLAMA_BASE_EMBEDDING_DOCS_URL", OLLAMA_BASE_URL +) diff --git a/backend/ingest.py b/backend/ingest.py index 0ce5ef03b..1906a0bc8 100644 --- a/backend/ingest.py +++ b/backend/ingest.py @@ -17,6 +17,7 @@ from langchain_weaviate import WeaviateVectorStore from backend.constants import ( + OLLAMA_BASE_EMBEDDING_DOCS_URL, OLLAMA_BASE_URL, WEAVIATE_GENERAL_GUIDES_AND_TUTORIALS_INDEX_NAME, ) @@ -134,7 +135,7 @@ def ingest_docs(): # Larger chunks for nomic-embed-text (2K token context window) # 4000 chars ≈ 1000-1300 tokens, well within the 2K limit text_splitter = RecursiveCharacterTextSplitter(chunk_size=4000, chunk_overlap=200) - embedding = get_embeddings_model(base_url=OLLAMA_BASE_URL) + embedding = get_embeddings_model(base_url=OLLAMA_BASE_EMBEDDING_DOCS_URL) with get_weaviate_client( weaviate_url=WEAVIATE_URL, diff --git a/dataset/chunks.json b/dataset/chunks.json deleted file mode 100644 index f58dbcfe6..000000000 --- a/dataset/chunks.json +++ /dev/null @@ -1,242 +0,0 @@ -[ - { - "page_content": "Agents - Docs by LangChainSkip to main contentWe've raised a $125M Series B to build the platform for agent engineering. 
Read more.Docs by LangChain home pageLangChain + LangGraphSearch...⌘KAsk AIGitHubTry LangSmithTry LangSmithSearch...NavigationCore componentsAgentsLangChainLangGraphIntegrationsLearnReferenceContributeTypeScriptOverviewLangChain v1.0Release notesMigration guideGet startedInstallQuickstartPhilosophyCore componentsAgentsModelsMessagesToolsShort-term memoryStreamingMiddlewareStructured outputAdvanced usageGuardrailsRuntimeContext engineeringModel Context Protocol (MCP)Human-in-the-loopMulti-agentRetrievalLong-term memoryUse in productionStudioTestDeployAgent Chat UIObservabilityEnglishcloseOn this pageCore componentsModelStatic modelDynamic modelToolsDefining toolsTool error handlingTool use in the ReAct loopSystem promptDynamic system promptInvocationAdvanced conceptsStructured outputMemoryStreamingMiddlewareCore componentsAgentsCopy pageCopy pageAgents combine language models with tools to create systems that can reason about tasks, decide which tools to use, and iteratively work towards solutions.\ncreateAgent() provides a production-ready agent implementation.\nAn LLM Agent runs tools in a loop to achieve a goal.\nAn agent runs until a stop condition is met - i.e., when the model emits a final output or an iteration limit is reached.\n\ncreateAgent() builds a graph-based agent runtime using LangGraph. A graph consists of nodes (steps) and edges (connections) that define how your agent processes information. The agent moves through this graph, executing nodes like the model node (which calls the model), the tools node (which executes tools), or middleware.Learn more about the Graph API.\n​Core components\n​Model\nThe model is the reasoning engine of your agent. It can be specified in multiple ways, supporting both static and dynamic model selection.\n​Static model\nStatic models are configured once when creating the agent and remain unchanged throughout execution. 
This is the most common and straightforward approach.\nTo initialize a static model from a model identifier string:\nCopyAsk AIimport { createAgent } from \"langchain\";\n\nconst agent = createAgent({\n model: \"gpt-5\",\n tools: []\n});\n\nModel identifier strings use the format provider:model (e.g. \"openai:gpt-5\"). You may want more control over the model configuration, in which case you can initialize a model instance directly using the provider package:\nCopyAsk AIimport { createAgent } from \"langchain\";\nimport { ChatOpenAI } from \"@langchain/openai\";\n\nconst model = new ChatOpenAI({\n model: \"gpt-4o\",\n temperature: 0.1,\n maxTokens: 1000,\n timeout: 30\n});\n\nconst agent = createAgent({\n model,\n tools: []\n});\n\nModel instances give you complete control over configuration. Use them when you need to set specific parameters like temperature, max_tokens, timeouts, or configure API keys, base_url, and other provider-specific settings. Refer to the API reference to see available params and methods on your model.\n​Dynamic model\nDynamic models are selected at runtime based on the current state and context. This enables sophisticated routing logic and cost optimization.\nTo use a dynamic model, create middleware with wrapModelCall that modifies the model in the request:\nCopyAsk AIimport { ChatOpenAI } from \"@langchain/openai\";\nimport { createAgent, createMiddleware } from \"langchain\";\n\nconst basicModel = new ChatOpenAI({ model: \"gpt-4o-mini\" });\nconst advancedModel = new ChatOpenAI({ model: \"gpt-4o\" });\n\nconst dynamicModelSelection = createMiddleware({\n name: \"DynamicModelSelection\",\n wrapModelCall: (request, handler) => {\n // Choose model based on conversation complexity\n const messageCount = request.messages.length;\n\n return handler({\n ...request,\n model: messageCount > 10 ? 
advancedModel : basicModel,\n });\n },\n});", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/agents", - "title": "Agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/agents", - "lastmod": "2025-11-03T18:48:17.363Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "return handler({\n ...request,\n model: messageCount > 10 ? advancedModel : basicModel,\n });\n },\n});\n\nconst agent = createAgent({\n model: \"gpt-4o-mini\", // Base model (used when messageCount ≤ 10)\n tools,\n middleware: [dynamicModelSelection] as const,\n});\n\nFor more details on middleware and advanced patterns, see the middleware documentation.\nFor model configuration details, see Models. For dynamic model selection patterns, see Dynamic model in middleware.\n​Tools\nTools give agents the ability to take actions. 
Agents go beyond simple model-only tool binding by facilitating:\n\nMultiple tool calls in sequence (triggered by a single prompt)\nParallel tool calls when appropriate\nDynamic tool selection based on previous results\nTool retry logic and error handling\nState persistence across tool calls\n\nFor more information, see Tools.\n​Defining tools\nPass a list of tools to the agent.\nCopyAsk AIimport * as z from \"zod\";\nimport { createAgent, tool } from \"langchain\";\n\nconst search = tool(\n ({ query }) => `Results for: ${query}`,\n {\n name: \"search\",\n description: \"Search for information\",\n schema: z.object({\n query: z.string().describe(\"The query to search for\"),\n }),\n }\n);\n\nconst getWeather = tool(\n ({ location }) => `Weather in ${location}: Sunny, 72°F`,\n {\n name: \"get_weather\",\n description: \"Get weather information for a location\",\n schema: z.object({\n location: z.string().describe(\"The location to get weather for\"),\n }),\n }\n);\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [search, getWeather],\n});\n\nIf an empty tool list is provided, the agent will consist of a single LLM node without tool-calling capabilities.\n​Tool error handling\nTo customize how tool errors are handled, use the wrapToolCall hook in a custom middleware:\nCopyAsk AIimport { createAgent, createMiddleware, ToolMessage } from \"langchain\";\n\nconst handleToolErrors = createMiddleware({\n name: \"HandleToolErrors\",\n wrapToolCall: (request, handler) => {\n try {\n return handler(request);\n } catch (error) {\n // Return a custom error message to the model\n return new ToolMessage({\n content: `Tool error: Please check your input and try again. (${error})`,\n tool_call_id: request.toolCall.id!,\n });\n }\n },\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [\n /* ... 
*/\n ],\n middleware: [handleToolErrors] as const,\n});\n\nThe agent will return a @[ToolMessage] with the custom error message when a tool fails.\n​Tool use in the ReAct loop\nAgents follow the ReAct (“Reasoning + Acting”) pattern, alternating between brief reasoning steps with targeted tool calls and feeding the resulting observations into subsequent decisions until they can deliver a final answer.\nExample of ReAct loopPrompt: Identify the current most popular wireless headphones and verify availability.CopyAsk AI================================ Human Message =================================\n\nFind the most popular wireless headphones right now and check if they're in stock\n\nReasoning: “Popularity is time-sensitive, I need to use the provided search tool.”\nActing: Call search_products(\"wireless headphones\")\nCopyAsk AI================================== Ai Message ==================================\nTool Calls:\n search_products (call_abc123)\n Call ID: call_abc123\n Args:\n query: wireless headphones\nCopyAsk AI================================= Tool Message =================================\n\nFound 5 products matching \"wireless headphones\". 
Top 5 results: WH-1000XM5, ...\n\nReasoning: “I need to confirm availability for the top-ranked item before answering.”\nActing: Call check_inventory(\"WH-1000XM5\")\nCopyAsk AI================================== Ai Message ==================================\nTool Calls:\n check_inventory (call_def456)\n Call ID: call_def456\n Args:\n product_id: WH-1000XM5\nCopyAsk AI================================= Tool Message =================================", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/agents", - "title": "Agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/agents", - "lastmod": "2025-11-03T18:48:17.363Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "Product WH-1000XM5: 10 units in stock\n\nReasoning: “I have the most popular model and its stock status. I can now answer the user’s question.”\nActing: Produce final answer\nCopyAsk AI================================== Ai Message ==================================\n\nI found wireless headphones (model WH-1000XM5) with 10 units in stock...\n\nTo learn more about tools, see Tools.\n​System prompt\nYou can shape how your agent approaches tasks by providing a prompt. The systemPrompt parameter can be provided as a string:\nCopyAsk AIconst agent = createAgent({\n model,\n tools,\n systemPrompt: \"You are a helpful assistant. 
Be concise and accurate.\",\n});\n\nWhen no @[system_prompt] is provided, the agent will infer its task from the messages directly.\n​Dynamic system prompt\nFor more advanced use cases where you need to modify the system prompt based on runtime context or agent state, you can use middleware.\nCopyAsk AIimport * as z from \"zod\";\nimport { createAgent, dynamicSystemPromptMiddleware } from \"langchain\";\n\nconst contextSchema = z.object({\n userRole: z.enum([\"expert\", \"beginner\"]),\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [/* ... */],\n contextSchema,\n middleware: [\n dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>((state, runtime) => {\n const userRole = runtime.context.userRole || \"user\";\n const basePrompt = \"You are a helpful assistant.\";\n\n if (userRole === \"expert\") {\n return `${basePrompt} Provide detailed technical responses.`;\n } else if (userRole === \"beginner\") {\n return `${basePrompt} Explain concepts simply and avoid jargon.`;\n }\n return basePrompt;\n }),\n ],\n});\n\n// The system prompt will be set dynamically based on context\nconst result = await agent.invoke(\n { messages: [{ role: \"user\", content: \"Explain machine learning\" }] },\n { context: { userRole: \"expert\" } }\n);\n\nFor more details on message types and formatting, see Messages. For comprehensive middleware documentation, see Middleware.\n​Invocation\nYou can invoke an agent by passing an update to its State. All agents include a sequence of messages in their state; to invoke the agent, pass a new message:\nCopyAsk AIawait agent.invoke({\n messages: [{ role: \"user\", content: \"What's the weather in San Francisco?\" }],\n})\n\nFor streaming steps and / or tokens from the agent, refer to the streaming guide.\nOtherwise, the agent follows the LangGraph Graph API and supports all associated methods.\n​Advanced concepts\n​Structured output\nIn some situations, you may want the agent to return an output in a specific format. 
LangChain provides a simple, universal way to do this with the responseFormat parameter.\nCopyAsk AIimport * as z from \"zod\";\nimport { createAgent } from \"langchain\";\n\nconst ContactInfo = z.object({\n name: z.string(),\n email: z.string(),\n phone: z.string(),\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n responseFormat: ContactInfo,\n});\n\nconst result = await agent.invoke({\n messages: [\n {\n role: \"user\",\n content: \"Extract contact info from: John Doe, john@example.com, (555) 123-4567\",\n },\n ],\n});\n\nconsole.log(result.structuredResponse);\n// {\n// name: 'John Doe',\n// email: 'john@example.com',\n// phone: '(555) 123-4567'\n// }\n\nTo learn about structured output, see Structured output.\n​Memory\nAgents maintain conversation history automatically through the message state. You can also configure the agent to use a custom state schema to remember additional information during the conversation.\nInformation stored in the state can be thought of as the short-term memory of the agent:\nCopyAsk AIimport * as z from \"zod\";\nimport { MessagesZodState } from \"@langchain/langgraph\";\nimport { createAgent, type BaseMessage } from \"langchain\";\n\nconst customAgentState = z.object({\n messages: MessagesZodState.shape.messages,\n userPreferences: z.record(z.string(), z.string()),\n});", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/agents", - "title": "Agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/agents", - "lastmod": "2025-11-03T18:48:17.363Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "const customAgentState = z.object({\n messages: MessagesZodState.shape.messages,\n userPreferences: z.record(z.string(), z.string()),\n});\n\nconst CustomAgentState = 
createAgent({\n model: \"gpt-4o\",\n tools: [],\n stateSchema: customAgentState,\n});\n\nTo learn more about memory, see Memory. For information on implementing long-term memory that persists across sessions, see Long-term memory.\n​Streaming\nWe’ve seen how the agent can be called with invoke to get a final response. If the agent executes multiple steps, this may take a while. To show intermediate progress, we can stream back messages as they occur.\nCopyAsk AIconst stream = await agent.stream(\n {\n messages: [{\n role: \"user\",\n content: \"Search for AI news and summarize the findings\"\n }],\n },\n { streamMode: \"values\" }\n);\n\nfor await (const chunk of stream) {\n // Each chunk contains the full state at that point\n const latestMessage = chunk.messages.at(-1);\n if (latestMessage?.content) {\n console.log(`Agent: ${latestMessage.content}`);\n } else if (latestMessage?.tool_calls) {\n const toolCallNames = latestMessage.tool_calls.map((tc) => tc.name);\n console.log(`Calling tools: ${toolCallNames.join(\", \")}`);\n }\n}\n\nFor more details on streaming, see Streaming.\n​Middleware\nMiddleware provides powerful extensibility for customizing agent behavior at different stages of execution. 
You can use middleware to:\n\nProcess state before the model is called (e.g., message trimming, context injection)\nModify or validate the model’s response (e.g., guardrails, content filtering)\nHandle tool execution errors with custom logic\nImplement dynamic model selection based on state or context\nAdd custom logging, monitoring, or analytics\n\nMiddleware integrates seamlessly into the agent’s execution graph, allowing you to intercept and modify data flow at key points without changing the core agent logic.\nFor comprehensive middleware documentation including hooks like beforeModel, afterModel, and wrapToolCall, see Middleware.\n\nEdit the source of this page on GitHub.\nConnect these docs programmatically to Claude, VSCode, and more via MCP for real-time answers.Was this page helpful?YesNoPhilosophyPreviousModelsNextDocs by LangChain home pagegithubxlinkedinyoutubeResourcesForumChangelogLangChain AcademyTrust CenterCompanyAboutCareersBloggithubxlinkedinyoutubePowered by Mintlify", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/agents", - "title": "Agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/agents", - "lastmod": "2025-11-03T18:48:17.363Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "Context engineering in agents - Docs by LangChainSkip to main contentWe've raised a $125M Series B to build the platform for agent engineering. 
Read more.Docs by LangChain home pageLangChain + LangGraphSearch...⌘KAsk AIGitHubTry LangSmithTry LangSmithSearch...NavigationAdvanced usageContext engineering in agentsLangChainLangGraphIntegrationsLearnReferenceContributeTypeScriptOverviewLangChain v1.0Release notesMigration guideGet startedInstallQuickstartPhilosophyCore componentsAgentsModelsMessagesToolsShort-term memoryStreamingMiddlewareStructured outputAdvanced usageGuardrailsRuntimeContext engineeringModel Context Protocol (MCP)Human-in-the-loopMulti-agentRetrievalLong-term memoryUse in productionStudioTestDeployAgent Chat UIObservabilityEnglishcloseOn this pageOverviewWhy do agents fail?The agent loopWhat you can controlData sourcesHow it worksModel ContextSystem PromptMessagesToolsDefining toolsSelecting toolsModelResponse FormatDefining formatsSelecting formatsTool ContextReadsWritesLife-cycle ContextExample: SummarizationBest practicesRelated resourcesAdvanced usageContext engineering in agentsCopy pageCopy page​Overview\nThe hard part of building agents (or any LLM application) is making them reliable enough. While they may work for a prototype, they often fail in real-world use cases.\n​Why do agents fail?\nWhen agents fail, it’s usually because the LLM call inside the agent took the wrong action / didn’t do what we expected. LLMs fail for one of two reasons:\n\nThe underlying LLM is not capable enough\nThe “right” context was not passed to the LLM\n\nMore often than not - it’s actually the second reason that causes agents to not be reliable.\nContext engineering is providing the right information and tools in the right format so the LLM can accomplish a task. This is the number one job of AI Engineers. This lack of “right” context is the number one blocker for more reliable agents, and LangChain’s agent abstractions are uniquely designed to facilitate context engineering.\nNew to context engineering? 
Start with the conceptual overview to understand the different types of context and when to use them.\n​The agent loop\nA typical agent loop consists of two main steps:\n\nModel call - calls the LLM with a prompt and available tools, returns either a response or a request to execute tools\nTool execution - executes the tools that the LLM requested, returns tool results\n\nThis loop continues until the LLM decides to finish.\n​What you can control\nTo build reliable agents, you need to control what happens at each step of the agent loop, as well as what happens between steps.\n\nContext TypeWhat You ControlTransient or Persistent\nModel ContextWhat goes into model calls (instructions, message history, tools, response format)TransientTool ContextWhat tools can access and produce (reads/writes to state, store, runtime context)PersistentLife-cycle ContextWhat happens between model and tool calls (summarization, guardrails, logging, etc.)Persistent\n\nTransient contextWhat the LLM sees for a single call. You can modify messages, tools, or prompts without changing what’s saved in state.Persistent contextWhat gets saved in state across turns. 
Life-cycle hooks and tool writes modify this permanently.\n​Data sources\nThroughout this process, your agent accesses (reads / writes) different sources of data:\n\nData SourceAlso Known AsScopeExamples\nRuntime ContextStatic configurationConversation-scopedUser ID, API keys, database connections, permissions, environment settingsStateShort-term memoryConversation-scopedCurrent messages, uploaded files, authentication status, tool resultsStoreLong-term memoryCross-conversationUser preferences, extracted insights, memories, historical data\n\n​How it works\nLangChain middleware is the mechanism under the hood that makes context engineering practical for developers using LangChain.\nMiddleware allows you to hook into any step in the agent lifecycle and:\n\nUpdate context\nJump to a different step in the agent lifecycle", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "title": "Context engineering in agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "lastmod": "2025-10-31T14:01:30.939Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "Update context\nJump to a different step in the agent lifecycle\n\nThroughout this guide, you’ll see frequent use of the middleware API as a means to the context engineering end.\n​Model Context\nControl what goes into each model call - instructions, available tools, which model to use, and output format. 
These decisions directly impact reliability and cost.\nSystem PromptBase instructions from the developer to the LLM.MessagesThe full list of messages (conversation history) sent to the LLM.ToolsUtilities the agent has access to to take actions.ModelThe actual model (including configuration) to be called.Response FormatSchema specification for the model’s final response.\nAll of these types of model context can draw from state (short-term memory), store (long-term memory), or runtime context (static configuration).\n​System Prompt\nThe system prompt sets the LLM’s behavior and capabilities. Different users, contexts, or conversation stages need different instructions. Successful agents draw on memories, preferences, and configuration to provide the right instructions for the current state of the conversation.\n State Store Runtime ContextAccess message count or conversation context from state:CopyAsk AIimport { createAgent } from \"langchain\";\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [...],\n middleware: [\n dynamicSystemPromptMiddleware((state) => {\n // Read from State: check conversation length\n const messageCount = state.messages.length;\n\n let base = \"You are a helpful assistant.\";\n\n if (messageCount > 10) {\n base += \"\\nThis is a long conversation - be extra concise.\";\n }\n\n return base;\n }),\n ],\n});\n\n​Messages\nMessages make up the prompt that is sent to the LLM.\nIt’s critical to manage the content of messages to ensure that the LLM has the right information to respond well.\n State Store Runtime ContextInject uploaded file context from State when relevant to current query:CopyAsk AIimport { createMiddleware } from \"langchain\";\n\nconst injectFileContext = createMiddleware({\n name: \"InjectFileContext\",\n wrapModelCall: (request, handler) => {\n // request.state is a shortcut for request.state.messages\n const uploadedFiles = request.state.uploadedFiles || []; \n\n if (uploadedFiles.length > 0) {\n // Build context 
about available files\n const fileDescriptions = uploadedFiles.map(file =>\n `- ${file.name} (${file.type}): ${file.summary}`\n );\n\n const fileContext = `Files you have access to in this conversation:\n${fileDescriptions.join(\"\\n\")}\n\nReference these files when answering questions.`;\n\n // Inject file context before recent messages\n const messages = [ \n ...request.messages // Rest of conversation\n { role: \"user\", content: fileContext }\n ];\n request = request.override({ messages }); \n }\n\n return handler(request);\n },\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [...],\n middleware: [injectFileContext],\n});\n\nTransient vs Persistent Message Updates:The examples above use wrap_model_call to make transient updates - modifying what messages are sent to the model for a single call without changing what’s saved in state.For persistent updates that modify state (like the summarization example in Life-cycle Context), use life-cycle hooks like before_model or after_model to permanently update the conversation history. See the middleware documentation for more details.\n​Tools\nTools let the model interact with databases, APIs, and external systems. How you define and select tools directly impacts whether the model can complete tasks effectively.\n​Defining tools\nEach tool needs a clear name, description, argument names, and argument descriptions. 
These aren’t just metadata—they guide the model’s reasoning about when and how to use the tool.\nCopyAsk AIimport { tool } from \"@langchain/core/tools\";\nimport { z } from \"zod\";", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "title": "Context engineering in agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "lastmod": "2025-10-31T14:01:30.939Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "const searchOrders = tool(\n async ({ userId, status, limit = 10 }) => {\n // Implementation here\n },\n {\n name: \"search_orders\",\n description: `Search for user orders by status.\n\n Use this when the user asks about order history or wants to check\n order status. Always filter by the provided status.`,\n schema: z.object({\n userId: z.string().describe(\"Unique identifier for the user\"),\n status: z.enum([\"pending\", \"shipped\", \"delivered\"]).describe(\"Order status to filter by\"),\n limit: z.number().default(10).describe(\"Maximum number of results to return\"),\n }),\n }\n);\n\n​Selecting tools\nNot every tool is appropriate for every situation. Too many tools may overwhelm the model (overload context) and increase errors; too few limit capabilities. 
Dynamic tool selection adapts the available toolset based on authentication state, user permissions, feature flags, or conversation stage.\n State Store Runtime ContextEnable advanced tools only after certain conversation milestones:CopyAsk AIimport { createMiddleware } from \"langchain\";\n\nconst stateBasedTools = createMiddleware({\n name: \"StateBasedTools\",\n wrapModelCall: (request, handler) => {\n // Read from State: check authentication and conversation length\n const state = request.state; \n const isAuthenticated = state.authenticated || false; \n const messageCount = state.messages.length;\n\n let filteredTools = request.tools;\n\n // Only enable sensitive tools after authentication\n if (!isAuthenticated) {\n filteredTools = request.tools.filter(t => t.name.startsWith(\"public_\")); \n } else if (messageCount < 5) {\n filteredTools = request.tools.filter(t => t.name !== \"advanced_search\"); \n }\n\n return handler({ ...request, tools: filteredTools }); \n },\n});\n\nSee Dynamically selecting tools for more examples.\n​Model\nDifferent models have different strengths, costs, and context windows. 
Select the right model for the task at hand, which\nmight change during an agent run.\n State Store Runtime ContextUse different models based on conversation length from State:CopyAsk AIimport { createMiddleware, initChatModel } from \"langchain\";\n\n// Initialize models once outside the middleware\nconst largeModel = initChatModel(\"claude-sonnet-4-5-20250929\");\nconst standardModel = initChatModel(\"gpt-4o\");\nconst efficientModel = initChatModel(\"gpt-4o-mini\");\n\nconst stateBasedModel = createMiddleware({\n name: \"StateBasedModel\",\n wrapModelCall: (request, handler) => {\n // request.messages is a shortcut for request.state.messages\n const messageCount = request.messages.length; \n let model;\n\n if (messageCount > 20) {\n model = largeModel;\n } else if (messageCount > 10) {\n model = standardModel;\n } else {\n model = efficientModel;\n }\n\n return handler({ ...request, model }); \n },\n});\n\nSee Dynamic model for more examples.\n​Response Format\nStructured output transforms unstructured text into validated, structured data. When extracting specific fields or returning data for downstream systems, free-form text isn’t sufficient.\nHow it works: When you provide a schema as the response format, the model’s final response is guaranteed to conform to that schema. The agent runs the model / tool calling loop until the model is done calling tools, then the final response is coerced into the provided format.\n​Defining formats\nSchema definitions guide the model. 
Field names, types, and descriptions specify exactly what format the output should adhere to.\nCopyAsk AIimport { z } from \"zod\";", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "title": "Context engineering in agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "lastmod": "2025-10-31T14:01:30.939Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "const customerSupportTicket = z.object({\n category: z.enum([\"billing\", \"technical\", \"account\", \"product\"]).describe(\n \"Issue category\"\n ),\n priority: z.enum([\"low\", \"medium\", \"high\", \"critical\"]).describe(\n \"Urgency level\"\n ),\n summary: z.string().describe(\n \"One-sentence summary of the customer's issue\"\n ),\n customerSentiment: z.enum([\"frustrated\", \"neutral\", \"satisfied\"]).describe(\n \"Customer's emotional tone\"\n ),\n}).describe(\"Structured ticket information extracted from customer message\");\n\n​Selecting formats\nDynamic response format selection adapts schemas based on user preferences, conversation stage, or role—returning simple formats early and detailed formats as complexity increases.\n State Store Runtime ContextConfigure structured output based on conversation state:CopyAsk AIimport { createMiddleware } from \"langchain\";\nimport { z } from \"zod\";\n\nconst simpleResponse = z.object({\n answer: z.string().describe(\"A brief answer\"),\n});\n\nconst detailedResponse = z.object({\n answer: z.string().describe(\"A detailed answer\"),\n reasoning: z.string().describe(\"Explanation of reasoning\"),\n confidence: z.number().describe(\"Confidence score 0-1\"),\n});\n\nconst stateBasedOutput = createMiddleware({\n name: \"StateBasedOutput\",\n 
wrapModelCall: (request, handler) => {\n // request.state is a shortcut for request.state.messages\n const messageCount = request.messages.length; \n\n if (messageCount < 3) {\n // Early conversation - use simple format\n responseFormat = simpleResponse; \n } else {\n // Established conversation - use detailed format\n responseFormat = detailedResponse; \n }\n\n return handler({ ...request, responseFormat });\n },\n});\n\n​Tool Context\nTools are special in that they both read and write context.\nIn the most basic case, when a tool executes, it receives the LLM’s request parameters and returns a tool message back. The tool does its work and produces a result.\nTools can also fetch important information for the model that allows it to perform and complete tasks.\n​Reads\nMost real-world tools need more than just the LLM’s parameters. They need user IDs for database queries, API keys for external services, or current session state to make decisions. Tools read from state, store, and runtime context to access this information.\n State Store Runtime ContextRead from State to check current session information:CopyAsk AIimport * as z from \"zod\";\nimport { tool } from \"@langchain/core/tools\";\nimport { createAgent } from \"langchain\";\n\nconst checkAuthentication = tool(\n async (_, { runtime }) => {\n // Read from State: check current auth status\n const currentState = runtime.state;\n const isAuthenticated = currentState.authenticated || false;\n\n if (isAuthenticated) {\n return \"User is authenticated\";\n } else {\n return \"User is not authenticated\";\n }\n },\n {\n name: \"check_authentication\",\n description: \"Check if user is authenticated\",\n schema: z.object({}),\n }\n);\n\n​Writes\nTool results can be used to help an agent complete a given task. 
Tools can both return results directly to the model\nand update the memory of the agent to make important context available to future steps.\n State StoreWrite to State to track session-specific information using Command:CopyAsk AIimport * as z from \"zod\";\nimport { tool } from \"@langchain/core/tools\";\nimport { createAgent } from \"langchain\";\nimport { Command } from \"@langchain/langgraph\";\n\nconst authenticateUser = tool(\n async ({ password }, { runtime }) => {\n // Perform authentication\n if (password === \"correct\") {\n // Write to State: mark as authenticated using Command\n return new Command({\n update: { authenticated: true },\n });\n } else {\n return new Command({ update: { authenticated: false } });\n }\n },\n {\n name: \"authenticate_user\",\n description: \"Authenticate user and update State\",\n schema: z.object({\n password: z.string(),\n }),\n }\n);", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "title": "Context engineering in agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "lastmod": "2025-10-31T14:01:30.939Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "See Tools for comprehensive examples of accessing state, store, and runtime context in tools.\n​Life-cycle Context\nControl what happens between the core agent steps - intercepting data flow to implement cross-cutting concerns like summarization, guardrails, and logging.\nAs you’ve seen in Model Context and Tool Context, middleware is the mechanism that makes context engineering practical. 
Middleware allows you to hook into any step in the agent lifecycle and either:\n\nUpdate context - Modify state and store to persist changes, update conversation history, or save insights\nJump in the lifecycle - Move to different steps in the agent cycle based on context (e.g., skip tool execution if a condition is met, repeat model call with modified context)\n\n​Example: Summarization\nOne of the most common life-cycle patterns is automatically condensing conversation history when it gets too long. Unlike the transient message trimming shown in Model Context, summarization persistently updates state - permanently replacing old messages with a summary that’s saved for all future turns.\nLangChain offers built-in middleware for this:\nCopyAsk AIimport { createAgent, summarizationMiddleware } from \"langchain\";\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [...],\n middleware: [\n summarizationMiddleware({\n model: \"gpt-4o-mini\",\n maxTokensBeforeSummary: 4000, // Trigger summarization at 4000 tokens\n messagesToKeep: 20, // Keep last 20 messages after summary\n }),\n ],\n});\n\nWhen the conversation exceeds the token limit, SummarizationMiddleware automatically:\n\nSummarizes older messages using a separate LLM call\nReplaces them with a summary message in State (permanently)\nKeeps recent messages intact for context\n\nThe summarized conversation history is permanently updated - future turns will see the summary instead of the original messages.\nFor a complete list of built-in middleware, available hooks, and how to create custom middleware, see the Middleware documentation.\n​Best practices\n\nStart simple - Begin with static prompts and tools, add dynamics only when needed\nTest incrementally - Add one context engineering feature at a time\nMonitor performance - Track model calls, token usage, and latency\nUse built-in middleware - Leverage SummarizationMiddleware, LLMToolSelectorMiddleware, etc.\nDocument your context strategy - Make it 
clear what context is being passed and why\nUnderstand transient vs persistent: Model context changes are transient (per-call), while life-cycle context changes persist to state\n\n​Related resources\n\nContext conceptual overview - Understand context types and when to use them\nMiddleware - Complete middleware guide\nTools - Tool creation and context access\nMemory - Short-term and long-term memory patterns\nAgents - Core agent concepts\n\nEdit the source of this page on GitHub.\nConnect these docs programmatically to Claude, VSCode, and more via MCP for real-time answers.Was this page helpful?YesNoRuntimePreviousModel Context Protocol (MCP)NextDocs by LangChain home pagegithubxlinkedinyoutubeResourcesForumChangelogLangChain AcademyTrust CenterCompanyAboutCareersBloggithubxlinkedinyoutubePowered by Mintlify", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "title": "Context engineering in agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "lastmod": "2025-10-31T14:01:30.939Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "Model Context Protocol (MCP) - Docs by LangChainSkip to main contentWe've raised a $125M Series B to build the platform for agent engineering. 
Read more.Docs by LangChain home pageLangChain + LangGraphSearch...⌘KAsk AIGitHubTry LangSmithTry LangSmithSearch...NavigationAdvanced usageModel Context Protocol (MCP)LangChainLangGraphIntegrationsLearnReferenceContributeTypeScriptOverviewLangChain v1.0Release notesMigration guideGet startedInstallQuickstartPhilosophyCore componentsAgentsModelsMessagesToolsShort-term memoryStreamingMiddlewareStructured outputAdvanced usageGuardrailsRuntimeContext engineeringModel Context Protocol (MCP)Human-in-the-loopMulti-agentRetrievalLong-term memoryUse in productionStudioTestDeployAgent Chat UIObservabilityEnglishcloseOn this pageInstallTransport typesUse MCP toolsCustom MCP serversStateful tool usageAdditional resourcesAdvanced usageModel Context Protocol (MCP)Copy pageCopy pageModel Context Protocol (MCP) is an open protocol that standardizes how applications provide tools and context to LLMs. LangChain agents can use tools defined on MCP servers using the langchain-mcp-adapters library.\n​Install\nInstall the @langchain/mcp-adapters library to use MCP tools in LangGraph:\nnpmpnpmyarnbunCopyAsk AInpm install @langchain/mcp-adapters\n\n​Transport types\nMCP supports different transport mechanisms for client-server communication:\n\nstdio – Client launches server as a subprocess and communicates via standard input/output. Best for local tools and simple setups.\nStreamable HTTP – Server runs as an independent process handling HTTP requests. 
Supports remote connections and multiple clients.\nServer-Sent Events (SSE) – a variant of streamable HTTP optimized for real-time streaming communication.\n\n​Use MCP tools\n@langchain/mcp-adapters enables agents to use tools defined across one or more MCP server.\nAccessing multiple MCP serversCopyAsk AIimport { MultiServerMCPClient } from \"@langchain/mcp-adapters\"; \nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { createAgent } from \"langchain\";\n\nconst client = new MultiServerMCPClient({ \n math: {\n transport: \"stdio\", // Local subprocess communication\n command: \"node\",\n // Replace with absolute path to your math_server.js file\n args: [\"/path/to/math_server.js\"],\n },\n weather: {\n transport: \"sse\", // Server-Sent Events for streaming\n // Ensure you start your weather server on port 8000\n url: \"http://localhost:8000/mcp\",\n },\n});\n\nconst tools = await client.getTools(); \nconst agent = createAgent({\n model: \"claude-sonnet-4-5-20250929\",\n tools, \n});\n\nconst mathResponse = await agent.invoke({\n messages: [{ role: \"user\", content: \"what's (3 + 5) x 12?\" }],\n});\n\nconst weatherResponse = await agent.invoke({\n messages: [{ role: \"user\", content: \"what is the weather in nyc?\" }],\n});\n\nMultiServerMCPClient is stateless by default. Each tool invocation creates a fresh MCP ClientSession, executes the tool, and then cleans up.\n​Custom MCP servers\nTo create your own MCP servers, you can use the @modelcontextprotocol/sdk library. 
This library provides a simple way to define tools and run them as servers.\nnpmpnpmyarnbunCopyAsk AInpm install @modelcontextprotocol/sdk\n\nUse the following reference implementations to test your agent with MCP tool servers.\nMath server (stdio transport)CopyAsk AIimport { Server } from \"@modelcontextprotocol/sdk/server/index.js\";\nimport { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\nimport {\n CallToolRequestSchema,\n ListToolsRequestSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\n\nconst server = new Server(\n {\n name: \"math-server\",\n version: \"0.1.0\",\n },\n {\n capabilities: {\n tools: {},\n },\n }\n);", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "title": "Model Context Protocol (MCP) - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "lastmod": "2025-10-31T14:01:30.957Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "const server = new Server(\n {\n name: \"math-server\",\n version: \"0.1.0\",\n },\n {\n capabilities: {\n tools: {},\n },\n }\n);\n\nserver.setRequestHandler(ListToolsRequestSchema, async () => {\n return {\n tools: [\n {\n name: \"add\",\n description: \"Add two numbers\",\n inputSchema: {\n type: \"object\",\n properties: {\n a: {\n type: \"number\",\n description: \"First number\",\n },\n b: {\n type: \"number\",\n description: \"Second number\",\n },\n },\n required: [\"a\", \"b\"],\n },\n },\n {\n name: \"multiply\",\n description: \"Multiply two numbers\",\n inputSchema: {\n type: \"object\",\n properties: {\n a: {\n type: \"number\",\n description: \"First number\",\n },\n b: {\n type: \"number\",\n description: \"Second number\",\n },\n },\n required: [\"a\", \"b\"],\n },\n },\n ],\n 
};\n});\n\nserver.setRequestHandler(CallToolRequestSchema, async (request) => {\n switch (request.params.name) {\n case \"add\": {\n const { a, b } = request.params.arguments as { a: number; b: number };\n return {\n content: [\n {\n type: \"text\",\n text: String(a + b),\n },\n ],\n };\n }\n case \"multiply\": {\n const { a, b } = request.params.arguments as { a: number; b: number };\n return {\n content: [\n {\n type: \"text\",\n text: String(a * b),\n },\n ],\n };\n }\n default:\n throw new Error(`Unknown tool: ${request.params.name}`);\n }\n});\n\nasync function main() {\n const transport = new StdioServerTransport();\n await server.connect(transport);\n console.error(\"Math MCP server running on stdio\");\n}\n\nmain();\n\nWeather server (SSE transport)CopyAsk AIimport { Server } from \"@modelcontextprotocol/sdk/server/index.js\";\nimport { SSEServerTransport } from \"@modelcontextprotocol/sdk/server/sse.js\";\nimport {\n CallToolRequestSchema,\n ListToolsRequestSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\nimport express from \"express\";\n\nconst app = express();\napp.use(express.json());\n\nconst server = new Server(\n {\n name: \"weather-server\",\n version: \"0.1.0\",\n },\n {\n capabilities: {\n tools: {},\n },\n }\n);\n\nserver.setRequestHandler(ListToolsRequestSchema, async () => {\n return {\n tools: [\n {\n name: \"get_weather\",\n description: \"Get weather for location\",\n inputSchema: {\n type: \"object\",\n properties: {\n location: {\n type: \"string\",\n description: \"Location to get weather for\",\n },\n },\n required: [\"location\"],\n },\n },\n ],\n };\n});\n\nserver.setRequestHandler(CallToolRequestSchema, async (request) => {\n switch (request.params.name) {\n case \"get_weather\": {\n const { location } = request.params.arguments as { location: string };\n return {\n content: [\n {\n type: \"text\",\n text: `It's always sunny in ${location}`,\n },\n ],\n };\n }\n default:\n throw new Error(`Unknown tool: 
${request.params.name}`);\n }\n});\n\napp.post(\"/mcp\", async (req, res) => {\n const transport = new SSEServerTransport(\"/mcp\", res);\n await server.connect(transport);\n});\n\nconst PORT = process.env.PORT || 8000;\napp.listen(PORT, () => {\n console.log(`Weather MCP server running on port ${PORT}`);\n});", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "title": "Model Context Protocol (MCP) - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "lastmod": "2025-10-31T14:01:30.957Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - }, - { - "page_content": "const PORT = process.env.PORT || 8000;\napp.listen(PORT, () => {\n console.log(`Weather MCP server running on port ${PORT}`);\n});\n\n​Stateful tool usage\nFor stateful servers that maintain context between tool calls, use client.session() to create a persistent ClientSession.\nUsing MCP ClientSession for stateful tool usageCopyAsk AIimport { loadMCPTools } from \"@langchain/mcp-adapters/tools.js\";\n\nconst client = new MultiServerMCPClient({...});\nconst session = await client.session(\"math\");\nconst tools = await loadMCPTools(session);\n\n​Additional resources\n\nMCP documentation\n\nMCP Transport documentation\n\nlangchain-mcp-adapters\n\nEdit the source of this page on GitHub.\nConnect these docs programmatically to Claude, VSCode, and more via MCP for real-time answers.Was this page helpful?YesNoContext engineering in agentsPreviousHuman-in-the-loopNextDocs by LangChain home pagegithubxlinkedinyoutubeResourcesForumChangelogLangChain AcademyTrust CenterCompanyAboutCareersBloggithubxlinkedinyoutubePowered by Mintlify", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "title": "Model Context Protocol (MCP) - Docs 
by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "lastmod": "2025-10-31T14:01:30.957Z" - }, - "type": "Document", - "id": null, - "lc_attributes": {}, - "lc_secrets": {}, - "model_computed_fields": {}, - "model_config": { - "extra": "ignore" - }, - "model_extra": null - } -] \ No newline at end of file diff --git a/dataset/raw_docs.json b/dataset/raw_docs.json deleted file mode 100644 index a4ac34d5e..000000000 --- a/dataset/raw_docs.json +++ /dev/null @@ -1,38 +0,0 @@ -[ - { - "page_content": "Agents - Docs by LangChainSkip to main contentWe've raised a $125M Series B to build the platform for agent engineering. Read more.Docs by LangChain home pageLangChain + LangGraphSearch...⌘KAsk AIGitHubTry LangSmithTry LangSmithSearch...NavigationCore componentsAgentsLangChainLangGraphIntegrationsLearnReferenceContributeTypeScriptOverviewLangChain v1.0Release notesMigration guideGet startedInstallQuickstartPhilosophyCore componentsAgentsModelsMessagesToolsShort-term memoryStreamingMiddlewareStructured outputAdvanced usageGuardrailsRuntimeContext engineeringModel Context Protocol (MCP)Human-in-the-loopMulti-agentRetrievalLong-term memoryUse in productionStudioTestDeployAgent Chat UIObservabilityEnglishcloseOn this pageCore componentsModelStatic modelDynamic modelToolsDefining toolsTool error handlingTool use in the ReAct loopSystem promptDynamic system promptInvocationAdvanced conceptsStructured outputMemoryStreamingMiddlewareCore componentsAgentsCopy pageCopy pageAgents combine language models with tools to create systems that can reason about tasks, decide which tools to use, and iteratively work towards solutions.\ncreateAgent() provides a production-ready agent implementation.\nAn LLM Agent runs tools in a loop to achieve a goal.\nAn agent runs until a stop condition is met - i.e., when the model emits a final output or an iteration limit is reached.\n\ncreateAgent() builds a graph-based agent 
runtime using LangGraph. A graph consists of nodes (steps) and edges (connections) that define how your agent processes information. The agent moves through this graph, executing nodes like the model node (which calls the model), the tools node (which executes tools), or middleware.Learn more about the Graph API.\n​Core components\n​Model\nThe model is the reasoning engine of your agent. It can be specified in multiple ways, supporting both static and dynamic model selection.\n​Static model\nStatic models are configured once when creating the agent and remain unchanged throughout execution. This is the most common and straightforward approach.\nTo initialize a static model from a model identifier string:\nCopyAsk AIimport { createAgent } from \"langchain\";\n\nconst agent = createAgent({\n model: \"gpt-5\",\n tools: []\n});\n\nModel identifier strings use the format provider:model (e.g. \"openai:gpt-5\"). You may want more control over the model configuration, in which case you can initialize a model instance directly using the provider package:\nCopyAsk AIimport { createAgent } from \"langchain\";\nimport { ChatOpenAI } from \"@langchain/openai\";\n\nconst model = new ChatOpenAI({\n model: \"gpt-4o\",\n temperature: 0.1,\n maxTokens: 1000,\n timeout: 30\n});\n\nconst agent = createAgent({\n model,\n tools: []\n});\n\nModel instances give you complete control over configuration. Use them when you need to set specific parameters like temperature, max_tokens, timeouts, or configure API keys, base_url, and other provider-specific settings. Refer to the API reference to see available params and methods on your model.\n​Dynamic model\nDynamic models are selected at runtime based on the current state and context. 
This enables sophisticated routing logic and cost optimization.\nTo use a dynamic model, create middleware with wrapModelCall that modifies the model in the request:\nCopyAsk AIimport { ChatOpenAI } from \"@langchain/openai\";\nimport { createAgent, createMiddleware } from \"langchain\";\n\nconst basicModel = new ChatOpenAI({ model: \"gpt-4o-mini\" });\nconst advancedModel = new ChatOpenAI({ model: \"gpt-4o\" });\n\nconst dynamicModelSelection = createMiddleware({\n name: \"DynamicModelSelection\",\n wrapModelCall: (request, handler) => {\n // Choose model based on conversation complexity\n const messageCount = request.messages.length;\n\n return handler({\n ...request,\n model: messageCount > 10 ? advancedModel : basicModel,\n });\n },\n});\n\nconst agent = createAgent({\n model: \"gpt-4o-mini\", // Base model (used when messageCount ≤ 10)\n tools,\n middleware: [dynamicModelSelection] as const,\n});\n\nFor more details on middleware and advanced patterns, see the middleware documentation.\nFor model configuration details, see Models. For dynamic model selection patterns, see Dynamic model in middleware.\n​Tools\nTools give agents the ability to take actions. 
Agents go beyond simple model-only tool binding by facilitating:\n\nMultiple tool calls in sequence (triggered by a single prompt)\nParallel tool calls when appropriate\nDynamic tool selection based on previous results\nTool retry logic and error handling\nState persistence across tool calls\n\nFor more information, see Tools.\n​Defining tools\nPass a list of tools to the agent.\nCopyAsk AIimport * as z from \"zod\";\nimport { createAgent, tool } from \"langchain\";\n\nconst search = tool(\n ({ query }) => `Results for: ${query}`,\n {\n name: \"search\",\n description: \"Search for information\",\n schema: z.object({\n query: z.string().describe(\"The query to search for\"),\n }),\n }\n);\n\nconst getWeather = tool(\n ({ location }) => `Weather in ${location}: Sunny, 72°F`,\n {\n name: \"get_weather\",\n description: \"Get weather information for a location\",\n schema: z.object({\n location: z.string().describe(\"The location to get weather for\"),\n }),\n }\n);\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [search, getWeather],\n});\n\nIf an empty tool list is provided, the agent will consist of a single LLM node without tool-calling capabilities.\n​Tool error handling\nTo customize how tool errors are handled, use the wrapToolCall hook in a custom middleware:\nCopyAsk AIimport { createAgent, createMiddleware, ToolMessage } from \"langchain\";\n\nconst handleToolErrors = createMiddleware({\n name: \"HandleToolErrors\",\n wrapToolCall: (request, handler) => {\n try {\n return handler(request);\n } catch (error) {\n // Return a custom error message to the model\n return new ToolMessage({\n content: `Tool error: Please check your input and try again. (${error})`,\n tool_call_id: request.toolCall.id!,\n });\n }\n },\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [\n /* ... 
*/\n ],\n middleware: [handleToolErrors] as const,\n});\n\nThe agent will return a @[ToolMessage] with the custom error message when a tool fails.\n​Tool use in the ReAct loop\nAgents follow the ReAct (“Reasoning + Acting”) pattern, alternating between brief reasoning steps with targeted tool calls and feeding the resulting observations into subsequent decisions until they can deliver a final answer.\nExample of ReAct loopPrompt: Identify the current most popular wireless headphones and verify availability.CopyAsk AI================================ Human Message =================================\n\nFind the most popular wireless headphones right now and check if they're in stock\n\nReasoning: “Popularity is time-sensitive, I need to use the provided search tool.”\nActing: Call search_products(\"wireless headphones\")\nCopyAsk AI================================== Ai Message ==================================\nTool Calls:\n search_products (call_abc123)\n Call ID: call_abc123\n Args:\n query: wireless headphones\nCopyAsk AI================================= Tool Message =================================\n\nFound 5 products matching \"wireless headphones\". Top 5 results: WH-1000XM5, ...\n\nReasoning: “I need to confirm availability for the top-ranked item before answering.”\nActing: Call check_inventory(\"WH-1000XM5\")\nCopyAsk AI================================== Ai Message ==================================\nTool Calls:\n check_inventory (call_def456)\n Call ID: call_def456\n Args:\n product_id: WH-1000XM5\nCopyAsk AI================================= Tool Message =================================\n\nProduct WH-1000XM5: 10 units in stock\n\nReasoning: “I have the most popular model and its stock status. 
I can now answer the user’s question.”\nActing: Produce final answer\nCopyAsk AI================================== Ai Message ==================================\n\nI found wireless headphones (model WH-1000XM5) with 10 units in stock...\n\nTo learn more about tools, see Tools.\n​System prompt\nYou can shape how your agent approaches tasks by providing a prompt. The systemPrompt parameter can be provided as a string:\nCopyAsk AIconst agent = createAgent({\n model,\n tools,\n systemPrompt: \"You are a helpful assistant. Be concise and accurate.\",\n});\n\nWhen no @[system_prompt] is provided, the agent will infer its task from the messages directly.\n​Dynamic system prompt\nFor more advanced use cases where you need to modify the system prompt based on runtime context or agent state, you can use middleware.\nCopyAsk AIimport * as z from \"zod\";\nimport { createAgent, dynamicSystemPromptMiddleware } from \"langchain\";\n\nconst contextSchema = z.object({\n userRole: z.enum([\"expert\", \"beginner\"]),\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [/* ... */],\n contextSchema,\n middleware: [\n dynamicSystemPromptMiddleware<z.infer<typeof contextSchema>>((state, runtime) => {\n const userRole = runtime.context.userRole || \"user\";\n const basePrompt = \"You are a helpful assistant.\";\n\n if (userRole === \"expert\") {\n return `${basePrompt} Provide detailed technical responses.`;\n } else if (userRole === \"beginner\") {\n return `${basePrompt} Explain concepts simply and avoid jargon.`;\n }\n return basePrompt;\n }),\n ],\n});\n\n// The system prompt will be set dynamically based on context\nconst result = await agent.invoke(\n { messages: [{ role: \"user\", content: \"Explain machine learning\" }] },\n { context: { userRole: \"expert\" } }\n);\n\nFor more details on message types and formatting, see Messages. For comprehensive middleware documentation, see Middleware.\n​Invocation\nYou can invoke an agent by passing an update to its State. 
All agents include a sequence of messages in their state; to invoke the agent, pass a new message:\nCopyAsk AIawait agent.invoke({\n messages: [{ role: \"user\", content: \"What's the weather in San Francisco?\" }],\n})\n\nFor streaming steps and / or tokens from the agent, refer to the streaming guide.\nOtherwise, the agent follows the LangGraph Graph API and supports all associated methods.\n​Advanced concepts\n​Structured output\nIn some situations, you may want the agent to return an output in a specific format. LangChain provides a simple, universal way to do this with the responseFormat parameter.\nCopyAsk AIimport * as z from \"zod\";\nimport { createAgent } from \"langchain\";\n\nconst ContactInfo = z.object({\n name: z.string(),\n email: z.string(),\n phone: z.string(),\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n responseFormat: ContactInfo,\n});\n\nconst result = await agent.invoke({\n messages: [\n {\n role: \"user\",\n content: \"Extract contact info from: John Doe, john@example.com, (555) 123-4567\",\n },\n ],\n});\n\nconsole.log(result.structuredResponse);\n// {\n// name: 'John Doe',\n// email: 'john@example.com',\n// phone: '(555) 123-4567'\n// }\n\nTo learn about structured output, see Structured output.\n​Memory\nAgents maintain conversation history automatically through the message state. 
You can also configure the agent to use a custom state schema to remember additional information during the conversation.\nInformation stored in the state can be thought of as the short-term memory of the agent:\nCopyAsk AIimport * as z from \"zod\";\nimport { MessagesZodState } from \"@langchain/langgraph\";\nimport { createAgent, type BaseMessage } from \"langchain\";\n\nconst customAgentState = z.object({\n messages: MessagesZodState.shape.messages,\n userPreferences: z.record(z.string(), z.string()),\n});\n\nconst CustomAgentState = createAgent({\n model: \"gpt-4o\",\n tools: [],\n stateSchema: customAgentState,\n});\n\nTo learn more about memory, see Memory. For information on implementing long-term memory that persists across sessions, see Long-term memory.\n​Streaming\nWe’ve seen how the agent can be called with invoke to get a final response. If the agent executes multiple steps, this may take a while. To show intermediate progress, we can stream back messages as they occur.\nCopyAsk AIconst stream = await agent.stream(\n {\n messages: [{\n role: \"user\",\n content: \"Search for AI news and summarize the findings\"\n }],\n },\n { streamMode: \"values\" }\n);\n\nfor await (const chunk of stream) {\n // Each chunk contains the full state at that point\n const latestMessage = chunk.messages.at(-1);\n if (latestMessage?.content) {\n console.log(`Agent: ${latestMessage.content}`);\n } else if (latestMessage?.tool_calls) {\n const toolCallNames = latestMessage.tool_calls.map((tc) => tc.name);\n console.log(`Calling tools: ${toolCallNames.join(\", \")}`);\n }\n}\n\nFor more details on streaming, see Streaming.\n​Middleware\nMiddleware provides powerful extensibility for customizing agent behavior at different stages of execution. 
You can use middleware to:\n\nProcess state before the model is called (e.g., message trimming, context injection)\nModify or validate the model’s response (e.g., guardrails, content filtering)\nHandle tool execution errors with custom logic\nImplement dynamic model selection based on state or context\nAdd custom logging, monitoring, or analytics\n\nMiddleware integrates seamlessly into the agent’s execution graph, allowing you to intercept and modify data flow at key points without changing the core agent logic.\nFor comprehensive middleware documentation including hooks like beforeModel, afterModel, and wrapToolCall, see Middleware.\n\nEdit the source of this page on GitHub.\nConnect these docs programmatically to Claude, VSCode, and more via MCP for real-time answers.Was this page helpful?YesNoPhilosophyPreviousModelsNextDocs by LangChain home pagegithubxlinkedinyoutubeResourcesForumChangelogLangChain AcademyTrust CenterCompanyAboutCareersBloggithubxlinkedinyoutubePowered by Mintlify", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/agents", - "title": "Agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/agents", - "lastmod": "2025-11-03T18:48:17.363Z" - }, - "type": "Document" - }, - { - "page_content": "Context engineering in agents - Docs by LangChainSkip to main contentWe've raised a $125M Series B to build the platform for agent engineering. 
Read more.Docs by LangChain home pageLangChain + LangGraphSearch...⌘KAsk AIGitHubTry LangSmithTry LangSmithSearch...NavigationAdvanced usageContext engineering in agentsLangChainLangGraphIntegrationsLearnReferenceContributeTypeScriptOverviewLangChain v1.0Release notesMigration guideGet startedInstallQuickstartPhilosophyCore componentsAgentsModelsMessagesToolsShort-term memoryStreamingMiddlewareStructured outputAdvanced usageGuardrailsRuntimeContext engineeringModel Context Protocol (MCP)Human-in-the-loopMulti-agentRetrievalLong-term memoryUse in productionStudioTestDeployAgent Chat UIObservabilityEnglishcloseOn this pageOverviewWhy do agents fail?The agent loopWhat you can controlData sourcesHow it worksModel ContextSystem PromptMessagesToolsDefining toolsSelecting toolsModelResponse FormatDefining formatsSelecting formatsTool ContextReadsWritesLife-cycle ContextExample: SummarizationBest practicesRelated resourcesAdvanced usageContext engineering in agentsCopy pageCopy page​Overview\nThe hard part of building agents (or any LLM application) is making them reliable enough. While they may work for a prototype, they often fail in real-world use cases.\n​Why do agents fail?\nWhen agents fail, it’s usually because the LLM call inside the agent took the wrong action / didn’t do what we expected. LLMs fail for one of two reasons:\n\nThe underlying LLM is not capable enough\nThe “right” context was not passed to the LLM\n\nMore often than not - it’s actually the second reason that causes agents to not be reliable.\nContext engineering is providing the right information and tools in the right format so the LLM can accomplish a task. This is the number one job of AI Engineers. This lack of “right” context is the number one blocker for more reliable agents, and LangChain’s agent abstractions are uniquely designed to facilitate context engineering.\nNew to context engineering? 
Start with the conceptual overview to understand the different types of context and when to use them.\n​The agent loop\nA typical agent loop consists of two main steps:\n\nModel call - calls the LLM with a prompt and available tools, returns either a response or a request to execute tools\nTool execution - executes the tools that the LLM requested, returns tool results\n\nThis loop continues until the LLM decides to finish.\n​What you can control\nTo build reliable agents, you need to control what happens at each step of the agent loop, as well as what happens between steps.\n\nContext TypeWhat You ControlTransient or Persistent\nModel ContextWhat goes into model calls (instructions, message history, tools, response format)TransientTool ContextWhat tools can access and produce (reads/writes to state, store, runtime context)PersistentLife-cycle ContextWhat happens between model and tool calls (summarization, guardrails, logging, etc.)Persistent\n\nTransient contextWhat the LLM sees for a single call. You can modify messages, tools, or prompts without changing what’s saved in state.Persistent contextWhat gets saved in state across turns. 
Life-cycle hooks and tool writes modify this permanently.\n​Data sources\nThroughout this process, your agent accesses (reads / writes) different sources of data:\n\nData SourceAlso Known AsScopeExamples\nRuntime ContextStatic configurationConversation-scopedUser ID, API keys, database connections, permissions, environment settingsStateShort-term memoryConversation-scopedCurrent messages, uploaded files, authentication status, tool resultsStoreLong-term memoryCross-conversationUser preferences, extracted insights, memories, historical data\n\n​How it works\nLangChain middleware is the mechanism under the hood that makes context engineering practical for developers using LangChain.\nMiddleware allows you to hook into any step in the agent lifecycle and:\n\nUpdate context\nJump to a different step in the agent lifecycle\n\nThroughout this guide, you’ll see frequent use of the middleware API as a means to the context engineering end.\n​Model Context\nControl what goes into each model call - instructions, available tools, which model to use, and output format. These decisions directly impact reliability and cost.\nSystem PromptBase instructions from the developer to the LLM.MessagesThe full list of messages (conversation history) sent to the LLM.ToolsUtilities the agent has access to to take actions.ModelThe actual model (including configuration) to be called.Response FormatSchema specification for the model’s final response.\nAll of these types of model context can draw from state (short-term memory), store (long-term memory), or runtime context (static configuration).\n​System Prompt\nThe system prompt sets the LLM’s behavior and capabilities. Different users, contexts, or conversation stages need different instructions. 
Successful agents draw on memories, preferences, and configuration to provide the right instructions for the current state of the conversation.\n State Store Runtime ContextAccess message count or conversation context from state:CopyAsk AIimport { createAgent } from \"langchain\";\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [...],\n middleware: [\n dynamicSystemPromptMiddleware((state) => {\n // Read from State: check conversation length\n const messageCount = state.messages.length;\n\n let base = \"You are a helpful assistant.\";\n\n if (messageCount > 10) {\n base += \"\\nThis is a long conversation - be extra concise.\";\n }\n\n return base;\n }),\n ],\n});\n\n​Messages\nMessages make up the prompt that is sent to the LLM.\nIt’s critical to manage the content of messages to ensure that the LLM has the right information to respond well.\n State Store Runtime ContextInject uploaded file context from State when relevant to current query:CopyAsk AIimport { createMiddleware } from \"langchain\";\n\nconst injectFileContext = createMiddleware({\n name: \"InjectFileContext\",\n wrapModelCall: (request, handler) => {\n // request.state is a shortcut for request.state.messages\n const uploadedFiles = request.state.uploadedFiles || []; \n\n if (uploadedFiles.length > 0) {\n // Build context about available files\n const fileDescriptions = uploadedFiles.map(file =>\n `- ${file.name} (${file.type}): ${file.summary}`\n );\n\n const fileContext = `Files you have access to in this conversation:\n${fileDescriptions.join(\"\\n\")}\n\nReference these files when answering questions.`;\n\n // Inject file context before recent messages\n const messages = [ \n ...request.messages // Rest of conversation\n { role: \"user\", content: fileContext }\n ];\n request = request.override({ messages }); \n }\n\n return handler(request);\n },\n});\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [...],\n middleware: [injectFileContext],\n});\n\nTransient vs 
Persistent Message Updates:The examples above use wrap_model_call to make transient updates - modifying what messages are sent to the model for a single call without changing what’s saved in state.For persistent updates that modify state (like the summarization example in Life-cycle Context), use life-cycle hooks like before_model or after_model to permanently update the conversation history. See the middleware documentation for more details.\n​Tools\nTools let the model interact with databases, APIs, and external systems. How you define and select tools directly impacts whether the model can complete tasks effectively.\n​Defining tools\nEach tool needs a clear name, description, argument names, and argument descriptions. These aren’t just metadata—they guide the model’s reasoning about when and how to use the tool.\nCopyAsk AIimport { tool } from \"@langchain/core/tools\";\nimport { z } from \"zod\";\n\nconst searchOrders = tool(\n async ({ userId, status, limit = 10 }) => {\n // Implementation here\n },\n {\n name: \"search_orders\",\n description: `Search for user orders by status.\n\n Use this when the user asks about order history or wants to check\n order status. Always filter by the provided status.`,\n schema: z.object({\n userId: z.string().describe(\"Unique identifier for the user\"),\n status: z.enum([\"pending\", \"shipped\", \"delivered\"]).describe(\"Order status to filter by\"),\n limit: z.number().default(10).describe(\"Maximum number of results to return\"),\n }),\n }\n);\n\n​Selecting tools\nNot every tool is appropriate for every situation. Too many tools may overwhelm the model (overload context) and increase errors; too few limit capabilities. 
Dynamic tool selection adapts the available toolset based on authentication state, user permissions, feature flags, or conversation stage.\n State Store Runtime ContextEnable advanced tools only after certain conversation milestones:CopyAsk AIimport { createMiddleware } from \"langchain\";\n\nconst stateBasedTools = createMiddleware({\n name: \"StateBasedTools\",\n wrapModelCall: (request, handler) => {\n // Read from State: check authentication and conversation length\n const state = request.state; \n const isAuthenticated = state.authenticated || false; \n const messageCount = state.messages.length;\n\n let filteredTools = request.tools;\n\n // Only enable sensitive tools after authentication\n if (!isAuthenticated) {\n filteredTools = request.tools.filter(t => t.name.startsWith(\"public_\")); \n } else if (messageCount < 5) {\n filteredTools = request.tools.filter(t => t.name !== \"advanced_search\"); \n }\n\n return handler({ ...request, tools: filteredTools }); \n },\n});\n\nSee Dynamically selecting tools for more examples.\n​Model\nDifferent models have different strengths, costs, and context windows. 
Select the right model for the task at hand, which\nmight change during an agent run.\n State Store Runtime ContextUse different models based on conversation length from State:CopyAsk AIimport { createMiddleware, initChatModel } from \"langchain\";\n\n// Initialize models once outside the middleware\nconst largeModel = initChatModel(\"claude-sonnet-4-5-20250929\");\nconst standardModel = initChatModel(\"gpt-4o\");\nconst efficientModel = initChatModel(\"gpt-4o-mini\");\n\nconst stateBasedModel = createMiddleware({\n name: \"StateBasedModel\",\n wrapModelCall: (request, handler) => {\n // request.messages is a shortcut for request.state.messages\n const messageCount = request.messages.length; \n let model;\n\n if (messageCount > 20) {\n model = largeModel;\n } else if (messageCount > 10) {\n model = standardModel;\n } else {\n model = efficientModel;\n }\n\n return handler({ ...request, model }); \n },\n});\n\nSee Dynamic model for more examples.\n​Response Format\nStructured output transforms unstructured text into validated, structured data. When extracting specific fields or returning data for downstream systems, free-form text isn’t sufficient.\nHow it works: When you provide a schema as the response format, the model’s final response is guaranteed to conform to that schema. The agent runs the model / tool calling loop until the model is done calling tools, then the final response is coerced into the provided format.\n​Defining formats\nSchema definitions guide the model. 
Field names, types, and descriptions specify exactly what format the output should adhere to.\nCopyAsk AIimport { z } from \"zod\";\n\nconst customerSupportTicket = z.object({\n category: z.enum([\"billing\", \"technical\", \"account\", \"product\"]).describe(\n \"Issue category\"\n ),\n priority: z.enum([\"low\", \"medium\", \"high\", \"critical\"]).describe(\n \"Urgency level\"\n ),\n summary: z.string().describe(\n \"One-sentence summary of the customer's issue\"\n ),\n customerSentiment: z.enum([\"frustrated\", \"neutral\", \"satisfied\"]).describe(\n \"Customer's emotional tone\"\n ),\n}).describe(\"Structured ticket information extracted from customer message\");\n\n​Selecting formats\nDynamic response format selection adapts schemas based on user preferences, conversation stage, or role—returning simple formats early and detailed formats as complexity increases.\n State Store Runtime ContextConfigure structured output based on conversation state:CopyAsk AIimport { createMiddleware } from \"langchain\";\nimport { z } from \"zod\";\n\nconst simpleResponse = z.object({\n answer: z.string().describe(\"A brief answer\"),\n});\n\nconst detailedResponse = z.object({\n answer: z.string().describe(\"A detailed answer\"),\n reasoning: z.string().describe(\"Explanation of reasoning\"),\n confidence: z.number().describe(\"Confidence score 0-1\"),\n});\n\nconst stateBasedOutput = createMiddleware({\n name: \"StateBasedOutput\",\n wrapModelCall: (request, handler) => {\n // request.state is a shortcut for request.state.messages\n const messageCount = request.messages.length; \n\n if (messageCount < 3) {\n // Early conversation - use simple format\n responseFormat = simpleResponse; \n } else {\n // Established conversation - use detailed format\n responseFormat = detailedResponse; \n }\n\n return handler({ ...request, responseFormat });\n },\n});\n\n​Tool Context\nTools are special in that they both read and write context.\nIn the most basic case, when a tool executes, it 
receives the LLM’s request parameters and returns a tool message back. The tool does its work and produces a result.\nTools can also fetch important information for the model that allows it to perform and complete tasks.\n​Reads\nMost real-world tools need more than just the LLM’s parameters. They need user IDs for database queries, API keys for external services, or current session state to make decisions. Tools read from state, store, and runtime context to access this information.\n State Store Runtime ContextRead from State to check current session information:CopyAsk AIimport * as z from \"zod\";\nimport { tool } from \"@langchain/core/tools\";\nimport { createAgent } from \"langchain\";\n\nconst checkAuthentication = tool(\n async (_, { runtime }) => {\n // Read from State: check current auth status\n const currentState = runtime.state;\n const isAuthenticated = currentState.authenticated || false;\n\n if (isAuthenticated) {\n return \"User is authenticated\";\n } else {\n return \"User is not authenticated\";\n }\n },\n {\n name: \"check_authentication\",\n description: \"Check if user is authenticated\",\n schema: z.object({}),\n }\n);\n\n​Writes\nTool results can be used to help an agent complete a given task. 
Tools can both return results directly to the model\nand update the memory of the agent to make important context available to future steps.\n State StoreWrite to State to track session-specific information using Command:CopyAsk AIimport * as z from \"zod\";\nimport { tool } from \"@langchain/core/tools\";\nimport { createAgent } from \"langchain\";\nimport { Command } from \"@langchain/langgraph\";\n\nconst authenticateUser = tool(\n async ({ password }, { runtime }) => {\n // Perform authentication\n if (password === \"correct\") {\n // Write to State: mark as authenticated using Command\n return new Command({\n update: { authenticated: true },\n });\n } else {\n return new Command({ update: { authenticated: false } });\n }\n },\n {\n name: \"authenticate_user\",\n description: \"Authenticate user and update State\",\n schema: z.object({\n password: z.string(),\n }),\n }\n);\n\nSee Tools for comprehensive examples of accessing state, store, and runtime context in tools.\n​Life-cycle Context\nControl what happens between the core agent steps - intercepting data flow to implement cross-cutting concerns like summarization, guardrails, and logging.\nAs you’ve seen in Model Context and Tool Context, middleware is the mechanism that makes context engineering practical. Middleware allows you to hook into any step in the agent lifecycle and either:\n\nUpdate context - Modify state and store to persist changes, update conversation history, or save insights\nJump in the lifecycle - Move to different steps in the agent cycle based on context (e.g., skip tool execution if a condition is met, repeat model call with modified context)\n\n​Example: Summarization\nOne of the most common life-cycle patterns is automatically condensing conversation history when it gets too long. 
Unlike the transient message trimming shown in Model Context, summarization persistently updates state - permanently replacing old messages with a summary that’s saved for all future turns.\nLangChain offers built-in middleware for this:\nCopyAsk AIimport { createAgent, summarizationMiddleware } from \"langchain\";\n\nconst agent = createAgent({\n model: \"gpt-4o\",\n tools: [...],\n middleware: [\n summarizationMiddleware({\n model: \"gpt-4o-mini\",\n maxTokensBeforeSummary: 4000, // Trigger summarization at 4000 tokens\n messagesToKeep: 20, // Keep last 20 messages after summary\n }),\n ],\n});\n\nWhen the conversation exceeds the token limit, SummarizationMiddleware automatically:\n\nSummarizes older messages using a separate LLM call\nReplaces them with a summary message in State (permanently)\nKeeps recent messages intact for context\n\nThe summarized conversation history is permanently updated - future turns will see the summary instead of the original messages.\nFor a complete list of built-in middleware, available hooks, and how to create custom middleware, see the Middleware documentation.\n​Best practices\n\nStart simple - Begin with static prompts and tools, add dynamics only when needed\nTest incrementally - Add one context engineering feature at a time\nMonitor performance - Track model calls, token usage, and latency\nUse built-in middleware - Leverage SummarizationMiddleware, LLMToolSelectorMiddleware, etc.\nDocument your context strategy - Make it clear what context is being passed and why\nUnderstand transient vs persistent: Model context changes are transient (per-call), while life-cycle context changes persist to state\n\n​Related resources\n\nContext conceptual overview - Understand context types and when to use them\nMiddleware - Complete middleware guide\nTools - Tool creation and context access\nMemory - Short-term and long-term memory patterns\nAgents - Core agent concepts\n\nEdit the source of this page on GitHub.\nConnect these docs 
programmatically to Claude, VSCode, and more via MCP for real-time answers.Was this page helpful?YesNoRuntimePreviousModel Context Protocol (MCP)NextDocs by LangChain home pagegithubxlinkedinyoutubeResourcesForumChangelogLangChain AcademyTrust CenterCompanyAboutCareersBloggithubxlinkedinyoutubePowered by Mintlify", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "title": "Context engineering in agents - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/context-engineering", - "lastmod": "2025-10-31T14:01:30.939Z" - }, - "type": "Document" - }, - { - "page_content": "Model Context Protocol (MCP) - Docs by LangChainSkip to main contentWe've raised a $125M Series B to build the platform for agent engineering. Read more.Docs by LangChain home pageLangChain + LangGraphSearch...⌘KAsk AIGitHubTry LangSmithTry LangSmithSearch...NavigationAdvanced usageModel Context Protocol (MCP)LangChainLangGraphIntegrationsLearnReferenceContributeTypeScriptOverviewLangChain v1.0Release notesMigration guideGet startedInstallQuickstartPhilosophyCore componentsAgentsModelsMessagesToolsShort-term memoryStreamingMiddlewareStructured outputAdvanced usageGuardrailsRuntimeContext engineeringModel Context Protocol (MCP)Human-in-the-loopMulti-agentRetrievalLong-term memoryUse in productionStudioTestDeployAgent Chat UIObservabilityEnglishcloseOn this pageInstallTransport typesUse MCP toolsCustom MCP serversStateful tool usageAdditional resourcesAdvanced usageModel Context Protocol (MCP)Copy pageCopy pageModel Context Protocol (MCP) is an open protocol that standardizes how applications provide tools and context to LLMs. 
LangChain agents can use tools defined on MCP servers using the langchain-mcp-adapters library.\n​Install\nInstall the @langchain/mcp-adapters library to use MCP tools in LangGraph:\nnpmpnpmyarnbunCopyAsk AInpm install @langchain/mcp-adapters\n\n​Transport types\nMCP supports different transport mechanisms for client-server communication:\n\nstdio – Client launches server as a subprocess and communicates via standard input/output. Best for local tools and simple setups.\nStreamable HTTP – Server runs as an independent process handling HTTP requests. Supports remote connections and multiple clients.\nServer-Sent Events (SSE) – a variant of streamable HTTP optimized for real-time streaming communication.\n\n​Use MCP tools\n@langchain/mcp-adapters enables agents to use tools defined across one or more MCP server.\nAccessing multiple MCP serversCopyAsk AIimport { MultiServerMCPClient } from \"@langchain/mcp-adapters\"; \nimport { ChatAnthropic } from \"@langchain/anthropic\";\nimport { createAgent } from \"langchain\";\n\nconst client = new MultiServerMCPClient({ \n math: {\n transport: \"stdio\", // Local subprocess communication\n command: \"node\",\n // Replace with absolute path to your math_server.js file\n args: [\"/path/to/math_server.js\"],\n },\n weather: {\n transport: \"sse\", // Server-Sent Events for streaming\n // Ensure you start your weather server on port 8000\n url: \"http://localhost:8000/mcp\",\n },\n});\n\nconst tools = await client.getTools(); \nconst agent = createAgent({\n model: \"claude-sonnet-4-5-20250929\",\n tools, \n});\n\nconst mathResponse = await agent.invoke({\n messages: [{ role: \"user\", content: \"what's (3 + 5) x 12?\" }],\n});\n\nconst weatherResponse = await agent.invoke({\n messages: [{ role: \"user\", content: \"what is the weather in nyc?\" }],\n});\n\nMultiServerMCPClient is stateless by default. 
Each tool invocation creates a fresh MCP ClientSession, executes the tool, and then cleans up.\n​Custom MCP servers\nTo create your own MCP servers, you can use the @modelcontextprotocol/sdk library. This library provides a simple way to define tools and run them as servers.\nnpmpnpmyarnbunCopyAsk AInpm install @modelcontextprotocol/sdk\n\nUse the following reference implementations to test your agent with MCP tool servers.\nMath server (stdio transport)CopyAsk AIimport { Server } from \"@modelcontextprotocol/sdk/server/index.js\";\nimport { StdioServerTransport } from \"@modelcontextprotocol/sdk/server/stdio.js\";\nimport {\n CallToolRequestSchema,\n ListToolsRequestSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\n\nconst server = new Server(\n {\n name: \"math-server\",\n version: \"0.1.0\",\n },\n {\n capabilities: {\n tools: {},\n },\n }\n);\n\nserver.setRequestHandler(ListToolsRequestSchema, async () => {\n return {\n tools: [\n {\n name: \"add\",\n description: \"Add two numbers\",\n inputSchema: {\n type: \"object\",\n properties: {\n a: {\n type: \"number\",\n description: \"First number\",\n },\n b: {\n type: \"number\",\n description: \"Second number\",\n },\n },\n required: [\"a\", \"b\"],\n },\n },\n {\n name: \"multiply\",\n description: \"Multiply two numbers\",\n inputSchema: {\n type: \"object\",\n properties: {\n a: {\n type: \"number\",\n description: \"First number\",\n },\n b: {\n type: \"number\",\n description: \"Second number\",\n },\n },\n required: [\"a\", \"b\"],\n },\n },\n ],\n };\n});\n\nserver.setRequestHandler(CallToolRequestSchema, async (request) => {\n switch (request.params.name) {\n case \"add\": {\n const { a, b } = request.params.arguments as { a: number; b: number };\n return {\n content: [\n {\n type: \"text\",\n text: String(a + b),\n },\n ],\n };\n }\n case \"multiply\": {\n const { a, b } = request.params.arguments as { a: number; b: number };\n return {\n content: [\n {\n type: \"text\",\n text: String(a * b),\n 
},\n ],\n };\n }\n default:\n throw new Error(`Unknown tool: ${request.params.name}`);\n }\n});\n\nasync function main() {\n const transport = new StdioServerTransport();\n await server.connect(transport);\n console.error(\"Math MCP server running on stdio\");\n}\n\nmain();\n\nWeather server (SSE transport)CopyAsk AIimport { Server } from \"@modelcontextprotocol/sdk/server/index.js\";\nimport { SSEServerTransport } from \"@modelcontextprotocol/sdk/server/sse.js\";\nimport {\n CallToolRequestSchema,\n ListToolsRequestSchema,\n} from \"@modelcontextprotocol/sdk/types.js\";\nimport express from \"express\";\n\nconst app = express();\napp.use(express.json());\n\nconst server = new Server(\n {\n name: \"weather-server\",\n version: \"0.1.0\",\n },\n {\n capabilities: {\n tools: {},\n },\n }\n);\n\nserver.setRequestHandler(ListToolsRequestSchema, async () => {\n return {\n tools: [\n {\n name: \"get_weather\",\n description: \"Get weather for location\",\n inputSchema: {\n type: \"object\",\n properties: {\n location: {\n type: \"string\",\n description: \"Location to get weather for\",\n },\n },\n required: [\"location\"],\n },\n },\n ],\n };\n});\n\nserver.setRequestHandler(CallToolRequestSchema, async (request) => {\n switch (request.params.name) {\n case \"get_weather\": {\n const { location } = request.params.arguments as { location: string };\n return {\n content: [\n {\n type: \"text\",\n text: `It's always sunny in ${location}`,\n },\n ],\n };\n }\n default:\n throw new Error(`Unknown tool: ${request.params.name}`);\n }\n});\n\napp.post(\"/mcp\", async (req, res) => {\n const transport = new SSEServerTransport(\"/mcp\", res);\n await server.connect(transport);\n});\n\nconst PORT = process.env.PORT || 8000;\napp.listen(PORT, () => {\n console.log(`Weather MCP server running on port ${PORT}`);\n});\n\n​Stateful tool usage\nFor stateful servers that maintain context between tool calls, use client.session() to create a persistent ClientSession.\nUsing MCP 
ClientSession for stateful tool usageCopyAsk AIimport { loadMCPTools } from \"@langchain/mcp-adapters/tools.js\";\n\nconst client = new MultiServerMCPClient({...});\nconst session = await client.session(\"math\");\nconst tools = await loadMCPTools(session);\n\n​Additional resources\n\nMCP documentation\n\nMCP Transport documentation\n\nlangchain-mcp-adapters\n\nEdit the source of this page on GitHub.\nConnect these docs programmatically to Claude, VSCode, and more via MCP for real-time answers.Was this page helpful?YesNoContext engineering in agentsPreviousHuman-in-the-loopNextDocs by LangChain home pagegithubxlinkedinyoutubeResourcesForumChangelogLangChain AcademyTrust CenterCompanyAboutCareersBloggithubxlinkedinyoutubePowered by Mintlify", - "metadata": { - "source": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "title": "Model Context Protocol (MCP) - Docs by LangChain", - "description": "", - "language": "en", - "loc": "https://docs.langchain.com/oss/javascript/langchain/mcp", - "lastmod": "2025-10-31T14:01:30.957Z" - }, - "type": "Document" - } -] \ No newline at end of file