diff --git a/python/vibe-coding-ide/.gitignore b/python/vibe-coding-ide/.gitignore new file mode 100644 index 0000000000..8929518cfc --- /dev/null +++ b/python/vibe-coding-ide/.gitignore @@ -0,0 +1,14 @@ +# PyCharm +.idea/ + +# Docker +.dockerignore + +# OS generated files +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db diff --git a/python/vibe-coding-ide/README.md b/python/vibe-coding-ide/README.md new file mode 100644 index 0000000000..cb804f0ffa --- /dev/null +++ b/python/vibe-coding-ide/README.md @@ -0,0 +1,34 @@ +## Getting Started + +First, run the backend development server: + +```bash +cd backend + +vercel link +vercel env pull + +# or manually set env vars +# cat .env.example > .env + +python -m venv .venv +source .venv/bin/activate +pip install -r requirements.txt + +python server.py +``` + +Open [http://localhost:8081/docs](http://localhost:8081/docs) with your browser to see the backend. + +Then, run the frontend development server: + +```bash +# in a separate terminal +cd frontend + +npm i + +npm run dev +``` + +Open [http://localhost:3000](http://localhost:3000) with your browser to see the frontend. 
diff --git a/python/vibe-coding-ide/backend/.env.example b/python/vibe-coding-ide/backend/.env.example new file mode 100644 index 0000000000..34b1be4580 --- /dev/null +++ b/python/vibe-coding-ide/backend/.env.example @@ -0,0 +1,4 @@ +VERCEL_TOKEN=your_token_here +VERCEL_PROJECT_ID=prj_...your_project_id_here +VERCEL_TEAM_ID=team_...your_team_here +VERCEL_AI_GATEWAY_API_KEY=vck_...your_key_here \ No newline at end of file diff --git a/python/vibe-coding-ide/backend/.gitignore b/python/vibe-coding-ide/backend/.gitignore new file mode 100644 index 0000000000..19b67f62dc --- /dev/null +++ b/python/vibe-coding-ide/backend/.gitignore @@ -0,0 +1,173 @@ +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# poetry +# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control. +# This is especially recommended for binary packages to ensure reproducibility, and is more +# commonly ignored for libraries. +# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control +#poetry.lock + +# pdm +# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control. +#pdm.lock +# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it +# in version control. +# https://pdm.fming.dev/#use-with-ide +.pdm.toml + +# PEP 582; used by e.g. 
github.com/David-OConnor/pyflow and github.com/pdm-project/pdm +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ + +# PyCharm +# JetBrains specific template is maintained in a separate JetBrains.gitignore that can +# be added to the global gitignore or merged into this project gitignore. For a PyCharm +# project, it is recommended to ignore the entire .idea directory. +.idea/ + +# Docker +.dockerignore + +# OS generated files +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +ehthumbs.db +Thumbs.db + +.vercel diff --git a/python/vibe-coding-ide/backend/__init__.py b/python/vibe-coding-ide/backend/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/vibe-coding-ide/backend/pyproject.toml b/python/vibe-coding-ide/backend/pyproject.toml new file mode 100644 index 0000000000..679a8dee98 --- /dev/null +++ b/python/vibe-coding-ide/backend/pyproject.toml @@ -0,0 +1,40 @@ +[build-system] +requires = ["hatchling>=1.25.0"] +build-backend = "hatchling.build" + +[project] +name = "nfactorial-code-agent-backend" +version = "0.1.0" +description = "Backend for the nfactorial code agent (FastAPI)" +requires-python = ">=3.10" +dependencies = [ + "python-dotenv==1.0.0", + "pydantic", + "fastapi~=0.117.0", + "uvicorn", + "openai-agents[litellm]", + "PyJWT==2.9.0", + "vercel>=0.3.2", + "httpx", + "httpx-sse", + "ruff>=0.14.0", +] + +[project.optional-dependencies] +dev = [ + "ruff>=0.5.0", + "mypy", +] + +[project.urls] +homepage = "https://example.com" +repository = "https://example.com/repo" + + +[tool.hatch.build.targets.wheel] 
+# Include the src package so Hatch can build the wheel after refactor +only-include = [ + "src", +] + + diff --git a/python/vibe-coding-ide/backend/requirements.txt b/python/vibe-coding-ide/backend/requirements.txt new file mode 100644 index 0000000000..9db4466423 --- /dev/null +++ b/python/vibe-coding-ide/backend/requirements.txt @@ -0,0 +1,9 @@ +python-dotenv==1.0.0 +pydantic +httpx==0.27.2 +httpx-sse==0.4.0 +fastapi==0.115.12 +uvicorn[standard]==0.34.2 +openai-agents[litellm] +PyJWT==2.9.0 +vercel>=0.3.2 diff --git a/python/vibe-coding-ide/backend/server.py b/python/vibe-coding-ide/backend/server.py new file mode 100644 index 0000000000..01f0f8578f --- /dev/null +++ b/python/vibe-coding-ide/backend/server.py @@ -0,0 +1,55 @@ +import os +import logging +from dotenv import load_dotenv +from fastapi import FastAPI, Request +from fastapi.middleware.cors import CORSMiddleware +from dotenv import load_dotenv +from typing import Callable +from vercel.headers import set_headers + +from src.api.agent import router as agent_router +from src.api.sandbox import router as sandbox_router +from src.api.models import router as models_router + + +load_dotenv() + + +logger = logging.getLogger("ide_agent.server") +if not logger.handlers: + logger.setLevel(logging.INFO) + +app = FastAPI() + +is_prod = ( + os.getenv("NODE_ENV") or os.getenv("ENV") or "development" +).lower() == "production" + +app.add_middleware( + CORSMiddleware, + allow_origins=[], + allow_origin_regex=r"^https://.*\.labs\.vercel\.dev(:\d+)?$" if is_prod else r".*", + allow_credentials=True, + allow_methods=["*"], + allow_headers=["*"], +) + +@app.middleware("http") +async def vercel_context_middleware(request: Request, call_next: Callable): + set_headers(request.headers) + return await call_next(request) + +app.include_router(models_router) +app.include_router(agent_router) +app.include_router(sandbox_router) + + +@app.get("/") +def read_root(): + return {"Hello": "IDE Agent"} + + +if __name__ == "__main__": + 
import uvicorn + + uvicorn.run("server:app", host="0.0.0.0", port=8081, factory=False) diff --git a/python/vibe-coding-ide/backend/src/__init__.py b/python/vibe-coding-ide/backend/src/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/vibe-coding-ide/backend/src/agent/__init__.py b/python/vibe-coding-ide/backend/src/agent/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/vibe-coding-ide/backend/src/agent/_prompt.md b/python/vibe-coding-ide/backend/src/agent/_prompt.md new file mode 100644 index 0000000000..23caa15d87 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/_prompt.md @@ -0,0 +1,348 @@ +You are an IDE assistant that improves code across a multi-file project. + +Core Philosophy: Be persistent, debug thoroughly, never give up + +- When you encounter an error, your job is to FIX IT, not avoid it or work around it +- Your reputation depends on building well-crafted codebases and solving problems, not avoiding them +- The user trusts you to implement their exact requirements, not easier alternatives + +What you can do + +- Read the "Project files (paths)" and "Project contents (with line numbers)" sections. +- Propose concrete edits using the provided tools. Do not write code blocks in chat; the UI shows diffs. +- Make small, targeted changes; avoid unrelated refactors or reformatting. +- Preserve existing indentation, style, and structure. Do not add or remove blank lines unnecessarily. +- If multiple non-adjacent edits are needed, make multiple scoped edits rather than a whole-file rewrite. +- When unsure about intent, prefer a minimal safe change and briefly note assumptions. +- When the user explicitly requests a new feature, large refactor, or a rebuild, you MAY add substantial new code, move files/folders, or delete/replace existing code to fulfill the request. + +- **IMPORTANT:** Always implement exactly what the user asks for. 
Do not simplify, substitute technologies, or take shortcuts unless explicitly approved by the user. + +Problem-solving and debugging principles (CRITICAL) + +- **NEVER GIVE UP:** When you encounter an error or issue, debug it systematically. Do not switch technologies or approaches without explicit user permission. +- **DEBUG SYSTEMATICALLY:** When errors occur: + 1. Read the full error message and stack trace carefully + 2. Check logs, file permissions, dependencies, configurations + 3. Research the specific error if needed + 4. Try multiple solutions before considering alternatives + 5. Only suggest technology changes as a last resort, with clear explanation +- **COMMON ISSUES TO DEBUG (not avoid):** + - Database connection errors → check file paths, permissions, initialization + - Module not found → verify installation, check import paths, reinstall if needed + - Port conflicts → find and kill existing processes or use different ports + - Build failures → examine error logs, check dependencies, clear caches + - SQLite locked → check concurrent access, close connections properly +- **ASK FOR CLARIFICATION:** If truly stuck after debugging attempts, ask the user for preferences rather than making assumptions. + +How to work + +- **PARALLELIZE AGGRESSIVELY:** Always return multiple tool calls in one message when operations are independent. This is a CORE PRINCIPLE. +- **SET CORRECT WORKING DIRECTORIES:** In multi-service projects (frontend/backend), ALWAYS specify the correct `cwd` parameter in sandbox_run commands. Commands fail when run from wrong directories! + - Frontend: `cwd: "frontend"` for npm/yarn/pnpm commands + - Backend: `cwd: "backend"` for pip/poetry/uvicorn commands + - Never assume commands will work from project root if dependencies are in subdirectories +- Start non-trivial tasks with a short plan: goals, files to touch, risks, and **what can be parallelized**. 
+- Use think() to record that plan succinctly (3-7 bullets), including parallelization opportunities. Keep it brief. +- Use edit_code() for precise changes: set an exact line range and provide a replace string that matches only that range. +- For multi-line updates, set find to the exact current text within the chosen range and replace with the full new text for that same range. +- Use create*file() to add new files, and rename_file()/rename_folder() to move things. Use delete*\* sparingly and only when clearly safe. +- Ask for request_code_execution() to run or preview the project when runtime feedback is needed; include what will be executed and what success looks like in your surrounding message. +- **Batch operations:** When creating/editing multiple files, return all operations in one message unless one depends on another's output. + +Sandbox environment limitations (IMPORTANT) + +- **Available:** Python, Node.js, Ruby, Go runtimes; SQLite (with pysqlite3-binary for Python); file-based storage; Vercel runtime cache +- **NOT Available:** Docker, PostgreSQL, MySQL, MongoDB, Redis, Elasticsearch, RabbitMQ, or any service requiring Docker +- **Python SQLite Note:** SQLite is NOT included in the Python runtime by default. You MUST install pysqlite3-binary and patch the import +- **For caching:** Use Vercel runtime cache (vercel-sdk==0.0.7), NOT Redis/Memcached +- **For databases:** Use SQLite with appropriate ORM, NOT PostgreSQL/MySQL +- **For message queues:** Use in-process solutions or SQLite-backed queues, NOT RabbitMQ/Kafka + +Automatic sandbox sync (important) + +- After any successful file operation (create_file, delete_file, rename_file, rename_folder, delete_folder) or edit_code, changes are automatically synced to all existing sandboxes. +- If a dev server with file watching is running in a sandbox (e.g., vite/next dev, uvicorn --reload, Rails reloader), the preview updates automatically. 
+- sandbox_run still performs a full sync before executing commands; autosync covers subsequent edits. + +Parallelization is CRITICAL - maximize concurrent operations + +- **ALWAYS parallelize tool calls when operations are independent.** Return multiple tool calls in the same response message to execute them concurrently. +- **Think before acting:** Before executing commands, identify what can run in parallel vs what must be sequential. +- **Default to parallel, only serialize when necessary:** Operations should run sequentially ONLY when output from one is required as input to another. + +Examples of MANDATORY parallelization: + +1. **Multi-service setup:** Create all sandboxes at once, not one by one: + + ``` + ✅ CORRECT: Return 2 tool calls in same message: + - sandbox_create(runtime: "python3.13", name: "backend") + - sandbox_create(runtime: "node22", name: "frontend") + + ❌ WRONG: Create backend, wait for response, then create frontend + ``` + +2. **Independent installations:** Run all package installations concurrently: + + ``` + ✅ CORRECT: Return 2+ tool calls in same message: + - sandbox_run("pip install -r requirements.txt", name: "backend") + - sandbox_run("npm install", name: "frontend") + + ❌ WRONG: Install backend deps, wait, install frontend deps, wait... + ``` + +3. **File operations:** Create/edit multiple independent files simultaneously: + ``` + ✅ CORRECT: Return all file operations in one message + ❌ WRONG: Create/edit files one at a time with waits between + ``` + +When you MUST run sequentially (dependencies exist): + +- Backend must be running to get its URL before setting frontend env var +- Database must be migrated before seeding +- Dependencies must be installed before running the app +- Build must complete before deployment + +Running commands and servers + +- Use sandbox_run for shell commands. +- **CRITICAL:** Always set the correct `cwd` parameter when running commands in multi-service projects! 
+ - For frontend commands (npm/yarn/pnpm): set `cwd: "frontend"` or the appropriate frontend directory + - For backend commands (pip/poetry/uvicorn): set `cwd: "backend"` or the appropriate backend directory + - Commands will fail if run from wrong directory (e.g., `npm install` in root when package.json is in frontend/) +- For sequential steps within ONE service: chain with `&&` in a single sandbox_run (e.g., `pip install -r requirements.txt && python main.py`) +- For DIFFERENT services or independent operations: use parallel tool calls +- When you have several shell steps to execute in order, consider sandbox_run_pipeline which takes a list of commands and runs them as a single pipeline with `&&`. + +Hot reloading (CRITICAL - ALWAYS ENABLE) + +- **ALWAYS enable hot reloading/file watching for both frontend and backend servers whenever possible.** This is mandatory for development servers. +- The automatic sandbox sync (see line 57) pushes file changes to running sandboxes, and hot reload picks them up automatically. + +Server run checklist (APIs/frontends/servers) + +1. CWD: After generating/scaffolding a project (rails new, create-react-app, vite, etc.), set cwd to the app directory (e.g., blog/, my-app/) for all subsequent commands (bundle/rails/npm/bun/yarn). Do not run them from the project root. +2. Mode: Attached (detached: false) for one-shot tasks (installs/builds/tests). Detached (detached: true) only for servers with readiness. +3. Readiness: Always provide ready_patterns and port (infer or set a sensible default if missing). +4. **HOT RELOAD: ALWAYS enable hot reloading/auto-reload for development servers (see Hot reloading section above).** +5. Binding and env: + - Python: bind 0.0.0.0 and set port WITH --reload flag (e.g., uvicorn --host 0.0.0.0 --port 8000 --reload). + - Node: ensure server binds 0.0.0.0; pass -p/--port if applicable (e.g., next/vite/dev servers). Dev modes typically have HMR enabled by default. 
+ - Sinatra: RACK_ENV=production bundle exec rackup -s webrick -o 0.0.0.0 -p . + - Rails: Prefer bundler. If bin/rails is non-executable in a fresh checkout, use `bundle exec rails` instead of invoking the binstub directly. Start with: `ALLOWED_HOST= bundle exec rails server -b 0.0.0.0 -p 3000`. Dev mode has auto-reloading by default. + - Go: Prefer creating the sandbox with `runtime: go` so the Go toolchain is preinstalled. Use modules (`go mod init`, `go get`) and start with `go run .`. Ensure your Go server listens on 0.0.0.0. Default to port 3000 when unspecified. +6. Wait: Stream logs until a ready pattern appears; compute preview URL from the port. +7. Preview: Call sandbox_show_preview(url, port, label) which automatically performs a health check and returns the response. When previewing, make sure you preview a route that you know won't 404. For example, if you are previewing a backend with no + endpoint at the root but it has an endpoint at /api/hello, preview /api/hello instead of / + For FastAPI previews, prefer previewing the /docs page over just the root. +8. **If preview fails:** Debug the issue! Check logs, verify the server is actually running, ensure correct port binding, check for crashes. Do NOT ignore failures or claim success when things aren't working. 
+ +- Examples of when to wait for readiness (detached true + ready_patterns): + - Python: uvicorn/fastapi/flask servers + - Node: express/koa/nest/next dev/vite dev/node server.js + - Ruby: sinatra/rack/puma/rails + - Anything producing a “Listening on/Running on/Local:” style message + +Common readiness patterns and default ports + +- Python + - uvicorn: patterns ["Application startup complete", "Uvicorn running on"], default port 8000 + - flask run: patterns ["Running on", "Press CTRL+C to quit"], default port 5000 +- Node + - express/koa: patterns ["Listening on", "Server listening on", "Now listening"], port from command/env + - next dev: patterns ["Local:", "started server on"], default port 3000 + - vite dev: patterns ["Local:", "ready in"], default port 5173 + - create-react-app (react-scripts start): patterns ["Starting the development server", "Compiled successfully", "You can now view", "Local:"], default port 3000 +- Go +- go run/build: patterns ["Listening on", "http://0.0.0.0:", "listening on :", "Server started", "Serving on"], default port 3000 +- Ruby + - rackup/puma/sinatra: patterns ["Listening on", "WEBrick::HTTPServer#start", "Sinatra has taken the stage", "tcp://0.0.0.0:"]. Defaults: rackup 9292; sinatra via ruby app.rb 4567. + - IMPORTANT: When using "bundle exec ruby app.rb", auto-detection may NOT trigger. You must pass ready_patterns explicitly (e.g., the list above) and the expected port (commonly 4567) so the run waits until ready. + - Sinatra behind proxies will return 403 "host not allowed" unless bound correctly. Unless explicitly required otherwise, start with WEBrick, bind to 0.0.0.0, set RACK_ENV=production, and specify a port, e.g.: `RACK_ENV=production bundle exec rackup -s webrick -o 0.0.0.0 -p ` (for example, `-p 4567`). Provide ready_patterns and the same port so readiness is detected and a preview URL can be emitted. 
+ - Rails (framework-specific guidance): + - Create sandbox with runtime `ruby3.3` to bootstrap Ruby and Bundler. Then ensure Rails is installed: run `gem install --no-document rails`. + - Generate the app: `rails new --database=sqlite3 --skip-asset-pipeline --skip-javascript --skip-hotwire --skip-jbuilder --skip-action-mailbox --skip-jobs --skip-action-mailer --skip-action-text --skip-active-storage --skip-action-cable --skip-system-test --skip-github --skip-kamal --force`. + - If the template excludes sprockets, do not set `config.assets.*` in environment configs unless sprockets is added. + - Make sure you create the most minimal app version possible without having it generate stuff you will not use, for example github, kamal, action mailer unless you actually want to send emails, etc. + - In the app directory, set Bundler path: `bundle config set --local path vendor/bundle`, then `bundle install`. + - Host allowlist: create `config/initializers/allow_hosts.rb` that (1) appends `ENV['ALLOWED_HOST']` when present, and (2) always allows sandbox proxy domains via regex: add `/.+\.vercel\.run/` and `/.+\.sbox\.bio/` to `Rails.application.config.hosts`. Optionally allow `localhost` and `127.0.0.1` for local curls. + - Routes: ensure a valid root (e.g., scaffold and set `root "posts#index"`). Run `rails db:migrate` and `rails db:seed` as needed. + - Start server with host binding and host allowlist: derive the preview hostname (host only, no scheme/port) from the sandbox preview URL for the chosen port and run `ALLOWED_HOST= bundle exec rails server -b 0.0.0.0 -p 3000`. + - Readiness and port: patterns ["Listening on", "Use Ctrl-C to stop", "Puma starting"], default port 3000. + - Health checks and 403 fallback: after readiness, call sandbox_show_preview which will automatically check the preview URL (e.g., `/` or `/posts`). If the curl_result shows 403, ensure the initializer includes the `vercel.run` and `sbox.bio` regex entries, then restart the server. 
+ +When NOT to detach + +- Do not detach for installs (pip/npm/bundle), builds, tests, linters, or migrations — use attached runs (detached: false) and wait for the exit code. +- Only detach when running a server or watcher that should keep running, and only after providing readiness checks so the tool returns once it's ready. +- For large refactors or rebuilds: + - Outline a stepwise plan in think() first. + - Create new files and modules with create_file() and adjust imports/usages with edit_code(). + - Keep the project runnable after each major step; attempt to run the code and preview. + +Output rules + +- Response format: reply in very concise and to the point format, verbosity level low and clear. Minimize any markdown, + only simple bolding and italics and bulletpoints is okay. +- For code changes: summarize the edits you made (files, rationale, risks) without any code blocks. The UI shows diffs. +- Never include line numbers in replacement text. Always preserve file formatting and imports. +- If a tool call fails (e.g., file not found or text not matched), adjust your selection and try again with a narrower, exact range. +- For large refactors/rebuilds: list major files created, moved, or deleted, note entry points, and mention any follow-up actions the user should take (e.g., install deps, restart dev server). +- **When encountering errors:** Always report the error, explain what you're doing to fix it, and show your debugging steps. Never hide failures or pretend things are working when they're not. +- **Technology choices:** If you absolutely cannot make something work after thorough debugging, explain the issue clearly and ask the user whether to continue debugging or consider alternatives. Never make the switch unilaterally. + +Available tools (high level): + +- edit_code(file_path, find, find_start_line, find_end_line, replace): make a scoped, in-place change. +- create_file(file_path, content): add a new file with full content. 
+- delete_file(file_path): remove an existing file (use with caution). +- rename_file(old_path, new_path): move or rename a file and then update imports with edits. +- create_folder(folder_path): declare a folder (UI only; files control structure). +- delete_folder(folder_path): remove a folder and its files (use with caution). +- rename_folder(old_path, new_path): move a folder and all files under it. +- request_code_execution(response_on_reject): ask the UI to run code; you'll resume with the result. +- sandbox_create(runtime, ports, timeout_ms): create a persistent sandbox and store its id in context. +- sandbox_create(runtime, ports, timeout_ms, name?): create a sandbox. If name is provided, it becomes the active sandbox and is addressable by that name. +- sandbox_run(command, cwd?, env?, detached?, ready_patterns?, port?, wait_timeout_ms?, stream_logs?, name?): run a command in the specified sandbox (by name). If multiple sandboxes are used (e.g., "frontend", "backend"), always pass name. +- Tips: +- - Python/Uvicorn: the system auto-preps Python if needed and detects readiness (e.g., "Application startup complete"). Default port 8000 if unspecified. **ALWAYS use --reload flag for development servers** (e.g., `uvicorn app:app --reload`). +- - Ruby: you can request `runtime: ruby3.3`. Default ports: rackup 9292, Sinatra 4567. Readiness can be detected via common Rack/WEBrick/Sinatra log lines (e.g., "Listening on", "WEBrick::HTTPServer#start", "Sinatra has taken the stage"). You should use generally use `bundle exec __` commands. +- - When running code, make sure to install required dependencies first (e.g. pip install -r requirements.txt, npm i, bundle install, etc.) +- sandbox_set_env(env, name?): set default environment for subsequent runs for a specific sandbox (or active/default). +- sandbox_stop(name?): stop and release the specified sandbox (or active/default). 
+ Multi-sandbox guidance with PARALLELIZATION focus +- When to use multiple sandboxes: + - Decoupled repos or multi-service projects (e.g., React frontend + Python backend). + - Polyglot stacks needing different runtimes (node22 + python3.13 + ruby3.x). + - Concurrent, long-running servers on different ports (frontend dev server + API server). +- Naming conventions: + - Use simple, semantic names: "frontend", "backend", "api", "worker", "db". + - Avoid spaces; keep names stable across steps. +- Default/active sandbox behavior: + - If name is omitted, commands target the active sandbox. + - Creating a sandbox with name sets it as active. + - If no sandbox exists, a call will create/use the "default" sandbox. +- Always pass name once more than one sandbox exists. This removes ambiguity and ensures commands go to the intended service. +- Env per sandbox: + - Use sandbox_set_env([...], name: "frontend") to set per-sandbox environment (merged with global defaults). + - To wire services together, pass preview URLs as env from the backend to the frontend. + - Create React App: use REACT_APP_API_URL + - Vite: use VITE_API_URL +- Ports and readiness: + - Assign distinct ports to each server (e.g., backend 8000, frontend 5173). Provide ready_patterns and port so previews are detected. + +OPTIMIZED multi-service quickstart (with parallelization): + +``` +STEP 1 - Create ALL sandboxes in parallel (single message, 2+ tool calls): + ✅ sandbox_create(runtime: "python3.13", ports: [8000], name: "backend") + ✅ sandbox_create(runtime: "node22", ports: [5173], name: "frontend") + +STEP 2 - Install dependencies in parallel WITH CORRECT CWD (single message, 2+ tool calls): + ✅ sandbox_run("pip install -r requirements.txt", cwd: "backend", name: "backend") + ✅ sandbox_run("npm install", cwd: "frontend", name: "frontend") + + ⚠️ COMMON ERROR: Running npm install without cwd: "frontend" will fail if package.json is not in root! 
+ ❌ WRONG: sandbox_run("npm install", name: "frontend") - fails if package.json is in frontend/ + ✅ RIGHT: sandbox_run("npm install", cwd: "frontend", name: "frontend") + +STEP 3 - Start backend WITH HOT RELOAD (must complete before step 4): + ✅ sandbox_run("uvicorn app:app --host 0.0.0.0 --port 8000 --reload", cwd: "backend", detached: true, name: "backend") + ⚠️ CRITICAL: Always include --reload flag for Python/uvicorn servers! + +STEP 4 - After backend URL available, configure and start frontend WITH HOT RELOAD: + sandbox_set_env(["REACT_APP_API_URL="], name: "frontend") + ✅ sandbox_run("npm run dev -- --host --port 5173", cwd: "frontend", detached: true, port: 5173, name: "frontend") + Note: npm run dev typically has HMR enabled by default for Vite/Next.js +``` + +❌ WRONG approach (inefficient sequential execution): + +1. Create backend sandbox, wait for response +2. Create frontend sandbox, wait for response +3. Install backend deps, wait for completion +4. Install frontend deps, wait for completion +5. Start backend, wait for URL +6. Configure and start frontend + +Remember: Steps 1-2 have NO dependencies between services, so MUST run in parallel. Only step 3→4 has a real dependency (backend URL needed for frontend env). + +Additional guidance for sandbox_run + +- If auto-ready detection might miss your command (e.g., "bundle exec ruby app.rb", framework-specific dev servers), explicitly include ready_patterns and port. +- Follow the Server run checklist. If the preview health check fails, go back and try to debug instead of claiming success. +- **CRITICAL:** If a command fails, read the error, understand it, and fix it. Do not proceed with broken setups or claim partial success. Every error must be addressed. + +Remember: small, correct, reversible edits; clear summaries; better UX over aggressive refactors. + +Vite behind proxies (critical) + +- Always bind the dev server to 0.0.0.0 and set an explicit port (e.g., 5173). 
Use: npm run dev -- --host --port 5173 +- If you see "Blocked request. This host (...) is not allowed.", add the preview hostname (e.g., sb-\*.vercel.run) to server.allowedHosts in vite.config. Prefer relaxed patterns that match sandbox hosts. +- Configure server.hmr for HTTPS proxies: set clientPort: 443 and protocol: 'wss' so HMR works via the proxy. +- Enable CORS on the dev server (server.cors: true). Optionally set headers to allow all origins when needed. +- Example vite.config server snippet: host: '0.0.0.0', port: 5173, allowedHosts: [/\.vercel\.run$/, /\.sbox\.bio$/], cors: true, hmr: { clientPort: 443, protocol: 'wss' }. +- Ensure Vite is installed: run npm install (or pnpm i). If "vite: command not found", re-install devDependencies and use the correct package manager. + +**Vercel SDK Examples** +requirements.txt: + +``` +vercel-sdk>=0.0.7 +``` + +Example usage: + +```python +from __future__ import annotations + +import asyncio +import time + +from vercel.cache import get_cache, RuntimeCache +from vercel.cache.aio import get_cache as get_async_cache, AsyncRuntimeCache + + +async def async_demo() -> None: + # Helper-based async client + cache = get_async_cache(namespace="async-demo") + + key = "example:user:99" + await cache.delete(key) + assert await cache.get(key) is None + + await cache.set(key, {"name": "Bob"}, {"ttl": 1, "tags": ["user"]}) + got = await cache.get(key) + assert isinstance(got, dict) and got.get("name") == "Bob" + print("[async:get_async_cache] set/get ok") + + # TTL expiry check + await asyncio.sleep(2) + assert await cache.get(key) is None + print("[async:get_async_cache] TTL expiry ok") + + # Tag invalidation + await cache.set("post:1", {"title": "Hello"}, {"tags": ["post", "feed"]}) + await cache.set("post:2", {"title": "World"}, {"tags": ["post"]}) + assert await cache.get("post:1") is not None + assert await cache.get("post:2") is not None + await cache.expire_tag("feed") + assert await cache.get("post:1") is None + assert await 
cache.get("post:2") is not None + print("[async:get_async_cache] tag invalidation ok") + + # Direct class-based async client + client = AsyncRuntimeCache(namespace="async-client") + await client.set("k", 1, {"tags": ["t"]}) + assert await client.get("k") == 1 + await client.expire_tag("t") + assert await client.get("k") is None + print("[async:AsyncRuntimeCache] set/get + tag invalidation ok") + + +if __name__ == "__main__": + asyncio.run(async_demo()) +``` diff --git a/python/vibe-coding-ide/backend/src/agent/agent.py b/python/vibe-coding-ide/backend/src/agent/agent.py new file mode 100644 index 0000000000..c3979b31d8 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/agent.py @@ -0,0 +1,325 @@ +import os +import logging +import traceback +from typing import Any, AsyncGenerator +from agents import Agent, Runner +from dotenv import load_dotenv + +from src.agent.tools import ( + think, + edit_code, + create_file, + delete_file, + rename_file, + create_folder, + delete_folder, + rename_folder, + sandbox_create, + sandbox_stop, + sandbox_run, + sandbox_set_env, + sandbox_show_preview, +) +from src.agent.context import IDEContext +from src.sse import ( + sse_format, + emit_event, + tool_started_sse, + tool_completed_sse, +) +from src.run_store import update_run_project +from src.agent.utils import build_project_input, make_ignore_predicate + + +logger = logging.getLogger("ide_agent.agent") + + +ALLOWED_TURNS = 100 +SLEEP_INTERVAL_SECONDS = 0.05 + + +with open(os.path.join(os.path.dirname(__file__), "_prompt.md"), "r") as f: + instructions = f.read() + +load_dotenv() + + +def create_ide_agent(model: str | None = None, oidc_token: str | None = None) -> Agent: + """Factory to construct the IDE Agent with an optional model override. + + If model is provided, attempt to set it on the Agent. If the underlying + Agent class does not accept a model parameter, gracefully ignore it. 
+ """ + base_kwargs = { + "name": "IDE Agent", + "instructions": instructions, + "tools": [ + think, + # fs ops + edit_code, + create_file, + delete_file, + rename_file, + create_folder, + delete_folder, + rename_folder, + # sandbox controls + sandbox_create, + sandbox_stop, + sandbox_run, + sandbox_set_env, + sandbox_show_preview, + ], + } + os.environ["VERCEL_AI_GATEWAY_API_KEY"] = os.getenv("VERCEL_AI_GATEWAY_API_KEY") or os.getenv("AI_GATEWAY_API_KEY", "") + os.environ["VERCEL_OIDC_TOKEN"] = oidc_token or os.getenv("VERCEL_OIDC_TOKEN", "") + + if model: + try: + formatted_model = f"litellm/vercel_ai_gateway/{model}" + return Agent(**base_kwargs, model=formatted_model) + except TypeError: + return Agent(**base_kwargs) + return Agent(**base_kwargs) + + +async def run_agent_flow( + payload: dict[str, Any], task_id: str, oidc_token: str | None = None +) -> AsyncGenerator[str, None]: + """Run the agent and stream tool progress as SSE chunks.""" + try: + logger.info( + "run[%s] start model=%s project_files=%d history=%d", + task_id, + payload.get("model"), + len(payload.get("project", {})), + len(payload.get("message_history", [])), + ) + except Exception: + pass + + # Filter project using ignore patterns from .agentignore/.gitignore and defaults + original_project = payload.get("project", {}) or {} + is_ignored = make_ignore_predicate(original_project) + filtered_project = { + p: c + for p, c in original_project.items() + if (not is_ignored(p)) or (p in {".gitignore", ".agentignore"}) + } + + base_payload = { + "user_id": payload["user_id"], + "project_id": payload["project_id"], + "query": payload["query"], + "project": filtered_project, + "message_history": payload.get("message_history", []), + "model": payload.get("model"), + } + + history = payload.get("message_history", []) + assistant_only = [ + m["content"] + for m in history + if m.get("role") == "assistant" and m.get("content") + ] + input_text = build_project_input( + payload["query"], filtered_project, 
history or assistant_only + ) + + context = IDEContext(project=filtered_project, base_payload=base_payload) + # Keep run store in sync with filtered project so resume tokens remain small + try: + import asyncio + + coro = update_run_project(task_id, filtered_project) + if asyncio.get_event_loop().is_running(): + asyncio.create_task(coro) + else: + asyncio.run(coro) + except Exception: + pass + + selected_model = payload.get("model") + agent_instance = create_ide_agent(selected_model, oidc_token) + + run_task = asyncio.create_task( + Runner.run( + agent_instance, + input=input_text, + context=context, + max_turns=ALLOWED_TURNS, + ) + ) + yield sse_format(emit_event(task_id, "run_log", data="Agent run scheduled")) + + last_idx = 0 + result = None + try: + while not run_task.done(): + while last_idx < len(context.events): + ev = context.events[last_idx] + last_idx += 1 + if ev.get("phase") == "started": + yield tool_started_sse(task_id, ev) + elif ev.get("phase") == "completed": + yield tool_completed_sse(task_id, ev, base_payload, context.project) + elif ev.get("phase") == "log": + yield sse_format( + emit_event( + task_id, + "progress_update_tool_action_log", + data={ + "id": ev.get("tool_id"), + "name": ev.get("name"), + "data": ev.get("data"), + }, + ) + ) + await asyncio.sleep(SLEEP_INTERVAL_SECONDS) + + result = await run_task + except Exception as e: + logger.error("run[%s] error: %s", task_id, str(e)) + tb = traceback.format_exc(limit=10) + yield sse_format( + emit_event(task_id, "run_log", data=f"Exception: {str(e)}\n{tb}") + ) + yield sse_format(emit_event(task_id, "run_failed", error=str(e))) + return + + while last_idx < len(context.events): + ev = context.events[last_idx] + last_idx += 1 + if ev.get("phase") == "started": + yield tool_started_sse(task_id, ev) + elif ev.get("phase") == "completed": + yield tool_completed_sse(task_id, ev, base_payload, context.project) + elif ev.get("phase") == "log": + yield sse_format( + emit_event( + task_id, + 
"progress_update_tool_action_log", + data={ + "id": ev.get("tool_id"), + "name": ev.get("name"), + "data": ev.get("data"), + }, + ) + ) + + if context.defer_requested: + return + + if result and result.final_output: + yield sse_format( + emit_event(task_id, "agent_output", data=str(result.final_output)) + ) + else: + logger.warning("run[%s] completed with no output", task_id) + yield sse_format( + emit_event(task_id, "run_log", data="No final_output produced") + ) + yield sse_format(emit_event(task_id, "run_failed", error="No output produced.")) + + +async def resume_agent_flow( + base: dict[str, Any], task_id: str, exec_result: str, oidc_token: str | None = None +) -> AsyncGenerator[str, None]: + """Resume the agent run after code execution and stream SSE chunks. + + Reconstructs the agent context with the provided execution result, runs the + agent, emits tool events, and finally yields the agent's final output (or an + error event if none was produced). + """ + try: + logger.info( + "resume[%s] model=%s files=%d history=%d", + task_id, + base.get("model"), + len(base.get("project", {})), + len(base.get("message_history", [])), + ) + except Exception: + pass + + # Filter project on resume as well (the resume token may carry previous project) + original_project = base.get("project", {}) or {} + is_ignored = make_ignore_predicate(original_project) + filtered_project = { + p: c + for p, c in original_project.items() + if (not is_ignored(p)) or (p in {".gitignore", ".agentignore"}) + } + + base_payload = { + "user_id": base["user_id"], + "query": base["query"], + "project": filtered_project, + "message_history": base.get("message_history", []), + "model": base.get("model"), + } + + # Truncate very large execution logs to keep prompts under token limits + trimmed_result = exec_result or "" + if len(trimmed_result) > 100_000: + trimmed_result = trimmed_result[-100_000:] + + context = IDEContext( + project=filtered_project, + base_payload=base_payload, + 
exec_result=trimmed_result, + ) + + history = base.get("message_history", []) + assistant_only = [ + m["content"] + for m in history + if m.get("role") == "assistant" and m.get("content") + ] + input_text = build_project_input( + base["query"], filtered_project, history or assistant_only + ) + + selected_model = base.get("model") + agent_instance = create_ide_agent(selected_model, oidc_token) + + try: + run_result = await Runner.run( + agent_instance, + input=input_text, + context=context, + max_turns=ALLOWED_TURNS, + ) + + for ev in context.events: + if ev.get("phase") == "started": + yield tool_started_sse(task_id, ev) + elif ev.get("phase") == "completed": + yield tool_completed_sse(task_id, ev, base_payload, context.project) + elif ev.get("phase") == "log": + yield sse_format( + emit_event( + task_id, + "progress_update_tool_action_log", + data={ + "id": ev.get("tool_id"), + "name": ev.get("name"), + "data": ev.get("data"), + }, + ) + ) + elif ev.get("phase") == "log": + yield sse_format( + emit_event(task_id, "run_log", data=str(ev.get("data", ""))) + ) + + if run_result.final_output: + yield sse_format( + emit_event(task_id, "agent_output", data=str(run_result.final_output)) + ) + else: + yield sse_format( + emit_event(task_id, "run_failed", error="No output produced.") + ) + except Exception as e: + yield sse_format(emit_event(task_id, "run_failed", error=str(e))) diff --git a/python/vibe-coding-ide/backend/src/agent/context.py b/python/vibe-coding-ide/backend/src/agent/context.py new file mode 100644 index 0000000000..8e9723af9f --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/context.py @@ -0,0 +1,34 @@ +from typing import Any +from pydantic import BaseModel, Field + + +class IDEContext(BaseModel): + """State container for an IDE agent run. + + Attributes: + project: Mapping of file paths to file contents. + exec_result: Optional code execution result returned by the UI. + events: Structured tool events accumulated during a run. 
+ defer_requested: True if the agent requested code execution and paused. + base_payload: Original request payload fields used for resume tokens. + """ + + project: dict[str, str] + exec_result: str | None = None + events: list[dict[str, Any]] = Field(default_factory=list) + defer_requested: bool = False + base_payload: dict[str, Any] = Field(default_factory=dict) + + # Multi-sandbox support + # Active sandbox name used when a name is not explicitly provided + active_sandbox: str | None = None + # Map a sandbox name to its sandbox_id + sandbox_name_to_id: dict[str, str] = Field(default_factory=dict) + # Per-sandbox runtime and ports preferences + sandbox_runtime_map: dict[str, str] = Field(default_factory=dict) + sandbox_ports_map: dict[str, list[int]] = Field(default_factory=dict) + # Per-sandbox environment variables + sandbox_envs: dict[str, dict[str, str]] = Field(default_factory=dict) + # Per-sandbox filesystem snapshots + sandbox_files_map: dict[str, list[str]] = Field(default_factory=dict) + sandbox_file_meta_map: dict[str, dict[str, str]] = Field(default_factory=dict) diff --git a/python/vibe-coding-ide/backend/src/agent/tools/__init__.py b/python/vibe-coding-ide/backend/src/agent/tools/__init__.py new file mode 100644 index 0000000000..9abd16e387 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/tools/__init__.py @@ -0,0 +1,35 @@ +from .core import think, edit_code +from .fs_ops import ( + create_file, + delete_file, + rename_file, + create_folder, + delete_folder, + rename_folder, +) +from .sandbox import ( + sandbox_create, + sandbox_stop, + sandbox_run, + sandbox_set_env, + sandbox_show_preview, +) + +__all__ = [ + # core + "think", + "edit_code", + # fs ops + "create_file", + "delete_file", + "rename_file", + "create_folder", + "delete_folder", + "rename_folder", + # sandbox + "sandbox_create", + "sandbox_stop", + "sandbox_run", + "sandbox_set_env", + "sandbox_show_preview", +] diff --git 
a/python/vibe-coding-ide/backend/src/agent/tools/core.py b/python/vibe-coding-ide/backend/src/agent/tools/core.py new file mode 100644 index 0000000000..5a769e0370 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/tools/core.py @@ -0,0 +1,144 @@ +import json +from typing import Any + +from agents import function_tool, RunContextWrapper +from src.agent.context import IDEContext +from src.sandbox.utils import autosync_after_fs_change + + +@function_tool +async def think(ctx: RunContextWrapper[IDEContext], thoughts: str) -> str: + """Record a concise plan for the current task. + + Use this before non-trivial changes to outline intent (1-3 sentences). + Keep it brief and high-signal; do not include secrets, urls, or sensitive data. + + Args: + thoughts: Short plan or reasoning to log. + Returns: + The recorded plan text. + """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "think", + "arguments": {"thoughts": thoughts}, + } + ) + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "think", + "output_data": thoughts, + } + ) + return thoughts + + +def _perform_edit_code(file_content: str, args: dict[str, Any]) -> dict[str, Any]: + lines = file_content.split("\n") + start_idx = int(args["find_start_line"]) - 1 + end_idx = int(args["find_end_line"]) - 1 + if start_idx < 0 or end_idx >= len(lines) or start_idx > end_idx: + return { + "error": "Line numbers out of range or invalid", + "total_lines": len(lines), + } + existing_text = "\n".join(lines[start_idx : end_idx + 1]) + if str(args["find"]) not in existing_text: + return { + "error": "Find text not found at specified lines", + "existing_text": existing_text, + } + new_text = existing_text.replace(str(args["find"]), str(args["replace"])) + new_lines = lines[:start_idx] + new_text.split("\n") + lines[end_idx + 1 :] + new_code = "\n".join(new_lines) + return { + "find": str(args["find"]), 
+ "find_start_line": int(args["find_start_line"]), + "find_end_line": int(args["find_end_line"]), + "replace": str(args["replace"]), + "old_text": existing_text, + "new_text": new_text, + "new_code": new_code, + } + + +@function_tool +async def edit_code( + ctx: RunContextWrapper[IDEContext], + file_path: str, + find: str, + find_start_line: int, + find_end_line: int, + replace: str, +) -> str: + """Make a precise, in-place change within a file. + + Behavior: + - Operates only on lines [find_start_line, find_end_line] (1-based, inclusive). + - 'find' must appear within that range; only that matched text is replaced. + - 'replace' is the full new text for the matched portion; no line numbers. + - Content outside the selected range is preserved exactly. + + Guidelines: + - Choose the smallest line range that brackets the intended change. + - For multiple non-adjacent edits, call this tool multiple times. + - Preserve formatting, imports, and surrounding structure. + + Args: + file_path: Project-relative file path. + find: Exact text to replace within the specified range. + find_start_line: Start line (1-based, inclusive). + find_end_line: End line (1-based, inclusive). + replace: Replacement text (no line numbers). + Returns: + JSON string describing the edit or an error. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = { + "file_path": file_path, + "find": find, + "find_start_line": find_start_line, + "find_end_line": find_end_line, + "replace": replace, + } + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "edit_code", + "arguments": args, + } + ) + if file_path not in ctx.context.project: + output = {"error": f"File not found: {file_path}"} + else: + output = _perform_edit_code(ctx.context.project[file_path], args) + if "new_code" in output: + ctx.context.project[file_path] = output["new_code"] + # enrich output with file info for the UI + output = { + **output, + "file_path": file_path, + "new_file_content": output["new_code"], + } + # best-effort autosync to running sandboxes + try: + await autosync_after_fs_change( + ctx, created_or_updated=[file_path] + ) + except Exception: + pass + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "edit_code", + "output_data": output, + } + ) + return json.dumps(output) diff --git a/python/vibe-coding-ide/backend/src/agent/tools/fs_ops.py b/python/vibe-coding-ide/backend/src/agent/tools/fs_ops.py new file mode 100644 index 0000000000..e8ccfca76e --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/tools/fs_ops.py @@ -0,0 +1,343 @@ +import json + +from agents import function_tool, RunContextWrapper +from src.agent.context import IDEContext +from src.sandbox.utils import autosync_after_fs_change + + +@function_tool +async def create_file( + ctx: RunContextWrapper[IDEContext], file_path: str, content: str +) -> str: + """Create a new file with the provided content (for new features or rebuilds). + + Guidelines: + - Provide the full file contents. Create siblings/modules as needed. + - Prefer small, focused files in idiomatic locations. + - Does not overwrite an existing file; returns an error instead. Use rename_* to archive or move old files first. 
+ + Args: + file_path: Project-relative path for the new file. + content: Full content of the file. + Returns: + JSON string describing the creation or an error. + """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"file_path": file_path, "content": content} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "create_file", + "arguments": args, + } + ) + + if file_path in ctx.context.project: + output = {"error": f"File already exists: {file_path}", "file_path": file_path} + else: + ctx.context.project[file_path] = str(content) + # best-effort autosync to running sandboxes + output = { + "file_path": file_path, + "new_file_content": str(content), + "created": True, + } + try: + await autosync_after_fs_change(ctx, created_or_updated=[file_path]) + except Exception: + pass + + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "create_file", + "output_data": output, + } + ) + return json.dumps(output) + + +@function_tool +async def delete_file(ctx: RunContextWrapper[IDEContext], file_path: str) -> str: + """Delete an existing file (use sparingly; archive first when possible). + + Use with caution. Prefer edits or renames when appropriate. For rebuilds, consider moving old code into a `legacy/` path instead of deleting unless the user insists on removal. + + Args: + file_path: Path of the file to remove. + Returns: + JSON string indicating deletion or an error. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"file_path": file_path} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "delete_file", + "arguments": args, + } + ) + + if file_path not in ctx.context.project: + output = {"error": f"File not found: {file_path}", "file_path": file_path} + else: + # delete the file + del ctx.context.project[file_path] + # best-effort autosync to running sandboxes + output = {"file_path": file_path, "deleted": True} + try: + await autosync_after_fs_change(ctx, deleted_files=[file_path]) + except Exception: + pass + + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "delete_file", + "output_data": output, + } + ) + return json.dumps(output) + + +@function_tool +async def rename_file( + ctx: RunContextWrapper[IDEContext], old_path: str, new_path: str +) -> str: + """Rename or move a file. + + Behavior: + - Moves content from old_path to new_path; may overwrite if destination exists. + - Does not automatically update imports/references; follow up with edit_code(). + + Args: + old_path: Current file path. + new_path: Destination path. + Returns: + JSON string describing the rename or an error. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"old_path": old_path, "new_path": new_path} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "rename_file", + "arguments": args, + } + ) + + if old_path not in ctx.context.project: + output = { + "error": f"File not found: {old_path}", + "old_path": old_path, + "new_path": new_path, + } + else: + content = ctx.context.project[old_path] + overwritten = new_path in ctx.context.project + if overwritten: + # Overwrite destination + ctx.context.project[new_path] = content + else: + ctx.context.project[new_path] = content + del ctx.context.project[old_path] + # best-effort autosync to running sandboxes + output = { + "old_path": old_path, + "new_path": new_path, + "renamed": True, + **({"overwritten": True} if overwritten else {}), + } + try: + await autosync_after_fs_change( + ctx, created_or_updated=[new_path], deleted_files=[old_path] + ) + except Exception: + pass + + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "rename_file", + "output_data": output, + } + ) + return json.dumps(output) + + +@function_tool +async def create_folder(ctx: RunContextWrapper[IDEContext], folder_path: str) -> str: + """Declare a folder in the virtual project (no files created). + + This is a UI-level structure; it does not write files. Fails if a file with + the same path exists. + + Args: + folder_path: Folder path to declare. + Returns: + JSON string indicating creation or an error. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"folder_path": folder_path} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "create_folder", + "arguments": args, + } + ) + + # Folders are not tracked in project mapping; just emit event for UI + # But validate that it does not conflict with existing file + conflict = folder_path in ctx.context.project + if conflict: + output = { + "error": f"Conflicts with existing file: {folder_path}", + "folder_path": folder_path, + } + else: + output = {"folder_path": folder_path, "created": True} + + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "create_folder", + "output_data": output, + } + ) + return json.dumps(output) + + +@function_tool +async def delete_folder(ctx: RunContextWrapper[IDEContext], folder_path: str) -> str: + """Delete a folder and all files beneath it in the project mapping (for large cleanups only). + + Use with caution; this removes every file under the path. Prefer rename_folder to archive first when possible. + + Args: + folder_path: Folder path to remove. + Returns: + JSON string including count of removed files. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"folder_path": folder_path} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "delete_folder", + "arguments": args, + } + ) + + normalized = folder_path.rstrip("/") + removed = 0 + removed_paths: list[str] = [] + remaining: dict[str, str] = {} + for path, content in ctx.context.project.items(): + if path == normalized or path.startswith(normalized + "/"): + removed += 1 + removed_paths.append(path) + continue + remaining[path] = content + ctx.context.project = remaining + + # best-effort autosync to running sandboxes + output = {"folder_path": folder_path, "deleted": True, "removed_files": removed} + try: + await autosync_after_fs_change( + ctx, deleted_files=removed_paths, deleted_dirs=[normalized] + ) + except Exception: + pass + + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "delete_folder", + "output_data": output, + } + ) + return json.dumps(output) + + +@function_tool +async def rename_folder( + ctx: RunContextWrapper[IDEContext], old_path: str, new_path: str +) -> str: + """Rename or move a folder and all contained files. + + Behavior: + - Rewrites affected file paths by replacing prefix old_path with new_path. + - Does not update imports or references; follow up with edit_code() as needed. + + Args: + old_path: Existing folder path. + new_path: New folder path. + Returns: + JSON string describing the rename. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"old_path": old_path, "new_path": new_path} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "rename_folder", + "arguments": args, + } + ) + + old_norm = old_path.rstrip("/") + new_norm = new_path.rstrip("/") + moved = 0 + next_project: dict[str, str] = {} + deleted_paths: list[str] = [] + created_paths: list[str] = [] + for path, content in ctx.context.project.items(): + if path == old_norm or path.startswith(old_norm + "/"): + suffix = path[len(old_norm) :] + new_file_path = (new_norm + suffix).lstrip("/") + next_project[new_file_path] = content + moved += 1 + deleted_paths.append(path) + created_paths.append(new_file_path) + else: + next_project[path] = content + ctx.context.project = next_project + + # best-effort autosync to running sandboxes + output = { + "old_path": old_path, + "new_path": new_path, + "renamed": True, + "moved_files": moved, + } + try: + await autosync_after_fs_change( + ctx, + created_or_updated=created_paths, + deleted_files=deleted_paths, + deleted_dirs=[old_norm], + ) + except Exception: + pass + + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "rename_folder", + "output_data": output, + } + ) + return json.dumps(output) diff --git a/python/vibe-coding-ide/backend/src/agent/tools/sandbox.py b/python/vibe-coding-ide/backend/src/agent/tools/sandbox.py new file mode 100644 index 0000000000..017ec16fdb --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/tools/sandbox.py @@ -0,0 +1,774 @@ +import json +import asyncio +import time + +import httpx +from vercel.sandbox import AsyncSandbox as Sandbox +from agents import function_tool, RunContextWrapper + +from src.agent.context import IDEContext +from src.sandbox.runtimes import ( + create_synthetic_ruby_runtime, + create_synthetic_go_runtime, +) +from src.sandbox.utils import ( + normalize_sandbox_name, + get_sandbox_by_name, + 
sync_project_files, + snapshot_files_into_context, + parse_env_list, + snapshot_file_changes, +) +from src.run_store import ( + upsert_user_sandbox, + remove_user_sandbox, + upsert_user_project_sandbox, + remove_user_project_sandbox, +) +from src.sandbox.command import ( + select_safe_cwd, + detect_language_usage, + select_rails_app_cwd, + ensure_python_tooling, + maybe_wrap_with_bundler, + infer_ready_patterns_and_port, + adjust_rails_server_command, + ensure_go_tooling, + ensure_ruby_tooling, +) + + +@function_tool +async def sandbox_create( + ctx: RunContextWrapper[IDEContext], + runtime: str | None = None, + ports: list[int] | None = None, + timeout_ms: int | None = 600_000, + name: str | None = None, +) -> str: + """Create a persistent sandbox and remember it for this run. + + Args: + runtime: Optional runtime, e.g. "node22", "python3.13". + ports: Optional list of ports to expose (for previews). + timeout_ms: Sandbox lifetime timeout in milliseconds. + Returns: + JSON with sandbox details. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"runtime": runtime, "ports": ports, "timeout_ms": timeout_ms, "name": name} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "sandbox_create", + "arguments": args, + } + ) + + sb_name = normalize_sandbox_name(ctx, name) + + # Synthetic runtimes: if a Ruby or Go runtime is requested, create on a Node runtime and bootstrap + requested_runtime = runtime + is_synthetic_ruby = bool( + requested_runtime and str(requested_runtime).lower().startswith("ruby") + ) + is_synthetic_go = bool( + requested_runtime and str(requested_runtime).lower().startswith("go") + ) + effective_runtime = requested_runtime + if is_synthetic_ruby or is_synthetic_go: + # Default to node22 as the base image for bootstrapping + effective_runtime = "node22" + + sandbox = await Sandbox.create( + timeout=timeout_ms or 600_000, + runtime=effective_runtime, + ports=ports, + ) + # Map and set active sandbox + ctx.context.sandbox_name_to_id[sb_name] = sandbox.sandbox_id + ctx.context.active_sandbox = sb_name + # Persist preferences per-sandbox + if requested_runtime or effective_runtime: + ctx.context.sandbox_runtime_map[sb_name] = ( + requested_runtime or effective_runtime + ) # type: ignore[arg-type] + if ports is not None: + ctx.context.sandbox_ports_map[sb_name] = ports + + # Sync project files into sandbox cwd + synced = 0 + try: + synced = await sync_project_files(ctx, sandbox) + await snapshot_files_into_context(ctx, sandbox, sb_name) + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": f"Synced {synced} project files to sandbox.\n", + } + ) + except Exception as e: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": f"Project sync error: {str(e)}\n", + } + ) + + # If synthetic Ruby runtime requested, bootstrap Ruby and Bundler now + if is_synthetic_ruby: + try: + await 
create_synthetic_ruby_runtime(ctx, sandbox, sb_name, tool_id) + except Exception as e: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": f"Ruby bootstrap error: {str(e)}\n", + } + ) + + # If synthetic Go runtime requested, install Go toolchain now + if is_synthetic_go: + try: + await create_synthetic_go_runtime(ctx, sandbox, sb_name, tool_id) + except Exception as e: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": f"Go bootstrap error: {str(e)}\n", + } + ) + + output = { + "sandbox_id": sandbox.sandbox_id, + "status": getattr(sandbox, "status", None), + "runtime": requested_runtime or effective_runtime, + "ports": ports, + "synced_files": synced, + "name": sb_name, + **( + {"synthetic_runtime": True, "effective_runtime": effective_runtime} + if (is_synthetic_ruby or is_synthetic_go) + else {} + ), + } + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "sandbox_create", + "output_data": output, + } + ) + # Persist mapping per-user for cross-run autosync + try: + user_id = (ctx.context.base_payload or {}).get("user_id") or "" + project_id = (ctx.context.base_payload or {}).get("project_id") or "" + if user_id: + import asyncio as _asyncio + + tasks = [upsert_user_sandbox(user_id, sb_name, sandbox.sandbox_id)] + if project_id: + tasks.append(upsert_user_project_sandbox(user_id, project_id, sb_name, sandbox.sandbox_id)) + async def _run_all(): + for c in tasks: + try: + await c + except Exception: + pass + try: + loop = _asyncio.get_event_loop() + if loop.is_running(): + loop.create_task(_run_all()) + else: + loop.run_until_complete(_run_all()) # unlikely path + except RuntimeError: + _asyncio.run(_run_all()) + except Exception: + pass + return json.dumps(output) + + +@function_tool +async def sandbox_stop( + ctx: RunContextWrapper[IDEContext], name: str | None = None +) -> str: + """Stop and release the 
specified sandbox (or active/default if none provided).""" + tool_id = f"tc_{len(ctx.context.events) + 1}" + args = {"name": name} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "sandbox_stop", + "arguments": args, + } + ) + sb_name = normalize_sandbox_name(ctx, name) + sid = (ctx.context.sandbox_name_to_id or {}).get(sb_name) + if not sid: + output = {"stopped": False, "error": "no sandbox"} + else: + try: + sandbox = await Sandbox.get(sandbox_id=sid) + await sandbox.stop() + try: + await Sandbox.get(sandbox_id=sid) # best-effort refresh + except Exception: + pass + # Clear mappings + if sb_name in (ctx.context.sandbox_name_to_id or {}): + ctx.context.sandbox_name_to_id.pop(sb_name, None) + if ctx.context.active_sandbox == sb_name: + ctx.context.active_sandbox = None + output = {"stopped": True} + except Exception as e: + output = {"stopped": False, "error": str(e)} + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "sandbox_stop", + "output_data": output, + } + ) + # Remove mapping from per-user store + try: + user_id = (ctx.context.base_payload or {}).get("user_id") or "" + project_id = (ctx.context.base_payload or {}).get("project_id") or "" + if user_id and sb_name: + import asyncio as _asyncio + + tasks = [remove_user_sandbox(user_id, sb_name)] + if project_id: + tasks.append(remove_user_project_sandbox(user_id, project_id, sb_name)) + async def _run_all(): + for c in tasks: + try: + await c + except Exception: + pass + try: + loop = _asyncio.get_event_loop() + if loop.is_running(): + loop.create_task(_run_all()) + else: + loop.run_until_complete(_run_all()) + except RuntimeError: + _asyncio.run(_run_all()) + except Exception: + pass + return json.dumps(output) + + +@function_tool +async def sandbox_run( + ctx: RunContextWrapper[IDEContext], + command: str, + cwd: str | None = None, + env: list[str] | None = None, + detached: bool = False, + ready_patterns: list[str] | None = None, 
    port: int | None = None,
    wait_timeout_ms: int | None = 30_000,
    stream_logs: bool = True,
    name: str | None = None,
    auto_python_ensure: bool = True,
    auto_ready_patterns: bool = True,
    auto_ruby_ensure: bool = True,
    auto_go_ensure: bool = True,
) -> str:
    """Run a shell command in the active sandbox, optionally streaming logs and detecting readiness.

    Args:
        command: Shell command to run.
        cwd: Working directory inside sandbox; defaults to sandbox cwd.
        env: Extra environment variables.
        detached: If true, do not wait for process exit.
        ready_patterns: If provided, return after any pattern appears in logs.
        port: If provided, compute preview URL when ready (sandbox.domain(port)).
        wait_timeout_ms: Max time to wait for readiness when detached.
        stream_logs: If true, stream logs into the run timeline.
        name: Optional label for the process.
        auto_python_ensure: Auto-ensure Python tooling when command indicates Python usage.
        auto_ready_patterns: Auto-detect common readiness messages for certain servers.
        auto_ruby_ensure: Auto-ensure Ruby/Bundler when command indicates Ruby usage.
        auto_go_ensure: Auto-ensure Go toolchain when command indicates Go usage.
    Returns:
        JSON with status, exit_code (if attached), and preview_url if detected.
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + + sb_name = normalize_sandbox_name(ctx, name) + sandbox = await get_sandbox_by_name(ctx, sb_name) + # Resolve cwd safely: default to sandbox cwd; allow only subdirs under it + requested_cwd = cwd + base_cwd = sandbox.sandbox.cwd + safe_cwd = select_safe_cwd(requested_cwd, base_cwd) + + uses_python, uses_ruby, uses_go = detect_language_usage(command) + + # Heuristic: auto-select Rails app root as cwd when running Rails/Bundler commands without an explicit cwd + if uses_ruby and (requested_cwd is None or str(requested_cwd).strip() == ""): + safe_cwd = select_rails_app_cwd(ctx, sb_name, base_cwd, safe_cwd, command) + + args = { + "command": command, + "cwd": safe_cwd, + "requested_cwd": requested_cwd, + "env": env, + "detached": detached, + "ready_patterns": ready_patterns, + "port": port, + "wait_timeout_ms": wait_timeout_ms, + "stream_logs": stream_logs, + "name": sb_name, + } + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "sandbox_run", + "arguments": args, + } + ) + + # Ensure the sandbox has the latest project files before executing + try: + synced_count = await sync_project_files(ctx, sandbox) + await snapshot_files_into_context(ctx, sandbox, sb_name) + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": f"Synced {synced_count} project files to sandbox before run.\n", + } + ) + except Exception as e: + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": f"Pre-run sync failed: {str(e)}\n", + } + ) + + # Parse list-form env (e.g., ["KEY=VALUE"]) into a dict and merge with defaults + per_env = (ctx.context.sandbox_envs or {}).get(sb_name, {}) + full_env = {**per_env, **(parse_env_list(env) if env else {})} + cd_prefix = f"cd {safe_cwd} && " + + # Auto-attach for scaffolding/one-shot install commands when no readiness criteria are 
provided + # This ensures filesystem snapshots include newly generated files (e.g., from 'rails new') + try: + cl = (command or "").strip().lower() + is_scaffold_or_install = ( + cl.startswith("rails new") + or " rails new " in cl + or cl.startswith("rails generate") + or cl.startswith("rails g ") + or " rails generate " in cl + or " rails g " in cl + or cl.startswith("bundle install") + or " bundle install " in cl + ) + if ( + detached + and not ready_patterns + and (port is None) + and is_scaffold_or_install + ): + detached = False + except Exception: + pass + + if auto_python_ensure and uses_python: + await ensure_python_tooling( + ctx, sandbox, safe_cwd, full_env or None, stream_logs, tool_id + ) + + if auto_ruby_ensure and uses_ruby: + await ensure_ruby_tooling( + ctx, sandbox, safe_cwd, full_env or None, stream_logs, tool_id, sb_name + ) + + # When running Ruby apps directly, optionally wrap with Bundler + command = maybe_wrap_with_bundler(command, uses_ruby) + + # Heuristics: if this looks like a Go task, ensure Go toolchain is present + if auto_go_ensure and uses_go: + await ensure_go_tooling( + ctx, sandbox, safe_cwd, full_env or None, stream_logs, tool_id + ) + + # Infer readiness patterns and port for common servers + ready_patterns, port = infer_ready_patterns_and_port( + command, ready_patterns, port, auto_ready_patterns + ) + + # Ensure Rails server config + command = adjust_rails_server_command(ctx, sandbox, command, port) + + cmd = await sandbox.run_command_detached( + "bash", + ["-lc", f"{cd_prefix}{command}"], + env=full_env or None, + ) + + preview_url: str | None = None + # We will collect logs until readiness, timeout, or process exit + collected_logs: list[str] = [] + ready: bool = False + timed_out: bool = False + exited_early: bool = False + exit_code: int | None = None + should_wait = bool( + (ready_patterns and len(ready_patterns) > 0) or (port is not None) + ) + ready_deadline = ( + (time.time() + (wait_timeout_ms or 0) / 1000.0) if 
should_wait else None + ) + + stop_event = asyncio.Event() + + async def _stream_logs() -> None: + nonlocal preview_url, ready + try: + async for line in cmd.logs(): + data = line.data or "" + # Append to UI timeline if requested + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": data, + } + ) + # Always collect for LLM summary + collected_logs.append(data) + # Detect readiness + if ready_patterns: + for pat in ready_patterns: + if pat and (pat in data): + ready = True + if port and not preview_url: + try: + url = sandbox.domain(port) + except Exception: + url = None + if url: + preview_url = url + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": f"[{sb_name}] Preview available at: {url}\n", + } + ) + stop_event.set() + return + # Stop if timeout/exit already signaled + if stop_event.is_set(): + return + except Exception: + # Ignore streaming errors but ensure we don't block forever + stop_event.set() + return + + async def _wait_for_exit() -> None: + nonlocal exit_code, exited_early + try: + done = await cmd.wait() + exit_code = getattr(done, "exit_code", None) + exited_early = True + except Exception: + pass + finally: + stop_event.set() + + async def _timer() -> None: + nonlocal timed_out + if ready_deadline is None: + return + try: + now = time.time() + remaining = max(0.0, ready_deadline - now) + await asyncio.sleep(remaining) + if not stop_event.is_set(): + timed_out = True + stop_event.set() + except Exception: + # best-effort timeout + if not stop_event.is_set(): + timed_out = True + stop_event.set() + + # After command start/finish, optionally compute a filesystem snapshot for auto-resync + if detached: + if should_wait: + tasks: list[asyncio.Task] = [ + asyncio.create_task(_stream_logs()), + asyncio.create_task(_wait_for_exit()), + ] + # Only start the timer when readiness/port provided (long-running service) + if 
ready_deadline is not None: + tasks.append(asyncio.create_task(_timer())) + await stop_event.wait() + # Cancel any remaining tasks + for t in tasks: + if not t.done(): + t.cancel() + output = {"started": True} + if preview_url: + output["preview_url"] = preview_url + output.update( + { + "ready": ready, + "timed_out": timed_out, + "exited_early": exited_early, + **({"exit_code": exit_code} if exit_code is not None else {}), + } + ) + else: + # No readiness criteria given; don't block. Return immediately as started. + output = {"started": True} + output["fs"] = await snapshot_file_changes(ctx, sandbox, sb_name) + else: + # attached: stream logs until process exits + tasks_attached: list[asyncio.Task] = [ + asyncio.create_task(_stream_logs()), + asyncio.create_task(_wait_for_exit()), + ] + await stop_event.wait() + for t in tasks_attached: + if not t.done(): + t.cancel() + output = { + **({"preview_url": preview_url} if preview_url else {}), + "ready": ready, + "timed_out": timed_out, + "exited_early": exited_early, + **({"exit_code": exit_code} if exit_code is not None else {}), + } + output["fs"] = await snapshot_file_changes(ctx, sandbox, sb_name) + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "sandbox_run", + "output_data": output, + } + ) + # Prepare a summary string for the LLM that includes a trimmed log transcript + try: + # Build log snippet (last N characters to avoid overflow) + logs_text = "".join(collected_logs) + MAX_CHARS = 16000 + trimmed = False + if len(logs_text) > MAX_CHARS: + logs_text = logs_text[-MAX_CHARS:] + trimmed = True + + status = ( + "ready" + if ready + else ( + "timed_out" if timed_out else ("exited" if exited_early else "started") + ) + ) + + fs = output.get("fs") or {} + created = fs.get("created") or [] + updated = fs.get("updated") or [] + deleted = fs.get("deleted") or [] + files_total = len(fs.get("files") or []) + + parts = [ + f"sandbox_run completed (name={sb_name})", + 
f"status={status}", + *( + [f"preview_url={output.get('preview_url')}"] + if output.get("preview_url") + else [] + ), + *( + [f"exit_code={output.get('exit_code')}"] + if output.get("exit_code") is not None + else [] + ), + f"fs: files_total={files_total} created={len(created)} updated={len(updated)} deleted={len(deleted)}", + ( + "logs (trimmed to last " + str(MAX_CHARS) + " chars):" + if trimmed + else "logs:" + ), + logs_text, + ] + summary = "\n".join(parts) + except Exception: + summary = "sandbox_run completed" + return summary + + +# Simple helper for the agent to emit a preview URL for the running sandbox +@function_tool +async def sandbox_show_preview( + ctx: RunContextWrapper[IDEContext], + url: str, + port: int | None = None, + label: str | None = None, + name: str | None = None, +) -> str: + """Emit a preview URL for the active sandbox so the UI can render it. + Automatically makes a curl request to verify the URL is accessible. + Make sure to preview a route that contains a real endpoint. For example, + if you are previewing a backend that doesn not have anything at the root but + it has an endpoint at /api/hello, you should preview /api/hello. + + Args: + url: The full preview URL. + port: Optional port used by the service. + label: Optional descriptive label (e.g., 'frontend', 'backend'). + Returns: + JSON with preview info and curl response details. 
+ """ + tool_id = f"tc_{len(ctx.context.events) + 1}" + sb_name = normalize_sandbox_name(ctx, name) + args = {"url": url, "port": port, "label": label, "name": sb_name} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "sandbox_show_preview", + "arguments": args, + } + ) + + # Make HTTP request to verify the preview URL + curl_result = {} + try: + async with httpx.AsyncClient(timeout=10.0, follow_redirects=True) as client: + response = await client.get(url) + curl_result = { + "status_code": response.status_code, + "status": "success" if 200 <= response.status_code < 300 else "error", + "headers": dict(response.headers), + "content": response.text[:5000] if response.text else None, # Limit content to 5000 chars + "content_type": response.headers.get("content-type", ""), + } + + # Log the curl result to the events + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_show_preview", + "data": f"[{sb_name}] Preview health check: HTTP {response.status_code} for {url}\n", + } + ) + except httpx.TimeoutException: + curl_result = { + "status": "timeout", + "error": "Request timed out after 10 seconds" + } + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_show_preview", + "data": f"[{sb_name}] Preview health check: TIMEOUT for {url}\n", + } + ) + except httpx.ConnectError as e: + curl_result = { + "status": "connection_error", + "error": f"Connection failed: {str(e)}" + } + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_show_preview", + "data": f"[{sb_name}] Preview health check: CONNECTION ERROR for {url}\n", + } + ) + except Exception as e: + curl_result = { + "status": "error", + "error": f"Unexpected error: {str(e)}" + } + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_show_preview", + "data": f"[{sb_name}] Preview health check: ERROR - {str(e)}\n", + } + ) + + output = 
{ + "url": url, + **({"port": port} if port else {}), + **({"label": label} if label else {}), + "name": sb_name, + "curl_result": curl_result, + } + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "sandbox_show_preview", + "output_data": output, + } + ) + return json.dumps(output) + + +@function_tool +async def sandbox_set_env( + ctx: RunContextWrapper[IDEContext], env: list[str], name: str | None = None +) -> str: + """Set default environment variables for subsequent sandbox_run commands for a named sandbox (or active/default).""" + tool_id = f"tc_{len(ctx.context.events) + 1}" + sb_name = normalize_sandbox_name(ctx, name) + args = {"env": env, "name": sb_name} + ctx.context.events.append( + { + "phase": "started", + "tool_id": tool_id, + "name": "sandbox_set_env", + "arguments": args, + } + ) + parsed = parse_env_list(env) + # Per-sandbox env only + per_env = dict(ctx.context.sandbox_envs.get(sb_name, {})) + for k, v in parsed.items(): + if k not in per_env: + per_env[k] = v + ctx.context.sandbox_envs[sb_name] = per_env + output = {"ok": True, "env_keys": list(parsed.keys()), "name": sb_name} + ctx.context.events.append( + { + "phase": "completed", + "tool_id": tool_id, + "name": "sandbox_set_env", + "output_data": output, + } + ) + return json.dumps(output) diff --git a/python/vibe-coding-ide/backend/src/agent/utils.py b/python/vibe-coding-ide/backend/src/agent/utils.py new file mode 100644 index 0000000000..0973c5eef7 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/agent/utils.py @@ -0,0 +1,243 @@ +import os +from typing import Any + + +def build_project_input( + query: str, + project: dict[str, str], + prior_assistant_messages: list[Any] | None = None, +) -> str: + """Render a multi-file project into a single prompt-friendly string. + + The format lists all file paths first, then prints each file's contents with line numbers. 
+ """ + prior_block = "" + if prior_assistant_messages: + # Accept either list[str] (legacy assistant-only) or list[dict{role,content}] (full dialogue) + if isinstance( + prior_assistant_messages[0] if len(prior_assistant_messages) > 0 else None, + dict, + ): + lines: list[str] = [] + for m in prior_assistant_messages: # type: ignore[assignment] + role = str(m.get("role", "")) + content = str(m.get("content", "")) + if not content: + continue + lines.append(f"- {role}: {content}") + if lines: + prior_block = ( + "\n---\nPrevious conversation (for context):\n" + + "\n".join(lines) + + "\n" + ) + else: + joined = "\n\n".join([f"- {m}" for m in prior_assistant_messages]) + prior_block = ( + f"\n---\nPrevious assistant answers (for context only):\n{joined}\n" + ) + + # Compose a bounded project view to stay under model limits + max_total_chars = int(os.getenv("AGENT_MAX_PROJECT_CHARS", "60000")) + max_per_file_chars = int(os.getenv("AGENT_MAX_PER_FILE_CHARS", "10000")) + max_list_entries = int(os.getenv("AGENT_MAX_PATH_LIST", "500")) + + sorted_paths = sorted(project.keys()) + # Truncate path list for very large projects; include a tail note + listed_paths = sorted_paths[:max_list_entries] + file_list = "\n".join( + listed_paths + + ( + [f"... 
({len(sorted_paths) - len(listed_paths)} more omitted)"] + if len(sorted_paths) > len(listed_paths) + else [] + ) + ) + + files_rendered: list[str] = [] + remaining = max_total_chars + for path in sorted_paths: + if remaining <= 0: + break + content = project[path] or "" + c = ( + content + if len(content) <= max_per_file_chars + else content[:max_per_file_chars] + ) + rendered = f"FILE: {path}\n{display_code_with_line_numbers(c)}" + if len(rendered) > remaining: + # last-chance trim + head = max(0, remaining - len(f"FILE: {path}\n")) + c2 = (content or "")[: max(0, head)] + rendered = f"FILE: {path}\n{display_code_with_line_numbers(c2)}" + files_rendered.append(rendered) + remaining -= len(rendered) + + # Trim prior messages block to avoid overflow + max_history_chars = int(os.getenv("AGENT_MAX_HISTORY_CHARS", "20000")) + prior_block_trimmed = prior_block + if len(prior_block_trimmed) > max_history_chars: + prior_block_trimmed = prior_block_trimmed[-max_history_chars:] + + return ( + "Project files (paths):\n" + f"{file_list}\n---\n" + "Project contents (with line numbers):\n" + f"\n\n".join(files_rendered) + + "\n---\n" + + f"Query: {query}{prior_block_trimmed}" + + "\n---\nGuidance: For code changes, always call edit_code(file_path, find, find_start_line, find_end_line, replace) with an exact line range and the precise text to replace. Do not include line numbers in replacement text. For multiple non-adjacent changes, call edit_code multiple times. Preserve existing formatting and make minimal, targeted edits.\nIf the user requests a new feature, large refactor, or rebuild, you may also use create_file, rename_file/rename_folder, and delete_file/delete_folder. Prefer archiving via rename into a 'legacy/' path over deletion unless the user explicitly wants removal. After moves, update imports/usages with edit_code, and consider request_code_execution to validate." 
+ ) + + +def display_code_with_line_numbers(code: str) -> str: + return "\n".join([f"[{i + 1}]{line}" for i, line in enumerate(code.split("\n"))]) + + +# ---------------------------- +# Ignore pattern utilities +# ---------------------------- + +DEFAULT_AGENT_IGNORE_PATTERNS: list[str] = [ + "__pycache__/", + "*.pyc", + ".DS_Store", + "node_modules/", + "vendor/", + "dist/", + "build/", + ".venv/", + "venv/", + "env/", + "*.log", + ".bundle/", + "tmp/", + "log/", + "logs/", + "coverage/", + ".cache/", + ".next/", + "public/assets/", + ".git/", +] + + +def _parse_ignore_lines(text: str) -> list[str]: + lines: list[str] = [] + if not text: + return lines + for raw in text.splitlines(): + s = raw.strip() + if not s or s.startswith("#"): + continue + lines.append(s) + return lines + + +def make_ignore_predicate(project: dict[str, str]) -> "callable[[str], bool]": + """Create a simple ignore predicate based on `.agentignore`, `.gitignore`, and defaults. + + Supported patterns (subset): + - Trailing-slash directory rules, e.g. `node_modules/`, `__pycache__/` + - Simple filenames, e.g. `.DS_Store` + - Basic `*`/`?` globs on basenames, e.g. `*.pyc`, `*.log` + The semantics are intentionally simple and operate on basenames for globs. 
+ """ + + agentignore = project.get(".agentignore", "") or "" + gitignore = project.get(".gitignore", "") or "" + patterns: list[str] = [ + *DEFAULT_AGENT_IGNORE_PATTERNS, + *_parse_ignore_lines(gitignore), + *_parse_ignore_lines(agentignore), + ] + + # Also include nested ignore files by prefixing their folder path onto rules + try: + for path, text in project.items(): + if not path or "/.gitignore" not in path and "/.agentignore" not in path: + continue + # folder prefix (strip the ".gitignore" or ".agentignore" filename) + base = path.rsplit("/", 1)[0].lstrip("/") + if not base: + continue + for rule in _parse_ignore_lines(text or ""): + r = rule.strip() + if not r: + continue + if r.endswith("/"): + # directory rule stays a directory rule under base + patterns.append(f"{base}/{r}") + else: + # file/glob rule under base; keep relative to base + patterns.append(f"{base}/{r}") + except Exception: + # best-effort; ignore errors loading nested ignore files + pass + + def to_predicate(pat: str): + pattern = pat.lstrip("/").strip() + if not pattern: + return lambda _p: False + + # Directory match (e.g. foo/). If pattern includes a slash, treat as anchored path prefix. + if pattern.endswith("/"): + directory = pattern[:-1].lstrip("/") + + def _dir(path: str) -> bool: + n = (path or "").lstrip("/") + if not directory: + return False + if "/" in directory: + # anchored subpath: match exact or prefix (e.g., "frontend/node_modules/") + return n == directory or n.startswith(directory + "/") + # segment match anywhere (e.g., any "node_modules" segment in the path) + parts = n.split("/") if n else [] + return directory in parts + + return _dir + + # Exact basename match + if ("*" not in pattern) and ("?" not in pattern): + + def _exact(path: str) -> bool: + n = (path or "").lstrip("/") + base = n.split("/")[-1] if n else n + return base == pattern + + return _exact + + # Basic glob on basename + import re + + regex_str = ( + "^" + + "".join( + (".*" if tok == "*" else "." 
if tok == "?" else re.escape(tok)) + for tok in [t for t in re.split(r"([*?])", pattern) if t != ""] + ) + + "$" + ) + compiled = re.compile(regex_str) + + def _glob(path: str) -> bool: + n = (path or "").lstrip("/") + base = n.split("/")[-1] if n else n + return bool(compiled.match(base)) + + return _glob + + predicates = [to_predicate(p) for p in patterns] + + def is_ignored(path: str) -> bool: + for fn in predicates: + try: + if fn(path): + return True + except Exception: + # Be conservative: ignore errors and treat as not matching + continue + return False + + return is_ignored diff --git a/python/vibe-coding-ide/backend/src/api/__init__.py b/python/vibe-coding-ide/backend/src/api/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/vibe-coding-ide/backend/src/api/agent.py b/python/vibe-coding-ide/backend/src/api/agent.py new file mode 100644 index 0000000000..f009da30ea --- /dev/null +++ b/python/vibe-coding-ide/backend/src/api/agent.py @@ -0,0 +1,129 @@ +import logging +import time +import traceback +import uuid +from typing import Any, AsyncGenerator + +from pydantic import BaseModel +from fastapi import APIRouter, HTTPException +from fastapi.responses import StreamingResponse +from vercel.oidc.aio import get_vercel_oidc_token + +from src.agent.agent import run_agent_flow, resume_agent_flow +from src.auth import make_stream_token, read_stream_token +from src.sse import ( + SSE_HEADERS, + sse_format, + emit_event, +) +from src.run_store import set_run_payload, get_run_payload + + +logger = logging.getLogger("ide_agent.api.runs") + + +router = APIRouter(prefix="/api/runs", tags=["runs"]) + + +class RunRequest(BaseModel): + """Payload to start a new agent run and get an SSE resume token.""" + + user_id: str + project_id: str + message_history: list[dict[str, str]] + query: str + project: dict[str, str] + model: str | None = None + + +def make_task_id() -> str: + return f"task_{int(time.time() * 1000)}_{uuid.uuid4().hex[:8]}" + + 
+@router.post("/") +async def create_run(request: RunRequest) -> dict[str, Any]: + task_id = make_task_id() + try: + logger.info( + "create_run[%s] model=%s query_len=%d files=%d", + task_id, + request.model, + len(request.query or ""), + len(request.project or {}), + ) + except Exception: + pass + + # Store full payload server-side to keep stream tokens small + await set_run_payload( + task_id, + { + "user_id": request.user_id, + "message_history": request.message_history, + "query": request.query, + "project": request.project, + "model": request.model, + "project_id": request.project_id, + }, + ) + # Issue a compact token carrying only the run id + stream_token = make_stream_token({"run_id": task_id}) + return {"task_id": task_id, "stream_token": stream_token} + + +@router.get("/{run_id}/events") +async def run_events(run_id: str, token: str): + oidc_token = await get_vercel_oidc_token() + token_payload = read_stream_token(token) + if token_payload.get("run_id") != run_id: + raise HTTPException(status_code=400, detail="Token does not match run id") + payload = await get_run_payload(run_id) + if payload is None: + # Gracefully end the stream with a run_failed event + async def missing_generator() -> AsyncGenerator[str, None]: + yield sse_format( + emit_event(run_id, "run_failed", error="Unknown or expired run id") + ) + + return StreamingResponse(missing_generator(), headers=SSE_HEADERS) + + async def event_generator() -> AsyncGenerator[str, None]: + try: + async for chunk in run_agent_flow(payload, run_id, oidc_token=oidc_token): + yield chunk + except Exception as e: + logger.error("run_events[%s] error: %s", run_id, str(e)) + logger.error(traceback.format_exc(limit=10)) + tb = traceback.format_exc(limit=10) + yield sse_format( + emit_event(run_id, "run_log", data=f"stream exception: {str(e)}\n{tb}") + ) + yield sse_format(emit_event(run_id, "run_failed", error=str(e))) + + return StreamingResponse(event_generator(), headers=SSE_HEADERS) + + 
+@router.get("/{run_id}/resume") +async def resume_run(run_id: str, token: str, result: str): + oidc_token = await get_vercel_oidc_token() + token_payload = read_stream_token(token) + if token_payload.get("run_id") != run_id: + raise HTTPException(status_code=400, detail="Token does not match run id") + base = await get_run_payload(run_id) + if base is None: + + async def missing_generator() -> AsyncGenerator[str, None]: + yield sse_format( + emit_event(run_id, "run_failed", error="Unknown or expired run id") + ) + + return StreamingResponse(missing_generator(), headers=SSE_HEADERS) + + async def event_generator() -> AsyncGenerator[str, None]: + try: + async for chunk in resume_agent_flow(base, run_id, result, oidc_token=oidc_token): + yield chunk + except Exception as e: + yield sse_format(emit_event(run_id, "run_failed", error=str(e))) + + return StreamingResponse(event_generator(), headers=SSE_HEADERS) diff --git a/python/vibe-coding-ide/backend/src/api/models.py b/python/vibe-coding-ide/backend/src/api/models.py new file mode 100644 index 0000000000..c1e32b5870 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/api/models.py @@ -0,0 +1,53 @@ +import os +from typing import Any +import httpx +from fastapi import APIRouter +from vercel.oidc.aio import get_vercel_oidc_token + + +router = APIRouter(prefix="/api", tags=["models"]) + + +ALLOWED_MODELS: list[str] = [ + "anthropic/claude-sonnet-4.5", + "anthropic/claude-sonnet-4", + "anthropic/claude-3.7-sonnet", + "anthropic/claude-3.5-haiku", + "xai/grok-4", + "xai/grok-4-fast-non-reasoning", + "openai/gpt-4.1", + "openai/gpt-4.1-mini", + "openai/gpt-5", + "openai/gpt-5-mini", +] + + +@router.get("/models") +async def list_models() -> dict[str, Any]: + oidc_token = await get_vercel_oidc_token() + result = list(ALLOWED_MODELS) + + api_key = os.getenv("VERCEL_AI_GATEWAY_API_KEY", oidc_token) + gateway_base = ( + os.getenv("AI_GATEWAY_BASE_URL") + or os.getenv("OPENAI_BASE_URL") + or "https://ai-gateway.vercel.sh/v1" 
+ ) + + if not api_key: + return {"models": result} + + url = f"{gateway_base.rstrip('/')}/models" + headers = {"Authorization": f"Bearer {api_key}"} + try: + async with httpx.AsyncClient(timeout=15.0) as client: + resp = await client.get(url, headers=headers) + resp.raise_for_status() + data = resp.json() + available_ids = { + str(m.get("id")) for m in (data.get("data") or []) if m.get("id") + } + intersected = [m for m in ALLOWED_MODELS if m in available_ids] + return {"models": intersected or result} + except httpx.HTTPError: + return {"models": result} diff --git a/python/vibe-coding-ide/backend/src/api/sandbox.py b/python/vibe-coding-ide/backend/src/api/sandbox.py new file mode 100644 index 0000000000..83b9a1a85a --- /dev/null +++ b/python/vibe-coding-ide/backend/src/api/sandbox.py @@ -0,0 +1,136 @@ +from typing import Any +import os +import httpx +from pydantic import BaseModel +from fastapi import APIRouter, HTTPException +from vercel.oidc.aio import get_vercel_oidc_token +from vercel.sandbox import AsyncSandbox as Sandbox + +from src.agent.utils import make_ignore_predicate +from src.run_store import get_user_project_sandboxes + + +router = APIRouter(prefix="/api/play", tags=["play"]) + + +class SyncRequest(BaseModel): + """Sync current editor project files into an existing sandbox for a user. + + If name is omitted, the first sandbox mapping for the user will be used. + """ + + user_id: str + project_id: str + project: dict[str, str] + name: str | None = None + + +@router.get("/probe") +async def probe_url(url: str) -> dict[str, Any]: + """Server-side URL probe. + + Attempts a HEAD request first to avoid downloading the body. + Some servers do not support HEAD; in that case, fall back to a + streamed GET to obtain only the status code. 
+ """ + status_code: int | None = None + try: + async with httpx.AsyncClient(follow_redirects=True, timeout=8.0) as client: + try: + resp = await client.request("HEAD", url) + status_code = int(resp.status_code) + except Exception: + # Fall back to a minimal GET (streamed, do not read body) + try: + async with client.stream("GET", url) as resp2: + status_code = int(resp2.status_code) + except Exception: + status_code = None + except Exception: + status_code = None + + return {"ok": status_code is not None, "status": status_code} + + +@router.post("/sync") +async def sync_existing_sandbox(request: SyncRequest) -> dict[str, Any]: + """Push editor project files into ALL mapped sandboxes for this user (or a specific name if provided). + + This enables a project-level "Sync sandbox" action to refresh multiple live sandboxes at once. + """ + oidc_token = await get_vercel_oidc_token() + os.environ["VERCEL_OIDC_TOKEN"] = oidc_token + + mappings = {} + mappings = await get_user_project_sandboxes(request.user_id, request.project_id) + if not mappings: + raise HTTPException(status_code=404, detail="no sandboxes mapped for user") + + # Filter project once (respect ignore rules server-side) + is_ignored = make_ignore_predicate(request.project or {}) + filtered: dict[str, str] = { + p: c for p, c in (request.project or {}).items() if (not is_ignored(p)) or (p in {".gitignore", ".agentignore"}) + } + + targets: dict[str, str] = mappings + if request.name: + sid = mappings.get(request.name) + if not sid: + return {"ok": False, "error": f"sandbox not found for name '{request.name}'"} + targets = {request.name: sid} + + results: dict[str, Any] = {} + total_writes = 0 + for name, sid in targets.items(): + try: + sandbox = await Sandbox.get(sandbox_id=sid) + files_payload = [] + for path, content in filtered.items(): + try: + files_payload.append({"path": path, "content": content.encode("utf-8")}) + except Exception: + files_payload.append({"path": path, "content": bytes(str(content), 
"utf-8")}) + wrote = 0 + touched_paths: list[str] = [] + if files_payload: + for i in range(0, len(files_payload), 64): + chunk = files_payload[i : i + 64] + await sandbox.write_files(chunk) + wrote += len(chunk) + try: + # accumulate paths for touch to bump mtimes and trigger watchers + for e in chunk: + p = e.get("path") + if isinstance(p, str): + touched_paths.append(p) + except Exception: + pass + total_writes += wrote + # Best-effort: update mtimes for written files to trigger file watchers + try: + if touched_paths: + # quote paths safely and touch them + def _sh_quote(p: str) -> str: + return "'" + p.replace("'", "'\"'\"'") + "'" + quoted = " ".join(_sh_quote(p) for p in touched_paths) + base_cwd = sandbox.sandbox.cwd + touch_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", f"cd {base_cwd} && touch -cm -- {quoted}"], + ) + await touch_cmd.wait() + except Exception: + pass + # Preview hint (first port) + url = None + try: + ports = getattr(sandbox, "ports", None) + if isinstance(ports, list) and len(ports) > 0 and isinstance(ports[0], int): + url = sandbox.domain(ports[0]) + except Exception: + url = None + results[name] = {"ok": True, "sandbox_id": sid, **({"preview_url": url} if url else {}), "synced": wrote} + except Exception as e: + results[name] = {"ok": False, "error": str(e)} + + return {"ok": True, "by_sandbox": results, "total_synced": total_writes} diff --git a/python/vibe-coding-ide/backend/src/auth.py b/python/vibe-coding-ide/backend/src/auth.py new file mode 100644 index 0000000000..ae17ee4bfc --- /dev/null +++ b/python/vibe-coding-ide/backend/src/auth.py @@ -0,0 +1,43 @@ +import os +import jwt +from typing import Any + +from datetime import datetime, timedelta, timezone +from fastapi.exceptions import HTTPException + + +# JWT configuration (kept compatible with existing environment variables) +JWT_SECRET: str = os.getenv("JWT_SECRET", os.getenv("SSE_SECRET", "dev-secret")) +JWT_ALG: str = "HS256" +JWT_TTL_SECONDS: int = 
int(os.getenv("RESUME_TOKEN_TTL_SECONDS", "600")) + + +def make_stream_token(payload: dict[str, Any]) -> str: + """Create a signed JWT used by the SSE stream for resuming agent runs. + + The token embeds the provided payload and standard claims (iat/exp). + """ + now = datetime.now(timezone.utc) + to_encode = { + **payload, + "iat": int(now.timestamp()), + "exp": int((now + timedelta(seconds=JWT_TTL_SECONDS)).timestamp()), + } + return jwt.encode(to_encode, JWT_SECRET, algorithm=JWT_ALG) + + +def read_stream_token(token: str) -> dict[str, Any]: + """Decode and validate a previously issued resume token. + + Removes standard claims before returning the decoded payload. + Raises HTTP 400 on expired/invalid tokens to match previous behavior. + """ + try: + decoded: dict[str, Any] = jwt.decode(token, JWT_SECRET, algorithms=[JWT_ALG]) + decoded.pop("iat", None) + decoded.pop("exp", None) + return decoded + except jwt.ExpiredSignatureError: + raise HTTPException(status_code=400, detail="Token expired") + except jwt.InvalidTokenError: + raise HTTPException(status_code=400, detail="Invalid token") diff --git a/python/vibe-coding-ide/backend/src/run_store.py b/python/vibe-coding-ide/backend/src/run_store.py new file mode 100644 index 0000000000..b67b4a7965 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/run_store.py @@ -0,0 +1,94 @@ +from __future__ import annotations + +import os +from typing import Any + +from vercel.cache import AsyncRuntimeCache + + +# TTL in seconds for cached run payloads +_TTL_SECONDS: int = int(os.getenv("RUN_STORE_TTL_SECONDS", "900")) +_NAMESPACE = os.getenv("RUN_STORE_NAMESPACE", "ide-agent-runs") + + +cache = AsyncRuntimeCache(namespace=_NAMESPACE) + + +def _cache_key(run_id: str) -> str: + return f"run:{run_id}" + + +async def set_run_payload(run_id: str, payload: dict[str, Any]) -> None: + """Store the base payload for a run id using Vercel Runtime Cache.""" + await cache.set( + _cache_key(run_id), + dict(payload), + {"ttl": 
_TTL_SECONDS, "tags": [f"run:{run_id}"]}, + ) + + +async def get_run_payload(run_id: str) -> dict[str, Any] | None: + """Fetch the stored payload for a run id.""" + val = await cache.get(_cache_key(run_id)) + return dict(val) if isinstance(val, dict) else None + + +async def update_run_project(run_id: str, project: dict[str, str]) -> None: + """Update only the project map for the stored run payload if present.""" + base = await cache.get(_cache_key(run_id)) + if isinstance(base, dict): + updated = dict(base) + updated["project"] = dict(project) + await cache.set( + _cache_key(run_id), + updated, + {"ttl": _TTL_SECONDS, "tags": [f"run:{run_id}"]}, + ) + + +# Per-user active sandbox mappings (name -> sandbox_id) + +def _user_sbx_key(user_id: str) -> str: + return f"user:{user_id}:sandboxes" + + +async def get_user_sandboxes(user_id: str) -> dict[str, str]: + val = await cache.get(_user_sbx_key(user_id)) + return dict(val) if isinstance(val, dict) else {} + + +async def upsert_user_sandbox(user_id: str, name: str, sandbox_id: str) -> None: + cur = await get_user_sandboxes(user_id) + cur[name] = sandbox_id + await cache.set(_user_sbx_key(user_id), cur, {"ttl": _TTL_SECONDS}) + + +async def remove_user_sandbox(user_id: str, name: str) -> None: + cur = await get_user_sandboxes(user_id) + if name in cur: + cur.pop(name, None) + await cache.set(_user_sbx_key(user_id), cur, {"ttl": _TTL_SECONDS}) + + +# Per-user-per-project sandbox mappings (name -> sandbox_id) + +def _user_project_sbx_key(user_id: str, project_id: str) -> str: + return f"user:{user_id}:project:{project_id}:sandboxes" + + +async def get_user_project_sandboxes(user_id: str, project_id: str) -> dict[str, str]: + val = await cache.get(_user_project_sbx_key(user_id, project_id)) + return dict(val) if isinstance(val, dict) else {} + + +async def upsert_user_project_sandbox(user_id: str, project_id: str, name: str, sandbox_id: str) -> None: + cur = await get_user_project_sandboxes(user_id, project_id) + cur[name] 
= sandbox_id + await cache.set(_user_project_sbx_key(user_id, project_id), cur, {"ttl": _TTL_SECONDS}) + + +async def remove_user_project_sandbox(user_id: str, project_id: str, name: str) -> None: + cur = await get_user_project_sandboxes(user_id, project_id) + if name in cur: + cur.pop(name, None) + await cache.set(_user_project_sbx_key(user_id, project_id), cur, {"ttl": _TTL_SECONDS}) diff --git a/python/vibe-coding-ide/backend/src/sandbox/__init__.py b/python/vibe-coding-ide/backend/src/sandbox/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/python/vibe-coding-ide/backend/src/sandbox/command.py b/python/vibe-coding-ide/backend/src/sandbox/command.py new file mode 100644 index 0000000000..1e6bc5d1d4 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/sandbox/command.py @@ -0,0 +1,410 @@ +from vercel.sandbox import AsyncSandbox as Sandbox +from agents import RunContextWrapper + +from src.agent.context import IDEContext + + +def select_safe_cwd(requested_cwd: str | None, base_cwd: str) -> str: + """Resolve a safe working directory under the sandbox's base cwd. + + Absolute paths outside the sandbox root are ignored. Relative paths are + resolved under the base cwd. Falls back to base cwd on errors. + """ + safe_cwd = base_cwd + try: + if requested_cwd: + if requested_cwd.startswith("/"): + if ( + requested_cwd.startswith(base_cwd + "/") + or requested_cwd == base_cwd + ): + safe_cwd = requested_cwd + else: + safe_cwd = f"{base_cwd}/{requested_cwd}".rstrip("/") + except Exception: + safe_cwd = base_cwd + return safe_cwd + + +def detect_language_usage(command: str) -> tuple[bool, bool, bool]: + """Detect whether a command indicates Python, Ruby, or Go usage. + + Returns a tuple of (uses_python, uses_ruby, uses_go). 
+ """ + cmd_lower = (command or "").lower() + uses_python = ( + (" pip " in cmd_lower) + or cmd_lower.startswith("pip ") + or (" pip3 " in cmd_lower) + or cmd_lower.startswith("pip3 ") + or ("-m pip" in cmd_lower) + or ("python " in cmd_lower) + or cmd_lower.startswith("python") + or ("uvicorn" in cmd_lower) + ) + uses_ruby = ( + (" gem " in cmd_lower) + or cmd_lower.startswith("gem ") + or (" bundle " in cmd_lower) + or cmd_lower.startswith("bundle ") + or ("rackup" in cmd_lower) + or ("ruby " in cmd_lower) + or cmd_lower.startswith("ruby ") + or ("sinatra" in cmd_lower) + or ("rails " in cmd_lower) + ) + uses_go = (" go " in f" {cmd_lower} ") or cmd_lower.startswith("go ") + return uses_python, uses_ruby, uses_go + + +def select_rails_app_cwd( + ctx: RunContextWrapper[IDEContext], + sb_name: str, + base_cwd: str, + current_cwd: str, + command: str, +) -> str: + """Heuristic: auto-select Rails app root (folder containing bin/rails) for Rails-related commands.""" + try: + cln = (command or "").strip().lower() + is_rails_new = cln.startswith("rails new") or " rails new " in cln + rails_related = ( + ("bundle install" in cln) + or (" rails generate" in cln) + or cln.startswith("rails generate") + or (" rails db:" in cln) + or cln.startswith("rails db:") + or ("bin/rails" in cln and not is_rails_new) + ) and not is_rails_new + if not rails_related: + return current_cwd + files = (ctx.context.sandbox_files_map or {}).get(sb_name, []) + app_roots: list[str] = [] + for p in files: + if p.endswith("/bin/rails"): + app_roots.append(p[: -len("/bin/rails")]) + if len(app_roots) == 1: + return f"{base_cwd}/{app_roots[0]}".rstrip("/") + return current_cwd + except Exception: + return current_cwd + + +async def ensure_python_tooling( + ctx: RunContextWrapper[IDEContext], + sandbox: Sandbox, + safe_cwd: str, + env: dict[str, str] | None, + stream_logs: bool, + tool_id: str, +) -> None: + ensure_sh = ( + "PYBIN=$(command -v python3 || command -v python || echo 
/vercel/runtimes/python/bin/python3); " + "if [ -z \"$PYBIN\" ]; then echo 'python not found in sandbox'; exit 1; fi; " + "$PYBIN -m ensurepip --upgrade || true; " + "$PYBIN -m pip install --upgrade pip || true;" + ) + cmd = await sandbox.run_command_detached( + "bash", + ["-lc", f"cd {safe_cwd} && {ensure_sh}"], + env=env or None, + ) + try: + async for line in cmd.logs(): + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": line.data, + } + ) + except Exception: + pass + _ = await cmd.wait() + + +def maybe_wrap_with_bundler(command: str, uses_ruby: bool) -> str: + """If running Ruby apps directly, wrap with `bundle exec` when a Gemfile exists.""" + try: + cl = (command or "").strip().lower() + starts_with_ruby = cl.startswith("ruby ") + starts_with_rackup = cl.startswith("rackup") + starts_with_rails = cl.startswith("rails ") + already_using_bundle = cl.startswith("bundle ") or (" bundle exec " in cl) + if ( + uses_ruby + and (starts_with_ruby or starts_with_rackup or starts_with_rails) + and not already_using_bundle + ): + return f"( [ -f Gemfile ] || [ -f ./Gemfile ] ) && bundle exec {command} || {command}" + except Exception: + pass + return command + + +def infer_ready_patterns_and_port( + command: str, + ready_patterns: list[str] | None, + port: int | None, + auto_ready_patterns: bool, +) -> tuple[list[str] | None, int | None]: + """Infer readiness patterns and default port for common servers (Go, Uvicorn, Rack/Sinatra, Rails).""" + cmd_lower = (command or "").lower() + + # Go + is_go_run = (" go run" in f" {cmd_lower}") or cmd_lower.startswith("go run") + if ( + auto_ready_patterns + and (not ready_patterns or len(ready_patterns) == 0) + and is_go_run + ): + ready_patterns = [ + "Listening on", + "http://0.0.0.0:", + "listening on :", + "Server started", + "Serving on", + ] + if port is None and is_go_run: + try: + import re as _re + + m = _re.search(r"--port\\s+(\\d+)|-p\\s+(\\d+)", 
command) + port = int(m.group(1) or m.group(2)) if m else 3000 + except Exception: + port = 3000 + + # Uvicorn (FastAPI) + if ( + auto_ready_patterns + and (not ready_patterns or len(ready_patterns) == 0) + and ("uvicorn" in cmd_lower) + ): + ready_patterns = ["Application startup complete", "Uvicorn running on"] + if port is None and ("uvicorn" in cmd_lower): + try: + import re as _re + + m = _re.search(r"--port\\s+(\\d+)|-p\\s+(\\d+)", command) + port = int(m.group(1) or m.group(2)) if m else 8000 + except Exception: + port = 8000 + + # Rack/Sinatra/Ruby + if ( + auto_ready_patterns + and (not ready_patterns or len(ready_patterns) == 0) + and ( + ("rackup" in cmd_lower) + or ("sinatra" in cmd_lower) + or cmd_lower.startswith("ruby ") + ) + ): + ready_patterns = [ + "Listening on", + "WEBrick::HTTPServer#start", + "Sinatra has taken the stage", + "tcp://0.0.0.0:", + "WEBrick::HTTPServer#start: pid=", + ] + if port is None and ( + ("rackup" in cmd_lower) + or ("sinatra" in cmd_lower) + or cmd_lower.startswith("ruby ") + ): + try: + import re as _re + + m = _re.search(r"--port\\s+(\\d+)|-p\\s+(\\d+)", command) + if m: + port = int(m.group(1) or m.group(2)) + else: + port = 9292 if ("rackup" in cmd_lower) else 4567 + except Exception: + port = 9292 if ("rackup" in cmd_lower) else 4567 + + # Rails server + is_rails_server = ("rails server" in cmd_lower) or ("rails s" in cmd_lower) + if ( + auto_ready_patterns + and (not ready_patterns or len(ready_patterns) == 0) + and is_rails_server + ): + ready_patterns = [ + "Listening on", + "Use Ctrl-C to stop", + "Puma starting", + ] + if port is None and is_rails_server: + try: + import re as _re + + m = _re.search(r"--port\\s+(\\d+)|-p\\s+(\\d+)", command) + port = int(m.group(1) or m.group(2)) if m else 3000 + except Exception: + port = 3000 + + return ready_patterns, port + + +def adjust_rails_server_command( + ctx: RunContextWrapper[IDEContext], sandbox: Sandbox, command: str, port: int | None +) -> str: + """Ensure Rails 
server binds to 0.0.0.0 and inject ALLOWED_HOST automatically.""" + try: + cl = (command or "").lower() + is_rails_server = ("rails server" in cl) or ("rails s" in cl) + if not is_rails_server: + return command + if (" -b " not in command) and (" --binding " not in command): + command = f"{command} -b 0.0.0.0" + if "allowed_host=" not in cl: + try: + url = sandbox.domain(port or 3000) + from urllib.parse import urlparse as _urlparse + + host = _urlparse(url).hostname or "" + except Exception: + host = "" + if host: + command = f"ALLOWED_HOST={host} {command}" + return command + except Exception: + return command + + +async def ensure_ruby_tooling( + ctx: RunContextWrapper[IDEContext], + sandbox: Sandbox, + safe_cwd: str, + env: dict[str, str] | None, + stream_logs: bool, + tool_id: str, + sb_name: str, +) -> None: + ruby_install_sh = ( + "if ! command -v ruby >/dev/null 2>&1; then " + "dnf install -y ruby3.2 ruby3.2-rubygems ruby3.2-rubygem-json ruby3.2-devel libyaml-devel sqlite sqlite-devel gcc gcc-c++ make git redhat-rpm-config ruby3.2-rubygem-bundler || exit 1; " + "fi; " + "ruby --version; gem --version; bundle --version || true;" + ) + ruby_install_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", f"cd {safe_cwd} && {ruby_install_sh}"], + env=env or None, + sudo=True, + ) + try: + async for line in ruby_install_cmd.logs(): + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": line.data, + } + ) + except Exception: + pass + _ = await ruby_install_cmd.wait() + + bundler_install_sh = ( + "if ! 
command -v bundle >/dev/null 2>&1; then " + "gem list -i bundler >/dev/null 2>&1 || gem install --no-document bundler; " + "fi; bundle --version || true;" + ) + bundler_install_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", f"cd {safe_cwd} && {bundler_install_sh}"], + env=env or None, + sudo=True, + ) + try: + async for line in bundler_install_cmd.logs(): + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": line.data, + } + ) + except Exception: + pass + _ = await bundler_install_cmd.wait() + + bundler_cfg_sh = ( + f"cd {safe_cwd} && " + "mkdir -p vendor/bundle && " + "bundle config set --local path vendor/bundle" + ) + bundler_cfg_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", bundler_cfg_sh], + env=env or None, + ) + try: + async for line in bundler_cfg_cmd.logs(): + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": line.data, + } + ) + except Exception: + pass + _ = await bundler_cfg_cmd.wait() + + try: + per_env_global = dict(ctx.context.sandbox_envs.get(sb_name, {})) + per_env_global.update( + { + "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/share/gems/bin:/usr/share/ruby3.2-gems/bin:/home/vercel-sandbox/.local/share/gem/ruby/bin:/home/vercel-sandbox/.gem/ruby/bin", + "BUNDLE_PATH": "vendor/bundle", + } + ) + ctx.context.sandbox_envs[sb_name] = per_env_global + except Exception: + pass + + +async def ensure_go_tooling( + ctx: RunContextWrapper[IDEContext], + sandbox: Sandbox, + safe_cwd: str, + env: dict[str, str] | None, + stream_logs: bool, + tool_id: str, +) -> None: + go_install_sh = ( + "if ! 
command -v go >/dev/null 2>&1; then " + "dnf install -y golang git || exit 1; " + "fi; go version || exit 1;" + ) + go_install_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", f"cd {safe_cwd} && {go_install_sh}"], + env=env or None, + sudo=True, + ) + try: + async for line in go_install_cmd.logs(): + if stream_logs: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_run", + "data": line.data, + } + ) + except Exception: + pass + _ = await go_install_cmd.wait() diff --git a/python/vibe-coding-ide/backend/src/sandbox/runtimes.py b/python/vibe-coding-ide/backend/src/sandbox/runtimes.py new file mode 100644 index 0000000000..ba967b88da --- /dev/null +++ b/python/vibe-coding-ide/backend/src/sandbox/runtimes.py @@ -0,0 +1,203 @@ +from vercel.sandbox import AsyncSandbox as Sandbox +from agents import RunContextWrapper + +from src.agent.context import IDEContext + + +async def create_synthetic_ruby_runtime( + ctx: RunContextWrapper[IDEContext], + sandbox: Sandbox, + sb_name: str, + tool_id: str, +) -> None: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": "Initializing Ruby runtime...\n", + } + ) + # Ensure Ruby is present along with common build tools + ruby_install_sh = ( + "if ! 
command -v ruby >/dev/null 2>&1; then " + "dnf install -y ruby3.2 ruby3.2-rubygems ruby3.2-rubygem-json ruby3.2-devel libyaml-devel sqlite sqlite-devel gcc gcc-c++ make git redhat-rpm-config; " + "fi; ruby --version; gem --version;" + ) + ruby_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", ruby_install_sh], + sudo=True, + ) + try: + async for line in ruby_cmd.logs(): + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": line.data, + } + ) + except Exception: + pass + _ = await ruby_cmd.wait() + + # Ensure Bundler is available + bundler_install_sh = ( + "if command -v gem >/dev/null 2>&1; then " + "gem list -i bundler >/dev/null 2>&1 || gem install --no-document bundler; " + "fi; bundle --version || true" + ) + bundler_install_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", bundler_install_sh], + sudo=True, + ) + try: + async for line in bundler_install_cmd.logs(): + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": line.data, + } + ) + except Exception: + pass + _ = await bundler_install_cmd.wait() + + # Configure bundler to install into project-local path + bundler_cfg_sh = ( + f"cd {sandbox.sandbox.cwd} && " + "mkdir -p vendor/bundle && " + "bundle config set --local path vendor/bundle" + ) + bundler_cfg_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", bundler_cfg_sh], + ) + try: + async for line in bundler_cfg_cmd.logs(): + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": line.data, + } + ) + except Exception: + pass + _ = await bundler_cfg_cmd.wait() + + # Ensure rack and puma are available (create Gemfile if needed) and generate binstubs + rack_puma_setup_sh = ( + f"cd {sandbox.sandbox.cwd} && " + "( [ -f Gemfile ] || bundle init ) && " + "bundle add rack puma || true && " + "bundle install && " + "bundle binstubs rack puma" + ) + rack_puma_cmd = 
await sandbox.run_command_detached( + "bash", + ["-lc", rack_puma_setup_sh], + ) + try: + async for line in rack_puma_cmd.logs(): + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": line.data, + } + ) + except Exception: + pass + _ = await rack_puma_cmd.wait() + + # Persist environment defaults for Ruby tooling + try: + per_env = dict(ctx.context.sandbox_envs.get(sb_name, {})) + per_env.update( + { + "BUNDLE_PATH": "vendor/bundle", + "PATH": f"/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/usr/share/gems/bin:/usr/share/ruby3.2-gems/bin:/home/vercel-sandbox/.local/share/gem/ruby/bin:/home/vercel-sandbox/.gem/ruby/bin:{sandbox.sandbox.cwd}/bin", + } + ) + ctx.context.sandbox_envs[sb_name] = per_env + except Exception: + pass + + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": "Synthetic Ruby runtime ready. Bundler configured; rackup and puma installed (binstubs in ./bin).\n", + } + ) + + +async def create_synthetic_go_runtime( + ctx: RunContextWrapper[IDEContext], + sandbox: Sandbox, + sb_name: str, + tool_id: str, +) -> None: + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": "Initializing Go runtime...\n", + } + ) + go_install_sh = ( + "if ! 
command -v go >/dev/null 2>&1; then " + "dnf install -y golang git || exit 1; " + "fi; go version; git --version || true;" + ) + go_cmd = await sandbox.run_command_detached( + "bash", + ["-lc", go_install_sh], + sudo=True, + ) + try: + async for line in go_cmd.logs(): + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": line.data, + } + ) + except Exception: + pass + _ = await go_cmd.wait() + + # Persist environment defaults for Go tooling + try: + per_env = dict(ctx.context.sandbox_envs.get(sb_name, {})) + per_env.update( + { + "GOPATH": "/home/vercel-sandbox/go", + "PATH": "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin:/home/vercel-sandbox/go/bin:" + + (per_env.get("PATH") or ""), + } + ) + ctx.context.sandbox_envs[sb_name] = per_env + except Exception: + pass + + ctx.context.events.append( + { + "phase": "log", + "tool_id": tool_id, + "name": "sandbox_create", + "data": "Synthetic Go runtime ready. golang and git installed.\n", + } + ) diff --git a/python/vibe-coding-ide/backend/src/sandbox/utils.py b/python/vibe-coding-ide/backend/src/sandbox/utils.py new file mode 100644 index 0000000000..ff81987156 --- /dev/null +++ b/python/vibe-coding-ide/backend/src/sandbox/utils.py @@ -0,0 +1,358 @@ +from vercel.sandbox import AsyncSandbox as Sandbox +from agents import RunContextWrapper +from typing import Any + +from src.agent.context import IDEContext +from src.run_store import get_user_sandboxes +from src.agent.utils import make_ignore_predicate + + +def normalize_sandbox_name(ctx: RunContextWrapper[IDEContext], name: str | None) -> str: + """Resolve the effective sandbox name. + + Prefers the provided name; otherwise uses the active name if set; otherwise "default". + Also sets the active name if not set previously. 
+ """ + n = (name or ctx.context.active_sandbox or "default").strip() or "default" + if not ctx.context.active_sandbox: + ctx.context.active_sandbox = n + return n + + +async def snapshot_files_into_context( + ctx: RunContextWrapper[IDEContext], sandbox: Sandbox, name: str +) -> None: + """Snapshot filesystem and record per-sandbox state.""" + try: + cmd_ls = await sandbox.run_command( + "bash", + [ + "-lc", + ( + f"cd {sandbox.sandbox.cwd} && " + "find . \\ ( -path './.git/*' -o -path './node_modules/*' -o -path './vendor/*' -o -path './.bundle/*' -o -path './.cache/*' -o -path './tmp/*' -o -path './log/*' -o -path './logs/*' \\ ) -prune -o -type f -printf '%P\t%T@\t%s\n' 2>/dev/null | sort" + ), + ], + ) + out = await cmd_ls.stdout() + current: dict[str, str] = {} + files: list[str] = [] + for line in (out or "").splitlines(): + try: + rel, mtime, size = line.split("\t", 2) + except ValueError: + continue + files.append(rel) + current[rel] = f"{mtime} {size}" + # Filter out ignored paths + try: + is_ignored = make_ignore_predicate(ctx.context.project or {}) + filtered_files = [p for p in files if not is_ignored(p)] + filtered_current: dict[str, str] = { + p: meta for p, meta in current.items() if not is_ignored(p) + } + except Exception: + filtered_files = files + filtered_current = current + # Per-sandbox maps + ctx.context.sandbox_files_map[name] = filtered_files + ctx.context.sandbox_file_meta_map[name] = filtered_current + except Exception: + # Non-fatal + pass + + +async def get_sandbox_by_name(ctx: RunContextWrapper[IDEContext], name: str) -> Sandbox: + """Get or create a sandbox by name (multi-sandbox only).""" + # If we have a mapping, fetch from cache or remote + sid = (ctx.context.sandbox_name_to_id or {}).get(name) + if sid: + fetched = await Sandbox.get(sandbox_id=sid) + return fetched + # Create a new sandbox with stored preferences + runtime = (ctx.context.sandbox_runtime_map or {}).get(name) + ports = (ctx.context.sandbox_ports_map or {}).get(name) 
+ sandbox = await Sandbox.create( + timeout=600_000, + runtime=runtime, + ports=ports, + ) + ctx.context.sandbox_name_to_id[name] = sandbox.sandbox_id + ctx.context.active_sandbox = name + try: + await sync_project_files(ctx, sandbox) + await snapshot_files_into_context(ctx, sandbox, name) + except Exception: + pass + return sandbox + + +async def sync_project_files( + ctx: RunContextWrapper[IDEContext], sandbox: Sandbox, paths: list[str] | None = None +) -> int: + """Write project files to sandbox. + + If paths is provided, limits writes to those project-relative files; + otherwise writes the entire project mapping. + """ + to_write: list[dict[str, Any]] = [] + written = 0 + project = ctx.context.project or {} + if paths is None: + iterator = ((p, c) for p, c in project.items()) + else: + norm = {str(p).lstrip("./") for p in paths} + iterator = ((p, project[p]) for p in norm if p in project) + for path, content in iterator: + p = str(path).lstrip("./") + if not p: + continue + try: + b = content.encode("utf-8") + except Exception: + b = bytes(str(content), "utf-8") + to_write.append({"path": p, "content": b}) + written += 1 + for i in range(0, len(to_write), 64): + chunk = to_write[i : i + 64] + await sandbox.write_files(chunk) + return written + + +def parse_env_list(env_list: list[str] | None) -> dict[str, str]: + """Parse a list of strings like ["KEY=VALUE", ...] into a mapping. + + Invalid entries or empty keys are ignored. First occurrence of a key wins. 
+ """ + result: dict[str, str] = {} + if not env_list: + return result + for entry in env_list: + if not entry: + continue + try: + key, value = entry.split("=", 1) + except ValueError: + # skip items without '=' + continue + k = key.strip() + if k and k not in result: + result[k] = value + return result + + +async def snapshot_file_changes( + ctx: RunContextWrapper[IDEContext], + sandbox: Sandbox, + name: str, + *, + sample_limit: int = 50, + max_sample_size: int = 200_000, +) -> dict[str, Any]: + """Compute filesystem changes since last snapshot and optionally sample small files. + + Returns a dict with keys: files, created, updated, deleted, data (base64 samples) or error. + Also refreshes `sandbox_files_map` and `sandbox_file_meta_map` in the context. + """ + try: + cmd_ls = await sandbox.run_command( + "bash", + [ + "-lc", + ( + f"cd {sandbox.sandbox.cwd} && " + "find . \\\ ( -path './.git/*' -o -path './node_modules/*' -o -path './vendor/*' -o -path './.bundle/*' -o -path './.cache/*' -o -path './tmp/*' -o -path './log/*' -o -path './logs/*' -o -path './venv/*' \\\ ) -prune -o -type f -printf '%P\t%T@\t%s\n' 2>/dev/null | sort" + ), + ], + ) + out = await cmd_ls.stdout() + current: dict[str, str] = {} + files: list[str] = [] + for line in (out or "").splitlines(): + try: + rel, mtime, size = line.split("\t", 2) + except ValueError: + continue + files.append(rel) + current[rel] = f"{mtime} {size}" + + # Diff with previous snapshot + prev = (ctx.context.sandbox_file_meta_map or {}).get(name, {}) + created: list[str] = [] + updated: list[str] = [] + deleted: list[str] = [] + prev_keys = set(prev.keys()) + cur_keys = set(current.keys()) + for p in sorted(cur_keys - prev_keys): + created.append(p) + for p in sorted(prev_keys - cur_keys): + deleted.append(p) + for p in sorted(cur_keys & prev_keys): + if prev.get(p) != current.get(p): + updated.append(p) + + # Apply ignore rules + try: + is_ignored = make_ignore_predicate(ctx.context.project or {}) + files = [p for 
p in files if not is_ignored(p)] + current = {p: meta for p, meta in current.items() if not is_ignored(p)} + created = [p for p in created if not is_ignored(p)] + updated = [p for p in updated if not is_ignored(p)] + deleted = [p for p in deleted if not is_ignored(p)] + except Exception: + pass + + # Update context snapshots + ctx.context.sandbox_files_map[name] = files + ctx.context.sandbox_file_meta_map[name] = current + + # Sample newly created/updated small files + data: list[dict[str, Any]] = [] + sample_paths = created + updated + if sample_paths: + for p in sample_paths[:sample_limit]: + try: + safe = p.replace('"', '\\"') + cmd_cat = await sandbox.run_command( + "bash", + [ + "-lc", + ( + f"cd {sandbox.sandbox.cwd} && " + f"if [ -f '{safe}' ] && [ $(stat -c %s '{safe}' 2>/dev/null || stat -f %z '{safe}') -le {max_sample_size} ]; then " + f"base64 '{safe}'; else echo '__SKIP__'; fi" + ), + ], + ) + b64 = (await cmd_cat.stdout() or "").strip() + if b64 and b64 != "__SKIP__": + data.append({"path": p, "encoding": "base64", "content": b64}) + except Exception: + continue + + return { + "files": files, + "created": created, + "updated": updated, + "deleted": deleted, + "data": data, + } + except Exception as e: + return {"files": [], "error": str(e)} + + +async def autosync_after_fs_change( + ctx: RunContextWrapper[IDEContext], + *, + created_or_updated: list[str] | None = None, + deleted_files: list[str] | None = None, + deleted_dirs: list[str] | None = None, +) -> dict[str, Any]: + """Synchronize recent project changes to any existing sandboxes. + + This function does not create new sandboxes. It only syncs to sandboxes + already present in the context's sandbox_name_to_id mapping. + + Args: + created_or_updated: File paths whose latest contents should be written. + deleted_files: File paths that should be removed with `rm -f`. + deleted_dirs: Directory paths that should be removed recursively with `rm -rf`. 
+ + Returns: + A summary dict keyed by sandbox name with counts of writes/deletions. + """ + summary: dict[str, Any] = {"by_sandbox": {}} + try: + name_to_id = dict(ctx.context.sandbox_name_to_id or {}) + except Exception: + name_to_id = {} + + # If no mappings in current run, try per-user persisted mappings + if not name_to_id: + try: + user_id = (ctx.context.base_payload or {}).get("user_id") or "" + if user_id: + name_to_id = await get_user_sandboxes(user_id) + except Exception: + pass + + if not name_to_id: + return summary + + # Normalize inputs + cu = [str(p).lstrip("./") for p in (created_or_updated or []) if str(p).strip()] + del_files = [str(p).lstrip("./") for p in (deleted_files or []) if str(p).strip()] + del_dirs = [str(p).rstrip("/") for p in (deleted_dirs or []) if str(p).strip()] + + # Small helper to safely single-quote paths for bash + def _sh_quote(p: str) -> str: + return "'" + p.replace("'", "'\"'\"'") + "'" + + project_map = ctx.context.project or {} + + for sb_name, sid in name_to_id.items(): + try: + sandbox = await Sandbox.get(sandbox_id=sid) + # Write created/updated files via shared sync helper + writes = 0 + if cu: + writes = await sync_project_files(ctx, sandbox, cu) + # Best-effort: ensure file mtimes update to trigger watchers (e.g., Next/Turbopack) + try: + touched = " ".join(_sh_quote(p) for p in cu) + if touched: + base_cwd = sandbox.sandbox.cwd + touch_cmd = await sandbox.run_command( + "bash", + [ + "-lc", + f"cd {base_cwd} && touch -cm -- {touched}", + ], + ) + _ = await touch_cmd.wait() + except Exception: + pass + + # Remove deleted files and directories + removed_files = 0 + removed_dirs = 0 + base_cwd = sandbox.sandbox.cwd + if del_files: + quoted = " ".join(_sh_quote(p) for p in del_files) + cmd_rm_files = await sandbox.run_command( + "bash", + [ + "-lc", + f"cd {base_cwd} && rm -f -- {quoted}", + ], + ) + _ = await cmd_rm_files.wait() + removed_files = len(del_files) + if del_dirs: + quoted_dirs = " ".join(_sh_quote(d) for 
d in del_dirs) + cmd_rm_dirs = await sandbox.run_command( + "bash", + [ + "-lc", + f"cd {base_cwd} && rm -rf -- {quoted_dirs}", + ], + ) + _ = await cmd_rm_dirs.wait() + removed_dirs = len(del_dirs) + + # Refresh snapshot for the sandbox + try: + await snapshot_files_into_context(ctx, sandbox, sb_name) + except Exception: + pass + + summary["by_sandbox"][sb_name] = { + "writes": writes, + "deleted_files": removed_files, + "deleted_dirs": removed_dirs, + } + except Exception: + # Skip failures per-sandbox to avoid blocking the overall operation + continue + + return summary diff --git a/python/vibe-coding-ide/backend/src/sse.py b/python/vibe-coding-ide/backend/src/sse.py new file mode 100644 index 0000000000..7f609a9caa --- /dev/null +++ b/python/vibe-coding-ide/backend/src/sse.py @@ -0,0 +1,97 @@ +import json +import time +from typing import Any + +from src.auth import make_stream_token +from src.run_store import update_run_project + + +SSE_HEADERS: dict[str, str] = { + "Content-Type": "text/event-stream", + "Cache-Control": "no-cache", + "Connection": "keep-alive", + "X-Accel-Buffering": "no", +} + + +def sse_format(event: dict[str, Any]) -> str: + return f"data: {json.dumps(event)}\n\n" + + +def emit_event( + task_id: str, event_type: str, data: Any = None, error: Any = None +) -> dict[str, Any]: + return { + "event_type": event_type, + "task_id": task_id, + "timestamp": time.strftime("%Y-%m-%dT%H:%M:%S+00:00", time.gmtime()), + "data": data, + "error": error, + } + + +def tool_started_sse(task_id: str, ev: dict[str, Any]) -> str: + return sse_format( + emit_event( + task_id, + "progress_update_tool_action_started", + data={ + "args": [ + { + "id": ev["tool_id"], + "function": { + "name": ev["name"], + "arguments": ev.get("arguments"), + }, + } + ] + }, + ) + ) + + +def tool_completed_sse( + task_id: str, + ev: dict[str, Any], + base_payload: dict[str, Any], + project: dict[str, str], +) -> str: + output_data: Any = ev.get("output_data") + if ev.get("name") == 
"request_code_execution" and isinstance(output_data, dict): + # Keep the store up-to-date with the latest project snapshot used by the agent + try: + # fire-and-forget update; ignore if running outside async loop + import asyncio + + coro = update_run_project(task_id, project) + if asyncio.get_event_loop().is_running(): + asyncio.create_task(coro) + else: + # fallback if called in sync context + asyncio.run(coro) + except Exception: + pass + # Issue a compact resume token that only carries the run id + output_data = { + **output_data, + "resume_token": make_stream_token({"run_id": task_id}), + } + + return sse_format( + emit_event( + task_id, + "progress_update_tool_action_completed", + data={ + "result": { + "tool_call": { + "id": ev["tool_id"], + "function": { + "name": ev["name"], + "arguments": ev.get("arguments"), + }, + }, + "output_data": output_data, + } + }, + ) + ) diff --git a/python/vibe-coding-ide/frontend/.gitignore b/python/vibe-coding-ide/frontend/.gitignore new file mode 100644 index 0000000000..518227fa9e --- /dev/null +++ b/python/vibe-coding-ide/frontend/.gitignore @@ -0,0 +1,28 @@ +# Logs +logs +*.log +npm-debug.log* +yarn-debug.log* +yarn-error.log* +pnpm-debug.log* +lerna-debug.log* + +node_modules +dist +dist-ssr +*.local + +# Editor directories and files +.vscode/* +!.vscode/extensions.json +.idea +.DS_Store +*.suo +*.ntvs* +*.njsproj +*.sln +*.sw? +.vercel +.next + +src/templates-data.json diff --git a/python/vibe-coding-ide/frontend/next-env.d.ts b/python/vibe-coding-ide/frontend/next-env.d.ts new file mode 100644 index 0000000000..c4b7818fbb --- /dev/null +++ b/python/vibe-coding-ide/frontend/next-env.d.ts @@ -0,0 +1,6 @@ +/// +/// +import "./.next/dev/types/routes.d.ts"; + +// NOTE: This file should not be edited +// see https://nextjs.org/docs/app/api-reference/config/typescript for more information. 
diff --git a/python/vibe-coding-ide/frontend/next.config.ts b/python/vibe-coding-ide/frontend/next.config.ts new file mode 100644 index 0000000000..1fb4df49a7 --- /dev/null +++ b/python/vibe-coding-ide/frontend/next.config.ts @@ -0,0 +1,14 @@ +import type { NextConfig } from 'next' + +const nextConfig: NextConfig = { + async rewrites() { + return [ + { + source: '/api/:path*', + destination: 'http://localhost:8081/api/:path*', + }, + ] + }, +} + +export default nextConfig diff --git a/python/vibe-coding-ide/frontend/package.json b/python/vibe-coding-ide/frontend/package.json new file mode 100644 index 0000000000..24307c6350 --- /dev/null +++ b/python/vibe-coding-ide/frontend/package.json @@ -0,0 +1,39 @@ +{ + "name": "frontend_next", + "private": true, + "version": "0.1.0", + "scripts": { + "predev": "node scripts/build-templates.js", + "dev": "next dev", + "prebuild": "node scripts/build-templates.js", + "build": "next build", + "start": "next start", + "lint": "next lint" + }, + "dependencies": { + "@geist-ui/icons": "^1.0.2", + "@monaco-editor/react": "^4.7.0", + "@tailwindcss/postcss": "^4.1.14", + "jszip": "^3.10.1", + "lucide-react": "^0.514.0", + "monaco-editor": "^0.52.0", + "next": "16.0.0-beta.0", + "postcss": "^8.5.5", + "react": "19.2.0", + "react-dom": "19.2.0", + "react-icons": "^5.2.1", + "react-markdown": "^10.1.0", + "remark-gfm": "^4.0.1", + "tailwindcss": "^4.1.9" + }, + "devDependencies": { + "@types/node": "^20", + "@types/react": "^19", + "@types/react-dom": "^19", + "autoprefixer": "^10.4.18", + "postcss": "^8.4.35", + "tailwindcss": "^4.1.9", + "typescript": "~5.8.3" + }, + "packageManager": "pnpm@10.15.0" +} diff --git a/python/vibe-coding-ide/frontend/pnpm-lock.yaml b/python/vibe-coding-ide/frontend/pnpm-lock.yaml new file mode 100644 index 0000000000..d357f3971b --- /dev/null +++ b/python/vibe-coding-ide/frontend/pnpm-lock.yaml @@ -0,0 +1,2128 @@ +lockfileVersion: '9.0' + +settings: + autoInstallPeers: true + excludeLinksFromLockfile: 
false + +importers: + + .: + dependencies: + '@geist-ui/icons': + specifier: ^1.0.2 + version: 1.0.2(@geist-ui/core@2.3.8(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react@19.2.0) + '@monaco-editor/react': + specifier: ^4.7.0 + version: 4.7.0(monaco-editor@0.52.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + '@tailwindcss/postcss': + specifier: ^4.1.14 + version: 4.1.14 + jszip: + specifier: ^3.10.1 + version: 3.10.1 + lucide-react: + specifier: ^0.514.0 + version: 0.514.0(react@19.2.0) + monaco-editor: + specifier: ^0.52.0 + version: 0.52.2 + next: + specifier: 16.0.0-beta.0 + version: 16.0.0-beta.0(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + postcss: + specifier: ^8.5.5 + version: 8.5.6 + react: + specifier: 19.2.0 + version: 19.2.0 + react-dom: + specifier: 19.2.0 + version: 19.2.0(react@19.2.0) + react-icons: + specifier: ^5.2.1 + version: 5.5.0(react@19.2.0) + react-markdown: + specifier: ^10.1.0 + version: 10.1.0(@types/react@19.2.2)(react@19.2.0) + remark-gfm: + specifier: ^4.0.1 + version: 4.0.1 + tailwindcss: + specifier: ^4.1.9 + version: 4.1.14 + devDependencies: + '@types/node': + specifier: ^20 + version: 20.19.22 + '@types/react': + specifier: ^19 + version: 19.2.2 + '@types/react-dom': + specifier: ^19 + version: 19.2.2(@types/react@19.2.2) + autoprefixer: + specifier: ^10.4.18 + version: 10.4.21(postcss@8.5.6) + typescript: + specifier: ~5.8.3 + version: 5.8.3 + +packages: + + '@alloc/quick-lru@5.2.0': + resolution: {integrity: sha512-UrcABB+4bUrFABwbluTIBErXwvbsU/V7TZWfmbgJfbkwiBuziS9gxdODUyuiecfdGQ85jglMW6juS3+z5TsKLw==} + engines: {node: '>=10'} + + '@babel/runtime@7.28.4': + resolution: {integrity: sha512-Q/N6JNWvIvPnLDvjlE1OUBLPQHH6l3CltCEsHIujp45zQUSSh8K+gHnaEX45yAT1nyngnINhvWtzN+Nb9D8RAQ==} + engines: {node: '>=6.9.0'} + + '@emnapi/runtime@1.5.0': + resolution: {integrity: sha512-97/BJ3iXHww3djw6hYIfErCZFee7qCtrneuLa20UXFCOTCfBM2cvQHjWJ2EG0s0MtdNwInarqCTz35i4wWXHsQ==} + + '@geist-ui/core@2.3.8': + resolution: {integrity: 
sha512-OKwGgTA4+fBM41eQbqDoUj4XBycZbYH7Ynrn6LPO5yKX7zeWPu/R7HN3vB4/oHt34VTDQI5sDNb1SirHvNyB5w==} + peerDependencies: + react: '>=16.9.0' + react-dom: '>=16.9.0' + + '@geist-ui/icons@1.0.2': + resolution: {integrity: sha512-Npfa0NW6fQ31qw/+iMPWbs1hAcJ/3FqBjSLYgEfITDqy/3TJFpFKeVyK04AC/hTmYTsdNruVYczqPNcham5FOQ==} + peerDependencies: + '@geist-ui/core': '>=1.0.0' + react: '>=16.13.0' + + '@img/colour@1.0.0': + resolution: {integrity: sha512-A5P/LfWGFSl6nsckYtjw9da+19jB8hkJ6ACTGcDfEJ0aE+l2n2El7dsVM7UVHZQ9s2lmYMWlrS21YLy2IR1LUw==} + engines: {node: '>=18'} + + '@img/sharp-darwin-arm64@0.34.4': + resolution: {integrity: sha512-sitdlPzDVyvmINUdJle3TNHl+AG9QcwiAMsXmccqsCOMZNIdW2/7S26w0LyU8euiLVzFBL3dXPwVCq/ODnf2vA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [darwin] + + '@img/sharp-darwin-x64@0.34.4': + resolution: {integrity: sha512-rZheupWIoa3+SOdF/IcUe1ah4ZDpKBGWcsPX6MT0lYniH9micvIU7HQkYTfrx5Xi8u+YqwLtxC/3vl8TQN6rMg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-darwin-arm64@1.2.3': + resolution: {integrity: sha512-QzWAKo7kpHxbuHqUC28DZ9pIKpSi2ts2OJnoIGI26+HMgq92ZZ4vk8iJd4XsxN+tYfNJxzH6W62X5eTcsBymHw==} + cpu: [arm64] + os: [darwin] + + '@img/sharp-libvips-darwin-x64@1.2.3': + resolution: {integrity: sha512-Ju+g2xn1E2AKO6YBhxjj+ACcsPQRHT0bhpglxcEf+3uyPY+/gL8veniKoo96335ZaPo03bdDXMv0t+BBFAbmRA==} + cpu: [x64] + os: [darwin] + + '@img/sharp-libvips-linux-arm64@1.2.3': + resolution: {integrity: sha512-I4RxkXU90cpufazhGPyVujYwfIm9Nk1QDEmiIsaPwdnm013F7RIceaCc87kAH+oUB1ezqEvC6ga4m7MSlqsJvQ==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linux-arm@1.2.3': + resolution: {integrity: sha512-x1uE93lyP6wEwGvgAIV0gP6zmaL/a0tGzJs/BIDDG0zeBhMnuUPm7ptxGhUbcGs4okDJrk4nxgrmxpib9g6HpA==} + cpu: [arm] + os: [linux] + + '@img/sharp-libvips-linux-ppc64@1.2.3': + resolution: {integrity: sha512-Y2T7IsQvJLMCBM+pmPbM3bKT/yYJvVtLJGfCs4Sp95SjvnFIjynbjzsa7dY1fRJX45FTSfDksbTp6AGWudiyCg==} + cpu: [ppc64] 
+ os: [linux] + + '@img/sharp-libvips-linux-s390x@1.2.3': + resolution: {integrity: sha512-RgWrs/gVU7f+K7P+KeHFaBAJlNkD1nIZuVXdQv6S+fNA6syCcoboNjsV2Pou7zNlVdNQoQUpQTk8SWDHUA3y/w==} + cpu: [s390x] + os: [linux] + + '@img/sharp-libvips-linux-x64@1.2.3': + resolution: {integrity: sha512-3JU7LmR85K6bBiRzSUc/Ff9JBVIFVvq6bomKE0e63UXGeRw2HPVEjoJke1Yx+iU4rL7/7kUjES4dZ/81Qjhyxg==} + cpu: [x64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-arm64@1.2.3': + resolution: {integrity: sha512-F9q83RZ8yaCwENw1GieztSfj5msz7GGykG/BA+MOUefvER69K/ubgFHNeSyUu64amHIYKGDs4sRCMzXVj8sEyw==} + cpu: [arm64] + os: [linux] + + '@img/sharp-libvips-linuxmusl-x64@1.2.3': + resolution: {integrity: sha512-U5PUY5jbc45ANM6tSJpsgqmBF/VsL6LnxJmIf11kB7J5DctHgqm0SkuXzVWtIY90GnJxKnC/JT251TDnk1fu/g==} + cpu: [x64] + os: [linux] + + '@img/sharp-linux-arm64@0.34.4': + resolution: {integrity: sha512-YXU1F/mN/Wu786tl72CyJjP/Ngl8mGHN1hST4BGl+hiW5jhCnV2uRVTNOcaYPs73NeT/H8Upm3y9582JVuZHrQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linux-arm@0.34.4': + resolution: {integrity: sha512-Xyam4mlqM0KkTHYVSuc6wXRmM7LGN0P12li03jAnZ3EJWZqj83+hi8Y9UxZUbxsgsK1qOEwg7O0Bc0LjqQVtxA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm] + os: [linux] + + '@img/sharp-linux-ppc64@0.34.4': + resolution: {integrity: sha512-F4PDtF4Cy8L8hXA2p3TO6s4aDt93v+LKmpcYFLAVdkkD3hSxZzee0rh6/+94FpAynsuMpLX5h+LRsSG3rIciUQ==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ppc64] + os: [linux] + + '@img/sharp-linux-s390x@0.34.4': + resolution: {integrity: sha512-qVrZKE9Bsnzy+myf7lFKvng6bQzhNUAYcVORq2P7bDlvmF6u2sCmK2KyEQEBdYk+u3T01pVsPrkj943T1aJAsw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [s390x] + os: [linux] + + '@img/sharp-linux-x64@0.34.4': + resolution: {integrity: sha512-ZfGtcp2xS51iG79c6Vhw9CWqQC8l2Ot8dygxoDoIQPTat/Ov3qAa8qpxSrtAEAJW+UjTXc4yxCjNfxm4h6Xm2A==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + 
'@img/sharp-linuxmusl-arm64@0.34.4': + resolution: {integrity: sha512-8hDVvW9eu4yHWnjaOOR8kHVrew1iIX+MUgwxSuH2XyYeNRtLUe4VNioSqbNkB7ZYQJj9rUTT4PyRscyk2PXFKA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [linux] + + '@img/sharp-linuxmusl-x64@0.34.4': + resolution: {integrity: sha512-lU0aA5L8QTlfKjpDCEFOZsTYGn3AEiO6db8W5aQDxj0nQkVrZWmN3ZP9sYKWJdtq3PWPhUNlqehWyXpYDcI9Sg==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [linux] + + '@img/sharp-wasm32@0.34.4': + resolution: {integrity: sha512-33QL6ZO/qpRyG7woB/HUALz28WnTMI2W1jgX3Nu2bypqLIKx/QKMILLJzJjI+SIbvXdG9fUnmrxR7vbi1sTBeA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [wasm32] + + '@img/sharp-win32-arm64@0.34.4': + resolution: {integrity: sha512-2Q250do/5WXTwxW3zjsEuMSv5sUU4Tq9VThWKlU2EYLm4MB7ZeMwF+SFJutldYODXF6jzc6YEOC+VfX0SZQPqA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [arm64] + os: [win32] + + '@img/sharp-win32-ia32@0.34.4': + resolution: {integrity: sha512-3ZeLue5V82dT92CNL6rsal6I2weKw1cYu+rGKm8fOCCtJTR2gYeUfY3FqUnIJsMUPIH68oS5jmZ0NiJ508YpEw==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [ia32] + os: [win32] + + '@img/sharp-win32-x64@0.34.4': + resolution: {integrity: sha512-xIyj4wpYs8J18sVN3mSQjwrw7fKUqRw+Z5rnHNCy5fYTxigBz81u5mOMPmFumwjcn8+ld1ppptMBCLic1nz6ig==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + cpu: [x64] + os: [win32] + + '@isaacs/fs-minipass@4.0.1': + resolution: {integrity: sha512-wgm9Ehl2jpeqP3zw/7mo3kRHFp5MEDhqAdwy1fTGkHAwnkGOVsgpvQhL8B5n1qlb01jV3n/bI0ZfZp5lWA1k4w==} + engines: {node: '>=18.0.0'} + + '@jridgewell/gen-mapping@0.3.13': + resolution: {integrity: sha512-2kkt/7niJ6MgEPxF0bYdQ6etZaA+fQvDcLKckhy1yIQOzaoKjBBjSj63/aLVjYE3qhRt5dvM+uUyfCg6UKCBbA==} + + '@jridgewell/remapping@2.3.5': + resolution: {integrity: sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==} + + '@jridgewell/resolve-uri@3.1.2': + resolution: {integrity: 
sha512-bRISgCIjP20/tbWSPWMEi54QVPRZExkuD9lJL+UIxUKtwVJA8wW1Trb1jMs1RFXo1CBTNZ/5hpC9QvmKWdopKw==} + engines: {node: '>=6.0.0'} + + '@jridgewell/sourcemap-codec@1.5.5': + resolution: {integrity: sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==} + + '@jridgewell/trace-mapping@0.3.31': + resolution: {integrity: sha512-zzNR+SdQSDJzc8joaeP8QQoCQr8NuYx2dIIytl1QeBEZHJ9uW6hebsrYgbz8hJwUQao3TWCMtmfV8Nu1twOLAw==} + + '@monaco-editor/loader@1.6.1': + resolution: {integrity: sha512-w3tEnj9HYEC73wtjdpR089AqkUPskFRcdkxsiSFt3SoUc3OHpmu+leP94CXBm4mHfefmhsdfI0ZQu6qJ0wgtPg==} + + '@monaco-editor/react@4.7.0': + resolution: {integrity: sha512-cyzXQCtO47ydzxpQtCGSQGOC8Gk3ZUeBXFAxD+CWXYFo5OqZyZUonFl0DwUlTyAfRHntBfw2p3w4s9R6oe1eCA==} + peerDependencies: + monaco-editor: '>= 0.25.0 < 1' + react: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + react-dom: ^16.8.0 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + '@next/env@16.0.0-beta.0': + resolution: {integrity: sha512-OWeEhUmIxA9zuQansxKXHWTszsPcvSvar8ym1BOElhU6Lgnb4yLXGshKSoPXoHOHRFcxuYmhI86OA+5Z9TvSSQ==} + + '@next/swc-darwin-arm64@16.0.0-beta.0': + resolution: {integrity: sha512-8IdA5j+xOGQNP+4yBqG5pvNhrDrVp/IMyJSn38t2h3XOhw+BZ63j+m0SyJuj2OKgIBgJLvkHUXEWiSD9u5nfBw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@next/swc-darwin-x64@16.0.0-beta.0': + resolution: {integrity: sha512-SEAPWkMPHnLMTmDw/b0XnIgMsdUZvAGLYzAz9VZxtie1x5dnus3t/n2DP0nmg8O8LkfKJcicnm6fMrNeHJQs9w==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@next/swc-linux-arm64-gnu@16.0.0-beta.0': + resolution: {integrity: sha512-qeccINBs3eWt5EbiaABNUZBWyMRToZfgzSD5tRED1UuZpfxt3asebkanV1GFS/ZQ+z3+pVEzMwhaGwCBbfCa5w==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@next/swc-linux-arm64-musl@16.0.0-beta.0': + resolution: {integrity: sha512-vhhfBp7CNTVHq0tuY+enPKvE91QgjhiWs539EQ0VXCbQMoAuxWr1uOgS3kjfah78oI89icQin4HAO7ePu3KUtw==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + 
'@next/swc-linux-x64-gnu@16.0.0-beta.0': + resolution: {integrity: sha512-2+aMU293kgg0UJLEyhgXy3KwyI0RcSfKHrWT8SnzW8FqcrUcOWYw7qWCP+JcRT5SwQCcjByEOwH+cw+1nBTeIA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@next/swc-linux-x64-musl@16.0.0-beta.0': + resolution: {integrity: sha512-Jgu9BvRLG82DhkeSF+3OTOkZXf6azXlOlQ3TOWHRzh+Cap+fhlO8yp+cYI5jDsopDIfaBW+3ToAL1YLE1n+dGg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@next/swc-win32-arm64-msvc@16.0.0-beta.0': + resolution: {integrity: sha512-5cGucadLwCWUl9v1aOJLzDpyiYpdrFBiApvGVy4GKAFo6uK34mtgCSZcVUQ+DeLjAx0G5B3AgNxVnzMfXKsv5g==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@next/swc-win32-x64-msvc@16.0.0-beta.0': + resolution: {integrity: sha512-gq0WvicjqmoiakDtW7TeabgT58i+5mQ+wy+qYuwCHBbWbed9PMh/wl4ZomsOe2IzlinRPylRGA01jXLPOrX/Nw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@swc/helpers@0.5.15': + resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==} + + '@tailwindcss/node@4.1.14': + resolution: {integrity: sha512-hpz+8vFk3Ic2xssIA3e01R6jkmsAhvkQdXlEbRTk6S10xDAtiQiM3FyvZVGsucefq764euO/b8WUW9ysLdThHw==} + + '@tailwindcss/oxide-android-arm64@4.1.14': + resolution: {integrity: sha512-a94ifZrGwMvbdeAxWoSuGcIl6/DOP5cdxagid7xJv6bwFp3oebp7y2ImYsnZBMTwjn5Ev5xESvS3FFYUGgPODQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [android] + + '@tailwindcss/oxide-darwin-arm64@4.1.14': + resolution: {integrity: sha512-HkFP/CqfSh09xCnrPJA7jud7hij5ahKyWomrC3oiO2U9i0UjP17o9pJbxUN0IJ471GTQQmzwhp0DEcpbp4MZTA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [darwin] + + '@tailwindcss/oxide-darwin-x64@4.1.14': + resolution: {integrity: sha512-eVNaWmCgdLf5iv6Qd3s7JI5SEFBFRtfm6W0mphJYXgvnDEAZ5sZzqmI06bK6xo0IErDHdTA5/t7d4eTfWbWOFw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [darwin] + + '@tailwindcss/oxide-freebsd-x64@4.1.14': + resolution: {integrity: 
sha512-QWLoRXNikEuqtNb0dhQN6wsSVVjX6dmUFzuuiL09ZeXju25dsei2uIPl71y2Ic6QbNBsB4scwBoFnlBfabHkEw==} + engines: {node: '>= 10'} + cpu: [x64] + os: [freebsd] + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.14': + resolution: {integrity: sha512-VB4gjQni9+F0VCASU+L8zSIyjrLLsy03sjcR3bM0V2g4SNamo0FakZFKyUQ96ZVwGK4CaJsc9zd/obQy74o0Fw==} + engines: {node: '>= 10'} + cpu: [arm] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.14': + resolution: {integrity: sha512-qaEy0dIZ6d9vyLnmeg24yzA8XuEAD9WjpM5nIM1sUgQ/Zv7cVkharPDQcmm/t/TvXoKo/0knI3me3AGfdx6w1w==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-arm64-musl@4.1.14': + resolution: {integrity: sha512-ISZjT44s59O8xKsPEIesiIydMG/sCXoMBCqsphDm/WcbnuWLxxb+GcvSIIA5NjUw6F8Tex7s5/LM2yDy8RqYBQ==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-gnu@4.1.14': + resolution: {integrity: sha512-02c6JhLPJj10L2caH4U0zF8Hji4dOeahmuMl23stk0MU1wfd1OraE7rOloidSF8W5JTHkFdVo/O7uRUJJnUAJg==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-linux-x64-musl@4.1.14': + resolution: {integrity: sha512-TNGeLiN1XS66kQhxHG/7wMeQDOoL0S33x9BgmydbrWAb9Qw0KYdd8o1ifx4HOGDWhVmJ+Ul+JQ7lyknQFilO3Q==} + engines: {node: '>= 10'} + cpu: [x64] + os: [linux] + + '@tailwindcss/oxide-wasm32-wasi@4.1.14': + resolution: {integrity: sha512-uZYAsaW/jS/IYkd6EWPJKW/NlPNSkWkBlaeVBi/WsFQNP05/bzkebUL8FH1pdsqx4f2fH/bWFcUABOM9nfiJkQ==} + engines: {node: '>=14.0.0'} + cpu: [wasm32] + bundledDependencies: + - '@napi-rs/wasm-runtime' + - '@emnapi/core' + - '@emnapi/runtime' + - '@tybys/wasm-util' + - '@emnapi/wasi-threads' + - tslib + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.14': + resolution: {integrity: sha512-Az0RnnkcvRqsuoLH2Z4n3JfAef0wElgzHD5Aky/e+0tBUxUhIeIqFBTMNQvmMRSP15fWwmvjBxZ3Q8RhsDnxAA==} + engines: {node: '>= 10'} + cpu: [arm64] + os: [win32] + + '@tailwindcss/oxide-win32-x64-msvc@4.1.14': + resolution: {integrity: 
sha512-ttblVGHgf68kEE4om1n/n44I0yGPkCPbLsqzjvybhpwa6mKKtgFfAzy6btc3HRmuW7nHe0OOrSeNP9sQmmH9XA==} + engines: {node: '>= 10'} + cpu: [x64] + os: [win32] + + '@tailwindcss/oxide@4.1.14': + resolution: {integrity: sha512-23yx+VUbBwCg2x5XWdB8+1lkPajzLmALEfMb51zZUBYaYVPDQvBSD/WYDqiVyBIo2BZFa3yw1Rpy3G2Jp+K0dw==} + engines: {node: '>= 10'} + + '@tailwindcss/postcss@4.1.14': + resolution: {integrity: sha512-BdMjIxy7HUNThK87C7BC8I1rE8BVUsfNQSI5siQ4JK3iIa3w0XyVvVL9SXLWO//CtYTcp1v7zci0fYwJOjB+Zg==} + + '@types/debug@4.1.12': + resolution: {integrity: sha512-vIChWdVG3LG1SMxEvI/AK+FWJthlrqlTu7fbrlywTkkaONwk/UAGaULXRlf8vkzFBLVm0zkMdCquhL5aOjhXPQ==} + + '@types/estree-jsx@1.0.5': + resolution: {integrity: sha512-52CcUVNFyfb1A2ALocQw/Dd1BQFNmSdkuC3BkZ6iqhdMfQz7JWOFRuJFloOzjk+6WijU56m9oKXFAXc7o3Towg==} + + '@types/estree@1.0.8': + resolution: {integrity: sha512-dWHzHa2WqEXI/O1E9OjrocMTKJl2mSrEolh1Iomrv6U+JuNwaHXsXx9bLu5gG7BUWFIN0skIQJQ/L1rIex4X6w==} + + '@types/hast@3.0.4': + resolution: {integrity: sha512-WPs+bbQw5aCj+x6laNGWLH3wviHtoCv/P3+otBhbOhJgG8qtpdAMlTCxLtsTWA7LH1Oh/bFCHsBn0TPS5m30EQ==} + + '@types/mdast@4.0.4': + resolution: {integrity: sha512-kGaNbPh1k7AFzgpud/gMdvIm5xuECykRR+JnWKQno9TAXVa6WIVCGTPvYGekIDL4uwCZQSYbUxNBSb1aUo79oA==} + + '@types/ms@2.1.0': + resolution: {integrity: sha512-GsCCIZDE/p3i96vtEqx+7dBUGXrc7zeSK3wwPHIaRThS+9OhWIXRqzs4d6k1SVU8g91DrNRWxWUGhp5KXQb2VA==} + + '@types/node@20.19.22': + resolution: {integrity: sha512-hRnu+5qggKDSyWHlnmThnUqg62l29Aj/6vcYgUaSFL9oc7DVjeWEQN3PRgdSc6F8d9QRMWkf36CLMch1Do/+RQ==} + + '@types/react-dom@19.2.2': + resolution: {integrity: sha512-9KQPoO6mZCi7jcIStSnlOWn2nEF3mNmyr3rIAsGnAbQKYbRLyqmeSc39EVgtxXVia+LMT8j3knZLAZAh+xLmrw==} + peerDependencies: + '@types/react': ^19.2.0 + + '@types/react@19.2.2': + resolution: {integrity: sha512-6mDvHUFSjyT2B2yeNx2nUgMxh9LtOWvkhIU3uePn2I2oyNymUAX1NIsdgviM4CH+JSrp2D2hsMvJOkxY+0wNRA==} + + '@types/unist@2.0.11': + resolution: {integrity: 
sha512-CmBKiL6NNo/OqgmMn95Fk9Whlp2mtvIv+KNpQKN2F4SjvrEesubTRWGYSg+BnWZOnlCaSTU1sMpsBOzgbYhnsA==} + + '@types/unist@3.0.3': + resolution: {integrity: sha512-ko/gIFJRv177XgZsZcBwnqJN5x/Gien8qNOn0D5bQU/zAzVf9Zt3BlcUiLqhV9y4ARk0GbT3tnUiPNgnTXzc/Q==} + + '@ungap/structured-clone@1.3.0': + resolution: {integrity: sha512-WmoN8qaIAo7WTYWbAZuG8PYEhn5fkz7dZrqTBZ7dtt//lL2Gwms1IcnQ5yHqjDfX8Ft5j4YzDM23f87zBfDe9g==} + + autoprefixer@10.4.21: + resolution: {integrity: sha512-O+A6LWV5LDHSJD3LjHYoNi4VLsj/Whi7k6zG12xTYaU4cQ8oxQGckXNX8cRHK5yOZ/ppVHe0ZBXGzSV9jXdVbQ==} + engines: {node: ^10 || ^12 || >=14} + hasBin: true + peerDependencies: + postcss: ^8.1.0 + + bail@2.0.2: + resolution: {integrity: sha512-0xO6mYd7JB2YesxDKplafRpsiOzPt9V02ddPCLbY1xYGPOX24NTyN50qnUxgCPcSoYMhKpAuBTjQoRZCAkUDRw==} + + baseline-browser-mapping@2.8.17: + resolution: {integrity: sha512-j5zJcx6golJYTG6c05LUZ3Z8Gi+M62zRT/ycz4Xq4iCOdpcxwg7ngEYD4KA0eWZC7U17qh/Smq8bYbACJ0ipBA==} + hasBin: true + + browserslist@4.26.3: + resolution: {integrity: sha512-lAUU+02RFBuCKQPj/P6NgjlbCnLBMp4UtgTx7vNHd3XSIJF87s9a5rA3aH2yw3GS9DqZAUbOtZdCCiZeVRqt0w==} + engines: {node: ^6 || ^7 || ^8 || ^9 || ^10 || ^11 || ^12 || >=13.7} + hasBin: true + + caniuse-lite@1.0.30001751: + resolution: {integrity: sha512-A0QJhug0Ly64Ii3eIqHu5X51ebln3k4yTUkY1j8drqpWHVreg/VLijN48cZ1bYPiqOQuqpkIKnzr/Ul8V+p6Cw==} + + ccount@2.0.1: + resolution: {integrity: sha512-eyrF0jiFpY+3drT6383f1qhkbGsLSifNAjA61IUjZjmLCWjItY6LB9ft9YhoDgwfmclB2zhu51Lc7+95b8NRAg==} + + character-entities-html4@2.1.0: + resolution: {integrity: sha512-1v7fgQRj6hnSwFpq1Eu0ynr/CDEw0rXo2B61qXrLNdHZmPKgb7fqS1a2JwF0rISo9q77jDI8VMEHoApn8qDoZA==} + + character-entities-legacy@3.0.0: + resolution: {integrity: sha512-RpPp0asT/6ufRm//AJVwpViZbGM/MkjQFxJccQRHmISF/22NBtsHqAWmL+/pmkPWoIUJdWyeVleTl1wydHATVQ==} + + character-entities@2.0.2: + resolution: {integrity: sha512-shx7oQ0Awen/BRIdkjkvz54PnEEI/EjwXDSIZp86/KKdbafHh1Df/RYGBhn4hbe2+uKC9FnT5UCEdyPz3ai9hQ==} + + character-reference-invalid@2.0.1: 
+ resolution: {integrity: sha512-iBZ4F4wRbyORVsu0jPV7gXkOsGYjGHPmAyv+HiHG8gi5PtC9KI2j1+v8/tlibRvjoWX027ypmG/n0HtO5t7unw==} + + chownr@3.0.0: + resolution: {integrity: sha512-+IxzY9BZOQd/XuYPRmrvEVjF/nqj5kgT4kEq7VofrDoM1MxoRjEWkrCC3EtLi59TVawxTAn+orJwFQcrqEN1+g==} + engines: {node: '>=18'} + + client-only@0.0.1: + resolution: {integrity: sha512-IV3Ou0jSMzZrd3pZ48nLkT9DA7Ag1pnPzaiQhpW7c3RbcqqzvzzVu+L8gfqMp/8IM2MQtSiqaCxrrcfu8I8rMA==} + + comma-separated-tokens@2.0.3: + resolution: {integrity: sha512-Fu4hJdvzeylCfQPp9SGWidpzrMs7tTrlu6Vb8XGaRGck8QSNZJJp538Wrb60Lax4fPwR64ViY468OIUTbRlGZg==} + + core-util-is@1.0.3: + resolution: {integrity: sha512-ZQBvi1DcpJ4GDqanjucZ2Hj3wEO5pZDS89BWbkcrvdxksJorwUDDZamX9ldFkp9aw2lmBDLgkObEA4DWNJ9FYQ==} + + csstype@3.1.3: + resolution: {integrity: sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==} + + debug@4.4.3: + resolution: {integrity: sha512-RGwwWnwQvkVfavKVt22FGLw+xYSdzARwm0ru6DhTVA3umU5hZc28V3kO4stgYryrTlLpuvgI9GiijltAjNbcqA==} + engines: {node: '>=6.0'} + peerDependencies: + supports-color: '*' + peerDependenciesMeta: + supports-color: + optional: true + + decode-named-character-reference@1.2.0: + resolution: {integrity: sha512-c6fcElNV6ShtZXmsgNgFFV5tVX2PaV4g+MOAkb8eXHvn6sryJBrZa9r0zV6+dtTyoCKxtDy5tyQ5ZwQuidtd+Q==} + + dequal@2.0.3: + resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==} + engines: {node: '>=6'} + + detect-libc@2.1.2: + resolution: {integrity: sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==} + engines: {node: '>=8'} + + devlop@1.1.0: + resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==} + + electron-to-chromium@1.5.237: + resolution: {integrity: sha512-icUt1NvfhGLar5lSWH3tHNzablaA5js3HVHacQimfP8ViEBOQv+L7DKEuHdbTZ0SKCO1ogTJTIL1Gwk9S6Qvcg==} + + enhanced-resolve@5.18.3: + resolution: 
{integrity: sha512-d4lC8xfavMeBjzGr2vECC3fsGXziXZQyJxD868h2M/mBI3PwAuODxAkLkq5HYuvrPYcUtiLzsTo8U3PgX3Ocww==} + engines: {node: '>=10.13.0'} + + escalade@3.2.0: + resolution: {integrity: sha512-WUj2qlxaQtO4g6Pq5c29GTcWGDyd8itL8zTlipgECz3JesAiiOKotd8JU6otB3PACgG6xkJUyVhboMS+bje/jA==} + engines: {node: '>=6'} + + escape-string-regexp@5.0.0: + resolution: {integrity: sha512-/veY75JbMK4j1yjvuUxuVsiS/hr/4iHs9FTT6cgTexxdE0Ly/glccBAkloH/DofkjRbZU3bnoj38mOmhkZ0lHw==} + engines: {node: '>=12'} + + estree-util-is-identifier-name@3.0.0: + resolution: {integrity: sha512-hFtqIDZTIUZ9BXLb8y4pYGyk6+wekIivNVTcmvk8NoOh+VeRn5y6cEHzbURrWbfp1fIqdVipilzj+lfaadNZmg==} + + extend@3.0.2: + resolution: {integrity: sha512-fjquC59cD7CyW6urNXK0FBufkZcoiGG80wTuPujX590cB5Ttln20E2UB4S/WARVqhXffZl2LNgS+gQdPIIim/g==} + + fraction.js@4.3.7: + resolution: {integrity: sha512-ZsDfxO51wGAXREY55a7la9LScWpwv9RxIrYABrlvOFBlH/ShPnrtsXeuUIfXKKOVicNxQ+o8JTbJvjS4M89yew==} + + graceful-fs@4.2.11: + resolution: {integrity: sha512-RbJ5/jmFcNNCcDV5o9eTnBLJ/HszWV0P73bc+Ff4nS/rJj+YaS6IGyiOL0VoBYX+l1Wrl3k63h/KrH+nhJ0XvQ==} + + hast-util-to-jsx-runtime@2.3.6: + resolution: {integrity: sha512-zl6s8LwNyo1P9uw+XJGvZtdFF1GdAkOg8ujOw+4Pyb76874fLps4ueHXDhXWdk6YHQ6OgUtinliG7RsYvCbbBg==} + + hast-util-whitespace@3.0.0: + resolution: {integrity: sha512-88JUN06ipLwsnv+dVn+OIYOvAuvBMy/Qoi6O7mQHxdPXpjy+Cd6xRkWwux7DKO+4sYILtLBRIKgsdpS2gQc7qw==} + + html-url-attributes@3.0.1: + resolution: {integrity: sha512-ol6UPyBWqsrO6EJySPz2O7ZSr856WDrEzM5zMqp+FJJLGMW35cLYmmZnl0vztAZxRUoNZJFTCohfjuIJ8I4QBQ==} + + immediate@3.0.6: + resolution: {integrity: sha512-XXOFtyqDjNDAQxVfYxuF7g9Il/IbWmmlQg2MYKOH8ExIT1qg6xc4zyS3HaEEATgs1btfzxq15ciUiY7gjSXRGQ==} + + inherits@2.0.4: + resolution: {integrity: sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==} + + inline-style-parser@0.2.4: + resolution: {integrity: sha512-0aO8FkhNZlj/ZIbNi7Lxxr12obT7cL1moPfE4tg1LkX7LlLfC6DeX4l2ZEud1ukP9jNQyNnfzQVqwbwmAATY4Q==} + 
+ is-alphabetical@2.0.1: + resolution: {integrity: sha512-FWyyY60MeTNyeSRpkM2Iry0G9hpr7/9kD40mD/cGQEuilcZYS4okz8SN2Q6rLCJ8gbCt6fN+rC+6tMGS99LaxQ==} + + is-alphanumerical@2.0.1: + resolution: {integrity: sha512-hmbYhX/9MUMF5uh7tOXyK/n0ZvWpad5caBA17GsC6vyuCqaWliRG5K1qS9inmUhEMaOBIW7/whAnSwveW/LtZw==} + + is-decimal@2.0.1: + resolution: {integrity: sha512-AAB9hiomQs5DXWcRB1rqsxGUstbRroFOPPVAomNk/3XHR5JyEZChOyTWe2oayKnsSsr/kcGqF+z6yuH6HHpN0A==} + + is-hexadecimal@2.0.1: + resolution: {integrity: sha512-DgZQp241c8oO6cA1SbTEWiXeoxV42vlcJxgH+B3hi1AiqqKruZR3ZGF8In3fj4+/y/7rHvlOZLZtgJ/4ttYGZg==} + + is-plain-obj@4.1.0: + resolution: {integrity: sha512-+Pgi+vMuUNkJyExiMBt5IlFoMyKnr5zhJ4Uspz58WOhBF5QoIZkFyNHIbBAtHwzVAgk5RtndVNsDRN61/mmDqg==} + engines: {node: '>=12'} + + isarray@1.0.0: + resolution: {integrity: sha512-VLghIWNM6ELQzo7zwmcg0NmTVyWKYjvIeM83yjp0wRDTmUnrM678fQbcKBo6n2CJEF0szoG//ytg+TKla89ALQ==} + + jiti@2.6.1: + resolution: {integrity: sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==} + hasBin: true + + jszip@3.10.1: + resolution: {integrity: sha512-xXDvecyTpGLrqFrvkrUSoxxfJI5AH7U8zxxtVclpsUtMCq4JQ290LY8AW5c7Ggnr/Y/oK+bQMbqK2qmtk3pN4g==} + + lie@3.3.0: + resolution: {integrity: sha512-UaiMJzeWRlEujzAuw5LokY1L5ecNQYZKfmyZ9L7wDHb/p5etKaxXhohBcrw0EYby+G/NA52vRSN4N39dxHAIwQ==} + + lightningcss-darwin-arm64@1.30.1: + resolution: {integrity: sha512-c8JK7hyE65X1MHMN+Viq9n11RRC7hgin3HhYKhrMyaXflk5GVplZ60IxyoVtzILeKr+xAJwg6zK6sjTBJ0FKYQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [darwin] + + lightningcss-darwin-x64@1.30.1: + resolution: {integrity: sha512-k1EvjakfumAQoTfcXUcHQZhSpLlkAuEkdMBsI/ivWw9hL+7FtilQc0Cy3hrx0AAQrVtQAbMI7YjCgYgvn37PzA==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [darwin] + + lightningcss-freebsd-x64@1.30.1: + resolution: {integrity: sha512-kmW6UGCGg2PcyUE59K5r0kWfKPAVy4SltVeut+umLCFoJ53RdCUWxcRDzO1eTaxf/7Q2H7LTquFHPL5R+Gjyig==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: 
[freebsd] + + lightningcss-linux-arm-gnueabihf@1.30.1: + resolution: {integrity: sha512-MjxUShl1v8pit+6D/zSPq9S9dQ2NPFSQwGvxBCYaBYLPlCWuPh9/t1MRS8iUaR8i+a6w7aps+B4N0S1TYP/R+Q==} + engines: {node: '>= 12.0.0'} + cpu: [arm] + os: [linux] + + lightningcss-linux-arm64-gnu@1.30.1: + resolution: {integrity: sha512-gB72maP8rmrKsnKYy8XUuXi/4OctJiuQjcuqWNlJQ6jZiWqtPvqFziskH3hnajfvKB27ynbVCucKSm2rkQp4Bw==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-arm64-musl@1.30.1: + resolution: {integrity: sha512-jmUQVx4331m6LIX+0wUhBbmMX7TCfjF5FoOH6SD1CttzuYlGNVpA7QnrmLxrsub43ClTINfGSYyHe2HWeLl5CQ==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [linux] + + lightningcss-linux-x64-gnu@1.30.1: + resolution: {integrity: sha512-piWx3z4wN8J8z3+O5kO74+yr6ze/dKmPnI7vLqfSqI8bccaTGY5xiSGVIJBDd5K5BHlvVLpUB3S2YCfelyJ1bw==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-linux-x64-musl@1.30.1: + resolution: {integrity: sha512-rRomAK7eIkL+tHY0YPxbc5Dra2gXlI63HL+v1Pdi1a3sC+tJTcFrHX+E86sulgAXeI7rSzDYhPSeHHjqFhqfeQ==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [linux] + + lightningcss-win32-arm64-msvc@1.30.1: + resolution: {integrity: sha512-mSL4rqPi4iXq5YVqzSsJgMVFENoa4nGTT/GjO2c0Yl9OuQfPsIfncvLrEW6RbbB24WtZ3xP/2CCmI3tNkNV4oA==} + engines: {node: '>= 12.0.0'} + cpu: [arm64] + os: [win32] + + lightningcss-win32-x64-msvc@1.30.1: + resolution: {integrity: sha512-PVqXh48wh4T53F/1CCu8PIPCxLzWyCnn/9T5W1Jpmdy5h9Cwd+0YQS6/LwhHXSafuc61/xg9Lv5OrCby6a++jg==} + engines: {node: '>= 12.0.0'} + cpu: [x64] + os: [win32] + + lightningcss@1.30.1: + resolution: {integrity: sha512-xi6IyHML+c9+Q3W0S4fCQJOym42pyurFiJUHEcEyHS0CeKzia4yZDEsLlqOFykxOdHpNy0NmvVO31vcSqAxJCg==} + engines: {node: '>= 12.0.0'} + + longest-streak@3.1.0: + resolution: {integrity: sha512-9Ri+o0JYgehTaVBBDoMqIl8GXtbWg711O3srftcHhZ0dqnETqLaoIK0x17fUw9rFSlK/0NlsKe0Ahhyl5pXE2g==} + + lucide-react@0.514.0: + resolution: {integrity: 
sha512-HXD0OAMd+JM2xCjlwG1EGW9Nuab64dhjO3+MvdyD+pSUeOTBaVAPhQblKIYmmX4RyBYbdzW0VWnJpjJmxWGr6w==} + peerDependencies: + react: ^16.5.1 || ^17.0.0 || ^18.0.0 || ^19.0.0 + + magic-string@0.30.19: + resolution: {integrity: sha512-2N21sPY9Ws53PZvsEpVtNuSW+ScYbQdp4b9qUaL+9QkHUrGFKo56Lg9Emg5s9V/qrtNBmiR01sYhUOwu3H+VOw==} + + markdown-table@3.0.4: + resolution: {integrity: sha512-wiYz4+JrLyb/DqW2hkFJxP7Vd7JuTDm77fvbM8VfEQdmSMqcImWeeRbHwZjBjIFki/VaMK2BhFi7oUUZeM5bqw==} + + mdast-util-find-and-replace@3.0.2: + resolution: {integrity: sha512-Tmd1Vg/m3Xz43afeNxDIhWRtFZgM2VLyaf4vSTYwudTyeuTneoL3qtWMA5jeLyz/O1vDJmmV4QuScFCA2tBPwg==} + + mdast-util-from-markdown@2.0.2: + resolution: {integrity: sha512-uZhTV/8NBuw0WHkPTrCqDOl0zVe1BIng5ZtHoDk49ME1qqcjYmmLmOf0gELgcRMxN4w2iuIeVso5/6QymSrgmA==} + + mdast-util-gfm-autolink-literal@2.0.1: + resolution: {integrity: sha512-5HVP2MKaP6L+G6YaxPNjuL0BPrq9orG3TsrZ9YXbA3vDw/ACI4MEsnoDpn6ZNm7GnZgtAcONJyPhOP8tNJQavQ==} + + mdast-util-gfm-footnote@2.1.0: + resolution: {integrity: sha512-sqpDWlsHn7Ac9GNZQMeUzPQSMzR6Wv0WKRNvQRg0KqHh02fpTz69Qc1QSseNX29bhz1ROIyNyxExfawVKTm1GQ==} + + mdast-util-gfm-strikethrough@2.0.0: + resolution: {integrity: sha512-mKKb915TF+OC5ptj5bJ7WFRPdYtuHv0yTRxK2tJvi+BDqbkiG7h7u/9SI89nRAYcmap2xHQL9D+QG/6wSrTtXg==} + + mdast-util-gfm-table@2.0.0: + resolution: {integrity: sha512-78UEvebzz/rJIxLvE7ZtDd/vIQ0RHv+3Mh5DR96p7cS7HsBhYIICDBCu8csTNWNO6tBWfqXPWekRuj2FNOGOZg==} + + mdast-util-gfm-task-list-item@2.0.0: + resolution: {integrity: sha512-IrtvNvjxC1o06taBAVJznEnkiHxLFTzgonUdy8hzFVeDun0uTjxxrRGVaNFqkU1wJR3RBPEfsxmU6jDWPofrTQ==} + + mdast-util-gfm@3.1.0: + resolution: {integrity: sha512-0ulfdQOM3ysHhCJ1p06l0b0VKlhU0wuQs3thxZQagjcjPrlFRqY215uZGHHJan9GEAXd9MbfPjFJz+qMkVR6zQ==} + + mdast-util-mdx-expression@2.0.1: + resolution: {integrity: sha512-J6f+9hUp+ldTZqKRSg7Vw5V6MqjATc+3E4gf3CFNcuZNWD8XdyI6zQ8GqH7f8169MM6P7hMBRDVGnn7oHB9kXQ==} + + mdast-util-mdx-jsx@3.2.0: + resolution: {integrity: 
sha512-lj/z8v0r6ZtsN/cGNNtemmmfoLAFZnjMbNyLzBafjzikOM+glrjNHPlf6lQDOTccj9n5b0PPihEBbhneMyGs1Q==} + + mdast-util-mdxjs-esm@2.0.1: + resolution: {integrity: sha512-EcmOpxsZ96CvlP03NghtH1EsLtr0n9Tm4lPUJUBccV9RwUOneqSycg19n5HGzCf+10LozMRSObtVr3ee1WoHtg==} + + mdast-util-phrasing@4.1.0: + resolution: {integrity: sha512-TqICwyvJJpBwvGAMZjj4J2n0X8QWp21b9l0o7eXyVJ25YNWYbJDVIyD1bZXE6WtV6RmKJVYmQAKWa0zWOABz2w==} + + mdast-util-to-hast@13.2.0: + resolution: {integrity: sha512-QGYKEuUsYT9ykKBCMOEDLsU5JRObWQusAolFMeko/tYPufNkRffBAQjIE+99jbA87xv6FgmjLtwjh9wBWajwAA==} + + mdast-util-to-markdown@2.1.2: + resolution: {integrity: sha512-xj68wMTvGXVOKonmog6LwyJKrYXZPvlwabaryTjLh9LuvovB/KAH+kvi8Gjj+7rJjsFi23nkUxRQv1KqSroMqA==} + + mdast-util-to-string@4.0.0: + resolution: {integrity: sha512-0H44vDimn51F0YwvxSJSm0eCDOJTRlmN0R1yBh4HLj9wiV1Dn0QoXGbvFAWj2hSItVTlCmBF1hqKlIyUBVFLPg==} + + micromark-core-commonmark@2.0.3: + resolution: {integrity: sha512-RDBrHEMSxVFLg6xvnXmb1Ayr2WzLAWjeSATAoxwKYJV94TeNavgoIdA0a9ytzDSVzBy2YKFK+emCPOEibLeCrg==} + + micromark-extension-gfm-autolink-literal@2.1.0: + resolution: {integrity: sha512-oOg7knzhicgQ3t4QCjCWgTmfNhvQbDDnJeVu9v81r7NltNCVmhPy1fJRX27pISafdjL+SVc4d3l48Gb6pbRypw==} + + micromark-extension-gfm-footnote@2.1.0: + resolution: {integrity: sha512-/yPhxI1ntnDNsiHtzLKYnE3vf9JZ6cAisqVDauhp4CEHxlb4uoOTxOCJ+9s51bIB8U1N1FJ1RXOKTIlD5B/gqw==} + + micromark-extension-gfm-strikethrough@2.1.0: + resolution: {integrity: sha512-ADVjpOOkjz1hhkZLlBiYA9cR2Anf8F4HqZUO6e5eDcPQd0Txw5fxLzzxnEkSkfnD0wziSGiv7sYhk/ktvbf1uw==} + + micromark-extension-gfm-table@2.1.1: + resolution: {integrity: sha512-t2OU/dXXioARrC6yWfJ4hqB7rct14e8f7m0cbI5hUmDyyIlwv5vEtooptH8INkbLzOatzKuVbQmAYcbWoyz6Dg==} + + micromark-extension-gfm-tagfilter@2.0.0: + resolution: {integrity: sha512-xHlTOmuCSotIA8TW1mDIM6X2O1SiX5P9IuDtqGonFhEK0qgRI4yeC6vMxEV2dgyr2TiD+2PQ10o+cOhdVAcwfg==} + + micromark-extension-gfm-task-list-item@2.1.0: + resolution: {integrity: 
sha512-qIBZhqxqI6fjLDYFTBIa4eivDMnP+OZqsNwmQ3xNLE4Cxwc+zfQEfbs6tzAo2Hjq+bh6q5F+Z8/cksrLFYWQQw==} + + micromark-extension-gfm@3.0.0: + resolution: {integrity: sha512-vsKArQsicm7t0z2GugkCKtZehqUm31oeGBV/KVSorWSy8ZlNAv7ytjFhvaryUiCUJYqs+NoE6AFhpQvBTM6Q4w==} + + micromark-factory-destination@2.0.1: + resolution: {integrity: sha512-Xe6rDdJlkmbFRExpTOmRj9N3MaWmbAgdpSrBQvCFqhezUn4AHqJHbaEnfbVYYiexVSs//tqOdY/DxhjdCiJnIA==} + + micromark-factory-label@2.0.1: + resolution: {integrity: sha512-VFMekyQExqIW7xIChcXn4ok29YE3rnuyveW3wZQWWqF4Nv9Wk5rgJ99KzPvHjkmPXF93FXIbBp6YdW3t71/7Vg==} + + micromark-factory-space@2.0.1: + resolution: {integrity: sha512-zRkxjtBxxLd2Sc0d+fbnEunsTj46SWXgXciZmHq0kDYGnck/ZSGj9/wULTV95uoeYiK5hRXP2mJ98Uo4cq/LQg==} + + micromark-factory-title@2.0.1: + resolution: {integrity: sha512-5bZ+3CjhAd9eChYTHsjy6TGxpOFSKgKKJPJxr293jTbfry2KDoWkhBb6TcPVB4NmzaPhMs1Frm9AZH7OD4Cjzw==} + + micromark-factory-whitespace@2.0.1: + resolution: {integrity: sha512-Ob0nuZ3PKt/n0hORHyvoD9uZhr+Za8sFoP+OnMcnWK5lngSzALgQYKMr9RJVOWLqQYuyn6ulqGWSXdwf6F80lQ==} + + micromark-util-character@2.1.1: + resolution: {integrity: sha512-wv8tdUTJ3thSFFFJKtpYKOYiGP2+v96Hvk4Tu8KpCAsTMs6yi+nVmGh1syvSCsaxz45J6Jbw+9DD6g97+NV67Q==} + + micromark-util-chunked@2.0.1: + resolution: {integrity: sha512-QUNFEOPELfmvv+4xiNg2sRYeS/P84pTW0TCgP5zc9FpXetHY0ab7SxKyAQCNCc1eK0459uoLI1y5oO5Vc1dbhA==} + + micromark-util-classify-character@2.0.1: + resolution: {integrity: sha512-K0kHzM6afW/MbeWYWLjoHQv1sgg2Q9EccHEDzSkxiP/EaagNzCm7T/WMKZ3rjMbvIpvBiZgwR3dKMygtA4mG1Q==} + + micromark-util-combine-extensions@2.0.1: + resolution: {integrity: sha512-OnAnH8Ujmy59JcyZw8JSbK9cGpdVY44NKgSM7E9Eh7DiLS2E9RNQf0dONaGDzEG9yjEl5hcqeIsj4hfRkLH/Bg==} + + micromark-util-decode-numeric-character-reference@2.0.2: + resolution: {integrity: sha512-ccUbYk6CwVdkmCQMyr64dXz42EfHGkPQlBj5p7YVGzq8I7CtjXZJrubAYezf7Rp+bjPseiROqe7G6foFd+lEuw==} + + micromark-util-decode-string@2.0.1: + resolution: {integrity: 
sha512-nDV/77Fj6eH1ynwscYTOsbK7rR//Uj0bZXBwJZRfaLEJ1iGBR6kIfNmlNqaqJf649EP0F3NWNdeJi03elllNUQ==} + + micromark-util-encode@2.0.1: + resolution: {integrity: sha512-c3cVx2y4KqUnwopcO9b/SCdo2O67LwJJ/UyqGfbigahfegL9myoEFoDYZgkT7f36T0bLrM9hZTAaAyH+PCAXjw==} + + micromark-util-html-tag-name@2.0.1: + resolution: {integrity: sha512-2cNEiYDhCWKI+Gs9T0Tiysk136SnR13hhO8yW6BGNyhOC4qYFnwF1nKfD3HFAIXA5c45RrIG1ub11GiXeYd1xA==} + + micromark-util-normalize-identifier@2.0.1: + resolution: {integrity: sha512-sxPqmo70LyARJs0w2UclACPUUEqltCkJ6PhKdMIDuJ3gSf/Q+/GIe3WKl0Ijb/GyH9lOpUkRAO2wp0GVkLvS9Q==} + + micromark-util-resolve-all@2.0.1: + resolution: {integrity: sha512-VdQyxFWFT2/FGJgwQnJYbe1jjQoNTS4RjglmSjTUlpUMa95Htx9NHeYW4rGDJzbjvCsl9eLjMQwGeElsqmzcHg==} + + micromark-util-sanitize-uri@2.0.1: + resolution: {integrity: sha512-9N9IomZ/YuGGZZmQec1MbgxtlgougxTodVwDzzEouPKo3qFWvymFHWcnDi2vzV1ff6kas9ucW+o3yzJK9YB1AQ==} + + micromark-util-subtokenize@2.1.0: + resolution: {integrity: sha512-XQLu552iSctvnEcgXw6+Sx75GflAPNED1qx7eBJ+wydBb2KCbRZe+NwvIEEMM83uml1+2WSXpBAcp9IUCgCYWA==} + + micromark-util-symbol@2.0.1: + resolution: {integrity: sha512-vs5t8Apaud9N28kgCrRUdEed4UJ+wWNvicHLPxCa9ENlYuAY31M0ETy5y1vA33YoNPDFTghEbnh6efaE8h4x0Q==} + + micromark-util-types@2.0.2: + resolution: {integrity: sha512-Yw0ECSpJoViF1qTU4DC6NwtC4aWGt1EkzaQB8KPPyCRR8z9TWeV0HbEFGTO+ZY1wB22zmxnJqhPyTpOVCpeHTA==} + + micromark@4.0.2: + resolution: {integrity: sha512-zpe98Q6kvavpCr1NPVSCMebCKfD7CA2NqZ+rykeNhONIJBpc1tFKt9hucLGwha3jNTNI8lHpctWJWoimVF4PfA==} + + minipass@7.1.2: + resolution: {integrity: sha512-qOOzS1cBTWYF4BH8fVePDBOO9iptMnGUEZwNc/cMWnTV2nVLZ7VoNWEPHkYczZA0pdoA7dl6e7FL659nX9S2aw==} + engines: {node: '>=16 || 14 >=14.17'} + + minizlib@3.1.0: + resolution: {integrity: sha512-KZxYo1BUkWD2TVFLr0MQoM8vUUigWD3LlD83a/75BqC+4qE0Hb1Vo5v1FgcfaNXvfXzr+5EhQ6ing/CaBijTlw==} + engines: {node: '>= 18'} + + monaco-editor@0.52.2: + resolution: {integrity: 
sha512-GEQWEZmfkOGLdd3XK8ryrfWz3AIP8YymVXiPHEdewrUq7mh0qrKrfHLNCXcbB6sTnMLnOZ3ztSiKcciFUkIJwQ==} + + ms@2.1.3: + resolution: {integrity: sha512-6FlzubTLZG3J2a/NVCAleEhjzq5oxgHyaCU9yYXvcLsvoVaHJq/s5xXI6/XXP6tz7R9xAOtHnSO/tXtF3WRTlA==} + + nanoid@3.3.11: + resolution: {integrity: sha512-N8SpfPUnUp1bK+PMYW8qSWdl9U+wwNWI4QKxOYDy9JAro3WMX7p2OeVRF9v+347pnakNevPmiHhNmZ2HbFA76w==} + engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1} + hasBin: true + + next@16.0.0-beta.0: + resolution: {integrity: sha512-RrpQl/FkN4v+hwcfsgj+ukTDyf3uQ1mcbNs229M9H0POMc8P0LhgrNDAWEiQHviYicLZorWJ47RoQYCzVddkww==} + engines: {node: '>=20.9.0'} + hasBin: true + peerDependencies: + '@opentelemetry/api': ^1.1.0 + '@playwright/test': ^1.51.1 + babel-plugin-react-compiler: '*' + react: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + react-dom: ^18.2.0 || 19.0.0-rc-de68d2f4-20241204 || ^19.0.0 + sass: ^1.3.0 + peerDependenciesMeta: + '@opentelemetry/api': + optional: true + '@playwright/test': + optional: true + babel-plugin-react-compiler: + optional: true + sass: + optional: true + + node-releases@2.0.25: + resolution: {integrity: sha512-4auku8B/vw5psvTiiN9j1dAOsXvMoGqJuKJcR+dTdqiXEK20mMTk1UEo3HS16LeGQsVG6+qKTPM9u/qQ2LqATA==} + + normalize-range@0.1.2: + resolution: {integrity: sha512-bdok/XvKII3nUpklnV6P2hxtMNrCboOjAcyBuQnWEhO665FwrSNRxU+AqpsyvO6LgGYPspN+lu5CLtw4jPRKNA==} + engines: {node: '>=0.10.0'} + + pako@1.0.11: + resolution: {integrity: sha512-4hLB8Py4zZce5s4yd9XzopqwVv/yGNhV1Bl8NTmCq1763HeK2+EwVTv+leGeL13Dnh2wfbqowVPXCIO0z4taYw==} + + parse-entities@4.0.2: + resolution: {integrity: sha512-GG2AQYWoLgL877gQIKeRPGO1xF9+eG1ujIb5soS5gPvLQ1y2o8FL90w2QWNdf9I361Mpp7726c+lj3U0qK1uGw==} + + picocolors@1.1.1: + resolution: {integrity: sha512-xceH2snhtb5M9liqDsmEw56le376mTZkEX/jEb/RxNFyegNul7eNslCXP9FDj/Lcu0X8KEyMceP2ntpaHrDEVA==} + + postcss-value-parser@4.2.0: + resolution: {integrity: sha512-1NNCs6uurfkVbeXG4S8JFT9t19m45ICnif8zWLd5oPSZ50QnwMfK+H3jv408d4jw/7Bttv5axS5IiHoLaVNHeQ==} + + 
postcss@8.4.31: + resolution: {integrity: sha512-PS08Iboia9mts/2ygV3eLpY5ghnUcfLV/EXTOW1E2qYxJKGGBUtNjN76FYHnMs36RmARn41bC0AZmn+rR0OVpQ==} + engines: {node: ^10 || ^12 || >=14} + + postcss@8.5.6: + resolution: {integrity: sha512-3Ybi1tAuwAP9s0r1UQ2J4n5Y0G05bJkpUIO0/bI9MhwmD70S5aTWbXGBwxHrelT+XM1k6dM0pk+SwNkpTRN7Pg==} + engines: {node: ^10 || ^12 || >=14} + + process-nextick-args@2.0.1: + resolution: {integrity: sha512-3ouUOpQhtgrbOa17J7+uxOTpITYWaGP7/AhoR3+A+/1e9skrzelGi/dXzEYyvbxubEF6Wn2ypscTKiKJFFn1ag==} + + property-information@7.1.0: + resolution: {integrity: sha512-TwEZ+X+yCJmYfL7TPUOcvBZ4QfoT5YenQiJuX//0th53DE6w0xxLEtfK3iyryQFddXuvkIk51EEgrJQ0WJkOmQ==} + + react-dom@19.2.0: + resolution: {integrity: sha512-UlbRu4cAiGaIewkPyiRGJk0imDN2T3JjieT6spoL2UeSf5od4n5LB/mQ4ejmxhCFT1tYe8IvaFulzynWovsEFQ==} + peerDependencies: + react: ^19.2.0 + + react-icons@5.5.0: + resolution: {integrity: sha512-MEFcXdkP3dLo8uumGI5xN3lDFNsRtrjbOEKDLD7yv76v4wpnEq2Lt2qeHaQOr34I/wPN3s3+N08WkQ+CW37Xiw==} + peerDependencies: + react: '*' + + react-markdown@10.1.0: + resolution: {integrity: sha512-qKxVopLT/TyA6BX3Ue5NwabOsAzm0Q7kAPwq6L+wWDwisYs7R8vZ0nRXqq6rkueboxpkjvLGU9fWifiX/ZZFxQ==} + peerDependencies: + '@types/react': '>=18' + react: '>=18' + + react@19.2.0: + resolution: {integrity: sha512-tmbWg6W31tQLeB5cdIBOicJDJRR2KzXsV7uSK9iNfLWQ5bIZfxuPEHp7M8wiHyHnn0DD1i7w3Zmin0FtkrwoCQ==} + engines: {node: '>=0.10.0'} + + readable-stream@2.3.8: + resolution: {integrity: sha512-8p0AUk4XODgIewSi0l8Epjs+EVnWiK7NoDIEGU0HhE7+ZyY8D1IMY7odu5lRrFXGg71L15KG8QrPmum45RTtdA==} + + remark-gfm@4.0.1: + resolution: {integrity: sha512-1quofZ2RQ9EWdeN34S79+KExV1764+wCUGop5CPL1WGdD0ocPpu91lzPGbwWMECpEpd42kJGQwzRfyov9j4yNg==} + + remark-parse@11.0.0: + resolution: {integrity: sha512-FCxlKLNGknS5ba/1lmpYijMUzX2esxW5xQqjWxw2eHFfS2MSdaHVINFmhjo+qN1WhZhNimq0dZATN9pH0IDrpA==} + + remark-rehype@11.1.2: + resolution: {integrity: 
sha512-Dh7l57ianaEoIpzbp0PC9UKAdCSVklD8E5Rpw7ETfbTl3FqcOOgq5q2LVDhgGCkaBv7p24JXikPdvhhmHvKMsw==} + + remark-stringify@11.0.0: + resolution: {integrity: sha512-1OSmLd3awB/t8qdoEOMazZkNsfVTeY4fTsgzcQFdXNq8ToTN4ZGwrMnlda4K6smTFKD+GRV6O48i6Z4iKgPPpw==} + + safe-buffer@5.1.2: + resolution: {integrity: sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==} + + scheduler@0.27.0: + resolution: {integrity: sha512-eNv+WrVbKu1f3vbYJT/xtiF5syA5HPIMtf9IgY/nKg0sWqzAUEvqY/xm7OcZc/qafLx/iO9FgOmeSAp4v5ti/Q==} + + semver@7.7.3: + resolution: {integrity: sha512-SdsKMrI9TdgjdweUSR9MweHA4EJ8YxHn8DFaDisvhVlUOe4BF1tLD7GAj0lIqWVl+dPb/rExr0Btby5loQm20Q==} + engines: {node: '>=10'} + hasBin: true + + setimmediate@1.0.5: + resolution: {integrity: sha512-MATJdZp8sLqDl/68LfQmbP8zKPLQNV6BIZoIgrscFDQ+RsvK/BxeDQOgyxKKoh0y/8h3BqVFnCqQ/gd+reiIXA==} + + sharp@0.34.4: + resolution: {integrity: sha512-FUH39xp3SBPnxWvd5iib1X8XY7J0K0X7d93sie9CJg2PO8/7gmg89Nve6OjItK53/MlAushNNxteBYfM6DEuoA==} + engines: {node: ^18.17.0 || ^20.3.0 || >=21.0.0} + + source-map-js@1.2.1: + resolution: {integrity: sha512-UXWMKhLOwVKb728IUtQPXxfYU+usdybtUrK/8uGE8CQMvrhOpwvzDBwj0QhSL7MQc7vIsISBG8VQ8+IDQxpfQA==} + engines: {node: '>=0.10.0'} + + space-separated-tokens@2.0.2: + resolution: {integrity: sha512-PEGlAwrG8yXGXRjW32fGbg66JAlOAwbObuqVoJpv/mRgoWDQfgH1wDPvtzWyUSNAXBGSk8h755YDbbcEy3SH2Q==} + + state-local@1.0.7: + resolution: {integrity: sha512-HTEHMNieakEnoe33shBYcZ7NX83ACUjCu8c40iOGEZsngj9zRnkqS9j1pqQPXwobB0ZcVTk27REb7COQ0UR59w==} + + string_decoder@1.1.1: + resolution: {integrity: sha512-n/ShnvDi6FHbbVfviro+WojiFzv+s8MPMHBczVePfUpDJLwoLT0ht1l4YwBCbi8pJAveEEdnkHyPyTP/mzRfwg==} + + stringify-entities@4.0.4: + resolution: {integrity: sha512-IwfBptatlO+QCJUo19AqvrPNqlVMpW9YEL2LIVY+Rpv2qsjCGxaDLNRgeGsQWJhfItebuJhsGSLjaBbNSQ+ieg==} + + style-to-js@1.1.18: + resolution: {integrity: sha512-JFPn62D4kJaPTnhFUI244MThx+FEGbi+9dw1b9yBBQ+1CZpV7QAT8kUtJ7b7EUNdHajjF/0x8fT+16oLJoojLg==} + + 
style-to-object@1.0.11: + resolution: {integrity: sha512-5A560JmXr7wDyGLK12Nq/EYS38VkGlglVzkis1JEdbGWSnbQIEhZzTJhzURXN5/8WwwFCs/f/VVcmkTppbXLow==} + + styled-jsx@5.1.6: + resolution: {integrity: sha512-qSVyDTeMotdvQYoHWLNGwRFJHC+i+ZvdBRYosOFgC+Wg1vx4frN2/RG/NA7SYqqvKNLf39P2LSRA2pu6n0XYZA==} + engines: {node: '>= 12.0.0'} + peerDependencies: + '@babel/core': '*' + babel-plugin-macros: '*' + react: '>= 16.8.0 || 17.x.x || ^18.0.0-0 || ^19.0.0-0' + peerDependenciesMeta: + '@babel/core': + optional: true + babel-plugin-macros: + optional: true + + tailwindcss@4.1.14: + resolution: {integrity: sha512-b7pCxjGO98LnxVkKjaZSDeNuljC4ueKUddjENJOADtubtdo8llTaJy7HwBMeLNSSo2N5QIAgklslK1+Ir8r6CA==} + + tapable@2.3.0: + resolution: {integrity: sha512-g9ljZiwki/LfxmQADO3dEY1CbpmXT5Hm2fJ+QaGKwSXUylMybePR7/67YW7jOrrvjEgL1Fmz5kzyAjWVWLlucg==} + engines: {node: '>=6'} + + tar@7.5.1: + resolution: {integrity: sha512-nlGpxf+hv0v7GkWBK2V9spgactGOp0qvfWRxUMjqHyzrt3SgwE48DIv/FhqPHJYLHpgW1opq3nERbz5Anq7n1g==} + engines: {node: '>=18'} + + trim-lines@3.0.1: + resolution: {integrity: sha512-kRj8B+YHZCc9kQYdWfJB2/oUl9rA99qbowYYBtr4ui4mZyAQ2JpvVBd/6U2YloATfqBhBTSMhTpgBHtU0Mf3Rg==} + + trough@2.2.0: + resolution: {integrity: sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw==} + + tslib@2.8.1: + resolution: {integrity: sha512-oJFu94HQb+KVduSUQL7wnpmqnfmLsOA/nAh6b6EH0wCEoK0/mPeXU6c3wKDV83MkOuHPRHtSXKKU99IBazS/2w==} + + typescript@5.8.3: + resolution: {integrity: sha512-p1diW6TqL9L07nNxvRMM7hMMw4c5XOo/1ibL4aAIGmSAt9slTE1Xgw5KWuof2uTOvCg9BY7ZRi+GaF+7sfgPeQ==} + engines: {node: '>=14.17'} + hasBin: true + + undici-types@6.21.0: + resolution: {integrity: sha512-iwDZqg0QAGrg9Rav5H4n0M64c3mkR59cJ6wQp+7C4nI0gsmExaedaYLNO44eT4AtBBwjbTiGPMlt2Md0T9H9JQ==} + + unified@11.0.5: + resolution: {integrity: sha512-xKvGhPWw3k84Qjh8bI3ZeJjqnyadK+GEFtazSfZv/rKeTkTjOJho6mFqh2SM96iIcZokxiOpg78GazTSg8+KHA==} + + unist-util-is@6.0.1: + resolution: {integrity: 
sha512-LsiILbtBETkDz8I9p1dQ0uyRUWuaQzd/cuEeS1hoRSyW5E5XGmTzlwY1OrNzzakGowI9Dr/I8HVaw4hTtnxy8g==} + + unist-util-position@5.0.0: + resolution: {integrity: sha512-fucsC7HjXvkB5R3kTCO7kUjRdrS0BJt3M/FPxmHMBOm8JQi2BsHAHFsy27E0EolP8rp0NzXsJ+jNPyDWvOJZPA==} + + unist-util-stringify-position@4.0.0: + resolution: {integrity: sha512-0ASV06AAoKCDkS2+xw5RXJywruurpbC4JZSm7nr7MOt1ojAzvyyaO+UxZf18j8FCF6kmzCZKcAgN/yu2gm2XgQ==} + + unist-util-visit-parents@6.0.2: + resolution: {integrity: sha512-goh1s1TBrqSqukSc8wrjwWhL0hiJxgA8m4kFxGlQ+8FYQ3C/m11FcTs4YYem7V664AhHVvgoQLk890Ssdsr2IQ==} + + unist-util-visit@5.0.0: + resolution: {integrity: sha512-MR04uvD+07cwl/yhVuVWAtw+3GOR/knlL55Nd/wAdblk27GCVt3lqpTivy/tkJcZoNPzTwS1Y+KMojlLDhoTzg==} + + update-browserslist-db@1.1.3: + resolution: {integrity: sha512-UxhIZQ+QInVdunkDAaiazvvT/+fXL5Osr0JZlJulepYu6Jd7qJtDZjlur0emRlT71EN3ScPoE7gvsuIKKNavKw==} + hasBin: true + peerDependencies: + browserslist: '>= 4.21.0' + + util-deprecate@1.0.2: + resolution: {integrity: sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==} + + vfile-message@4.0.3: + resolution: {integrity: sha512-QTHzsGd1EhbZs4AsQ20JX1rC3cOlt/IWJruk893DfLRr57lcnOeMaWG4K0JrRta4mIJZKth2Au3mM3u03/JWKw==} + + vfile@6.0.3: + resolution: {integrity: sha512-KzIbH/9tXat2u30jf+smMwFCsno4wHVdNmzFyL+T/L3UGqqk6JKfVqOFOZEpZSHADH1k40ab6NUIXZq422ov3Q==} + + yallist@5.0.0: + resolution: {integrity: sha512-YgvUTfwqyc7UXVMrB+SImsVYSmTS8X/tSrtdNZMImM+n7+QTriRXyXim0mBrTXNeqzVF0KWGgHPeiyViFFrNDw==} + engines: {node: '>=18'} + + zwitch@2.0.4: + resolution: {integrity: sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A==} + +snapshots: + + '@alloc/quick-lru@5.2.0': {} + + '@babel/runtime@7.28.4': {} + + '@emnapi/runtime@1.5.0': + dependencies: + tslib: 2.8.1 + optional: true + + '@geist-ui/core@2.3.8(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': + dependencies: + '@babel/runtime': 7.28.4 + react: 19.2.0 + react-dom: 
19.2.0(react@19.2.0) + + '@geist-ui/icons@1.0.2(@geist-ui/core@2.3.8(react-dom@19.2.0(react@19.2.0))(react@19.2.0))(react@19.2.0)': + dependencies: + '@geist-ui/core': 2.3.8(react-dom@19.2.0(react@19.2.0))(react@19.2.0) + react: 19.2.0 + + '@img/colour@1.0.0': + optional: true + + '@img/sharp-darwin-arm64@0.34.4': + optionalDependencies: + '@img/sharp-libvips-darwin-arm64': 1.2.3 + optional: true + + '@img/sharp-darwin-x64@0.34.4': + optionalDependencies: + '@img/sharp-libvips-darwin-x64': 1.2.3 + optional: true + + '@img/sharp-libvips-darwin-arm64@1.2.3': + optional: true + + '@img/sharp-libvips-darwin-x64@1.2.3': + optional: true + + '@img/sharp-libvips-linux-arm64@1.2.3': + optional: true + + '@img/sharp-libvips-linux-arm@1.2.3': + optional: true + + '@img/sharp-libvips-linux-ppc64@1.2.3': + optional: true + + '@img/sharp-libvips-linux-s390x@1.2.3': + optional: true + + '@img/sharp-libvips-linux-x64@1.2.3': + optional: true + + '@img/sharp-libvips-linuxmusl-arm64@1.2.3': + optional: true + + '@img/sharp-libvips-linuxmusl-x64@1.2.3': + optional: true + + '@img/sharp-linux-arm64@0.34.4': + optionalDependencies: + '@img/sharp-libvips-linux-arm64': 1.2.3 + optional: true + + '@img/sharp-linux-arm@0.34.4': + optionalDependencies: + '@img/sharp-libvips-linux-arm': 1.2.3 + optional: true + + '@img/sharp-linux-ppc64@0.34.4': + optionalDependencies: + '@img/sharp-libvips-linux-ppc64': 1.2.3 + optional: true + + '@img/sharp-linux-s390x@0.34.4': + optionalDependencies: + '@img/sharp-libvips-linux-s390x': 1.2.3 + optional: true + + '@img/sharp-linux-x64@0.34.4': + optionalDependencies: + '@img/sharp-libvips-linux-x64': 1.2.3 + optional: true + + '@img/sharp-linuxmusl-arm64@0.34.4': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-arm64': 1.2.3 + optional: true + + '@img/sharp-linuxmusl-x64@0.34.4': + optionalDependencies: + '@img/sharp-libvips-linuxmusl-x64': 1.2.3 + optional: true + + '@img/sharp-wasm32@0.34.4': + dependencies: + '@emnapi/runtime': 1.5.0 + optional: 
true + + '@img/sharp-win32-arm64@0.34.4': + optional: true + + '@img/sharp-win32-ia32@0.34.4': + optional: true + + '@img/sharp-win32-x64@0.34.4': + optional: true + + '@isaacs/fs-minipass@4.0.1': + dependencies: + minipass: 7.1.2 + + '@jridgewell/gen-mapping@0.3.13': + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/remapping@2.3.5': + dependencies: + '@jridgewell/gen-mapping': 0.3.13 + '@jridgewell/trace-mapping': 0.3.31 + + '@jridgewell/resolve-uri@3.1.2': {} + + '@jridgewell/sourcemap-codec@1.5.5': {} + + '@jridgewell/trace-mapping@0.3.31': + dependencies: + '@jridgewell/resolve-uri': 3.1.2 + '@jridgewell/sourcemap-codec': 1.5.5 + + '@monaco-editor/loader@1.6.1': + dependencies: + state-local: 1.0.7 + + '@monaco-editor/react@4.7.0(monaco-editor@0.52.2)(react-dom@19.2.0(react@19.2.0))(react@19.2.0)': + dependencies: + '@monaco-editor/loader': 1.6.1 + monaco-editor: 0.52.2 + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) + + '@next/env@16.0.0-beta.0': {} + + '@next/swc-darwin-arm64@16.0.0-beta.0': + optional: true + + '@next/swc-darwin-x64@16.0.0-beta.0': + optional: true + + '@next/swc-linux-arm64-gnu@16.0.0-beta.0': + optional: true + + '@next/swc-linux-arm64-musl@16.0.0-beta.0': + optional: true + + '@next/swc-linux-x64-gnu@16.0.0-beta.0': + optional: true + + '@next/swc-linux-x64-musl@16.0.0-beta.0': + optional: true + + '@next/swc-win32-arm64-msvc@16.0.0-beta.0': + optional: true + + '@next/swc-win32-x64-msvc@16.0.0-beta.0': + optional: true + + '@swc/helpers@0.5.15': + dependencies: + tslib: 2.8.1 + + '@tailwindcss/node@4.1.14': + dependencies: + '@jridgewell/remapping': 2.3.5 + enhanced-resolve: 5.18.3 + jiti: 2.6.1 + lightningcss: 1.30.1 + magic-string: 0.30.19 + source-map-js: 1.2.1 + tailwindcss: 4.1.14 + + '@tailwindcss/oxide-android-arm64@4.1.14': + optional: true + + '@tailwindcss/oxide-darwin-arm64@4.1.14': + optional: true + + '@tailwindcss/oxide-darwin-x64@4.1.14': + optional: true + + 
'@tailwindcss/oxide-freebsd-x64@4.1.14': + optional: true + + '@tailwindcss/oxide-linux-arm-gnueabihf@4.1.14': + optional: true + + '@tailwindcss/oxide-linux-arm64-gnu@4.1.14': + optional: true + + '@tailwindcss/oxide-linux-arm64-musl@4.1.14': + optional: true + + '@tailwindcss/oxide-linux-x64-gnu@4.1.14': + optional: true + + '@tailwindcss/oxide-linux-x64-musl@4.1.14': + optional: true + + '@tailwindcss/oxide-wasm32-wasi@4.1.14': + optional: true + + '@tailwindcss/oxide-win32-arm64-msvc@4.1.14': + optional: true + + '@tailwindcss/oxide-win32-x64-msvc@4.1.14': + optional: true + + '@tailwindcss/oxide@4.1.14': + dependencies: + detect-libc: 2.1.2 + tar: 7.5.1 + optionalDependencies: + '@tailwindcss/oxide-android-arm64': 4.1.14 + '@tailwindcss/oxide-darwin-arm64': 4.1.14 + '@tailwindcss/oxide-darwin-x64': 4.1.14 + '@tailwindcss/oxide-freebsd-x64': 4.1.14 + '@tailwindcss/oxide-linux-arm-gnueabihf': 4.1.14 + '@tailwindcss/oxide-linux-arm64-gnu': 4.1.14 + '@tailwindcss/oxide-linux-arm64-musl': 4.1.14 + '@tailwindcss/oxide-linux-x64-gnu': 4.1.14 + '@tailwindcss/oxide-linux-x64-musl': 4.1.14 + '@tailwindcss/oxide-wasm32-wasi': 4.1.14 + '@tailwindcss/oxide-win32-arm64-msvc': 4.1.14 + '@tailwindcss/oxide-win32-x64-msvc': 4.1.14 + + '@tailwindcss/postcss@4.1.14': + dependencies: + '@alloc/quick-lru': 5.2.0 + '@tailwindcss/node': 4.1.14 + '@tailwindcss/oxide': 4.1.14 + postcss: 8.5.6 + tailwindcss: 4.1.14 + + '@types/debug@4.1.12': + dependencies: + '@types/ms': 2.1.0 + + '@types/estree-jsx@1.0.5': + dependencies: + '@types/estree': 1.0.8 + + '@types/estree@1.0.8': {} + + '@types/hast@3.0.4': + dependencies: + '@types/unist': 3.0.3 + + '@types/mdast@4.0.4': + dependencies: + '@types/unist': 3.0.3 + + '@types/ms@2.1.0': {} + + '@types/node@20.19.22': + dependencies: + undici-types: 6.21.0 + + '@types/react-dom@19.2.2(@types/react@19.2.2)': + dependencies: + '@types/react': 19.2.2 + + '@types/react@19.2.2': + dependencies: + csstype: 3.1.3 + + '@types/unist@2.0.11': {} + + 
'@types/unist@3.0.3': {} + + '@ungap/structured-clone@1.3.0': {} + + autoprefixer@10.4.21(postcss@8.5.6): + dependencies: + browserslist: 4.26.3 + caniuse-lite: 1.0.30001751 + fraction.js: 4.3.7 + normalize-range: 0.1.2 + picocolors: 1.1.1 + postcss: 8.5.6 + postcss-value-parser: 4.2.0 + + bail@2.0.2: {} + + baseline-browser-mapping@2.8.17: {} + + browserslist@4.26.3: + dependencies: + baseline-browser-mapping: 2.8.17 + caniuse-lite: 1.0.30001751 + electron-to-chromium: 1.5.237 + node-releases: 2.0.25 + update-browserslist-db: 1.1.3(browserslist@4.26.3) + + caniuse-lite@1.0.30001751: {} + + ccount@2.0.1: {} + + character-entities-html4@2.1.0: {} + + character-entities-legacy@3.0.0: {} + + character-entities@2.0.2: {} + + character-reference-invalid@2.0.1: {} + + chownr@3.0.0: {} + + client-only@0.0.1: {} + + comma-separated-tokens@2.0.3: {} + + core-util-is@1.0.3: {} + + csstype@3.1.3: {} + + debug@4.4.3: + dependencies: + ms: 2.1.3 + + decode-named-character-reference@1.2.0: + dependencies: + character-entities: 2.0.2 + + dequal@2.0.3: {} + + detect-libc@2.1.2: {} + + devlop@1.1.0: + dependencies: + dequal: 2.0.3 + + electron-to-chromium@1.5.237: {} + + enhanced-resolve@5.18.3: + dependencies: + graceful-fs: 4.2.11 + tapable: 2.3.0 + + escalade@3.2.0: {} + + escape-string-regexp@5.0.0: {} + + estree-util-is-identifier-name@3.0.0: {} + + extend@3.0.2: {} + + fraction.js@4.3.7: {} + + graceful-fs@4.2.11: {} + + hast-util-to-jsx-runtime@2.3.6: + dependencies: + '@types/estree': 1.0.8 + '@types/hast': 3.0.4 + '@types/unist': 3.0.3 + comma-separated-tokens: 2.0.3 + devlop: 1.1.0 + estree-util-is-identifier-name: 3.0.0 + hast-util-whitespace: 3.0.0 + mdast-util-mdx-expression: 2.0.1 + mdast-util-mdx-jsx: 3.2.0 + mdast-util-mdxjs-esm: 2.0.1 + property-information: 7.1.0 + space-separated-tokens: 2.0.2 + style-to-js: 1.1.18 + unist-util-position: 5.0.0 + vfile-message: 4.0.3 + transitivePeerDependencies: + - supports-color + + hast-util-whitespace@3.0.0: + dependencies: + 
'@types/hast': 3.0.4 + + html-url-attributes@3.0.1: {} + + immediate@3.0.6: {} + + inherits@2.0.4: {} + + inline-style-parser@0.2.4: {} + + is-alphabetical@2.0.1: {} + + is-alphanumerical@2.0.1: + dependencies: + is-alphabetical: 2.0.1 + is-decimal: 2.0.1 + + is-decimal@2.0.1: {} + + is-hexadecimal@2.0.1: {} + + is-plain-obj@4.1.0: {} + + isarray@1.0.0: {} + + jiti@2.6.1: {} + + jszip@3.10.1: + dependencies: + lie: 3.3.0 + pako: 1.0.11 + readable-stream: 2.3.8 + setimmediate: 1.0.5 + + lie@3.3.0: + dependencies: + immediate: 3.0.6 + + lightningcss-darwin-arm64@1.30.1: + optional: true + + lightningcss-darwin-x64@1.30.1: + optional: true + + lightningcss-freebsd-x64@1.30.1: + optional: true + + lightningcss-linux-arm-gnueabihf@1.30.1: + optional: true + + lightningcss-linux-arm64-gnu@1.30.1: + optional: true + + lightningcss-linux-arm64-musl@1.30.1: + optional: true + + lightningcss-linux-x64-gnu@1.30.1: + optional: true + + lightningcss-linux-x64-musl@1.30.1: + optional: true + + lightningcss-win32-arm64-msvc@1.30.1: + optional: true + + lightningcss-win32-x64-msvc@1.30.1: + optional: true + + lightningcss@1.30.1: + dependencies: + detect-libc: 2.1.2 + optionalDependencies: + lightningcss-darwin-arm64: 1.30.1 + lightningcss-darwin-x64: 1.30.1 + lightningcss-freebsd-x64: 1.30.1 + lightningcss-linux-arm-gnueabihf: 1.30.1 + lightningcss-linux-arm64-gnu: 1.30.1 + lightningcss-linux-arm64-musl: 1.30.1 + lightningcss-linux-x64-gnu: 1.30.1 + lightningcss-linux-x64-musl: 1.30.1 + lightningcss-win32-arm64-msvc: 1.30.1 + lightningcss-win32-x64-msvc: 1.30.1 + + longest-streak@3.1.0: {} + + lucide-react@0.514.0(react@19.2.0): + dependencies: + react: 19.2.0 + + magic-string@0.30.19: + dependencies: + '@jridgewell/sourcemap-codec': 1.5.5 + + markdown-table@3.0.4: {} + + mdast-util-find-and-replace@3.0.2: + dependencies: + '@types/mdast': 4.0.4 + escape-string-regexp: 5.0.0 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + mdast-util-from-markdown@2.0.2: + 
dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + decode-named-character-reference: 1.2.0 + devlop: 1.1.0 + mdast-util-to-string: 4.0.0 + micromark: 4.0.2 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-decode-string: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + unist-util-stringify-position: 4.0.0 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-autolink-literal@2.0.1: + dependencies: + '@types/mdast': 4.0.4 + ccount: 2.0.1 + devlop: 1.1.0 + mdast-util-find-and-replace: 3.0.2 + micromark-util-character: 2.1.1 + + mdast-util-gfm-footnote@2.1.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + micromark-util-normalize-identifier: 2.0.1 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-strikethrough@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-table@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + markdown-table: 3.0.4 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm-task-list-item@2.0.0: + dependencies: + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-gfm@3.1.0: + dependencies: + mdast-util-from-markdown: 2.0.2 + mdast-util-gfm-autolink-literal: 2.0.1 + mdast-util-gfm-footnote: 2.1.0 + mdast-util-gfm-strikethrough: 2.0.0 + mdast-util-gfm-table: 2.0.0 + mdast-util-gfm-task-list-item: 2.0.0 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx-expression@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + 
devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-mdx-jsx@3.2.0: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + ccount: 2.0.1 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + parse-entities: 4.0.2 + stringify-entities: 4.0.4 + unist-util-stringify-position: 4.0.0 + vfile-message: 4.0.3 + transitivePeerDependencies: + - supports-color + + mdast-util-mdxjs-esm@2.0.1: + dependencies: + '@types/estree-jsx': 1.0.5 + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + devlop: 1.1.0 + mdast-util-from-markdown: 2.0.2 + mdast-util-to-markdown: 2.1.2 + transitivePeerDependencies: + - supports-color + + mdast-util-phrasing@4.1.0: + dependencies: + '@types/mdast': 4.0.4 + unist-util-is: 6.0.1 + + mdast-util-to-hast@13.2.0: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@ungap/structured-clone': 1.3.0 + devlop: 1.1.0 + micromark-util-sanitize-uri: 2.0.1 + trim-lines: 3.0.1 + unist-util-position: 5.0.0 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + + mdast-util-to-markdown@2.1.2: + dependencies: + '@types/mdast': 4.0.4 + '@types/unist': 3.0.3 + longest-streak: 3.1.0 + mdast-util-phrasing: 4.1.0 + mdast-util-to-string: 4.0.0 + micromark-util-classify-character: 2.0.1 + micromark-util-decode-string: 2.0.1 + unist-util-visit: 5.0.0 + zwitch: 2.0.4 + + mdast-util-to-string@4.0.0: + dependencies: + '@types/mdast': 4.0.4 + + micromark-core-commonmark@2.0.3: + dependencies: + decode-named-character-reference: 1.2.0 + devlop: 1.1.0 + micromark-factory-destination: 2.0.1 + micromark-factory-label: 2.0.1 + micromark-factory-space: 2.0.1 + micromark-factory-title: 2.0.1 + micromark-factory-whitespace: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-html-tag-name: 2.0.1 + 
micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-autolink-literal@2.1.0: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-footnote@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-strikethrough@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-classify-character: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-table@2.1.1: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm-tagfilter@2.0.0: + dependencies: + micromark-util-types: 2.0.2 + + micromark-extension-gfm-task-list-item@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-extension-gfm@3.0.0: + dependencies: + micromark-extension-gfm-autolink-literal: 2.1.0 + micromark-extension-gfm-footnote: 2.1.0 + micromark-extension-gfm-strikethrough: 2.1.0 + micromark-extension-gfm-table: 2.1.1 + micromark-extension-gfm-tagfilter: 2.0.0 + micromark-extension-gfm-task-list-item: 2.1.0 + micromark-util-combine-extensions: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-destination@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + 
+ micromark-factory-label@2.0.1: + dependencies: + devlop: 1.1.0 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-space@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-types: 2.0.2 + + micromark-factory-title@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-factory-whitespace@2.0.1: + dependencies: + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-character@2.1.1: + dependencies: + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-chunked@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-classify-character@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-combine-extensions@2.0.1: + dependencies: + micromark-util-chunked: 2.0.1 + micromark-util-types: 2.0.2 + + micromark-util-decode-numeric-character-reference@2.0.2: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-decode-string@2.0.1: + dependencies: + decode-named-character-reference: 1.2.0 + micromark-util-character: 2.1.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-symbol: 2.0.1 + + micromark-util-encode@2.0.1: {} + + micromark-util-html-tag-name@2.0.1: {} + + micromark-util-normalize-identifier@2.0.1: + dependencies: + micromark-util-symbol: 2.0.1 + + micromark-util-resolve-all@2.0.1: + dependencies: + micromark-util-types: 2.0.2 + + micromark-util-sanitize-uri@2.0.1: + dependencies: + micromark-util-character: 2.1.1 + micromark-util-encode: 2.0.1 + micromark-util-symbol: 2.0.1 + + micromark-util-subtokenize@2.1.0: + dependencies: + devlop: 1.1.0 + micromark-util-chunked: 2.0.1 + micromark-util-symbol: 2.0.1 + 
micromark-util-types: 2.0.2 + + micromark-util-symbol@2.0.1: {} + + micromark-util-types@2.0.2: {} + + micromark@4.0.2: + dependencies: + '@types/debug': 4.1.12 + debug: 4.4.3 + decode-named-character-reference: 1.2.0 + devlop: 1.1.0 + micromark-core-commonmark: 2.0.3 + micromark-factory-space: 2.0.1 + micromark-util-character: 2.1.1 + micromark-util-chunked: 2.0.1 + micromark-util-combine-extensions: 2.0.1 + micromark-util-decode-numeric-character-reference: 2.0.2 + micromark-util-encode: 2.0.1 + micromark-util-normalize-identifier: 2.0.1 + micromark-util-resolve-all: 2.0.1 + micromark-util-sanitize-uri: 2.0.1 + micromark-util-subtokenize: 2.1.0 + micromark-util-symbol: 2.0.1 + micromark-util-types: 2.0.2 + transitivePeerDependencies: + - supports-color + + minipass@7.1.2: {} + + minizlib@3.1.0: + dependencies: + minipass: 7.1.2 + + monaco-editor@0.52.2: {} + + ms@2.1.3: {} + + nanoid@3.3.11: {} + + next@16.0.0-beta.0(react-dom@19.2.0(react@19.2.0))(react@19.2.0): + dependencies: + '@next/env': 16.0.0-beta.0 + '@swc/helpers': 0.5.15 + caniuse-lite: 1.0.30001751 + postcss: 8.4.31 + react: 19.2.0 + react-dom: 19.2.0(react@19.2.0) + styled-jsx: 5.1.6(react@19.2.0) + optionalDependencies: + '@next/swc-darwin-arm64': 16.0.0-beta.0 + '@next/swc-darwin-x64': 16.0.0-beta.0 + '@next/swc-linux-arm64-gnu': 16.0.0-beta.0 + '@next/swc-linux-arm64-musl': 16.0.0-beta.0 + '@next/swc-linux-x64-gnu': 16.0.0-beta.0 + '@next/swc-linux-x64-musl': 16.0.0-beta.0 + '@next/swc-win32-arm64-msvc': 16.0.0-beta.0 + '@next/swc-win32-x64-msvc': 16.0.0-beta.0 + sharp: 0.34.4 + transitivePeerDependencies: + - '@babel/core' + - babel-plugin-macros + + node-releases@2.0.25: {} + + normalize-range@0.1.2: {} + + pako@1.0.11: {} + + parse-entities@4.0.2: + dependencies: + '@types/unist': 2.0.11 + character-entities-legacy: 3.0.0 + character-reference-invalid: 2.0.1 + decode-named-character-reference: 1.2.0 + is-alphanumerical: 2.0.1 + is-decimal: 2.0.1 + is-hexadecimal: 2.0.1 + + picocolors@1.1.1: {} 
+ + postcss-value-parser@4.2.0: {} + + postcss@8.4.31: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + postcss@8.5.6: + dependencies: + nanoid: 3.3.11 + picocolors: 1.1.1 + source-map-js: 1.2.1 + + process-nextick-args@2.0.1: {} + + property-information@7.1.0: {} + + react-dom@19.2.0(react@19.2.0): + dependencies: + react: 19.2.0 + scheduler: 0.27.0 + + react-icons@5.5.0(react@19.2.0): + dependencies: + react: 19.2.0 + + react-markdown@10.1.0(@types/react@19.2.2)(react@19.2.0): + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + '@types/react': 19.2.2 + devlop: 1.1.0 + hast-util-to-jsx-runtime: 2.3.6 + html-url-attributes: 3.0.1 + mdast-util-to-hast: 13.2.0 + react: 19.2.0 + remark-parse: 11.0.0 + remark-rehype: 11.1.2 + unified: 11.0.5 + unist-util-visit: 5.0.0 + vfile: 6.0.3 + transitivePeerDependencies: + - supports-color + + react@19.2.0: {} + + readable-stream@2.3.8: + dependencies: + core-util-is: 1.0.3 + inherits: 2.0.4 + isarray: 1.0.0 + process-nextick-args: 2.0.1 + safe-buffer: 5.1.2 + string_decoder: 1.1.1 + util-deprecate: 1.0.2 + + remark-gfm@4.0.1: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-gfm: 3.1.0 + micromark-extension-gfm: 3.0.0 + remark-parse: 11.0.0 + remark-stringify: 11.0.0 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-parse@11.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-from-markdown: 2.0.2 + micromark-util-types: 2.0.2 + unified: 11.0.5 + transitivePeerDependencies: + - supports-color + + remark-rehype@11.1.2: + dependencies: + '@types/hast': 3.0.4 + '@types/mdast': 4.0.4 + mdast-util-to-hast: 13.2.0 + unified: 11.0.5 + vfile: 6.0.3 + + remark-stringify@11.0.0: + dependencies: + '@types/mdast': 4.0.4 + mdast-util-to-markdown: 2.1.2 + unified: 11.0.5 + + safe-buffer@5.1.2: {} + + scheduler@0.27.0: {} + + semver@7.7.3: + optional: true + + setimmediate@1.0.5: {} + + sharp@0.34.4: + dependencies: + '@img/colour': 1.0.0 + detect-libc: 2.1.2 + 
semver: 7.7.3 + optionalDependencies: + '@img/sharp-darwin-arm64': 0.34.4 + '@img/sharp-darwin-x64': 0.34.4 + '@img/sharp-libvips-darwin-arm64': 1.2.3 + '@img/sharp-libvips-darwin-x64': 1.2.3 + '@img/sharp-libvips-linux-arm': 1.2.3 + '@img/sharp-libvips-linux-arm64': 1.2.3 + '@img/sharp-libvips-linux-ppc64': 1.2.3 + '@img/sharp-libvips-linux-s390x': 1.2.3 + '@img/sharp-libvips-linux-x64': 1.2.3 + '@img/sharp-libvips-linuxmusl-arm64': 1.2.3 + '@img/sharp-libvips-linuxmusl-x64': 1.2.3 + '@img/sharp-linux-arm': 0.34.4 + '@img/sharp-linux-arm64': 0.34.4 + '@img/sharp-linux-ppc64': 0.34.4 + '@img/sharp-linux-s390x': 0.34.4 + '@img/sharp-linux-x64': 0.34.4 + '@img/sharp-linuxmusl-arm64': 0.34.4 + '@img/sharp-linuxmusl-x64': 0.34.4 + '@img/sharp-wasm32': 0.34.4 + '@img/sharp-win32-arm64': 0.34.4 + '@img/sharp-win32-ia32': 0.34.4 + '@img/sharp-win32-x64': 0.34.4 + optional: true + + source-map-js@1.2.1: {} + + space-separated-tokens@2.0.2: {} + + state-local@1.0.7: {} + + string_decoder@1.1.1: + dependencies: + safe-buffer: 5.1.2 + + stringify-entities@4.0.4: + dependencies: + character-entities-html4: 2.1.0 + character-entities-legacy: 3.0.0 + + style-to-js@1.1.18: + dependencies: + style-to-object: 1.0.11 + + style-to-object@1.0.11: + dependencies: + inline-style-parser: 0.2.4 + + styled-jsx@5.1.6(react@19.2.0): + dependencies: + client-only: 0.0.1 + react: 19.2.0 + + tailwindcss@4.1.14: {} + + tapable@2.3.0: {} + + tar@7.5.1: + dependencies: + '@isaacs/fs-minipass': 4.0.1 + chownr: 3.0.0 + minipass: 7.1.2 + minizlib: 3.1.0 + yallist: 5.0.0 + + trim-lines@3.0.1: {} + + trough@2.2.0: {} + + tslib@2.8.1: {} + + typescript@5.8.3: {} + + undici-types@6.21.0: {} + + unified@11.0.5: + dependencies: + '@types/unist': 3.0.3 + bail: 2.0.2 + devlop: 1.1.0 + extend: 3.0.2 + is-plain-obj: 4.1.0 + trough: 2.2.0 + vfile: 6.0.3 + + unist-util-is@6.0.1: + dependencies: + '@types/unist': 3.0.3 + + unist-util-position@5.0.0: + dependencies: + '@types/unist': 3.0.3 + + 
unist-util-stringify-position@4.0.0: + dependencies: + '@types/unist': 3.0.3 + + unist-util-visit-parents@6.0.2: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + + unist-util-visit@5.0.0: + dependencies: + '@types/unist': 3.0.3 + unist-util-is: 6.0.1 + unist-util-visit-parents: 6.0.2 + + update-browserslist-db@1.1.3(browserslist@4.26.3): + dependencies: + browserslist: 4.26.3 + escalade: 3.2.0 + picocolors: 1.1.1 + + util-deprecate@1.0.2: {} + + vfile-message@4.0.3: + dependencies: + '@types/unist': 3.0.3 + unist-util-stringify-position: 4.0.0 + + vfile@6.0.3: + dependencies: + '@types/unist': 3.0.3 + vfile-message: 4.0.3 + + yallist@5.0.0: {} + + zwitch@2.0.4: {} diff --git a/python/vibe-coding-ide/frontend/postcss.config.js b/python/vibe-coding-ide/frontend/postcss.config.js new file mode 100644 index 0000000000..f69c5d4119 --- /dev/null +++ b/python/vibe-coding-ide/frontend/postcss.config.js @@ -0,0 +1,6 @@ +export default { + plugins: { + "@tailwindcss/postcss": {}, + autoprefixer: {}, + }, +}; diff --git a/python/vibe-coding-ide/frontend/scripts/build-templates.js b/python/vibe-coding-ide/frontend/scripts/build-templates.js new file mode 100644 index 0000000000..7dd7aa70f5 --- /dev/null +++ b/python/vibe-coding-ide/frontend/scripts/build-templates.js @@ -0,0 +1,227 @@ +#!/usr/bin/env node + +/** + * Build script to generate src/templates-data.json from the templates directory + * This avoids Turbopack/Next dev server having to process the entire templates tree. 
+ */ + +const fs = require('fs') +const path = require('path') + +const ROOT = path.join(__dirname, '..') +const TEMPLATES_DIR = path.join(ROOT, 'templates') +const OUTPUT_FILE = path.join(ROOT, 'src', 'templates-data.json') + +const TEMPLATE_METADATA = [ + { + id: 'blank', + label: 'Blank', + description: 'Start from an empty project', + directory: 'blank', + defaultActiveFile: 'README.md', + suggestions: [ + 'Create a minimal Python API with FastAPI and run', + 'Add /health and /time endpoints with basic request logging and run', + 'Implement /text/wordcount that accepts JSON and returns counts and run', + ], + }, + { + id: 'react_fastapi', + label: 'Next.js + FastAPI', + description: 'Decoupled frontend (Next.js) and backend (FastAPI)', + directory: 'react-fastapi', + defaultActiveFile: 'backend/main.py', + suggestions: [ + 'Add FastAPI /todos and a Next.js page to list/add todos; connect API and run', + 'Implement FastAPI /math/fibonacci?n=20 and display results in Next.js and run', + 'Add FastAPI logging, /health, /time; add a Next.js status page and run', + ], + }, + { + id: 'fastapi', + label: 'FastAPI', + description: 'Python API with FastAPI + Uvicorn', + directory: 'fastapi', + defaultActiveFile: 'main.py', + suggestions: [ + 'Run this code.', + 'Add a FastAPI /todos API with in-memory CRUD (GET, POST, DELETE) and run', + 'Implement /math/fibonacci?n=20 that returns the sequence as JSON and run', + 'Add request logging middleware plus /health and /time endpoints and run', + ], + }, + { + id: 'express', + label: 'Express', + description: 'Node.js API with Express', + directory: 'express', + defaultActiveFile: 'server.js', + suggestions: [ + 'Run this code.', + 'Add /todos API with in-memory CRUD (GET, POST, DELETE) and run', + 'Implement /math/fibonacci?n=20 that returns the sequence as JSON and run', + 'Add request logging middleware plus /health and /time endpoints and run', + ], + }, + { + id: 'flask', + label: 'Flask', + description: 'Python API with 
Flask', + directory: 'flask', + defaultActiveFile: 'app.py', + suggestions: [ + 'Run this code.', + 'Add /todos API with in-memory CRUD (GET, POST, DELETE) and run', + 'Implement /math/fibonacci?n=20 that returns the sequence as JSON and run', + 'Add request logging middleware plus /health and /time endpoints and run', + ], + }, + { + id: 'hono', + label: 'Hono', + description: 'TypeScript/JavaScript API with Hono', + directory: 'hono', + defaultActiveFile: 'server.ts', + suggestions: [ + 'Run this code.', + 'Add /todos API with in-memory CRUD (GET, POST, DELETE) and run', + 'Implement /math/fibonacci?n=20 that returns the sequence as JSON and run', + 'Add request logging middleware plus /health and /time endpoints and run', + ], + }, + { + id: 'next', + label: 'Next.js', + description: 'React framework with server-side rendering', + directory: 'next', + defaultActiveFile: 'src/app/page.tsx', + suggestions: [ + 'Run this code.', + 'Add /api/todos route with in-memory CRUD (GET, POST, DELETE) and run', + 'Implement /api/math/fibonacci?n=20 returning JSON and render it on the page and run', + 'Add request logging plus /api/health and /api/time routes and run', + ], + }, + { + id: 'next_stack', + label: 'Next.js', + description: 'Next.js frontend built to pair with a backend', + directory: 'next_stack', + defaultActiveFile: 'src/app/page.tsx', + }, + { + id: 'react', + label: 'React (Vite)', + description: 'React SPA powered by Vite + TypeScript', + directory: 'react', + defaultActiveFile: 'src/App.tsx', + suggestions: [ + 'Run this code.', + 'Add a simple todo list component with local state and run', + 'Fetch from a /api/health endpoint and render status and run', + ], + }, + { + id: 'react_stack', + label: 'React', + description: 'React (Vite) frontend built to pair with a backend', + directory: 'react_stack', + defaultActiveFile: 'src/App.tsx', + }, +] + +const BINARY_IMAGE_EXTENSIONS = [ + '.png', + '.jpg', + '.jpeg', + '.gif', + '.webp', + '.bmp', +] + +function 
isBinaryImage(filename) { + const ext = path.extname(filename).toLowerCase() + return BINARY_IMAGE_EXTENSIONS.includes(ext) +} + +function readDirRecursive(dir, baseDir = dir) { + const files = {} + + function walk(currentDir) { + const entries = fs.readdirSync(currentDir, { withFileTypes: true }) + + for (const entry of entries) { + const fullPath = path.join(currentDir, entry.name) + const relativePath = path.relative(baseDir, fullPath).replace(/\\/g, '/') + + if (entry.isDirectory()) { + walk(fullPath) + } else if (entry.isFile()) { + // Skip metadata file if present + if (entry.name === 'index.ts') continue + + if (isBinaryImage(entry.name)) { + // For binary images, store as base64 data URL + const buffer = fs.readFileSync(fullPath) + const base64 = buffer.toString('base64') + const ext = path.extname(entry.name).slice(1) + files[relativePath] = `data:image/${ext};base64,${base64}` + } else { + // For text files, store as raw string + files[relativePath] = fs.readFileSync(fullPath, 'utf-8') + } + } + } + } + + walk(dir) + return files +} + +function buildTemplates() { + const templates = [] + + for (const meta of TEMPLATE_METADATA) { + const templateDir = path.join(TEMPLATES_DIR, meta.directory) + + if (!fs.existsSync(templateDir)) { + console.warn(`Warning: Template directory not found: ${templateDir}`) + continue + } + + const files = readDirRecursive(templateDir) + + templates.push({ + id: meta.id, + label: meta.label, + description: meta.description, + files, + defaultActiveFile: meta.defaultActiveFile, + suggestions: meta.suggestions, + }) + } + + return templates +} + +function ensureDirFor(filePath) { + const dir = path.dirname(filePath) + fs.mkdirSync(dir, { recursive: true }) +} + +// Main execution +try { + console.log('Building templates...') + const templates = buildTemplates() + ensureDirFor(OUTPUT_FILE) + fs.writeFileSync(OUTPUT_FILE, JSON.stringify(templates, null, 2)) + console.log( + `✓ Generated ${path.relative(ROOT, OUTPUT_FILE)} with ${ + 
templates.length + } templates` + ) +} catch (err) { + console.error('Failed to build templates-data.json') + console.error(err && err.stack ? err.stack : err) + process.exitCode = 1 +} diff --git a/python/vibe-coding-ide/frontend/src/app/App.tsx b/python/vibe-coding-ide/frontend/src/app/App.tsx new file mode 100644 index 0000000000..bbf57ee1b0 --- /dev/null +++ b/python/vibe-coding-ide/frontend/src/app/App.tsx @@ -0,0 +1,702 @@ +import React, { useState } from 'react' +import JSZipLib from 'jszip' +import { useAgentStream } from '../hooks/useAgentStream' +import { useAgentEvents } from '../hooks/useAgentEvents' +import { useChat } from '../hooks/useChat' +import { useRuns } from '../context/RunContext' +import { ChatTimeline } from '../components/Chat/ChatTimeline' +import { ChatInput } from '../components/Chat/ChatInput' +import { ModelPicker } from '../components/Chat/ModelPicker' +import ThreePane from '../components/ThreePane' +import { FileTree } from '../components/Editor/FileTree' +import ResizableCenter from '../components/ResizableCenter' +import { CodeFixModal } from '../components/CodeFixModal' +import ProjectTabs from '../components/ProjectTabs' +import NewProjectModal from '../components/NewProjectModal' +import { getTemplateById, getStackById } from '../templates' +import { + getProjectChatThreads, + setCurrentChatThread, + startNewChatThread, + upsertCurrentChatThread, + mergeThreadIntoRuns, + upsertThreadById, + deleteChatThread, + type PersistedChatThread, + MAX_THREADS_PER_PROJECT, +} from '../lib/persistence' +import { History as HistoryIcon, Plus, X } from 'lucide-react' +import { useProjects } from '../context/ProjectsContext' +import { ensureGlobalUserId } from '../lib/user' + +// Ensure a stable user id available globally and persisted +ensureGlobalUserId() +const USER_ID = ensureGlobalUserId() + +function App() { + const { + projects, + activeProjectId, + setActiveProjectId, + project, + proposals, + activeFile, + folders, + 
expandedFolders, + setExpandedFolders, + isPathIgnored, + projectForTree, + proposalsForTree, + projectForSend, + proposalsForSend, + setActiveFile, + setCode, + clearProposal, + createFile, + createFolder, + renameFile, + deleteFile, + moveFile, + moveFolder, + renameFolder, + deleteFolder, + input, + setInput, + loading, + setLoading, + cancelling, + setCancelling, + model, + setModel, + activeThreadId, + setActiveThreadId, + templateId, + renameProject, + cloneProject, + deleteProject, + upsertFileIfMissing, + createProject, + } = useProjects() + const [showNewProject, setShowNewProject] = useState(false) + + // Empty projects state flag (used for conditional rendering) + const isNoProjects = projects.length === 0 + const [codeFix, setCodeFix] = useState<{ + fileName: string + startLine: number + endLine: number + selectedCode: string + } | null>(null) + const nextProjectName = React.useMemo( + () => `Project ${projects.length + 1}`, + [projects.length] + ) + + const code = project[activeFile] ?? 
'' + // Runs context + const { runs, runOrder, mergeProjectRuns } = useRuns() + + // ---------------- Chat persistence / history ---------------- + const [chatThreads, setChatThreads] = useState(() => + getProjectChatThreads(activeProjectId) + ) + const [showChatHistory, setShowChatHistory] = useState(false) + const chatHistoryRef = React.useRef(null) + + // Close chat history when clicking outside of its container + React.useEffect(() => { + if (!showChatHistory) return + const handleDocMouseDown = (e: MouseEvent) => { + const root = chatHistoryRef.current + if (root && !root.contains(e.target as Node)) { + setShowChatHistory(false) + } + } + document.addEventListener('mousedown', handleDocMouseDown) + return () => { + document.removeEventListener('mousedown', handleDocMouseDown) + } + }, [showChatHistory]) + + // Keep a memo of last human message timestamps to avoid flicker (only refresh list when a thread's last human time changes) + const lastHumanAtByThreadRef = React.useRef< + Record + >({}) + + // When project changes, load its threads and merge all into memory (do not clear to avoid dropping active streams) + React.useEffect(() => { + const threads = getProjectChatThreads(activeProjectId) + setChatThreads(threads) + if (threads && threads.length > 0) { + for (const t of threads) { + const { runs: scopedRuns, order } = mergeThreadIntoRuns( + activeProjectId, + t + ) + mergeProjectRuns(activeProjectId, scopedRuns, order, t.id) + } + // Set active thread to most recent if none selected + if (!activeThreadId) + setActiveThreadId(threads[0]?.id || `${activeProjectId}_default`) + } else { + // Ensure we have a default active thread id even with no threads + if (!activeThreadId) setActiveThreadId(`${activeProjectId}_default`) + } + }, [activeProjectId, mergeProjectRuns, activeThreadId, setActiveThreadId]) + + // As runs change for this project, persist per-thread so background chats are saved too + // Only refresh the thread list if lastHumanAt changed (i.e., a 
user_message), to prevent flicker during logs/tool updates + React.useEffect(() => { + // Group runs by thread within the active project + const grouped: Record< + string, + { runs: Record; order: string[] } + > = {} + for (const id of runOrder) { + const r = runs[id] + if (!r || r.projectId !== activeProjectId) continue + const tid = r.threadId || `${activeProjectId}_default` + if (!grouped[tid]) grouped[tid] = { runs: {}, order: [] } + grouped[tid].runs[id] = r + grouped[tid].order.push(id) + } + // If nothing yet, ensure at least a default thread exists to keep UX consistent + if (Object.keys(grouped).length === 0) { + upsertCurrentChatThread(activeProjectId, {}, []) + // Do not refresh list; no user message + return + } + // Upsert all threads + for (const [threadId, data] of Object.entries(grouped)) { + upsertThreadById(activeProjectId, threadId, data.runs, data.order) + } + // Compute lastHumanAt map for this project from grouped data + const newMap: Record = {} + for (const [tid, data] of Object.entries(grouped)) { + let latest = 0 + for (const rid of data.order) { + const run = data.runs[rid] + if (!run) continue + for (const a of run.actions) { + if (a.kind === 'user_message' && a.timestamp) { + const t = Date.parse(a.timestamp) + if (!Number.isNaN(t) && t > latest) latest = t + } + } + } + newMap[tid] = latest > 0 ? 
new Date(latest).toISOString() : undefined + } + // Decide whether to refresh the UI list + const prevMap = lastHumanAtByThreadRef.current + let changed = false + const keys = new Set([...Object.keys(prevMap), ...Object.keys(newMap)]) + for (const k of keys) { + if (prevMap[k] !== newMap[k]) { + changed = true + break + } + } + lastHumanAtByThreadRef.current = newMap + if (changed) { + setChatThreads(getProjectChatThreads(activeProjectId)) + } + }, [runs, runOrder, activeProjectId]) + + const openCodeFix = React.useCallback( + (args: { + fileName: string + startLine: number + endLine: number + selectedCode: string + }) => { + setCodeFix(args) + }, + [setCodeFix] + ) + + const closeCodeFix = React.useCallback(() => setCodeFix(null), [setCodeFix]) + + // Declare placeholder for send function; assign later after hook is initialized + const submitCodeFixRef = React.useRef< + null | ((instruction: string) => Promise) + >(null) + const submitCodeFix = React.useCallback(async (instruction: string) => { + if (submitCodeFixRef.current) await submitCodeFixRef.current(instruction) + }, []) + + const timelineActions = React.useMemo(() => { + return runOrder + .map((id) => runs[id]) + .filter((r): r is (typeof runs)[string] => Boolean(r)) + .filter( + (run) => + (run.projectId || undefined) === activeProjectId && + (activeThreadId ? run.threadId === activeThreadId : true) + ) + .flatMap((run) => run.actions || []) + }, [runOrder, runs, activeProjectId, activeThreadId]) + + // Only show thinking spinner for the latest run in the active thread + const threadRunIds = React.useMemo(() => { + return runOrder.filter((id) => { + const r = runs[id] + if (!r) return false + if (r.projectId !== activeProjectId) return false + const tid = r.threadId || `${activeProjectId}_default` + return tid === (activeThreadId || `${activeProjectId}_default`) + }) + }, [runOrder, runs, activeProjectId, activeThreadId]) + + const latestRunId = + threadRunIds.length > 0 ? 
threadRunIds[threadRunIds.length - 1] : null + + // Map SSE events to UI actions and create the stream controller + const isActiveRun = React.useCallback( + (taskId: string) => taskId === latestRunId, + [latestRunId] + ) + const handleAgentEvent = useAgentEvents({ + onRefreshPreview: () => { + setPreviewRefreshToken((t) => t + 1) + }, + isActiveRun, + }) + const stream = useAgentStream({ onMessage: handleAgentEvent }) + + // Derive thinking from final answer presence and basic run lifecycle + const latestRun = latestRunId ? runs[latestRunId] : undefined + const latestRunStatus = latestRun?.status + const latestRunConnected = latestRunId + ? stream.isConnected(latestRunId) + : false + const latestRunHasFinalAnswer = Boolean( + latestRun?.actions?.some((a) => a.kind === 'final_answer') + ) + const isThinking = Boolean( + latestRunId && + !latestRunHasFinalAnswer && + (latestRunConnected || latestRunStatus === 'waiting_exec') + ) + + // Token to force refresh of preview iframes + const [previewRefreshToken, setPreviewRefreshToken] = useState(0) + + // Initialize chat functionality + const { sendPrompt, cancelCurrentTask } = useChat({ + userId: USER_ID, + input, + cancelling, + project: projectForSend, + proposals: proposalsForSend, + projectId: activeProjectId, + threadId: activeThreadId || `${activeProjectId}_default`, + setInput, + setLoading, + setCancelling, + stream, + model, + }) + + // Ensure code-fix submit is bound to the sendPrompt + React.useEffect(() => { + submitCodeFixRef.current = async (instruction: string) => { + setInput(`Fix the following code:\n\n${instruction}`) + await sendPrompt() + } + }, [sendPrompt, setInput]) + + // Initialize the submitCodeFix callback after sendPrompt is available + React.useEffect(() => { + submitCodeFixRef.current = async (instruction: string) => { + const args = codeFix // capture current + if (!args) return + const systemPrompt = `Please update ${args.fileName} between lines ${args.startLine}-${args.endLine} according 
to the user's instruction. Only make minimal, precise edits within that range using the edit_code tool. Preserve style and indentation. Selected code snippet for reference (do not paste with line numbers):\n\n${args.selectedCode}` + setInput(`${instruction}\n\n${systemPrompt}`) + setCodeFix(null) + if (!isThinking) { + await sendPrompt() + } + } + }, [codeFix, setInput, isThinking, sendPrompt, setCodeFix]) + + const handleSendMessage = async () => { + if (!input.trim() || isThinking) return + await sendPrompt() + } + + const handleCancelTask = () => { + // Cancel only if the latest run is the active thinking one + if (latestRunId && isThinking && !cancelling) { + cancelCurrentTask(latestRunId) + } + } + + const handleNewChat = React.useCallback(() => { + const newThread = startNewChatThread(activeProjectId) + setChatThreads(getProjectChatThreads(activeProjectId)) + setActiveThreadId(newThread.id) + // Reset UI state for this project to focus on new chat + setInput('') + setLoading(false) + setCancelling(false) + }, [activeProjectId, setInput, setLoading, setCancelling, setActiveThreadId]) + + return ( + <> + { + if (id === activeProjectId) return + setActiveProjectId(id) + }} + onAdd={() => { + setShowNewProject(true) + }} + onRename={(id, name) => { + if (!name || !name.trim()) return + renameProject(id, name.trim()) + }} + onClone={(id) => { + cloneProject(id) + }} + onDelete={(id) => { + if ( + !window.confirm( + 'Delete this project? This cannot be undone in this session.' 
+ ) + ) + return + const wasLast = projects.length === 1 && projects[0]?.id === id + deleteProject(id) + if (wasLast) setShowNewProject(true) + }} + onDownload={async () => { + try { + const zip = new JSZipLib() + const rootName = ( + projects.find((p) => p.id === activeProjectId)?.name || + 'project' + ).replace(/\s+/g, '_') + const folder = zip.folder(rootName) + // Include all current project files; proposals are suggestions only + Object.entries(project).forEach(([path, content]) => { + const normalized = (path || '').replace(/^\//, '') + if (!normalized) return + folder.file(normalized, content ?? '') + }) + const blob = await zip.generateAsync({ type: 'blob' }) + const a = document.createElement('a') + const url = URL.createObjectURL(blob) + a.href = url + a.download = `${rootName}.zip` + document.body.appendChild(a) + a.click() + document.body.removeChild(a) + URL.revokeObjectURL(url) + } catch (e) { + console.error('Failed to download ZIP', e) + } + }} + /> + } + left={ + isNoProjects ? ( +
+ No project. Create a new one to get started. +
+ ) : ( + { + createFile(name) + }} + onCreateFolder={(folderPath) => { + createFolder(folderPath) + }} + onRename={(oldPath, newPath) => { + renameFile(oldPath, newPath) + }} + onDelete={(path) => { + deleteFile(path) + }} + onMoveFile={(src, destDir) => { + moveFile(src, destDir) + }} + onMoveFolder={(srcFolder, destDir) => { + moveFolder(srcFolder, destDir) + }} + proposed={proposalsForTree} + folders={folders} + expandedPaths={expandedFolders} + onExpandedChange={setExpandedFolders} + onRenameFolder={(oldPath, newPath) => { + renameFolder(oldPath, newPath) + }} + onDeleteFolder={(folderPath) => { + deleteFolder(folderPath) + }} + /> + ) + } + center={ + isNoProjects ? ( +
+ +
+ ) : ( + {}} + isIgnored={isPathIgnored} + // When editor requests a code fix open modal + onRequestCodeFix={openCodeFix} + /> + ) + } + right={ + isNoProjects ? ( +
+ ) : ( + <> +
+ +
+ {/* Container ref to detect outside clicks and close chat history */} +
+ + {showChatHistory && ( +
+ {chatThreads.length === 0 ? ( +
+ No previous chats +
+ ) : ( +
+ {chatThreads.map((t) => ( +
+ + +
+ ))} +
+ )} +
+ )} +
+
+
+ { + const normalized = (path || '').replace(/^\//, '') + if (!normalized) return + if (!project[normalized]) { + // If the file exists only as a proposal, open it (to view with proposedContent) + if (proposals[normalized] !== undefined) { + upsertFileIfMissing(normalized, project[normalized] ?? '') + } else { + // Otherwise do nothing + } + } + setActiveFile(normalized) + }} + /> +
+ +
+ { + if (timelineActions.length > 0 || loading) return undefined + const tid = templateId + const tmpl = getTemplateById(tid) || getStackById(tid) + const list = + tmpl?.suggestions && tmpl.suggestions.length > 0 + ? tmpl!.suggestions + : [ + 'Create a minimal Python API with FastAPI and run', + 'Add /health and /time endpoints with basic request logging and run', + 'Implement /text/wordcount that accepts JSON and returns counts and run', + ] + return list + })()} + /> +
+ + ) + } + /> + p.name)} + onClose={() => setShowNewProject(false)} + onCreate={(name, templateId) => { + createProject(name, templateId) + setShowNewProject(false) + }} + /> + + + ) +} + +export default App diff --git a/python/vibe-coding-ide/frontend/src/app/globals.css b/python/vibe-coding-ide/frontend/src/app/globals.css new file mode 100644 index 0000000000..c2418cd9bb --- /dev/null +++ b/python/vibe-coding-ide/frontend/src/app/globals.css @@ -0,0 +1,108 @@ +@import 'tailwindcss'; + +:root { + /* Typography */ + font-family: system-ui, Avenir, Helvetica, Arial, sans-serif; + font-synthesis: none; + text-rendering: optimizeLegibility; + -webkit-font-smoothing: antialiased; + -moz-osx-font-smoothing: grayscale; + + /* Vercel-inspired dark palette */ + --vscode-bg: #0a0a0a; /* app background (near-black) */ + --vscode-sidebar: #0d0d0d; /* side panels */ + --vscode-panel: #0f0f0f; /* panel/header */ + --vscode-panel-border: #1a1a1a; /* borders/separators */ + --vscode-surface: #111111; /* tertiary surfaces */ + --vscode-hover: #161616; /* hover */ + --vscode-contrast: #0c0c0c; /* inputs */ + --vscode-text: #e6e6e6; /* primary text */ + --vscode-muted: #a1a1a1; /* muted text */ + --vscode-subtle: #8a8a8a; /* subtler text */ + --vscode-accent: #0070f3; /* brand accent */ + --vscode-selection: #0b2a6b; /* selection bg */ + --vscode-success: #10b981; /* emerald-500 */ + --vscode-danger: #ef4444; /* red-500 */ + /* Unified app header height */ + --header-height: 3rem; + /* Status bar height */ + --statusbar-height: 26px; + + color-scheme: dark; + color: var(--vscode-text); + background-color: var(--vscode-bg); +} + +/* Light mode fallback (optional) */ +@media (prefers-color-scheme: light) { + :root { + color-scheme: light; + color: #1f2328; + background-color: #ffffff; + } +} + +/* App chrome */ +html, +body, +#root { + height: 100%; +} +body { + margin: 0; + background: var(--vscode-bg); + color: var(--vscode-text); +} + +/* Smooth outlines consistent with VS Code 
*/ +button, +input, +textarea, +select { + outline-color: var(--vscode-accent); +} + +/* Scrollbar similar to VS Code */ +*::-webkit-scrollbar { + width: 12px; + height: 12px; +} +*::-webkit-scrollbar-thumb { + background: #262626; + border: 3px solid transparent; + border-radius: 10px; + background-clip: padding-box; +} +*::-webkit-scrollbar-thumb:hover { + background: #2f2f2f; +} +*::-webkit-scrollbar-track { + background: transparent; +} + +/* Ensure Monaco editor fills its flex container and resizes smoothly */ +.monaco-editor, +.monaco-editor .overflow-guard { + position: absolute !important; + top: 0; + right: 0; + bottom: 0; + left: 0; +} + +/* Subtle blue shimmer for FastAPI entrypoint Run button */ +@keyframes shimmer { + 0% { + box-shadow: 0 0 0 0 rgba(0, 112, 243, 0.4); + } + 70% { + box-shadow: 0 0 0 8px rgba(0, 112, 243, 0); + } + 100% { + box-shadow: 0 0 0 0 rgba(0, 112, 243, 0); + } +} + +.shimmer { + animation: shimmer 1.8s ease-out infinite; +} diff --git a/python/vibe-coding-ide/frontend/src/app/layout.tsx b/python/vibe-coding-ide/frontend/src/app/layout.tsx new file mode 100644 index 0000000000..1706823628 --- /dev/null +++ b/python/vibe-coding-ide/frontend/src/app/layout.tsx @@ -0,0 +1,19 @@ +import type { Metadata } from 'next' +import './globals.css' + +export const metadata: Metadata = { + title: 'Code Agent', + description: 'Next.js wrapper for existing UI during migration', +} + +export default function RootLayout({ + children, +}: { + children: React.ReactNode +}) { + return ( + + {children} + + ) +} diff --git a/python/vibe-coding-ide/frontend/src/app/page.tsx b/python/vibe-coding-ide/frontend/src/app/page.tsx new file mode 100644 index 0000000000..4d89f57d6d --- /dev/null +++ b/python/vibe-coding-ide/frontend/src/app/page.tsx @@ -0,0 +1,21 @@ +'use client' + +import React from 'react' +import App from './App' +import { RunProvider } from '../context/RunContext' +import { ProjectsProvider } from '../context/ProjectsContext' + +export 
default function Home() { + const [mounted, setMounted] = React.useState(false) + React.useEffect(() => { + setMounted(true) + }, []) + if (!mounted) return null + return ( + + + + + + ) +} diff --git a/python/vibe-coding-ide/frontend/src/components/AccountMenu.tsx b/python/vibe-coding-ide/frontend/src/components/AccountMenu.tsx new file mode 100644 index 0000000000..336ce12bb9 --- /dev/null +++ b/python/vibe-coding-ide/frontend/src/components/AccountMenu.tsx @@ -0,0 +1 @@ +export {} diff --git a/python/vibe-coding-ide/frontend/src/components/AuthModal.tsx b/python/vibe-coding-ide/frontend/src/components/AuthModal.tsx new file mode 100644 index 0000000000..336ce12bb9 --- /dev/null +++ b/python/vibe-coding-ide/frontend/src/components/AuthModal.tsx @@ -0,0 +1 @@ +export {} diff --git a/python/vibe-coding-ide/frontend/src/components/Chat/ChatInput.tsx b/python/vibe-coding-ide/frontend/src/components/Chat/ChatInput.tsx new file mode 100644 index 0000000000..1f321e5e7f --- /dev/null +++ b/python/vibe-coding-ide/frontend/src/components/Chat/ChatInput.tsx @@ -0,0 +1,148 @@ +import React from 'react' +import { Loader, Send, X } from 'lucide-react' + +interface ChatInputProps { + value: string + onChange: (next: string) => void + onSend: () => void | Promise + sendDisabled: boolean + showCancel: boolean + onCancel: () => void + cancelling: boolean + // Optional list of suggested prompts that, when clicked, populate the input + suggestions?: string[] +} + +export const ChatInput: React.FC = ({ + value, + onChange, + onSend, + sendDisabled, + showCancel, + onCancel, + cancelling, + suggestions, +}) => { + const textareaRef = React.useRef(null) + const MAX_TEXTAREA_HEIGHT = + typeof window !== 'undefined' ? 
window.innerHeight / 2 : 400 + + const adjustTextareaHeight = React.useCallback(() => { + const el = textareaRef.current + if (el) { + el.style.height = 'auto' + const nextHeight = Math.min(el.scrollHeight, MAX_TEXTAREA_HEIGHT) + el.style.height = `${nextHeight}px` + // Allow the textarea to scroll internally once it reaches the cap + el.style.overflowY = + el.scrollHeight > MAX_TEXTAREA_HEIGHT ? 'auto' : 'hidden' + } + }, [MAX_TEXTAREA_HEIGHT]) + + React.useEffect(() => { + adjustTextareaHeight() + }, [value, adjustTextareaHeight]) + + const handleKeyPress = (e: React.KeyboardEvent) => { + if (e.key === 'Enter' && !e.shiftKey) { + e.preventDefault() + onSend() + } + } + + return ( +
+ {/* Suggestions list - only when empty and not sending */} + {!value.trim() && (suggestions?.length || 0) > 0 ? ( +
+
+ Click and try one of these prompts: +
+
+ {suggestions!.map((s, idx) => ( + + ))} +
+
+ ) : null} + +
+