diff --git a/.gitignore b/.gitignore index 1c0b88b..eece690 100644 --- a/.gitignore +++ b/.gitignore @@ -75,4 +75,6 @@ coverage/ ._*.json ._*.ts ._*.tsx +._*.css +._*.cjs out/ diff --git a/README.md b/README.md index 712e7da..268b310 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # DeployStack Documentation -This repository contains the official documentation site for the [DeployStack](https://deploystack.io/docs/) ecosystem, built with [fumadocs](https://fumadocs.vercel.app/). Visit [deploystack.io](https://deploystack.io) to learn more about our platform. +This repository contains the official documentation site for [DeployStack](https://deploystack.io/docs/), The Complete MCP Management Platform, built with [fumadocs](https://fumadocs.vercel.app/). Visit [deploystack.io](https://deploystack.io) to learn more about our platform. ## Technology Stack @@ -15,9 +15,14 @@ This repository contains the official documentation site for the [DeployStack](h ```text . ├── docs/ # Documentation content (MDX files) -│ ├── deploystack/ # DeployStack documentation -│ ├── docker-to-iac/ # Docker-to-IaC documentation -│ └── assets/ # Images and static assets +│ ├── development/ # Development documentation +│ │ ├── backend/ # Backend development guides +│ │ ├── frontend/ # Frontend development guides +│ │ └── gateway/ # Gateway architecture & implementation +│ ├── self-hosted/ # Self-hosting guides +│ ├── deploystack/ # Core DeployStack documentation +│ ├── assets/ # Images and static assets +│ └── ... # MCP guides and configuration docs ├── app/ # Next.js app directory (fumadocs framework) ├── lib/ # Documentation utilities & components └── source.config.ts # Fumadocs configuration @@ -54,8 +59,9 @@ npm run lint:links # Link validation 3. **Navigation**: Use `meta.json` files in each directory to control navigation structure 4. **Assets**: Place images in `docs/assets/images/` with appropriate subdirectories 5. 
**Links**: Use absolute paths for all references: - - Documentation: `/docs/docker-to-iac/` + - Documentation: `/docs/development/gateway/` - Images: `/docs/assets/images/example.png` +6. **Brand Colors**: Use the primary color (`text-primary`, `bg-primary`) for consistency - avoid introducing other accent colors ### Navigation Structure diff --git a/app/[[...slug]]/page.tsx b/app/[[...slug]]/page.tsx index e662a0d..37c3619 100644 --- a/app/[[...slug]]/page.tsx +++ b/app/[[...slug]]/page.tsx @@ -3,7 +3,7 @@ import { DocsLayout } from 'fumadocs-ui/layouts/docs'; import { HomeLayout } from 'fumadocs-ui/layouts/home'; import { DocsPage, DocsBody } from 'fumadocs-ui/page'; import { notFound } from 'next/navigation'; -import { source } from '@/lib/source'; +import { source, mainSource, developmentSource, selfHostedSource } from '@/lib/source'; import { generatePageMetadata, getCanonicalUrl } from '@/lib/seo-utils'; import { getFinalPageTitle } from '@/lib/h1-extractor'; import { readFile } from 'fs/promises'; @@ -41,12 +41,25 @@ export default async function Page({ ); } + // Determine which section we're in and get the appropriate page tree + const firstSegment = slug[0]; + let pageTree = mainSource.pageTree; + let navTitle = 'DeployStack Docs'; + + if (firstSegment === 'development') { + pageTree = developmentSource.pageTree; + navTitle = 'Development Docs'; + } else if (firstSegment === 'self-hosted') { + pageTree = selfHostedSource.pageTree; + navTitle = 'Self-Hosted Docs'; + } + return ( page._file.flattenedPath) - .map((page: any) => ({ - slug: page._file.flattenedPath.split('/'), - })), - ]; - - return result; + // Simply use the unified source generateParams + return source.generateParams(); } export async function generateMetadata({ diff --git a/app/global.css b/app/global.css index fa9f9ec..d07929f 100644 --- a/app/global.css +++ b/app/global.css @@ -1,5 +1,37 @@ -/* Import Fumadocs UI complete styles */ -@import 'fumadocs-ui/css/style.css'; +/* Import Tailwind 
CSS v4 and Fumadocs UI presets */ +@import 'tailwindcss'; +@import 'fumadocs-ui/css/neutral.css'; +@import 'fumadocs-ui/css/preset.css'; + +/* Include Fumadocs UI source for Tailwind v4 */ +@source '../node_modules/fumadocs-ui/dist/**/*.js'; + +/* Define primary color in Tailwind v4 @theme directive */ +@theme { + /* Primary color - Teal theme matching DeployStack brand */ + --color-primary: hsl(177, 79%, 28%); /* Teal-700 */ + --color-primary-foreground: hsl(0, 0%, 100%); /* White text on primary */ + + /* Additional variants */ + --color-primary-hover: hsl(176, 79%, 23%); /* Darker teal for hover states */ + --color-primary-light: hsl(174, 72%, 56%); /* Lighter teal variant */ + --color-primary-dark: hsl(176, 100%, 16%); /* Darker teal variant */ +} + +/* Dark mode color overrides */ +:root { + /* Light mode is default, defined in @theme above */ +} + +.dark { + /* Dark mode primary colors */ + --color-primary: hsl(174, 72%, 56%); /* Lighter teal for dark mode */ + --color-primary-foreground: hsl(176, 100%, 6%); /* Dark text on primary in dark mode */ + + --color-primary-hover: hsl(173, 68%, 64%); /* Lighter on hover in dark mode */ + --color-primary-light: hsl(172, 66%, 70%); /* Even lighter variant */ + --color-primary-dark: hsl(177, 79%, 28%); /* Original teal for contrast */ +} /* Custom navbar styling to match main site */ /* Navbar height override removed since navbar is now inside content area */ @@ -12,8 +44,8 @@ /* Style the login button to match main site */ [data-fumadocs-nav] a[href*="login"] { - background: hsl(var(--primary)); - color: hsl(var(--primary-foreground)); + background: var(--color-primary); + color: var(--color-primary-foreground); border-radius: 9999px; padding: 0.5rem 1rem; font-weight: 500; @@ -21,12 +53,12 @@ } [data-fumadocs-nav] a[href*="login"]:hover { - background: hsl(var(--primary) / 0.9); + background: var(--color-primary-hover); } /* Ensure Documentation link is highlighted when active */ [data-fumadocs-nav] 
a[href="/docs"][data-active="true"] { - color: hsl(var(--primary)); + color: var(--color-primary); font-weight: 600; } diff --git a/app/layout.config.tsx b/app/layout.config.tsx index a939c12..1ad7d90 100644 --- a/app/layout.config.tsx +++ b/app/layout.config.tsx @@ -13,11 +13,6 @@ const baseConfig = { export const homeOptions: BaseLayoutProps = { ...baseConfig, links: [ - { - text: 'MCP Server', - url: 'https://deploystack.io/mcp', - external: true, - }, { text: 'Changelog', url: 'https://deploystack.io/changelog', diff --git a/docs/deploystack/auth.mdx b/docs/auth.mdx similarity index 98% rename from docs/deploystack/auth.mdx rename to docs/auth.mdx index c415de0..48ef630 100644 --- a/docs/deploystack/auth.mdx +++ b/docs/auth.mdx @@ -229,4 +229,4 @@ For developers and integrations, DeployStack provides REST API endpoints for aut --- -For technical implementation details, see the [Backend Authentication Documentation](/deploystack/development/backend/api) and [Global Settings Management](/deploystack/global-settings). +For technical implementation details, see the [Backend Authentication Documentation](/development/backend/api) and [Global Settings Management](/global-settings). diff --git a/docs/deploystack/development/index.mdx b/docs/deploystack/development/index.mdx deleted file mode 100644 index 862c2b5..0000000 --- a/docs/deploystack/development/index.mdx +++ /dev/null @@ -1,138 +0,0 @@ ---- -title: Development Guide -description: Complete development documentation for DeployStack - covering frontend, backend, and contribution guidelines for the MCP server deployment platform. -icon: FileCode ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Code2, Server, GitBranch, Users } from 'lucide-react'; - -# DeployStack Development - -Welcome to the DeployStack development documentation! 
DeployStack is the leading infrastructure platform for Model Context Protocol (MCP) server deployment, built with modern web technologies and an extensible architecture. - -## Architecture Overview - -DeployStack follows a modern full-stack architecture: - -- **Frontend**: Vue 3 + TypeScript + Vite for the user interface -- **Backend**: Fastify + TypeScript + SQLite/PostgreSQL for the API and data layer -- **MCP Focus**: Specialized CI/CD pipeline for MCP server deployments -- **Multi-Cloud**: Support for AWS, Render.com, Fly.io, DigitalOcean, and more - -## Development Areas - - - } - href="/deploystack/development/frontend" - title="Frontend Development" - > - Vue 3, TypeScript, and Vite development guide. UI components, state management, internationalization, and plugin system. - - - } - href="/deploystack/development/backend" - title="Backend Development" - > - Fastify, TypeScript, and Drizzle ORM development guide. API design, database management, authentication, and plugin architecture. - - - -## Getting Started - -### Prerequisites - -- Node.js 18 or higher -- npm 8 or higher -- Git for version control - -### Quick Setup - -```bash -# Clone the repository -git clone https://github.com/deploystackio/deploystack.git -cd deploystack - -# Install dependencies for both frontend and backend -npm install - -# Start development servers -npm run dev -``` - -This will start both the frontend (http://localhost:5173) and backend (http://localhost:3000) development servers. - -## Development Workflow - -1. **Choose Your Area**: Start with either frontend or backend development -2. **Set Up Environment**: Follow the specific setup guides for your chosen area -3. **Make Changes**: Implement features, fix bugs, or improve documentation -4. **Test**: Run the appropriate test suites -5. 
**Submit**: Create pull requests following our contribution guidelines - -## Project Structure - -```bash -deploystack/ -├── services/ -│ ├── frontend/ # Vue 3 frontend application -│ │ ├── src/ -│ │ ├── public/ -│ │ └── package.json -│ └── backend/ # Fastify backend API -│ ├── src/ -│ ├── plugins/ -│ └── package.json -├── docs/ # Documentation -└── docker-compose.yml # Development environment -``` - -## Key Technologies - -### Frontend Stack -- **Vue 3** with Composition API for reactive user interfaces -- **TypeScript** for type safety and better developer experience -- **Vite** for fast development and building -- **TailwindCSS** with shadcn-vue for consistent styling -- **Vue I18n** for internationalization support - -### Backend Stack -- **Fastify** for high-performance HTTP server -- **TypeScript** for type-safe server development -- **Drizzle ORM** for database operations and migrations -- **Zod** for request/response validation -- **Plugin System** for extensible functionality - -## Development Philosophy - -### MCP-First Approach -DeployStack is purpose-built for the Model Context Protocol ecosystem. Our development decisions prioritize: - -- **MCP Server Compatibility**: Seamless deployment of MCP servers -- **Security Isolation**: Plugin system with proper namespacing -- **Multi-Cloud Support**: Cloud provider agnostic deployments -- **Developer Experience**: Simple, one-click deployment workflows - -### Code Quality -- **Type Safety**: TypeScript throughout the stack -- **Testing**: Comprehensive test coverage -- **Documentation**: Clear, up-to-date documentation -- **Security**: Built-in security best practices - -## Contributing - -We welcome contributions to DeployStack! 
Whether you're: - -- **Adding Features**: New MCP server support, UI improvements, API enhancements -- **Fixing Bugs**: Issues with deployment, UI problems, API errors -- **Improving Documentation**: Better guides, examples, and explanations -- **Creating Plugins**: Extending DeployStack functionality - -## Community - -- **GitHub**: [deploystackio/deploystack](https://github.com/deploystackio/deploystack) -- **Issues**: Report bugs and request features - -For detailed development guides, choose your area of interest from the cards above. Each section contains comprehensive documentation for getting started, best practices, and advanced topics. diff --git a/docs/deploystack/github-integration.mdx b/docs/deploystack/github-integration.mdx deleted file mode 100644 index e41f252..0000000 --- a/docs/deploystack/github-integration.mdx +++ /dev/null @@ -1,344 +0,0 @@ ---- -title: GitHub Integration -description: Seamless GitHub integration for MCP servers, global settings, and automated synchronization in DeployStack. -sidebar: GitHub Integration ---- - -# GitHub Integration - -DeployStack provides comprehensive GitHub integration that enables seamless synchronization of MCP servers, automated repository scanning, and streamlined deployment workflows. This integration connects your GitHub repositories directly with your DeployStack installation. 
- -## Overview - -The GitHub integration system offers: - -- **MCP Server Synchronization**: Automatic detection and sync of MCP server configurations -- **Repository Metadata Extraction**: Pull descriptions, languages, licenses, and topics -- **Version Management**: Automatic version detection from repository tags and releases -- **Global Settings Integration**: Configure GitHub OAuth and API access -- **Team-Based Access**: Respect team boundaries and permissions -- **Real-time Updates**: Monitor repository changes and trigger updates - -## GitHub OAuth Configuration - -### Setting Up GitHub OAuth - -To enable GitHub integration, you need to configure GitHub OAuth in your global settings: - -#### 1. Create GitHub OAuth App - -1. **Go to GitHub**: Navigate to GitHub.com → Settings → Developer settings → OAuth Apps -2. **Create New App**: Click "New OAuth App" -3. **Configure Application**: - - **Application Name**: `DeployStack - [Your Instance]` - - **Homepage URL**: `https://your-deploystack-domain.com` - - **Authorization Callback URL**: `https://your-deploystack-domain.com/auth/github/callback` - - **Application Description**: Optional description of your DeployStack instance - -#### 2. Configure in DeployStack - -1. **Access Global Settings**: Go to Admin → Global Settings → GitHub OAuth -2. **Enter Credentials**: - - **Client ID**: From your GitHub OAuth app - - **Client Secret**: From your GitHub OAuth app - - **Enable GitHub Integration**: Toggle to activate -3. **Save Configuration**: Apply the settings - -#### 3. Test Integration - -1. **Verify Connection**: Use the "Test Connection" button in settings -2. **Check Permissions**: Ensure the app has necessary repository access -3. 
**Validate Callback**: Test the OAuth flow with a user account - -### GitHub App vs OAuth App - -DeployStack supports both GitHub OAuth Apps and GitHub Apps: - -#### GitHub OAuth App (Recommended for most users) -- **Simpler Setup**: Easier to configure and manage -- **User-Based Access**: Uses individual user permissions -- **Public Repositories**: Works well with public repositories -- **Rate Limits**: Subject to user-based rate limits - -#### GitHub App (Enterprise/High-Volume) -- **Enhanced Security**: App-level permissions and authentication -- **Higher Rate Limits**: Better rate limiting for high-volume usage -- **Fine-Grained Permissions**: More granular access control -- **Installation-Based**: Installed per organization/repository - -## MCP Server GitHub Integration - -### Automatic Repository Scanning - -When you provide a GitHub URL for an MCP server, DeployStack automatically: - -#### Repository Information Extraction -- **Description**: Uses repository description as server description -- **Language**: Detects primary programming language -- **License**: Extracts license information -- **Topics**: Imports repository topics as server tags -- **Homepage**: Uses repository homepage URL -- **README**: Processes README for additional metadata - -#### MCP Configuration Detection -- **Package Files**: Scans `package.json`, `pyproject.toml`, `Cargo.toml` -- **MCP Config**: Looks for MCP-specific configuration files -- **Dependencies**: Extracts runtime dependencies -- **Scripts**: Identifies installation and run scripts - -### Repository Synchronization - -#### Manual Synchronization - -1. **Server Management**: Go to your MCP server details -2. **Sync Repository**: Click "Sync from GitHub" button -3. **Review Changes**: Preview what will be updated -4. 
**Apply Updates**: Confirm synchronization - -#### Automatic Synchronization (Future Feature) - -- **Webhook Integration**: Automatic updates on repository changes -- **Scheduled Sync**: Regular synchronization intervals -- **Conflict Resolution**: Handle conflicts between local and remote changes - -### Version Management - -#### Automatic Version Detection - -DeployStack automatically detects versions from: - -- **Git Tags**: Semantic version tags (e.g., `v1.2.3`, `1.2.3`) -- **GitHub Releases**: Published releases with version numbers -- **Package Files**: Version information in `package.json`, etc. -- **Commit History**: Latest commits for development versions - -#### Version Synchronization - -1. **Scan Repository**: Check for new tags and releases -2. **Create Versions**: Automatically create version entries -3. **Update Metadata**: Sync changelog and release notes -4. **Mark Latest**: Identify the latest stable version - -### Supported Repository Structures - -#### Node.js MCP Servers -``` -repository/ -├── package.json # Required: MCP server configuration -├── README.md # Recommended: Documentation -├── src/ # Source code -│ ├── index.ts # Main server file -│ └── tools/ # MCP tools -├── dist/ # Compiled output (optional) -└── .github/ # GitHub workflows (optional) -``` - -#### Python MCP Servers -``` -repository/ -├── pyproject.toml # Required: Python project configuration -├── README.md # Recommended: Documentation -├── src/ # Source code -│ └── mcp_server/ # MCP server package -├── requirements.txt # Dependencies (optional) -└── .github/ # GitHub workflows (optional) -``` - -#### Configuration Requirements - -For optimal integration, repositories should include: - -- **Clear Description**: Repository description explaining the MCP server's purpose -- **Proper Licensing**: Valid open-source license -- **Semantic Versioning**: Use semantic version tags -- **Documentation**: Comprehensive README with usage instructions -- **Topics/Tags**: Relevant GitHub 
topics for categorization - -## GitHub API Integration - -### API Endpoints - -#### Repository Information -```http -GET /api/mcp/github/repo-info -Query Parameters: - - url: GitHub repository URL - - branch: Target branch (default: main) - -Response: -{ - "success": true, - "data": { - "name": "example-mcp-server", - "description": "An example MCP server", - "language": "TypeScript", - "license": "MIT", - "topics": ["mcp", "ai", "tools"], - "homepage": "https://example.com", - "default_branch": "main", - "latest_commit": { - "sha": "abc123", - "message": "Update server configuration", - "date": "2025-01-07T15:30:00Z" - } - } -} -``` - -#### Repository Synchronization -```http -POST /api/mcp/github/sync/{serverId} -Authorization: Required (server management permissions) - -Response: -{ - "success": true, - "data": { - "server_id": "server123", - "sync_status": "completed", - "changes": { - "description": "Updated from repository", - "version": "1.2.3", - "tags": ["mcp", "ai", "updated"] - }, - "last_sync_at": "2025-01-07T15:30:00Z" - } -} -``` - -### Rate Limiting and Best Practices - -#### GitHub API Rate Limits -- **Authenticated Requests**: 5,000 requests per hour -- **Unauthenticated Requests**: 60 requests per hour -- **Search API**: 30 requests per minute -- **GraphQL API**: 5,000 points per hour - -#### Best Practices -- **Cache Repository Data**: Minimize API calls by caching metadata -- **Batch Operations**: Group multiple repository operations -- **Error Handling**: Graceful handling of rate limit errors -- **Retry Logic**: Implement exponential backoff for failed requests - -## Security Considerations - -### Access Control - -#### Repository Access -- **Public Repositories**: No special permissions required -- **Private Repositories**: Requires appropriate GitHub permissions -- **Organization Repositories**: Respects organization access controls -- **Team Boundaries**: DeployStack team permissions still apply - -#### Token Security -- **Secure Storage**: 
GitHub tokens encrypted in database -- **Scope Limitation**: Minimal required scopes for OAuth apps -- **Token Rotation**: Regular token refresh and rotation -- **Audit Logging**: Track all GitHub API operations - -### Privacy and Data Handling - -#### Data Collection -- **Repository Metadata**: Only public metadata is collected -- **No Source Code**: Source code is never stored in DeployStack -- **Minimal Permissions**: Request only necessary GitHub permissions -- **User Consent**: Clear disclosure of GitHub integration features - -#### Data Retention -- **Metadata Caching**: Repository metadata cached for performance -- **Sync History**: Synchronization logs for troubleshooting -- **User Control**: Users can disable GitHub integration anytime - -## Troubleshooting - -### Common Issues - -#### OAuth Configuration Problems - -**Problem**: "GitHub OAuth not configured" error -**Solution**: -1. Verify Client ID and Client Secret in global settings -2. Check callback URL matches GitHub OAuth app configuration -3. Ensure GitHub OAuth app is not suspended -4. Test connection using the settings panel - -**Problem**: "Access denied" during OAuth flow -**Solution**: -1. Check user has access to the repository -2. Verify OAuth app permissions -3. Ensure user has granted necessary scopes -4. Check for organization restrictions - -#### Repository Synchronization Issues - -**Problem**: "Repository not found" error -**Solution**: -1. Verify repository URL is correct and accessible -2. Check repository is public or user has access -3. Ensure repository exists and is not archived -4. Verify GitHub token has repository access - -**Problem**: "Sync failed" with rate limit error -**Solution**: -1. Wait for rate limit reset (shown in error message) -2. Reduce frequency of synchronization operations -3. Consider upgrading to GitHub App for higher limits -4. 
Implement retry logic with exponential backoff - -#### Version Detection Problems - -**Problem**: Versions not detected from repository -**Solution**: -1. Ensure repository uses semantic version tags -2. Check tags follow format: `v1.2.3` or `1.2.3` -3. Verify releases are published (not just tags) -4. Check package.json or pyproject.toml version field - -### Debug Information - -#### Checking GitHub Integration Status - -1. **Global Settings**: Admin → Global Settings → GitHub OAuth -2. **Test Connection**: Use built-in connection test -3. **API Logs**: Check server logs for GitHub API calls -4. **Rate Limit Status**: Monitor current rate limit usage - -#### Repository Analysis - -1. **Repository Info API**: Test `/api/mcp/github/repo-info` endpoint -2. **Manual Sync**: Try manual synchronization from server details -3. **Error Logs**: Check synchronization error messages -4. **GitHub API**: Test direct GitHub API access - -## Future Enhancements - -### Planned Features - -#### Advanced Integration -- **Webhook Support**: Real-time repository change notifications -- **GitHub Actions Integration**: Trigger deployments from CI/CD -- **Pull Request Integration**: Preview changes before merging -- **Issue Tracking**: Link MCP server issues to GitHub issues - -#### Enhanced Automation -- **Automated Testing**: Run MCP server tests on synchronization -- **Dependency Scanning**: Security vulnerability detection -- **License Compliance**: Automated license compatibility checking -- **Quality Metrics**: Code quality and documentation scoring - -#### Enterprise Features -- **GitHub Enterprise Support**: On-premises GitHub integration -- **SAML/SSO Integration**: Enterprise authentication flows -- **Audit Logging**: Comprehensive audit trails -- **Compliance Reporting**: Generate compliance reports - -### Community Contributions - -#### Contributing to GitHub Integration - -- **Feature Requests**: Submit enhancement requests -- **Bug Reports**: Report integration issues -- 
**Documentation**: Improve integration documentation -- **Testing**: Help test new GitHub features - -The GitHub integration system provides a powerful foundation for connecting your repositories with DeployStack's MCP catalog, enabling streamlined workflows and automated synchronization while maintaining security and team boundaries. diff --git a/docs/deploystack/index.mdx b/docs/deploystack/index.mdx deleted file mode 100644 index 5244b79..0000000 --- a/docs/deploystack/index.mdx +++ /dev/null @@ -1,155 +0,0 @@ ---- -title: DeployStack Documentation -description: Official DeployStack documentation - The first CI/CD platform designed for MCP servers. Deploy Model Context Protocol servers across cloud providers with one click. -sidebar: Introduction -icon: Star ---- - -import { Card, Cards } from 'fumadocs-ui/components/card'; -import { Rocket, Settings, Users, Code2, ExternalLink, Zap, Shield, Wrench } from 'lucide-react'; - -# DeployStack - MCP Server CI/CD Platform - -DeployStack is the first CI/CD platform specifically designed for Model Context Protocol (MCP) servers. We make it easy to deploy and manage MCP servers across different cloud providers, transforming complex technical processes into one-click solutions. 
- - - {/* } - href="/deploystack/getting-started" - title="Getting Started" - > - Deploy your first MCP server in minutes with our step-by-step guide - */} - - } - href="https://deploystack.io/mcp" - title="MCP Server Catalog" - external - > - Browse ready-to-deploy MCP servers from the community - - - } - href="/deploystack/one-click-deploy" - title="One-Click Deploy" - > - Deploy any MCP server instantly with zero configuration - - - -## User Guides - -**For administrators and team members** using DeployStack: - - - } - href="/deploystack/global-settings" - title="Global Settings" - > - Configure email, authentication, and system preferences - - - } - href="/deploystack/roles" - title="User Roles and Permissions" - > - Manage user access and team collaboration - - - {/* } - href="/deploystack/troubleshooting" - title="Troubleshooting" - > - Resolve common deployment issues and get help - */} - - -## MCP Server Requirements - -**For MCP server creators** who want to make their servers deployable: - - - } - href="/deploystack/docker-compose-requirements" - title="Docker Compose Requirements" - > - Learn about supported configurations and best practices - - - } - href="https://deploystack.io/submit" - title="Submit Your MCP Server" - external - > - Add your server to our catalog for the community - - - -## Developer Documentation - -**For developers** extending or contributing to DeployStack: - - - } - href="/deploystack/development" - title="Development Guide" - > - Complete development documentation for frontend and backend - - - } - href="/deploystack/development/backend" - title="Backend Development" - > - API, database, plugins, and testing documentation - - - } - href="/docker-to-iac/index" - title="Docker-to-IaC Module" - > - Core deployment engine and infrastructure automation - - - -## Community & Resources - - - } - href="https://discord.gg/UjFWwByB" - title="Join our Discord" - external - > - Get help and connect with the DeployStack community - - - } - 
href="https://deploystack.io" - title="Visit DeployStack" - external - > - Main website and MCP server catalog - - - } - href="https://github.com/deploystackio/deploystack" - title="GitHub Repository" - external - > - Contribute to the project and view source code - - - diff --git a/docs/deploystack/meta.json b/docs/deploystack/meta.json deleted file mode 100644 index c22a970..0000000 --- a/docs/deploystack/meta.json +++ /dev/null @@ -1,25 +0,0 @@ -{ - "title": "DeployStack", - "description": "Documentation for DeployStack", - "icon": "DeployStackLogo", - "root": true, - "pages": [ - "index.mdx", - "quick-start.mdx", - "---General---", - "global-settings.mdx", - "roles.mdx", - "security.mdx", - "---Deployment---", - "local-setup.mdx", - "self-hosted/", - "---Development---", - "development/index.mdx", - "---Frontend Development---", - "development/frontend/index.mdx", - "...development/frontend", - "---Backend Development---", - "development/backend/index.mdx", - "...development/backend" - ] -} diff --git a/docs/deploystack/development/backend/api-pagination.mdx b/docs/development/backend/api-pagination.mdx similarity index 100% rename from docs/deploystack/development/backend/api-pagination.mdx rename to docs/development/backend/api-pagination.mdx diff --git a/docs/deploystack/development/backend/api-security.mdx b/docs/development/backend/api-security.mdx similarity index 90% rename from docs/deploystack/development/backend/api-security.mdx rename to docs/development/backend/api-security.mdx index e82edd2..22bece9 100644 --- a/docs/deploystack/development/backend/api-security.mdx +++ b/docs/development/backend/api-security.mdx @@ -1,9 +1,9 @@ --- -title: API Security Best Practices +title: API Security description: Essential security patterns for DeployStack Backend API development, including proper authorization hook usage and security-first development principles. 
--- -# API Security Best Practices +# API Security This document outlines critical security patterns and best practices for developing secure APIs in the DeployStack Backend. Following these guidelines ensures consistent security behavior and prevents common vulnerabilities. @@ -148,8 +148,39 @@ requireTeamPermission('permission.name', getTeamIdFn) // Custom team ID extracti // Ownership-based authorization requireOwnershipOrAdmin(getUserIdFromRequest) // User owns resource OR is admin + +// Dual authentication (Cookie + OAuth2) +requireAuthenticationAny() // Accept either cookie or OAuth2 Bearer token +requireOAuthScope('scope.name') // Enforce OAuth2 scope requirements ``` +### Dual Authentication Support + +For endpoints that support both web users (cookies) and CLI users (OAuth2 Bearer tokens), use the dual authentication middleware: + +```typescript +import { requireAuthenticationAny, requireOAuthScope } from '../../middleware/oauthMiddleware'; + +fastify.get('/dual-auth-endpoint', { + schema: { + security: [ + { cookieAuth: [] }, // Cookie authentication + { bearerAuth: [] } // OAuth2 Bearer token + ] + }, + preValidation: [ + requireAuthenticationAny(), // Accept either auth method + requireOAuthScope('your:scope') // Enforce OAuth2 scope + ] +}, async (request, reply) => { + // Endpoint accessible via both authentication methods + const authType = request.tokenPayload ? 'oauth2' : 'cookie'; + const userId = request.user!.id; +}); +``` + +For detailed OAuth2 implementation, see the [Backend OAuth Implementation Guide](/development/backend/oauth) and [Backend Security Policy](/development/backend/security#oauth2-server-security). 
+ ### Team-Aware Permission System For endpoints that operate within team contexts (e.g., `/teams/:teamId/resource`), use the team-aware permission middleware: @@ -453,6 +484,6 @@ Before deploying any protected endpoint, verify: ## Related Documentation -- [API Documentation Generation](/deploystack/development/backend/api) - General API development patterns -- [Authentication System](deploystack/auth) - User authentication implementation -- [Role-Based Access Control](/deploystack/development/backend/roles) - Permission system details +- [API Documentation Generation](/development/backend/api) - General API development patterns +- [Authentication System](/development/backend/auth) - User authentication implementation +- [Role-Based Access Control](/development/backend/roles) - Permission system details diff --git a/docs/deploystack/development/backend/api.mdx b/docs/development/backend/api.mdx similarity index 89% rename from docs/deploystack/development/backend/api.mdx rename to docs/development/backend/api.mdx index ee5bf1a..14a6952 100644 --- a/docs/deploystack/development/backend/api.mdx +++ b/docs/development/backend/api.mdx @@ -17,7 +17,7 @@ The DeployStack Backend uses Fastify with Swagger plugins to automatically gener ## 🔒 Security First -**IMPORTANT**: Before developing any protected API endpoints, read the [API Security Best Practices](./api-security.mdx) documentation. It covers critical security patterns including: +**IMPORTANT**: Before developing any protected API endpoints, read the [API Security Best Practices](/development/backend/api-security) documentation. 
It covers critical security patterns including: - **Authorization Before Validation**: Why `preValidation` must be used instead of `preHandler` for authorization - **Proper Error Responses**: Ensuring unauthorized users get 403 Forbidden, not validation errors @@ -26,6 +26,84 @@ The DeployStack Backend uses Fastify with Swagger plugins to automatically gener **Key Rule**: Always use `preValidation` for authorization checks to prevent information disclosure to unauthorized users. +## 🔐 Dual Authentication Support + +The DeployStack Backend supports **dual authentication** for API endpoints, allowing both web users (cookie-based) and CLI users (OAuth2 Bearer tokens) to access the same endpoints seamlessly. + +### Authentication Methods + +1. **Cookie Authentication** (Web Users) + - Session-based authentication using HTTP cookies + - Automatic for web browser requests + - Uses `session` cookie set during login + +2. **OAuth2 Bearer Token Authentication** (CLI Users) + - RFC 6749 compliant OAuth2 implementation with PKCE + - Uses `Authorization: Bearer ` header + - Scope-based access control + +### Dual Authentication Middleware + +Use these middleware functions to enable dual authentication on endpoints: + +```typescript +import { requireAuthenticationAny, requireOAuthScope } from '../../middleware/oauthMiddleware'; + +fastify.get('/your-endpoint', { + schema: { + security: [ + { cookieAuth: [] }, // Cookie authentication + { bearerAuth: [] } // OAuth2 Bearer token + ] + }, + preValidation: [ + requireAuthenticationAny(), // Accept either auth method + requireOAuthScope('your:scope') // Enforce OAuth2 scope + ] +}, async (request, reply) => { + // Endpoint accessible via both authentication methods + const authType = request.tokenPayload ? 
'oauth2' : 'cookie';
+  const userId = request.user!.id;
+});
+```
+
+### OAuth2 Scopes
+
+Available OAuth2 scopes for fine-grained access control:
+
+- `mcp:read` - Read MCP server installations and configurations
+- `account:read` - Read account information
+- `user:read` - Read user profile information
+- `teams:read` - Read team memberships and information
+- `offline_access` - Maintain access when not actively using the application
+
+### OAuth2 Flow Endpoints
+
+- `GET /api/oauth2/auth` - Authorization endpoint (PKCE required)
+- `GET /api/oauth2/consent` - User consent page
+- `POST /api/oauth2/consent` - Process consent decision
+- `POST /api/oauth2/token` - Token exchange endpoint
+
+### Client Configuration
+
+- **Client ID**: `deploystack-gateway-cli`
+- **Redirect URIs**: `http://localhost:8976/oauth/callback`, `http://127.0.0.1:8976/oauth/callback`
+- **PKCE**: Required (SHA256 method)
+- **Token Lifetime**: 1 hour access tokens, 30 day refresh tokens
+
+### Usage Examples
+
+**Web Users (Cookie Authentication)**:
+```bash
+curl -b cookies.txt "http://localhost:3000/api/teams/me/default"
+```
+
+**CLI Users (OAuth2 Bearer Token)**:
+```bash
+curl -H "Authorization: Bearer <access_token>" \
+  "http://localhost:3000/api/teams/me/default"
+```
+
 ## Available Commands
 
 ### 1. Generate Complete API Specification
diff --git a/docs/development/backend/auth.mdx b/docs/development/backend/auth.mdx
new file mode 100644
index 0000000..182f065
--- /dev/null
+++ b/docs/development/backend/auth.mdx
@@ -0,0 +1,6 @@
+---
+title: Authentication System
+description: Complete guide to implementing user authentication in DeployStack Backend.
+--- + +# DeployStack Authentication System \ No newline at end of file diff --git a/docs/deploystack/development/backend/cloud-credentials.mdx b/docs/development/backend/cloud-credentials.mdx similarity index 99% rename from docs/deploystack/development/backend/cloud-credentials.mdx rename to docs/development/backend/cloud-credentials.mdx index 5129f20..5f81f92 100644 --- a/docs/deploystack/development/backend/cloud-credentials.mdx +++ b/docs/development/backend/cloud-credentials.mdx @@ -136,7 +136,7 @@ interface StoredCredentials { ## Role-Based Access Control -The cloud credentials system uses **team-contextual permissions** rather than global permissions. For detailed role information and permission matrices, see [Role-Based Access Control](/deploystack/development/backend/roles). +The cloud credentials system uses **team-contextual permissions** rather than global permissions. For detailed role information and permission matrices, see [Role-Based Access Control](/development/backend/roles). ### Access Levels diff --git a/docs/deploystack/development/backend/database-sqlite.mdx b/docs/development/backend/database-sqlite.mdx similarity index 98% rename from docs/deploystack/development/backend/database-sqlite.mdx rename to docs/development/backend/database-sqlite.mdx index df58a4f..f93f899 100644 --- a/docs/deploystack/development/backend/database-sqlite.mdx +++ b/docs/development/backend/database-sqlite.mdx @@ -9,7 +9,7 @@ description: Technical implementation details and best practices for SQLite inte SQLite is the default database for DeployStack development and small to medium deployments. It provides excellent performance, zero configuration, and a simple file-based architecture that makes it ideal for development, testing, and single-server deployments. -> **Setup Instructions**: For initial SQLite configuration, see the [Database Setup Guide](/deploystack/self-hosted/database-setup#sqlite). 
+> **Setup Instructions**: For initial SQLite configuration, see the [Database Setup Guide](/self-hosted/database-setup#sqlite). ## Technical Architecture @@ -441,6 +441,6 @@ Consider hybrid approaches for scaling: --- -For general database concepts and cross-database functionality, see the [Database Development Guide](/deploystack/development/backend/database). +For general database concepts and cross-database functionality, see the [Database Development Guide](/development/backend/database). -For initial setup and configuration, see the [Database Setup Guide](/deploystack/self-hosted/database-setup). +For initial setup and configuration, see the [Database Setup Guide](/self-hosted/database-setup). diff --git a/docs/deploystack/development/backend/database-turso.mdx b/docs/development/backend/database-turso.mdx similarity index 75% rename from docs/deploystack/development/backend/database-turso.mdx rename to docs/development/backend/database-turso.mdx index e05e12d..1a6b192 100644 --- a/docs/deploystack/development/backend/database-turso.mdx +++ b/docs/development/backend/database-turso.mdx @@ -19,6 +19,10 @@ DeployStack integrates with Turso using the official `@libsql/client` driver thr - **Scalability**: Automatic scaling based on usage patterns - **libSQL Protocol**: Enhanced SQLite with additional networking capabilities + + **Migration Compatibility**: Turso requires SQL statements to be executed individually rather than in batches. DeployStack automatically handles this requirement by intelligently splitting migration files into individual statements during execution. You don't need to modify your migrations or write them differently - we handle the complexity for you. + + ## Setup and Configuration ### Prerequisites @@ -132,6 +136,8 @@ npm run db:generate npm run db:up ``` +**Note**: While migrations use standard SQLite syntax with multiple statements and breakpoint markers, DeployStack automatically processes these for Turso compatibility. 
Each CREATE TABLE, CREATE INDEX, and other SQL statements are executed individually behind the scenes, ensuring smooth deployment without any manual intervention. + ### Database Operations All standard Drizzle operations work with Turso: @@ -169,8 +175,9 @@ Turso connections are managed automatically by the libSQL client: ### Query Optimization - **Prepared Statements**: Use prepared statements for repeated queries -- **Batch Operations**: Group multiple operations when possible +- **Batch Operations**: Group multiple operations when possible (note: migrations are automatically split for compatibility) - **Indexing**: Add appropriate indexes for frequently queried columns +- **Migration Performance**: Initial migrations execute individually for compatibility, but runtime queries maintain full performance ```typescript // Example: Batch operations @@ -246,12 +253,21 @@ Error: Authentication failed - Check that the token matches the database **Migration Errors** +``` +Error: SQL_MANY_STATEMENTS: SQL string contains more than one statement +``` +- **Status**: This error is automatically handled by DeployStack as of version 1.0+ +- **Cause**: Turso's libSQL client requires individual statement execution +- **Solution**: DeployStack automatically splits and executes statements individually +- If you encounter this error, ensure you're running the latest version of DeployStack + ``` Error: Migration failed to apply ``` - Check migration SQL syntax is valid SQLite - Verify no conflicting schema changes - Review migration order and dependencies +- Ensure your Turso database has sufficient resources ### Debug Logging @@ -292,6 +308,35 @@ turso db show your-database turso db shell your-database --command "EXPLAIN QUERY PLAN SELECT * FROM authUser" ``` +## Technical Implementation Details + +### How DeployStack Handles Turso Migrations + +Unlike SQLite which can execute multiple SQL statements in a single call, Turso's libSQL client requires each statement to be executed 
individually. DeployStack solves this transparently: + +1. **Automatic Statement Splitting**: Migration files are intelligently parsed to separate: + - Statements divided by `--> statement-breakpoint` markers + - Multiple statements on the same line (like consecutive CREATE INDEX commands) + - Complex migrations with mixed DDL operations + +2. **Sequential Execution**: Each statement is executed in order with proper error handling +3. **Transaction Safety**: Failed statements properly roll back to maintain database consistency +4. **Performance Impact**: Migration execution is slightly slower than SQLite (milliseconds per statement), but this only affects initial setup and schema changes, not runtime performance + +### Migration File Compatibility + +Your existing Drizzle migration files work without modification: + +```sql +-- This standard Drizzle migration works perfectly +CREATE TABLE users (...); +--> statement-breakpoint +CREATE INDEX idx_users_email ON users(email);--> statement-breakpoint +CREATE UNIQUE INDEX idx_users_username ON users(username); +``` + +DeployStack automatically handles the parsing and execution, so you write migrations exactly as you would for SQLite. + ## Advanced Features ### Multi-Region Setup @@ -384,6 +429,20 @@ Since Turso is SQLite-compatible, migration is straightforward: 3. **Import to Turso**: Load data into Turso database 4. **Update configuration**: Switch database type to Turso +## Known Limitations and Solutions + +### Statement Execution + +**Limitation**: Turso cannot execute multiple SQL statements in a single database call. + +**DeployStack Solution**: Automatic statement splitting and sequential execution. This is completely transparent - you never need to think about it. + +### Migration Speed + +**Limitation**: Migrations apply slightly slower than with local SQLite due to individual statement execution and network latency. + +**DeployStack Solution**: Migrations are a one-time operation during setup or updates. 
Runtime performance is unaffected. For large migrations, DeployStack provides progress logging to track execution. + ## Cost Optimization ### Usage Monitoring @@ -418,4 +477,4 @@ turso org show 4. **Start developing** with global SQLite performance 5. **Monitor and optimize** your database usage -For more information about database management in DeployStack, see the [Database Management Guide](/deploystack/development/backend/database). +For more information about database management in DeployStack, see the [Database Management Guide](/development/backend/database). diff --git a/docs/deploystack/development/backend/database.mdx b/docs/development/backend/database.mdx similarity index 75% rename from docs/deploystack/development/backend/database.mdx rename to docs/development/backend/database.mdx index 07f9109..6546ea7 100644 --- a/docs/deploystack/development/backend/database.mdx +++ b/docs/development/backend/database.mdx @@ -18,11 +18,11 @@ All databases use the same SQLite syntax and schema, ensuring consistency across The backend uses an environment-based configuration system where database credentials are provided via environment variables, and the database type is selected through the setup API. -> **Setup Instructions**: For step-by-step setup instructions, see the [Database Setup Guide](/deploystack/self-hosted/database-setup). +> **Setup Instructions**: For step-by-step setup instructions, see the [Database Setup Guide](/self-hosted/database-setup). 
> **Database-Specific Guides**: For detailed technical information about specific databases, see: -> - [SQLite Development Guide](/deploystack/development/backend/database-sqlite) -> - [Turso Development Guide](/deploystack/development/backend/database-turso) +> - [SQLite Development Guide](/development/backend/database-sqlite) +> - [Turso Development Guide](/development/backend/database-turso) ### Environment Variables @@ -31,6 +31,7 @@ Configure your chosen database type by setting the appropriate environment varia #### SQLite Configuration ```bash # Optional - defaults to persistent_data/database/deploystack.db +# Path is relative to services/backend/ directory SQLITE_DB_PATH=persistent_data/database/deploystack.db ``` @@ -58,12 +59,15 @@ Check the current status of the database configuration and initialization: ### Initial Database Setup -Perform the initial database setup by selecting your database type: +The initial database setup is performed through the frontend setup wizard at `/setup`, which provides a user-friendly interface. The wizard internally calls the backend API: -- **Endpoint:** `POST /api/db/setup` +- **Frontend URL:** `https:///setup` +- **Backend Endpoint (called by frontend):** `POST /api/db/setup` - **Method:** `POST` - **Request Body:** JSON object specifying the database type +**Note for Developers**: While you can call the API endpoint directly for testing, end-users should always use the frontend setup wizard for proper initialization. + #### Setup Examples **SQLite Setup:** @@ -105,9 +109,9 @@ The setup endpoint returns a JSON response indicating success and restart requir ### Database Selection File The chosen database type is stored in: -- `services/backend/persistent_data/db.selection.json` +- `services/backend/persistent_data/db.selection.json` (relative to the backend service directory) -This file is automatically created and managed by the setup API. Manual editing is not recommended. 
+This file is automatically created and managed by the setup API when users complete the frontend setup wizard at `https:///setup`. Manual editing is not recommended. Example content: ```json @@ -118,6 +122,8 @@ Example content: } ``` +**Important**: This file is created during the initial setup process through the frontend wizard, which internally calls the `/api/db/setup` endpoint. + ## Architecture ### Key Components @@ -212,6 +218,9 @@ Follow these steps to add or modify database tables: - **Automatic Tracking**: Migrations tracked in `__drizzle_migrations` table - **Incremental Application**: Only new migrations are applied - **Transaction Safety**: Migrations applied in transactions for consistency +- **Execution Timing**: Migrations are applied automatically on server startup, but only after the database has been initialized through the setup process + +**Important**: Migrations cannot run until the database exists. The initial setup (via frontend wizard at `/setup`) must be completed first to create the database, then migrations will apply on subsequent server startups. ### Migration Compatibility @@ -237,54 +246,42 @@ The global settings system: **Turso**: Uses efficient batch operations with libSQL protocol -> **Global Settings Documentation**: For detailed information about global settings, see the [Global Settings Guide](/deploystack/development/backend/global-settings). +> **Global Settings Documentation**: For detailed information about global settings, see the [Global Settings Guide](/development/backend/global-settings). ## Plugin Database Extensions -Plugins can add their own tables through the `databaseExtension` property: - -1. Define tables in the plugin's schema file -2. Include tables in the plugin's `databaseExtension.tableDefinitions` -3. Implement `onDatabaseInit()` for seeding or initialization - -Plugin tables are automatically created and work across all database types. 
- -### Plugin Global Settings +Plugins can extend the database with their own tables and settings. For detailed information about plugin database integration, including table definitions, dynamic table creation, and security boundaries, see the [Plugin System Guide](/development/backend/plugins#plugin-integration-points). -Plugins can also contribute global settings that are automatically integrated during database setup: - -```typescript -// Example plugin with global settings -class MyPlugin implements Plugin { - globalSettingsExtension: GlobalSettingsExtension = { - groups: [{ id: 'my_plugin', name: 'My Plugin Settings' }], - settings: [ - { - key: 'myPlugin.feature.enabled', - defaultValue: true, - type: 'boolean', - groupId: 'my_plugin' - } - ] - }; -} -``` +Key plugin database features: +- **Dynamic table creation** at runtime (separate from core migrations) +- **Automatic table prefixing** with plugin IDs +- **Security isolation** between core and plugin tables +- **Global settings integration** for plugin configuration ## Development Workflow 1. **Environment Setup**: Configure environment variables for your chosen database -2. **Database Selection**: Use `/api/db/setup` to select and initialize database +2. **Initial Setup**: Complete the frontend setup wizard at `/setup` (for first-time setup) + - This creates `persistent_data/db.selection.json` + - Initializes the database based on your selection + - For development, you can also directly call `POST /api/db/setup` 3. **Schema Changes**: Modify `src/db/schema.sqlite.ts` 4. **Generate Migrations**: Run `npm run db:generate` 5. **Apply Changes**: Restart server or run `npm run db:up` 6. 
**Update Code**: Use the modified schema in your application +**Backup Strategy**: Always backup the entire `services/backend/persistent_data/` directory as it contains: +- The SQLite database file (if using SQLite) +- The database selection configuration +- Any other persistent application data + ## Database-Specific Considerations ### SQLite -- **File Location**: `persistent_data/database/deploystack.db` +- **File Location**: `services/backend/persistent_data/database/deploystack.db` (full path from project root) - **Performance**: Excellent for development and small to medium deployments -- **Backup**: Simple file-based backup +- **Backup**: Simple file-based backup - backup the entire `persistent_data/` directory +- **Selection File**: Database type stored in `persistent_data/db.selection.json` ### Turso - **Global Replication**: Multi-region database replication @@ -315,9 +312,13 @@ class MyPlugin implements Plugin { ### SQLite ```bash -# Using SQLite CLI +# Using SQLite CLI (from project root) sqlite3 services/backend/persistent_data/database/deploystack.db +# Or from backend directory +cd services/backend +sqlite3 persistent_data/database/deploystack.db + # Using DB Browser for SQLite (GUI) # Download from: https://sqlitebrowser.org/ ``` diff --git a/docs/deploystack/development/backend/environment-variables.mdx b/docs/development/backend/environment-variables.mdx similarity index 80% rename from docs/deploystack/development/backend/environment-variables.mdx rename to docs/development/backend/environment-variables.mdx index a98facc..87d99fe 100644 --- a/docs/deploystack/development/backend/environment-variables.mdx +++ b/docs/development/backend/environment-variables.mdx @@ -32,7 +32,7 @@ During development, the backend loads environment variables from `.env` files us PORT=3000 NODE_ENV=development DEPLOYSTACK_FRONTEND_URL=http://localhost:5173 -COOKIE_SECRET=your-secure-cookie-secret-here +DEPLOYSTACK_ENCRYPTION_SECRET=your-secure-cookie-secret-here ``` ### 
Production Environment Variables @@ -44,7 +44,7 @@ In production Docker containers, variables are injected at runtime via Docker's docker run -e PORT=3000 \ -e NODE_ENV=production \ -e DEPLOYSTACK_FRONTEND_URL="https://app.deploystack.com" \ - -e COOKIE_SECRET="your-production-cookie-secret" \ + -e DEPLOYSTACK_ENCRYPTION_SECRET="your-production-secret" \ deploystack/backend:latest ``` @@ -65,17 +65,11 @@ HOST=localhost DEPLOYSTACK_FRONTEND_URL=http://localhost:5173 # Security -COOKIE_SECRET=a-very-secret-and-strong-secret-for-cookies DEPLOYSTACK_ENCRYPTION_SECRET=your-encryption-secret-here # Logging LOG_LEVEL=debug -# OAuth Configuration (optional for development) -GITHUB_CLIENT_ID=your-github-client-id -GITHUB_CLIENT_SECRET=your-github-client-secret -GITHUB_REDIRECT_URI=http://localhost:3000/api/auth/github/callback - ``` #### `.env.local` (Local Overrides) @@ -152,8 +146,7 @@ docker run -d -p 3000:3000 \ -e NODE_ENV=production \ -e PORT=3000 \ -e DEPLOYSTACK_FRONTEND_URL="https://app.deploystack.com" \ - -e COOKIE_SECRET="your-production-secret" \ - -e DEPLOYSTACK_ENCRYPTION_SECRET="your-encryption-secret" \ + -e DEPLOYSTACK_ENCRYPTION_SECRET="your-production-secret" \ deploystack/backend:latest ``` @@ -170,11 +163,8 @@ services: - NODE_ENV=production - PORT=3000 - DEPLOYSTACK_FRONTEND_URL=https://app.deploystack.com - - COOKIE_SECRET=your-production-cookie-secret - DEPLOYSTACK_ENCRYPTION_SECRET=your-encryption-secret - - GITHUB_CLIENT_ID=your-github-client-id - - GITHUB_CLIENT_SECRET=your-github-client-secret - - GITHUB_REDIRECT_URI=https://api.deploystack.com/api/auth/github/callback + volumes: - ./data:/app/data # For SQLite database persistence ``` @@ -215,7 +205,7 @@ function getRequiredEnv(key: string): string { return value } -const cookieSecret = getRequiredEnv('COOKIE_SECRET') +const encryptionSecret = getRequiredEnv('DEPLOYSTACK_ENCRYPTION_SECRET') ``` ### Server Configuration Example @@ -233,7 +223,7 @@ export const createServer = async () => { // 
Cookie configuration await server.register(fastifyCookie, { - secret: process.env.COOKIE_SECRET || 'fallback-secret', + secret: process.env.DEPLOYSTACK_ENCRYPTION_SECRET || 'fallback-secret', parseOptions: {} }) @@ -375,8 +365,7 @@ PORT=3000 NODE_ENV=development DEPLOYSTACK_FRONTEND_URL=http://localhost:5173 DEPLOYSTACK_ENCRYPTION_SECRET=secret -GITHUB_CLIENT_ID=client-id -GITHUB_CLIENT_SECRET=client-secret + ❌ Bad port=3000 # Wrong case @@ -397,8 +386,6 @@ DEPLOYSTACK_FRONTEND_URL=http://localhost:5173 DEPLOYSTACK_ENCRYPTION_SECRET=secret # Third-party Services (use service name prefix) -GITHUB_CLIENT_ID=client-id -GITHUB_CLIENT_SECRET=client-secret SMTP_HOST=smtp.example.com SMTP_PORT=587 @@ -444,29 +431,9 @@ docker exec -it container-name printenv PORT docker exec -it container-name printenv NODE_ENV ``` -### Startup Banner - -The backend displays a startup banner with key environment information: - -```typescript -// src/utils/banner.ts -export const displayStartupBanner = (port: number): void => { - const environment = process.env.NODE_ENV || 'development' - - logger.info(` - ╔══════════════════════════════════════════════════════════════════════════════╗ - ║ 🚀 DeployStack Backend ║ - ║ ║ - ║ Running on port ${port} ║ - ║ Environment: ${environment} ║ - ╚══════════════════════════════════════════════════════════════════════════════╝ - `) -} -``` - ## Related Documentation -- [Backend Development Guide](/deploystack/development/backend) - Main backend development guide -- [Database Configuration](/deploystack/development/backend/database) - Database setup and configuration -- [API Documentation](/deploystack/development/backend/api) - API development guide -- [Deploy DeployStack](/deploystack/self-hosted/docker-compose) - Deploy DeployStack with Docker Compose +- [Backend Development Guide](/development/backend) - Main backend development guide +- [Database Configuration](/development/backend/database) - Database setup and configuration +- [API 
Documentation](/development/backend/api) - API development guide +- [Deploy DeployStack](/self-hosted/docker-compose) - Deploy DeployStack with Docker Compose diff --git a/docs/deploystack/development/backend/global-settings.mdx b/docs/development/backend/global-settings.mdx similarity index 99% rename from docs/deploystack/development/backend/global-settings.mdx rename to docs/development/backend/global-settings.mdx index 7486582..c860280 100644 --- a/docs/deploystack/development/backend/global-settings.mdx +++ b/docs/development/backend/global-settings.mdx @@ -930,7 +930,7 @@ Key points for plugin-contributed settings: - **Declaration**: Plugins declare global settings via a `globalSettingsExtension` property in their main class. - **Initialization**: The `PluginManager` processes these definitions at startup, creating new groups and settings if they don't already exist. - **Precedence**: Core global settings always take precedence. If a plugin tries to define a setting with a key that already exists (either from core or another plugin), the plugin's definition for that specific key is ignored. -- **Documentation**: For details on how plugins can define global settings, refer to the [PLUGINS.MD](PLUGINS.MD) document. +- **Documentation**: For details on how plugins can define global settings, refer to the [Backend Plugins Docs](/development/backend/plugins) document. ## Helper Methods API Reference @@ -1018,5 +1018,5 @@ if (await GlobalSettings.isSet('api.key')) { --- -For more information about the role-based access control system, see [ROLES](/deploystack/development/backend/roles). -For security details, see [SECURITY](/deploystack/development/backend/security). +For more information about the role-based access control system, see [ROLES](/development/backend/roles). +For security details, see [SECURITY](/development/backend/security). 
diff --git a/docs/deploystack/development/backend/index.mdx b/docs/development/backend/index.mdx similarity index 88% rename from docs/deploystack/development/backend/index.mdx rename to docs/development/backend/index.mdx index 7bdf37d..17d46d6 100644 --- a/docs/deploystack/development/backend/index.mdx +++ b/docs/development/backend/index.mdx @@ -110,8 +110,12 @@ services/backend/ │ ├── auth/ # Authentication utilities │ └── server.ts # Main server configuration ├── plugins/ # Extensible plugin directory -├── persistent_data/ # Data persistence +├── persistent_data/ # All persistent application data (backup this entire directory) +│ ├── database/ # SQLite database files +│ │ └── deploystack.db # Main database (if using SQLite) +│ └── db.selection.json # Database type configuration └── drizzle/ # Database migrations +│ └── migrations_sqlite/ # SQL migration files ``` ## Key Features @@ -136,9 +140,13 @@ services/backend/ ## Development Workflow 1. **Setup**: Install dependencies and start development server -2. **Database**: Use API endpoints to initialize database +2. **Database Initialization**: + - For first-time setup: Visit `/setup` in the frontend + - For development: Call `POST /api/db/setup` directly + - Creates `persistent_data/db.selection.json` and initializes database 3. **Development**: Add routes, modify schemas, create plugins 4. **Testing**: Run comprehensive test suite 5. **Documentation**: Generate API specs for integration +6. **Backup**: Always backup entire `persistent_data/` directory for data persistence For detailed implementation guides, security considerations, and best practices, explore the specific documentation sections above. 
diff --git a/docs/deploystack/development/backend/logging.mdx b/docs/development/backend/logging.mdx similarity index 100% rename from docs/deploystack/development/backend/logging.mdx rename to docs/development/backend/logging.mdx diff --git a/docs/deploystack/development/backend/mail.mdx b/docs/development/backend/mail.mdx similarity index 99% rename from docs/deploystack/development/backend/mail.mdx rename to docs/development/backend/mail.mdx index 0900891..e5501c7 100644 --- a/docs/deploystack/development/backend/mail.mdx +++ b/docs/development/backend/mail.mdx @@ -692,4 +692,4 @@ const emailResult = await EmailService.sendEmail({...}, request.log); --- -For more information about global settings configuration, see [GLOBAL_SETTINGS](/deploystack/development/backend/global-settings). +For more information about global settings configuration, see [GLOBAL_SETTINGS](/development/backend/global-settings). diff --git a/docs/development/backend/meta.json b/docs/development/backend/meta.json new file mode 100644 index 0000000..97704e8 --- /dev/null +++ b/docs/development/backend/meta.json @@ -0,0 +1,9 @@ +{ + "title": "Backend Development", + "description": "Documentation for DeployStack Backend Development", + "icon": "Server", + "root": true, + "pages": [ + "..." + ] +} diff --git a/docs/deploystack/development/backend/oauth.mdx b/docs/development/backend/oauth.mdx similarity index 70% rename from docs/deploystack/development/backend/oauth.mdx rename to docs/development/backend/oauth.mdx index ecc5ea6..8304b0b 100644 --- a/docs/deploystack/development/backend/oauth.mdx +++ b/docs/development/backend/oauth.mdx @@ -1,21 +1,224 @@ --- title: OAuth Implementation Guide -description: Developer guide for implementing OAuth providers in DeployStack +description: Developer guide for implementing OAuth providers and OAuth2 server in DeployStack --- # OAuth Implementation Guide -This guide explains how to implement OAuth providers in DeployStack's backend. 
The system is designed to support multiple OAuth providers with a consistent pattern. +This guide explains how to implement OAuth providers and the OAuth2 server in DeployStack's backend. The system supports both OAuth provider integration (GitHub, Google, etc.) and OAuth2 server functionality for API access. ## Architecture Overview -DeployStack uses the following libraries for OAuth implementation: +DeployStack implements two distinct OAuth systems: +### 1. OAuth Provider Integration (Social Login) +For user authentication via third-party providers: - **[Arctic](https://arctic.js.org/)** - OAuth 2.0 client library for various providers - **[Lucia](https://lucia-auth.com/)** - Authentication library for session management - **Global Settings** - Database-driven configuration for OAuth providers -## Current Implementation: GitHub OAuth +### 2. OAuth2 Server (API Access) +For programmatic API access by CLI tools and applications: +- **RFC 6749 compliant** OAuth2 authorization server +- **PKCE support** (RFC 7636) for enhanced security +- **Scope-based access control** for fine-grained permissions +- **Dual authentication** supporting both cookies and Bearer tokens + +## OAuth2 Server Implementation + +The OAuth2 server enables CLI tools and applications to access DeployStack APIs securely using Bearer tokens. + +For OAuth2 security details including PKCE, token security, authorization flow security, and Bearer token authentication, see the [Security Policy](/development/backend/security#oauth2-server-security). 
+ +### Database Schema + +Three tables support OAuth2 functionality: + +```sql +-- Authorization codes (short-lived, exchanged for tokens) +CREATE TABLE oauth_authorization_codes ( + id TEXT PRIMARY KEY, + user_id TEXT NOT NULL, + client_id TEXT NOT NULL, + redirect_uri TEXT NOT NULL, + scope TEXT NOT NULL, + state TEXT NOT NULL, + code_challenge TEXT NOT NULL, + code_challenge_method TEXT NOT NULL, + expires_at INTEGER NOT NULL, + created_at INTEGER NOT NULL, + FOREIGN KEY (user_id) REFERENCES authUser(id) +); + +-- Access tokens (1-hour lifetime) +CREATE TABLE oauth_access_tokens ( + id TEXT PRIMARY KEY, + user_id TEXT NOT NULL, + client_id TEXT NOT NULL, + scope TEXT NOT NULL, + token_hash TEXT NOT NULL, + expires_at INTEGER NOT NULL, + created_at INTEGER NOT NULL, + FOREIGN KEY (user_id) REFERENCES authUser(id) +); + +-- Refresh tokens (30-day lifetime) +CREATE TABLE oauth_refresh_tokens ( + id TEXT PRIMARY KEY, + user_id TEXT NOT NULL, + client_id TEXT NOT NULL, + token_hash TEXT NOT NULL, + expires_at INTEGER NOT NULL, + created_at INTEGER NOT NULL, + FOREIGN KEY (user_id) REFERENCES authUser(id) +); +``` + +### OAuth2 Services + +#### AuthorizationService +Handles OAuth2 authorization flow with PKCE validation: + +```typescript +// services/backend/src/services/oauth/authorizationService.ts +export class AuthorizationService { + static validateClient(clientId: string): boolean + static validateRedirectUri(redirectUri: string): boolean + static validateScope(scope: string): boolean + static async storeAuthorizationRequest(...) + static async getAuthorizationRequest(requestId: string) + static async generateAuthorizationCode(requestId: string) + static async verifyAuthorizationCode(code: string, codeVerifier: string, ...) 
+} +``` + +#### TokenService +Manages access and refresh tokens: + +```typescript +// services/backend/src/services/oauth/tokenService.ts +export class TokenService { + static async generateAccessToken(userId: string, scope: string, clientId: string) + static async generateRefreshToken(userId: string, clientId: string) + static async verifyAccessToken(token: string) + static async refreshAccessToken(refreshToken: string, clientId: string) + static async revokeToken(tokenId: string) +} +``` + +#### OAuthCleanupService +Automatic cleanup of expired tokens (runs every hour): + +```typescript +// services/backend/src/services/oauth/cleanupService.ts +export class OAuthCleanupService { + static startCleanupScheduler(): void + static async cleanupExpiredTokens(): Promise<void> +} +``` + +### OAuth2 Endpoints + +#### Authorization Endpoint +```typescript +GET /api/oauth2/auth +``` +Initiates OAuth2 flow with PKCE. Redirects to consent page for user authorization. + +**Parameters:** +- `response_type=code` (required) +- `client_id` (required) +- `redirect_uri` (required) +- `scope` (required) +- `state` (required) +- `code_challenge` (required) +- `code_challenge_method=S256` (required) + +#### Consent Endpoints +```typescript +GET /api/oauth2/consent?request_id={request_id} +POST /api/oauth2/consent +``` +Professional HTML consent page with security warnings and scope descriptions. + +#### Token Endpoint +```typescript +POST /api/oauth2/token +``` +Exchanges authorization code for access token or refreshes tokens. 
+ +**Grant Types:** +- `authorization_code` - Exchange code for tokens +- `refresh_token` - Refresh access token + +### OAuth2 Scopes + +Available scopes for fine-grained access control: + +- `mcp:read` - Read MCP server installations and configurations +- `account:read` - Read account information +- `user:read` - Read user profile information +- `teams:read` - Read team memberships and information +- `offline_access` - Maintain access when not actively using the application + +### Dual Authentication Middleware + +Enable both cookie and OAuth2 authentication on endpoints: + +```typescript +import { requireAuthenticationAny, requireOAuthScope } from '../../middleware/oauthMiddleware'; + +fastify.get('/your-endpoint', { + schema: { + security: [ + { cookieAuth: [] }, // Cookie authentication + { bearerAuth: [] } // OAuth2 Bearer token + ] + }, + preValidation: [ + requireAuthenticationAny(), // Accept either auth method + requireOAuthScope('your:scope') // Enforce OAuth2 scope + ] +}, async (request, reply) => { + // Endpoint accessible via both authentication methods + const authType = request.tokenPayload ? 'oauth2' : 'cookie'; + const userId = request.user!.id; +}); +``` + +### Client Configuration + +**DeployStack Gateway CLI:** +- **Client ID**: `deploystack-gateway-cli` +- **Redirect URIs**: `http://localhost:8976/oauth/callback`, `http://127.0.0.1:8976/oauth/callback` +- **PKCE**: Required (SHA256 method) +- **Token Lifetime**: 1 hour access tokens, 30 day refresh tokens + +### OAuth2 Flow Example + +```bash +# 1. Authorization Request (redirects to consent page) +curl -b cookies.txt "http://localhost:3000/api/oauth2/auth?response_type=code&client_id=deploystack-gateway-cli&redirect_uri=http://localhost:8976/oauth/callback&scope=mcp:read%20teams:read&state=xyz&code_challenge=abc&code_challenge_method=S256" + +# 2. User approves on consent page, receives authorization code + +# 3. 
Token Exchange +curl -X POST "http://localhost:3000/api/oauth2/token" \ + -H "Content-Type: application/json" \ + -d '{ + "grant_type": "authorization_code", + "code": "auth_code_here", + "redirect_uri": "http://localhost:8976/oauth/callback", + "client_id": "deploystack-gateway-cli", + "code_verifier": "verifier_here" + }' + +# 4. API Access with Bearer Token +curl -H "Authorization: Bearer <access_token>" \ + "http://localhost:3000/api/teams/me/default" +``` + +## OAuth Provider Integration (Social Login) The GitHub OAuth implementation serves as a reference for adding other providers. diff --git a/docs/deploystack/development/backend/plugins.mdx b/docs/development/backend/plugins.mdx similarity index 81% rename from docs/deploystack/development/backend/plugins.mdx rename to docs/development/backend/plugins.mdx index 47dee33..1c80587 100644 --- a/docs/deploystack/development/backend/plugins.mdx +++ b/docs/development/backend/plugins.mdx @@ -365,40 +365,110 @@ export default MyCustomPlugin; The `databaseExtension` property allows your plugin to: -1. Define tables using Drizzle ORM -2. Initialize data (seeding, migrations, etc.) -3. Integrate with the core database schema +1. **Define tables dynamically**: Tables are created at runtime from your `tableDefinitions` +2. **Initialize data**: Seed data or perform setup through `onDatabaseInit` +3. 
**Maintain security boundaries**: Plugin tables are isolated from core migrations + +#### How Plugin Database Tables Work + +**Security Architecture:** +- **Phase 1 (Trusted)**: Core migrations run first (static, secure) +- **Phase 2 (Untrusted)**: Plugin tables created dynamically (sandboxed) +- **Clear Separation**: Plugin tables cannot interfere with core database structure + +**Dynamic Table Creation:** +- Plugin tables are **NOT** included in core migration files +- Tables are created at runtime from your `tableDefinitions` +- System automatically generates CREATE TABLE SQL from your definitions +- Tables are dropped and recreated during development for clean structure + +**Table Definition Format:** +```typescript +const myPluginTableDefinitions = { + 'my_entities': { + id: (b: any) => b('id').primaryKey(), + name: (b: any) => b('name').notNull(), + data: (b: any) => b('data'), + created_at: (b: any) => b('created_at', { mode: 'timestamp' }).notNull().defaultNow(), + } +}; +``` + +**Important Notes:** +- Use `created_at` (snake_case) for database column names, not `createdAt` (camelCase) +- Timestamp columns with `{ mode: 'timestamp' }` automatically get `DEFAULT (strftime('%s', 'now'))` +- Column types are auto-detected: `id`/`count` → INTEGER, `*_at`/`*date` → INTEGER (timestamp), others → TEXT +- Tables are prefixed with your plugin ID: `my-plugin_my_entities` ### API Routes -Register API routes during the plugin's `initialize` method: +Register API routes using the isolated `PluginRouteManager`: ```typescript -app.get('/api/my-feature', async (request, reply) => { - // Handle request - return { feature: 'data' }; -}); +// ✅ Correct way - routes are automatically namespaced +async registerRoutes(routeManager: PluginRouteManager, db: AnyDatabase | null) { + // This becomes: GET /api/plugin/my-plugin/data + routeManager.get('/data', async (request, reply) => { + return { feature: 'data' }; + }); +} + +// ❌ Wrong way - no direct app access +async 
initialize(app: FastifyInstance, db: AnyDatabase | null) { + app.get('/api/my-feature', handler); // This won't work +} ``` ### Access to Core Services Plugins receive access to: -- **Fastify instance** (`app`) - For registering routes, hooks, and decorations -- **Database instance** (`db`) - For database operations -- **Configuration** - Through the plugin manager (if provided) -- **Global Settings** - Plugins can define their own global settings +- **Database instance** (`db`) - For database operations with your plugin tables +- **Route Manager** (`routeManager`) - For registering isolated, namespaced routes +- **Logger** (`logger`) - For structured logging with plugin context +- **Schema Access** - Access to the generated database schema including your tables +- **Global Settings** - Plugins can define and access their own global settings ## Plugin Lifecycle Plugins follow this lifecycle: -1. **Loading** - Plugin is discovered and loaded -2. **Database Registration** - Schema tables are registered -3. **Database Initialization** - `onDatabaseInit` is called if provided -4. **Initialization** - `initialize` method is called -5. **Runtime** - Plugin operates as part of the application -6. **Shutdown** - `shutdown` method is called during application termination +1. **Discovery** - Plugin is discovered and loaded from the plugins directory +2. **Registration** - Plugin table definitions are registered with the schema system +3. **Core Database Setup** - Core migrations are applied (trusted, static) +4. **Plugin Table Creation** - Plugin tables are created dynamically from definitions +5. **Database Initialization** - `onDatabaseInit` is called for data seeding/setup +6. **Plugin Initialization** - `initialize` method is called for non-route setup +7. **Route Registration** - `registerRoutes` is called to register API endpoints +8. **Runtime** - Plugin operates as part of the application +9. 
**Shutdown** - `shutdown` method is called during application termination + +### Database Lifecycle Details + +The database initialization follows a strict security-first approach: + +``` +┌─────────────────────────────────────────┐ +│ Phase 1: Core System (Trusted) │ +├─────────────────────────────────────────┤ +│ 1. Apply core migrations │ +│ 2. Create core tables │ +│ 3. Initialize core data │ +└─────────────────────────────────────────┘ + │ + ▼ Security Boundary +┌─────────────────────────────────────────┐ +│ Phase 2: Plugin System (Sandboxed) │ +├─────────────────────────────────────────┤ +│ 1. Generate CREATE TABLE SQL │ +│ 2. Drop existing plugin tables │ +│ 3. Create plugin tables dynamically │ +│ 4. Call plugin onDatabaseInit │ +│ 5. Seed plugin data │ +└─────────────────────────────────────────┘ +``` + +This ensures that untrusted plugin code cannot interfere with the core database structure while still providing full database functionality. ## Testing Your Plugin diff --git a/docs/deploystack/development/backend/roles.mdx b/docs/development/backend/roles.mdx similarity index 96% rename from docs/deploystack/development/backend/roles.mdx rename to docs/development/backend/roles.mdx index ebdedfe..a814f2f 100644 --- a/docs/deploystack/development/backend/roles.mdx +++ b/docs/development/backend/roles.mdx @@ -106,7 +106,7 @@ fastify.post('/teams/:teamId/resources', { }, handler); ``` -See [API Security Best Practices](./api-security.mdx) for detailed information about team-aware permissions and security patterns. +See [API Security Best Practices](/development/backend/api-security) for detailed information about team-aware permissions and security patterns. 
### Programmatic Checks diff --git a/docs/deploystack/development/backend/security.mdx b/docs/development/backend/security.mdx similarity index 80% rename from docs/deploystack/development/backend/security.mdx rename to docs/development/backend/security.mdx index c4fca87..4bc9d26 100644 --- a/docs/deploystack/development/backend/security.mdx +++ b/docs/development/backend/security.mdx @@ -75,6 +75,38 @@ Sensitive global application settings (SMTP credentials, API keys, etc.) are enc This approach ensures that sensitive configuration data remains secure even if the database is compromised. The encryption system is separate from password hashing to maintain proper separation of concerns. +## OAuth2 Server Security + +The OAuth2 server implementation follows RFC 6749 and RFC 7636 security standards for API access authentication. + +### PKCE (Proof Key for Code Exchange) +- **Algorithm:** SHA256 code challenge method (RFC 7636) +- **Code Verifier:** Cryptographically random 43-128 character string +- **Code Challenge:** Base64URL-encoded SHA256 hash of code verifier +- **Protection:** Prevents authorization code interception attacks + +### Token Security +- **Access Tokens:** JWT format with 1-hour expiration +- **Refresh Tokens:** Cryptographically random with 30-day expiration +- **Token Storage:** Argon2 hashed in database (same parameters as passwords) +- **Token Cleanup:** Automatic hourly cleanup of expired tokens +- **Scope Validation:** Fine-grained permissions (`mcp:read`, `account:read`, `user:read`, `teams:read`, `offline_access`) + +### Authorization Flow Security +- **State Parameter:** CSRF protection with cryptographically random state +- **Client Validation:** Hardcoded client credentials for `deploystack-gateway-cli` +- **Redirect URI Validation:** Whitelist of allowed callback URLs +- **Consent Page:** User authorization with security warnings +- **Session Binding:** OAuth2 flow requires active user session + +### Bearer Token Authentication +- 
**Header Format:** `Authorization: Bearer <token>` +- **Token Verification:** Constant-time comparison to prevent timing attacks +- **Scope Enforcement:** Middleware validates required scopes per endpoint +- **Dual Authentication:** Seamless support for both cookies and Bearer tokens + +For implementation details, see the [Backend OAuth Implementation Guide](/development/backend/oauth). + ## Dependencies We strive to keep our dependencies up-to-date and regularly review them for known vulnerabilities. Automated tools may be used to scan for vulnerabilities in our dependency tree. diff --git a/docs/deploystack/development/backend/test.mdx b/docs/development/backend/test.mdx similarity index 98% rename from docs/deploystack/development/backend/test.mdx rename to docs/development/backend/test.mdx index 014bd80..96ec6e6 100644 --- a/docs/deploystack/development/backend/test.mdx +++ b/docs/development/backend/test.mdx @@ -133,7 +133,7 @@ console.log('User created:', user); server.log.info('User created', { userId: user.id }); ``` -See the [Backend Logging Guide](./logging.mdx) for complete logging best practices. +See the [Backend Logging Guide](/development/backend/logging) for complete logging best practices. 
## Writing New Tests diff --git a/docs/deploystack/development/frontend/custom-ui-components.mdx b/docs/development/frontend/custom-ui-components.mdx similarity index 99% rename from docs/deploystack/development/frontend/custom-ui-components.mdx rename to docs/development/frontend/custom-ui-components.mdx index 39ec1bd..48dc067 100644 --- a/docs/deploystack/development/frontend/custom-ui-components.mdx +++ b/docs/development/frontend/custom-ui-components.mdx @@ -571,8 +571,6 @@ import { ButtonGroup, ButtonGroupItem } from '@/components/ui/button-group' | `variant` | `'default' \| 'outline' \| 'ghost'` | `'default'` | Visual style | | `orientation` | `'horizontal' \| 'vertical'` | `'horizontal'` | Layout direction | -``` - ## Maintenance and Updates ### Migration Strategy @@ -592,4 +590,4 @@ When shadcn/vue adds a component you've built custom: --- -For questions about custom components, refer to the [UI Design System](/deploystack/development/frontend/ui-design-system) documentation or reach out to the frontend team. +For questions about custom components, refer to the [UI Design System](/development/frontend/ui-design-system) documentation or reach out to the frontend team. 
diff --git a/docs/deploystack/development/frontend/environment-variables.mdx b/docs/development/frontend/environment-variables.mdx similarity index 97% rename from docs/deploystack/development/frontend/environment-variables.mdx rename to docs/development/frontend/environment-variables.mdx index 2ffa941..a060226 100644 --- a/docs/deploystack/development/frontend/environment-variables.mdx +++ b/docs/development/frontend/environment-variables.mdx @@ -382,5 +382,5 @@ validateEnvironment() ## Related Documentation -- [Frontend Development Guide](/deploystack/development/frontend) - Main frontend development guide -- [Deploy DeployStack](/deploystack/self-hosted/docker-compose) - Deploy DeployStack with Docker Compose +- [Frontend Development Guide](/development/frontend) - Main frontend development guide +- [Deploy DeployStack](/self-hosted/docker-compose) - Deploy DeployStack with Docker Compose diff --git a/docs/deploystack/development/frontend/event-bus.mdx b/docs/development/frontend/event-bus.mdx similarity index 99% rename from docs/deploystack/development/frontend/event-bus.mdx rename to docs/development/frontend/event-bus.mdx index 5126e5d..2ac1dbe 100644 --- a/docs/deploystack/development/frontend/event-bus.mdx +++ b/docs/development/frontend/event-bus.mdx @@ -617,6 +617,6 @@ if (import.meta.env.DEV) { ## Related Documentation -- **[Frontend Storage System](/deploystack/development/frontend/storage)** - Persistent state management with automatic event emission +- **[Frontend Storage System](/development/frontend/storage)** - Persistent state management with automatic event emission The global event bus system provides a powerful and type-safe way to handle cross-component communication in the DeployStack frontend, enabling immediate updates and better user experience. 
diff --git a/docs/deploystack/development/frontend/global-settings.mdx b/docs/development/frontend/global-settings.mdx similarity index 97% rename from docs/deploystack/development/frontend/global-settings.mdx rename to docs/development/frontend/global-settings.mdx index ec39059..92ad71e 100644 --- a/docs/deploystack/development/frontend/global-settings.mdx +++ b/docs/development/frontend/global-settings.mdx @@ -507,13 +507,13 @@ function previousStep() { ## Integration with Existing Systems ### Design System -Follow the established [UI Design System](/deploystack/development/frontend/ui-design-system) patterns. Use shadcn-vue components and maintain consistency with the overall design. +Follow the established [UI Design System](/development/frontend/ui-design-system) patterns. Use shadcn-vue components and maintain consistency with the overall design. ### Internationalization -Add i18n support following the [Internationalization Guide](/deploystack/development/frontend/internationalization). Create dedicated translation files for your settings components. +Add i18n support following the [Internationalization Guide](/development/frontend/internationalization). Create dedicated translation files for your settings components. ### Event Bus -Use the [Global Event Bus](/deploystack/development/frontend/event-bus) for cross-component communication when settings are updated. +Use the [Global Event Bus](/development/frontend/event-bus) for cross-component communication when settings are updated. 
## Testing Custom Components diff --git a/docs/deploystack/development/frontend/index.mdx b/docs/development/frontend/index.mdx similarity index 93% rename from docs/deploystack/development/frontend/index.mdx rename to docs/development/frontend/index.mdx index 78406da..2a33fda 100644 --- a/docs/deploystack/development/frontend/index.mdx +++ b/docs/development/frontend/index.mdx @@ -240,11 +240,11 @@ const props = defineProps() ## UI Components and Styling -The frontend uses **TailwindCSS** for styling with **shadcn-vue** component library for consistent UI elements. For comprehensive styling guidelines, component patterns, and design standards, see the [UI Design System](/deploystack/development/frontend/ui-design-system) documentation. +The frontend uses **TailwindCSS** for styling with **shadcn-vue** component library for consistent UI elements. For comprehensive styling guidelines, component patterns, and design standards, see the [UI Design System](/development/frontend/ui-design-system) documentation. ## Environment Configuration -The frontend uses a sophisticated environment variable system that works seamlessly across development and production environments. For complete details on configuring and using environment variables, see the dedicated [Environment Variables Guide](/deploystack/development/frontend/environment-variables). +The frontend uses a sophisticated environment variable system that works seamlessly across development and production environments. For complete details on configuring and using environment variables, see the dedicated [Environment Variables Guide](/development/frontend/environment-variables). 
### Quick Start @@ -373,11 +373,11 @@ onMounted(() => { Continue reading the detailed guides: -- [UI Design System](/deploystack/development/frontend/ui-design-system) - Component patterns, styling guidelines, and design standards -- [Environment Variables](/deploystack/development/frontend/environment-variables) - Complete environment configuration guide -- [Global Event Bus](/deploystack/development/frontend/event-bus) - Cross-component communication system -- [Internationalization (i18n)](/deploystack/development/frontend/internationalization) - Multi-language support -- [Plugin System](/deploystack/development/frontend/plugins) - Extending functionality +- [UI Design System](/development/frontend/ui-design-system) - Component patterns, styling guidelines, and design standards +- [Environment Variables](/development/frontend/environment-variables) - Complete environment configuration guide +- [Global Event Bus](/development/frontend/event-bus) - Cross-component communication system +- [Internationalization (i18n)](/development/frontend/internationalization) - Multi-language support +- [Plugin System](/development/frontend/plugins) - Extending functionality ## Docker Development diff --git a/docs/deploystack/development/frontend/internationalization.mdx b/docs/development/frontend/internationalization.mdx similarity index 100% rename from docs/deploystack/development/frontend/internationalization.mdx rename to docs/development/frontend/internationalization.mdx diff --git a/docs/development/frontend/meta.json b/docs/development/frontend/meta.json new file mode 100644 index 0000000..644b6a9 --- /dev/null +++ b/docs/development/frontend/meta.json @@ -0,0 +1,9 @@ +{ + "title": "Frontend Development", + "description": "Documentation for DeployStack Frontend Development", + "icon": "Monitor", + "root": true, + "pages": [ + "..." 
+ ] +} diff --git a/docs/deploystack/development/frontend/plugins.mdx b/docs/development/frontend/plugins.mdx similarity index 100% rename from docs/deploystack/development/frontend/plugins.mdx rename to docs/development/frontend/plugins.mdx diff --git a/docs/deploystack/development/frontend/router-optimization.mdx b/docs/development/frontend/router-optimization.mdx similarity index 100% rename from docs/deploystack/development/frontend/router-optimization.mdx rename to docs/development/frontend/router-optimization.mdx diff --git a/docs/deploystack/development/frontend/storage.mdx b/docs/development/frontend/storage.mdx similarity index 95% rename from docs/deploystack/development/frontend/storage.mdx rename to docs/development/frontend/storage.mdx index 7aaa775..bf47da9 100644 --- a/docs/deploystack/development/frontend/storage.mdx +++ b/docs/development/frontend/storage.mdx @@ -6,9 +6,9 @@ sidebar: Storage # Frontend Storage System -The storage system is built into the [global event bus](/deploystack/development/frontend/event-bus) and provides persistent data management across route changes and browser sessions. This system uses localStorage with a type-safe API and automatically emits events when data changes. +The storage system is built into the [global event bus](/development/frontend/event-bus) and provides persistent data management across route changes and browser sessions. This system uses localStorage with a type-safe API and automatically emits events when data changes. 
-> **📖 For event bus fundamentals, see [Global Event Bus](/deploystack/development/frontend/event-bus)** +> **📖 For event bus fundamentals, see [Global Event Bus](/development/frontend/event-bus)** ## Overview @@ -463,6 +463,6 @@ const selectTeam = (team: Team) => { ## Related Documentation -- **[Global Event Bus](/deploystack/development/frontend/event-bus)** - Core event system that powers storage +- **[Global Event Bus](/development/frontend/event-bus)** - Core event system that powers storage The enhanced event bus storage system provides a powerful, type-safe way to manage persistent state in the DeployStack frontend, making it easy to maintain user preferences and application state across sessions. diff --git a/docs/development/frontend/ui-design-global-sonner.mdx b/docs/development/frontend/ui-design-global-sonner.mdx new file mode 100644 index 0000000..ddd785f --- /dev/null +++ b/docs/development/frontend/ui-design-global-sonner.mdx @@ -0,0 +1,461 @@ +--- +title: Global Sonner Toast System +description: Developer guide for using the global Sonner toast notification system in the DeployStack frontend. +sidebar: Sonner Toasts +--- + +# Global Sonner Toast System + +DeployStack uses [Sonner](https://sonner.emilkowal.ski/) for toast notifications, providing elegant and accessible notifications across the entire application. The system is globally configured and requires no additional setup in individual components. + +## Architecture Overview + +The global Sonner toaster is configured in `App.vue` as the root-level toast provider: + +```vue + + + + +``` + +This setup ensures toasts are displayed consistently across all routes and components without requiring local toast providers. + +## Basic Usage + +### Importing and Using Toasts + +```vue + +``` + +### Toast with Description + +```typescript +toast.success('Team updated successfully!', { + description: 'Your team settings have been saved.' 
+}) + +toast.error('Failed to save team', { + description: 'Network connection error. Please try again.' +}) +``` + +## Real-World Examples + +### API Success/Error Handling + +```vue + +``` + +### Form Validation Feedback + +```vue + +``` + +### Async Operations with Loading States + +```vue + +``` + +## Advanced Usage + +### Custom Duration + +```typescript +// Auto-dismiss after 10 seconds +toast.success('Long message', { + duration: 10000 +}) + +// Persistent toast (requires manual dismiss) +toast.error('Critical error', { + duration: Infinity +}) +``` + +### Action Buttons + +```typescript +toast('New version available', { + description: 'Click to update your application', + action: { + label: 'Update', + onClick: () => updateApplication() + } +}) +``` + +### Custom Styling + +```typescript +toast.success('Custom styled toast', { + className: 'custom-toast', + style: { + background: 'linear-gradient(45deg, #ff6b6b, #4ecdc4)' + } +}) +``` + +## Integration with Internationalization + +Combine Sonner with the [i18n system](/development/frontend/internationalization) for localized messages: + +```vue + +``` + +## Event Bus Integration + +Sonner works seamlessly with the [global event bus](/development/frontend/event-bus): + +```vue + +``` + +## Best Practices + +### 1. Use Appropriate Toast Types + +```typescript +// Good: Use semantic types +toast.success('Data saved') // For successful operations +toast.error('Save failed') // For errors and failures +toast.warning('Unsaved changes') // For warnings and cautions +toast('Information') // For neutral information + +// Avoid: Using wrong types +toast.success('Error occurred') // Wrong type for error +toast.error('Success message') // Confusing for users +``` + +### 2. Provide Meaningful Descriptions + +```typescript +// Good: Clear and actionable +toast.error('Failed to save team', { + description: 'Network connection error. Please check your internet connection and try again.' 
+}) + +// Avoid: Vague messages +toast.error('Error', { + description: 'Something went wrong' +}) +``` + +### 3. Handle Loading States + +```vue + +``` + +### 4. Avoid Toast Spam + +```typescript +// Good: Debounce frequent operations +import { debounce } from 'lodash-es' + +const debouncedSave = debounce(() => { + toast.success('Auto-saved') +}, 1000) + +// Avoid: Toast on every input change +const handleInput = () => { + toast('Input changed') // Too frequent! +} +``` + +## Common Pitfalls + +### ❌ Don't Add Multiple Toasters + +```vue + + +``` + +The global Toaster in `App.vue` handles all notifications automatically. + +### ❌ Don't Overuse Toasts + +```typescript +// DON'T: Toast for every minor action +toast('Button clicked') +toast('Mouse moved') +toast('Form field focused') + +// DO: Toast for meaningful user feedback +toast.success('Settings saved') +toast.error('Login failed') +toast.warning('Session expiring soon') +``` + +### ❌ Don't Forget Error Handling + +```typescript +// DON'T: Silent failures +const saveData = async () => { + try { + await api.save(data) + toast.success('Saved!') + } catch (error) { + // Silent failure - user doesn't know what happened + } +} + +// DO: Always provide error feedback +const saveData = async () => { + try { + await api.save(data) + toast.success('Data saved successfully') + } catch (error) { + toast.error('Failed to save data', { + description: error.message + }) + } +} +``` + +## Testing Toasts + +### Unit Testing + +```typescript +// tests/components/TeamManager.test.ts +import { vi } from 'vitest' +import { toast } from 'vue-sonner' + +// Mock toast functions +vi.mock('vue-sonner', () => ({ + toast: { + success: vi.fn(), + error: vi.fn(), + warning: vi.fn(), + } +})) + +describe('TeamManager', () => { + it('shows success toast when team is saved', async () => { + const wrapper = mount(TeamManager) + + await wrapper.vm.saveTeam() + + expect(toast.success).toHaveBeenCalledWith( + 'Team updated successfully!', + { 
description: 'Your team settings have been saved.' } + ) + }) +}) +``` + +## Migration from Other Toast Systems + +### From Vue-Toastification + +```typescript +// Before (vue-toastification) +this.$toast.success('Success message') +this.$toast.error('Error message') + +// After (Sonner) +import { toast } from 'vue-sonner' +toast.success('Success message') +toast.error('Error message') +``` + +### From Custom Alert Components + +```vue + + + Success message + + + + Error message + +``` + +```vue + + +``` + +The global Sonner system provides a consistent, accessible, and developer-friendly way to handle notifications across the entire DeployStack application. diff --git a/docs/deploystack/development/frontend/ui-design-system-pagination.mdx b/docs/development/frontend/ui-design-system-pagination.mdx similarity index 100% rename from docs/deploystack/development/frontend/ui-design-system-pagination.mdx rename to docs/development/frontend/ui-design-system-pagination.mdx diff --git a/docs/deploystack/development/frontend/ui-design-system-table.mdx b/docs/development/frontend/ui-design-system-table.mdx similarity index 100% rename from docs/deploystack/development/frontend/ui-design-system-table.mdx rename to docs/development/frontend/ui-design-system-table.mdx diff --git a/docs/deploystack/development/frontend/ui-design-system.mdx b/docs/development/frontend/ui-design-system.mdx similarity index 98% rename from docs/deploystack/development/frontend/ui-design-system.mdx rename to docs/development/frontend/ui-design-system.mdx index 4c9c705..9f99c41 100644 --- a/docs/deploystack/development/frontend/ui-design-system.mdx +++ b/docs/development/frontend/ui-design-system.mdx @@ -18,9 +18,9 @@ This document establishes the official UI design patterns and component standard ## Data Tables -For data table implementation, see the dedicated [Table Design System](/deploystack/development/frontend/ui-design-system-table) guide. 
+For data table implementation, see the dedicated [Table Design System](/development/frontend/ui-design-system-table) guide. -For pagination implementation, see the [Pagination Implementation Guide](/deploystack/development/frontend/ui-design-system-pagination). +For pagination implementation, see the [Pagination Implementation Guide](/development/frontend/ui-design-system-pagination). ## Badge Design Patterns diff --git a/docs/development/gateway/api.mdx b/docs/development/gateway/api.mdx new file mode 100644 index 0000000..719b609 --- /dev/null +++ b/docs/development/gateway/api.mdx @@ -0,0 +1,146 @@ +--- +title: Gateway API Communication +description: Backend communication patterns and URL management for CLI commands +sidebar: API +icon: Globe +--- + +# Gateway API Communication + +The DeployStack Gateway CLI manages backend communication automatically through stored configuration and credential management. This guide covers how CLI commands interact with the backend and manage different environments. + +## Backend URL Management + +### Automatic URL Storage + +When users authenticate with the gateway, the backend URL is automatically stored alongside their credentials. This eliminates the need to specify the backend URL for every command after initial login. + +**Storage Location:** +- **Primary**: macOS Keychain, Windows Credential Manager, or Linux Secret Service +- **Fallback**: Encrypted file at `~/.deploystack/credentials.enc` + +The backend URL is stored as part of the `StoredCredentials` object and persists across CLI sessions. + +### URL Resolution Priority + +CLI commands resolve the backend URL using this priority order: + +1. **Command-line override** - `--url` flag when provided +2. **Stored URL** - URL saved during authentication +3. **Default fallback** - `https://cloud.deploystack.io` + +This approach supports both development workflows with local backends and production usage seamlessly. 
+ +### Environment Detection + +The gateway automatically adapts behavior based on the backend URL: + +**Production Mode** (`https://cloud.deploystack.io`): +- Strict HTTPS enforcement +- Full SSL certificate validation +- Standard error messages + +**Development Mode** (localhost or custom URLs): +- HTTP connections allowed for localhost +- Development-specific error messages +- Additional debugging context + +## Command Implementation Patterns + +### Authentication Check + +All API-dependent commands should verify authentication before making requests. The credential storage handles token validation and expiration checking automatically. + +### Backend URL Usage + +Commands should retrieve stored credentials and use the embedded backend URL rather than requiring URL parameters. The URL resolution pattern ensures consistency across all commands. + +### Error Handling + +Different backend environments may return different error formats. Commands should handle both production and development error responses gracefully. + +## API Client Configuration + +### Credential Integration + +The API client accepts stored credentials and automatically extracts the appropriate backend URL. No additional URL configuration is required when credentials contain the backend information. + +### Request Headers + +All authenticated requests include: +- Bearer token authentication +- User-Agent identification +- Content-Type specification + +### Timeout Handling + +Network operations include appropriate timeouts with different values for various operation types: +- OAuth callback operations +- API requests +- Token refresh operations + +## Development Workflow + +### Local Backend Testing + +Developers working with local backends can authenticate once and have all commands automatically use the development server: + +The authentication flow stores the development URL, and subsequent commands use it automatically without additional configuration. 
+ +### URL Override Capability + +Commands maintain `--url` override options for testing different backends or switching environments temporarily without re-authentication. + +### Environment Switching + +To switch between environments, users can either: +- Re-authenticate with a different backend URL +- Use command-line URL overrides for temporary testing + +## Security Considerations + +### URL Validation + +Backend URLs are validated during authentication to ensure they meet security requirements for the target environment. + +### Credential Isolation + +Each backend URL maintains separate credential storage, preventing credential leakage between development and production environments. + +### HTTPS Enforcement + +Production environments enforce HTTPS communication, while development environments allow HTTP for localhost testing. + +## Error Response Handling + +### Network Errors + +Commands should provide helpful error messages that include the backend URL being used, especially for development environments where connectivity issues are common. + +### Authentication Errors + +Token expiration and invalid token errors should guide users to re-authenticate, preserving their backend URL preference. + +### Backend-Specific Errors + +Different backend versions or configurations may return varying error formats. Commands should handle these gracefully and provide consistent user experience. + +## Integration Guidelines + +### New Command Development + +When developing new CLI commands that interact with the backend: + +1. Use the credential storage system for authentication +2. Extract backend URL from stored credentials +3. Implement URL override options for flexibility +4. Handle environment-specific error cases +5. Provide clear error messages with backend context + +### API Client Usage + +The DeployStack API client handles most backend communication complexity automatically. Commands should focus on business logic rather than HTTP details. 
+ +### Testing Considerations + +Test commands against both production and development backends to ensure consistent behavior across environments. The URL storage system supports this testing workflow naturally. diff --git a/docs/development/gateway/caching-system.mdx b/docs/development/gateway/caching-system.mdx new file mode 100644 index 0000000..bb5da8e --- /dev/null +++ b/docs/development/gateway/caching-system.mdx @@ -0,0 +1,219 @@ +--- +title: Gateway Caching System +description: Team-aware tool caching architecture that enables fast gateway startup and automatic tool discovery across MCP servers +sidebar: Caching System +icon: Database +--- + +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Zap, Users, RefreshCw, Shield, Clock, HardDrive } from 'lucide-react'; + +# Gateway Caching System + +The DeployStack Gateway implements a sophisticated team-aware caching system that dramatically improves performance by pre-discovering and caching tools from MCP servers. This enables instant gateway startup and seamless tool availability for development teams. + +## Architecture Overview + +The caching system operates on a **cache-as-manifest philosophy** where tools are proactively discovered and stored locally, serving as both a performance optimization and a configuration manifest that defines what should be running versus what is actually running in the persistent background process model. 
+ +## Core Concepts + + + } + title="Fast Gateway Startup" + > + Cached tools enable instant gateway startup without waiting for MCP server discovery + + + } + title="Team-Aware Isolation" + > + Each team's tools are cached separately with complete isolation and security boundaries + + + } + title="Automatic Discovery" + > + Tools are automatically discovered and cached when switching teams or refreshing configurations + + + } + title="Secure Storage" + > + Cache files are stored securely with team-specific access controls and encryption + + + } + title="Intelligent Invalidation" + > + Cache is automatically invalidated based on configuration changes and time-based policies + + + } + title="Fallback Mechanisms" + > + Graceful fallback to cached data when live discovery fails or servers are unavailable + + + +## Cache Architecture + +### Storage Structure +The caching system uses a hierarchical file-based storage approach: + +- **Base Directory**: `~/.deploystack/cache/` +- **Team Isolation**: `teams/{teamId}/` +- **Cache Files**: `tools-cache.json` per team + +This structure ensures complete isolation between teams while providing fast local access to cached tool information. 
+ +### Cache Content +Each team's cache contains: + +- **Tool Definitions**: Complete tool schemas with input parameters and descriptions +- **Server Metadata**: Information about which MCP server provides each tool +- **Namespaced Names**: Tools are namespaced as `serverName-toolName` for conflict resolution +- **Discovery Timestamps**: When each tool was last discovered and validated +- **Configuration Hashes**: Checksums to detect when server configurations change + +## Tool Discovery Workflow + +### Automatic Discovery Triggers +Tool discovery is automatically triggered during: + +- **Team Switching**: When developers switch to a different team context +- **Configuration Refresh**: When MCP server configurations are updated from the cloud +- **Manual Refresh**: When developers explicitly request tool discovery +- **Cache Invalidation**: When cached data becomes stale or invalid + +### Discovery Process +The discovery workflow follows these steps: + +1. **Server Enumeration**: Identify all MCP servers configured for the team +2. **Process Communication**: Connect to already-running MCP server processes as described in [Gateway Process Management](/development/gateway/process-management) +3. **Tool Interrogation**: Query each running server for its available tools using MCP protocol +4. **Schema Extraction**: Extract complete tool schemas including parameters and descriptions +5. **Namespacing**: Apply server-specific namespacing to prevent tool name conflicts +6. **Cache Storage**: Store discovered tools in the team-specific cache file + +**Note**: In the persistent background process model, tool discovery communicates with already-running MCP servers rather than spawning processes specifically for discovery. 
+ +### Centralized Management +All tool discovery operations are managed through a centralized `ToolDiscoveryManager` that: + +- **Eliminates Code Duplication**: Single source of truth for all discovery logic +- **Provides Consistent Behavior**: Uniform discovery behavior across all Gateway components +- **Handles Error Recovery**: Robust error handling with fallback mechanisms +- **Manages Progress Feedback**: Consistent user feedback during discovery operations + +## Cache Invalidation Strategy + +### Time-Based Invalidation +Cache entries are automatically invalidated based on: + +- **Maximum Age**: Default 24-hour time-to-live for cached tool information +- **Configuration Changes**: Immediate invalidation when server configurations change +- **Team Context Changes**: Cache clearing when switching between teams + +### Configuration-Based Invalidation +The system detects configuration changes through: + +- **Server Configuration Hashing**: Checksums of server spawn commands and environment variables +- **Team Membership Changes**: Detection of team member additions or removals +- **Permission Updates**: Changes to team-based access policies + +### Manual Invalidation +Developers and administrators can manually invalidate cache through: + +- **CLI Commands**: Explicit cache clearing and refresh commands +- **Team Switching**: Automatic cache refresh when switching team contexts +- **Configuration Updates**: Cache refresh when updating MCP server configurations + +## Performance Optimization + +### Cache-First Strategy +The Gateway prioritizes cached data for optimal performance: + +- **Instant Tool Exposure**: Cached tools are immediately available to MCP clients +- **Background Refresh**: Cache updates happen asynchronously without blocking operations +- **Predictive Loading**: Frequently-used tools are kept warm in cache +- **Lazy Discovery**: New servers are discovered on-demand when first accessed + +### Fallback Mechanisms +When live discovery fails, the 
system provides graceful degradation: + +- **Cached Tool Fallback**: Use previously cached tools when servers are unavailable +- **Partial Discovery**: Continue with available tools even if some servers fail +- **Error State Caching**: Cache error states to avoid repeated failed discovery attempts +- **Recovery Strategies**: Automatic retry with exponential backoff for failed discoveries + +## Team Isolation and Security + +### Access Control +Each team's cache is completely isolated through: + +- **Directory Separation**: Team-specific cache directories prevent cross-team access +- **File Permissions**: Operating system-level permissions restrict cache file access +- **Encryption**: Sensitive cache data is encrypted using team-specific keys +- **Audit Logging**: All cache operations are logged for security and compliance + +### Data Privacy +The caching system ensures data privacy by: + +- **Local Storage Only**: Cache files are stored locally and never transmitted +- **Credential Exclusion**: No sensitive credentials are stored in cache files +- **Metadata Only**: Only tool schemas and metadata are cached, not actual data +- **Automatic Cleanup**: Cache files are automatically cleaned up when teams are removed + +## Integration Points + +The caching system integrates seamlessly with other Gateway components: + +- **[MCP Configuration Management](/development/gateway/mcp)**: Uses team configurations to determine which servers to discover +- **[Gateway Process Management](/development/gateway/process-management)**: Coordinates with process spawning for tool discovery +- **[Gateway Project Structure](/development/gateway/structure)**: Implements the centralized architecture through the utils layer +- **HTTP Proxy Server**: Provides cached tool information for immediate client responses + +## Cache Management Operations + +### Developer Commands +The Gateway provides several commands for cache management: + +- **Status Checking**: View current cache status and tool 
counts +- **Manual Refresh**: Force refresh of cached tools from all servers +- **Cache Clearing**: Remove cached data for troubleshooting +- **Discovery Testing**: Validate tool discovery for specific servers + +### Administrative Operations +Enterprise administrators can manage caching through: + +- **Team-Wide Refresh**: Refresh cache for all team members +- **Policy Enforcement**: Apply caching policies across teams +- **Usage Analytics**: Monitor cache hit rates and discovery patterns +- **Troubleshooting**: Diagnose cache-related issues and performance problems + +## Monitoring and Observability + +### Cache Metrics +The system tracks comprehensive caching metrics: + +- **Cache Hit Rates**: Percentage of requests served from cache vs. live discovery +- **Discovery Success Rates**: Success/failure rates for tool discovery operations +- **Cache Size**: Storage usage and tool counts per team +- **Refresh Frequency**: How often cache is refreshed and invalidated + +### Performance Indicators +Key performance indicators include: + +- **Gateway Startup Time**: Time from start to tool availability +- **Tool Discovery Duration**: Time required to discover tools from each server +- **Cache Effectiveness**: Reduction in discovery time due to caching +- **Error Recovery Time**: Time to recover from failed discovery operations + +This caching system ensures that the DeployStack Gateway provides instant tool availability while maintaining the security, isolation, and performance requirements of enterprise development teams. 
diff --git a/docs/development/gateway/enterprise-management.mdx b/docs/development/gateway/enterprise-management.mdx new file mode 100644 index 0000000..e77463c --- /dev/null +++ b/docs/development/gateway/enterprise-management.mdx @@ -0,0 +1,303 @@ +--- +title: Enterprise MCP Management +description: How the Gateway transforms MCP servers into enterprise governance tools with toggleable controls +sidebar: Enterprise Management +icon: Building2 +--- + +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Building2, ToggleLeft, Eye, Shield } from 'lucide-react'; + +# Enterprise MCP Management + +The DeployStack Gateway transforms individual MCP servers into enterprise governance tools, presenting each server as a toggleable tool with comprehensive management capabilities for organizational control. + +## Business Context + +### The Enterprise Challenge +Traditional MCP implementations expose individual tools from multiple servers, creating a complex landscape that's difficult to govern at scale. Enterprise organizations need: + +- **Visibility**: Clear overview of which MCP servers are available and active +- **Control**: Ability to enable/disable entire MCP servers based on policy +- **Governance**: Centralized management with audit trails +- **Compliance**: Team-based access controls and usage monitoring + +### DeployStack Solution +The Gateway addresses these challenges by presenting **MCP servers as tools** rather than exposing individual server tools, enabling enterprise governance while maintaining developer productivity. 
+ +## Architecture Overview + + + } + title="Server-as-Tool Model" + > + Each MCP server appears as a single toggleable tool with rich metadata + + + } + title="Management Actions" + > + Enable, disable, and status operations for operational control + + + } + title="Enterprise Visibility" + > + Rich descriptions and metadata from secure catalog integration + + + } + title="Policy Enforcement" + > + Team-based access controls with centralized governance + + + +## Tool Transformation + +### From Individual Tools to Server Management +**Traditional MCP Approach:** +```json +{ + "tools": [ + {"name": "brightdata__search", "description": "Search the web"}, + {"name": "brightdata__scrape", "description": "Scrape webpage content"}, + {"name": "calculator__add", "description": "Add two numbers"}, + {"name": "calculator__multiply", "description": "Multiply numbers"} + ] +} +``` + +**DeployStack Enterprise Approach:** +```json +{ + "tools": [ + { + "name": "brightdata-mcp", + "description": "brightdata-mcp MCP server - Web scraping and data collection", + "inputSchema": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["enable", "disable", "status"] + } + } + } + }, + { + "name": "calculator-server", + "description": "calculator-server MCP server - Mathematical operations and calculations", + "inputSchema": { + "type": "object", + "properties": { + "action": { + "type": "string", + "enum": ["enable", "disable", "status"] + } + } + } + } + ] +} +``` + +## Management Actions + +### Enable Action +**Purpose**: Activate an MCP server for use +**Usage**: `{"action": "enable"}` + +**Process:** +1. Validates server configuration from team catalog +2. Spawns MCP server process with injected credentials +3. Establishes stdio communication channel +4. 
Returns operational status and process information + +**Response Example:** +```json +{ + "server": "brightdata-mcp", + "action": "enabled", + "status": "running", + "message": "brightdata-mcp MCP server has been enabled and is running" +} +``` + +### Disable Action +**Purpose**: Deactivate a running MCP server +**Usage**: `{"action": "disable"}` + +**Process:** +1. Locates running MCP server process +2. Gracefully terminates process with 5-second timeout +3. Cleans up resources and communication channels +4. Confirms successful shutdown + +**Response Example:** +```json +{ + "server": "brightdata-mcp", + "action": "disabled", + "status": "stopped", + "message": "brightdata-mcp MCP server has been disabled" +} +``` + +### Status Action (Default) +**Purpose**: Retrieve comprehensive server information +**Usage**: `{"action": "status"}` or no action parameter + +**Information Provided:** +- Current operational status (running/stopped) +- Server description from enterprise catalog +- Runtime environment details +- Performance metrics (uptime, message count, error count) +- Process health information + +**Response Example:** +```json +{ + "server": "brightdata-mcp", + "action": "status_check", + "status": "running", + "description": "Web scraping and data collection platform", + "runtime": "nodejs", + "message": "brightdata-mcp MCP server is running", + "uptime": 1847293, + "messageCount": 42, + "errorCount": 0 +} +``` + +## Enterprise Benefits + +### Centralized Governance +- **Policy Enforcement**: Administrators control which MCP servers are available per team +- **Access Control**: Team-based permissions determine server availability +- **Audit Trail**: All enable/disable actions logged for compliance +- **Resource Management**: Centralized control over computational resources + +### Developer Experience +- **Simplified Interface**: Developers see clean server names instead of complex tool hierarchies +- **Rich Metadata**: Comprehensive descriptions help developers 
understand capabilities +- **Operational Control**: Developers can manage server lifecycle as needed +- **Status Transparency**: Clear visibility into server health and performance + +### Operational Excellence +- **Resource Optimization**: Servers only run when needed, reducing resource consumption +- **Error Isolation**: Server-level management isolates issues to specific services +- **Performance Monitoring**: Built-in metrics for operational visibility +- **Graceful Degradation**: Individual server failures don't impact other services + +## Metadata Integration + +### Catalog-Driven Descriptions +Server descriptions are pulled from the enterprise catalog stored securely: + +```typescript +// From team configuration +const installation = teamConfig.installations.find( + inst => inst.installation_name === serverName +); + +const description = installation?.server?.description || ''; + +// Resulting tool description +const toolDescription = `${serverName} MCP server${description ? ` - ${description}` : ''}`; +``` + +### Rich Server Information +Each server tool includes: +- **Installation Name**: Clean, human-readable identifier +- **Description**: Business context from enterprise catalog +- **Runtime**: Technical environment (nodejs, python, go, etc.) 
+- **Team Context**: Access permissions and policies +- **Operational Metrics**: Performance and health data + +## Security and Compliance + +### Credential Management +- **Secure Injection**: Credentials injected at process spawn time +- **No Exposure**: Developers never see or handle credentials directly +- **Centralized Control**: All credentials managed through enterprise catalog +- **Audit Trail**: Credential usage tracked for compliance + +### Access Control +- **Team-Based**: Server availability determined by team membership +- **Policy-Driven**: Enterprise policies control server access +- **Role-Based**: Different permissions for different team roles +- **Centralized Management**: All access control managed through cloud control plane + +### Monitoring and Compliance +- **Usage Tracking**: All server interactions logged and monitored +- **Performance Metrics**: Operational data for capacity planning +- **Error Reporting**: Centralized error tracking and alerting +- **Compliance Reporting**: Audit trails for regulatory requirements + +## Implementation Workflow + +### Tool Discovery Flow +1. **Client Request**: Development tool calls `tools/list` +2. **Server Enumeration**: Gateway iterates through team's MCP server configurations +3. **Metadata Enrichment**: Descriptions pulled from secure catalog +4. **Tool Generation**: Each server becomes a management tool +5. **Response**: Clean list of server management tools returned + +### Tool Execution Flow +1. **Action Request**: Client calls server tool with management action +2. **Server Identification**: Gateway maps tool name to server configuration +3. **Action Processing**: Enable/disable/status action executed +4. **Process Management**: Server processes spawned/terminated as needed +5. **Response**: Operational status and metadata returned + +## Developer Workflow + +### Typical Usage Pattern +1. **Discovery**: Developer calls `tools/list` to see available MCP servers +2. 
**Status Check**: Calls server tool with `status` action to understand current state +3. **Activation**: Uses `enable` action to start needed MCP servers +4. **Work**: Utilizes MCP server capabilities through other tools/interfaces +5. **Cleanup**: Uses `disable` action to stop servers when done + +### VS Code Integration +In VS Code, developers see: +``` +🔧 Available Tools: +├── brightdata-mcp - brightdata-mcp MCP server - Web scraping and data collection +├── calculator-server - calculator-server MCP server - Mathematical operations +└── github-integration - github-integration MCP server - GitHub API access +``` + +Each tool can be toggled on/off with simple actions, providing enterprise governance with developer-friendly controls. + +## Developer Tool Discovery + +### CLI-Based Exploration +Before enabling MCP servers through the enterprise management interface, developers can explore available tools using the CLI tool discovery feature: + +**Command**: `deploystack mcp --tools <server-name>` + +**Purpose**: Allows developers to understand what capabilities each MCP server provides before activation, enabling informed decisions about which servers to enable for their workflow. + +**Benefits**: +- **Preview Capabilities**: See all available tools and their descriptions without starting the server +- **Parameter Understanding**: Review required and optional parameters for each tool +- **Informed Decisions**: Choose the right MCP servers based on actual tool availability +- **Development Planning**: Plan workflows around available tool capabilities + +### Integration with Enterprise Management +The CLI tool discovery complements the enterprise management approach: + +1. **Discovery Phase**: Developer uses `deploystack mcp --tools` to explore server capabilities +2. **Planning Phase**: Developer identifies which servers provide needed functionality +3. **Activation Phase**: Developer enables specific servers through enterprise management tools +4. 
**Utilization Phase**: Developer uses the activated servers' capabilities in their workflow + +This workflow ensures developers make informed decisions about server activation while maintaining enterprise governance and control. + +The enterprise management layer transforms complex MCP server ecosystems into manageable, governable, and developer-friendly tools that meet both organizational requirements and developer productivity needs. diff --git a/docs/development/gateway/index.mdx b/docs/development/gateway/index.mdx new file mode 100644 index 0000000..c904b23 --- /dev/null +++ b/docs/development/gateway/index.mdx @@ -0,0 +1,229 @@ +--- +title: Gateway Development +description: Developer documentation for the DeployStack Gateway - the local secure proxy that manages MCP servers and credentials for enterprise teams. +sidebar: Gateway +icon: Terminal +--- + +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Terminal, Code2, Settings, Shield, Zap, Users, Rocket } from 'lucide-react'; + +# DeployStack Gateway Development + +The DeployStack Gateway is the local secure proxy that connects developers to their team's MCP servers through a centralized control plane. It acts as a smart process manager and credential vault, running MCP server processes as persistent background services while enforcing access policies from the cloud. 
+ +## Architecture Overview + +The Gateway implements a sophisticated Control Plane / Data Plane architecture with dual transport support: + +- **Control Plane**: Authenticates with `cloud.deploystack.io` to download team configurations and access policies +- **Data Plane**: Manages local MCP server processes with both stdio and SSE transport protocols +- **Security Layer**: Injects credentials securely into process environments without exposing them to developers +- **Session Management**: Handles secure SSE connections with cryptographic session IDs for VS Code compatibility + +## Core Features + + + } + title="Dual Transport Support" + > + Supports both stdio transport for CLI tools and SSE transport for VS Code compatibility + + + } + title="Secure Credential Injection" + > + Injects API tokens and credentials directly into process environments without developer exposure + + + } + title="Individual Tool Exposure" + > + Exposes individual MCP tools with namespacing (e.g., brightdata-search_engine) for direct use in development environments + + + } + title="Session Management" + > + Cryptographically secure session handling with automatic cleanup for persistent connections + + + } + title="Unified Proxy" + > + Single HTTP endpoint supporting multiple client types with intelligent request routing + + + } + title="Team-Based Access" + > + Enforces team-based access control policies downloaded from the cloud control plane + + + } + title="Tool Caching System" + > + Team-aware caching enables instant gateway startup and automatic tool discovery on team switching + + + +## Development Setup + +### Prerequisites + +- Node.js (v18 or higher) +- npm (v8 or higher) +- TypeScript development environment +- A DeployStack account at [cloud.deploystack.io](https://cloud.deploystack.io) + +### Local Development + +```bash +# Navigate to the gateway service +cd services/gateway + +# Install dependencies +npm install + +# Start development server +npm run dev + +# Build for 
production +npm run build + +# Start production build +npm start +``` + +## Key Components + +### Authentication Module +Handles secure authentication with the DeployStack cloud control plane and manages access tokens. + +### Configuration Sync +Downloads and synchronizes team MCP server configurations, including process spawn commands and environment variables. + +### Process Manager +Manages the lifecycle of MCP server processes, including: +- On-demand process spawning +- Stdio communication handling +- Process cleanup and resource management +- Environment variable injection + +### HTTP Proxy Server +Exposes dual endpoints for different client types: +- **GET /sse**: SSE connection establishment for VS Code +- **POST /message**: Session-based JSON-RPC for SSE clients + +### Session Manager +Handles secure SSE connections with: +- Cryptographically secure session ID generation +- Session lifecycle management and cleanup +- Connection state tracking and validation +- Automatic timeout and resource management + +### Enterprise Management Layer +Transforms MCP servers into enterprise governance tools: +- Each MCP server appears as a toggleable tool +- Enable/disable/status actions for operational control +- Rich metadata from secure catalog integration +- Team-based access policy enforcement + +### Security Layer +Ensures credentials are handled securely: +- Encrypted storage of downloaded configurations +- Secure environment variable injection +- No credential exposure to developer environment +- Session-based authentication for persistent connections + +## Configuration Format + +The Gateway works with MCP server configurations in this format: + +```json +{ + "name": "brightdata", + "command": "npx", + "args": ["@brightdata/mcp"], + "env": { + "API_TOKEN": "secure-token-from-vault" + } +} +``` + +## Development Workflow + +1. **Authentication**: Gateway authenticates with cloud control plane +2. **Config Download**: Downloads team's MCP server configurations +3. 
**Persistent Process Startup**: Starts all configured MCP servers as background processes when gateway launches +4. **HTTP Server**: Starts local HTTP server with SSE endpoints immediately available (default: `localhost:9095/sse`) +5. **Request Handling**: Receives MCP requests from development tools and routes to already-running processes +6. **Process Management**: Maintains persistent background processes as described in [Gateway Process Management](/development/gateway/process-management). +7. **Credential Injection**: Securely injects environment variables into running processes at startup +8. **Tool Routing**: Routes namespaced tool calls to persistent MCP servers via stdio transport + +For detailed information about the caching system, see [Gateway Caching System](/development/gateway/caching-system). + +## Language Support + +The Gateway is language-agnostic and supports MCP servers written in: + +- **Node.js**: `npx`, `node` commands +- **Python**: `python`, `pip`, `pipenv` commands +- **Go**: Compiled binary execution +- **Rust**: Compiled binary execution +- **Any Language**: Via appropriate runtime commands + +## Security Considerations + +### Credential Management +- Credentials are never written to disk in plain text +- Environment variables are injected directly into spawned processes +- No credential exposure to the developer's shell environment + +### Process Isolation +- Each MCP server runs in its own isolated process +- Process cleanup ensures no resource leaks +- Automatic process termination after idle periods + +### Network Security +- Local HTTP server only binds to localhost +- No external network exposure by default +- Secure communication with cloud control plane + +## Testing + +```bash +# Run unit tests +npm test + +# Run integration tests +npm run test:integration + +# Run with coverage +npm run test:coverage +``` + +## Contributing + +The Gateway is actively under development. 
Key areas for contribution: + +- **Process Management**: Improving spawn/cleanup logic +- **Security**: Enhancing credential handling +- **Performance**: Optimizing stdio communication +- **Platform Support**: Adding Windows/Linux compatibility +- **Error Handling**: Robust error recovery + +## Roadmap + +- **Phase 2**: Enhanced process lifecycle management +- **Phase 3**: Support for remote MCP servers (HTTP transport) +- **Phase 4**: Advanced monitoring and analytics +- **Future**: Plugin system for custom MCP server types diff --git a/docs/development/gateway/mcp.mdx b/docs/development/gateway/mcp.mdx new file mode 100644 index 0000000..dde34d6 --- /dev/null +++ b/docs/development/gateway/mcp.mdx @@ -0,0 +1,157 @@ +--- +title: Gateway MCP Configuration Management +description: How the DeployStack Gateway CLI downloads, processes, and securely stores MCP server configurations for teams +sidebar: MCP Configuration +icon: Bot +--- + +# Gateway MCP Configuration Management + +The DeployStack Gateway CLI automatically manages MCP (Model Context Protocol) server configurations for teams, downloading installation data from the backend API and storing it securely for local process management. 
+ +## Overview + +The Gateway implements a sophisticated MCP configuration system that: + +- **Downloads** team MCP installations from the backend API +- **Processes** raw API data into Gateway-ready server configurations +- **Stores** both raw and processed data securely using OS-level storage +- **Manages** team context switching with automatic config updates + +## API Integration + +### Endpoint +The Gateway fetches MCP installations from: +``` +GET /api/teams/{teamId}/mcp/installations +``` + +### Response Structure +The API returns team MCP installations with this interface: +```typescript +interface MCPInstallationsResponse { + success: boolean; + data: MCPInstallation[]; +} +``` + +## Data Storage Architecture + +### Dual Storage Approach +The Gateway stores **both** raw API data and processed configurations: + +1. **Raw Installations** - Complete API response for audit and debugging +2. **Processed Server Configs** - Gateway-ready configurations for process spawning + +### Storage Interface +```typescript +interface TeamMCPConfig { + team_id: string; + team_name: string; + installations: MCPInstallation[]; // Raw API data + servers: MCPServerConfig[]; // Processed configs + last_updated: string; +} +``` + +### Secure Storage +- **Primary**: OS Keychain (macOS Keychain, Windows Credential Manager, Linux Secret Service) +- **Fallback**: AES-256-CBC encrypted files +- **Key Format**: `${userEmail}-${teamId}` for team isolation + +## Configuration Processing + +The Gateway transforms raw API installations into executable server configurations: + +### Runtime Detection +- **Node.js**: `npx @package-name` +- **Python**: `python -m package_name` +- **Go**: Direct binary execution +- **Custom**: Uses `installation_methods` from API + +### Environment Variable Merging +1. Server default environment variables +2. User-customized overrides from `user_environment_variables` +3. 
Secure injection at process spawn time + +## Team Context Integration + +### Automatic Management +- **Login**: Downloads default team's MCP configuration +- **Team Switch**: Clears old config, downloads new team's config +- **Logout**: Clears all stored MCP configurations + +### Configuration Lifecycle +1. API authentication and team selection +2. MCP installations download via API +3. Data validation and filtering +4. Configuration processing and transformation +5. Secure storage with team isolation +6. Runtime access for process management + +## Developer Commands + +### Configuration Management +- `deploystack mcp --status` - Show current configuration status +- `deploystack mcp --refresh` - Force refresh from API +- `deploystack mcp --clear` - Clear stored configuration +- `deploystack mcp --test` - Run processing validation tests + +### Debug Information +The `deploystack mcp` command shows raw stored data including: +- Complete team information +- Processed server configurations +- Raw API installation data +- Environment variables (with sensitive data masking) + +## Security Considerations + +### Data Isolation +- Each team's configuration stored with unique keys +- No cross-team data access possible +- Automatic cleanup on team changes + +### Credential Protection +- Environment variables injected at runtime only +- No plain text storage of sensitive data +- OS-level keychain integration for maximum security + +## Tool Discovery and Caching + +Beyond configuration management, the Gateway implements an advanced tool discovery system that automatically identifies and caches individual tools from each MCP server. 
This system operates seamlessly with the configuration management to provide: + +### Automatic Discovery +- **Team Switching**: Tools are automatically discovered from all servers when switching teams +- **Configuration Updates**: Tool cache is refreshed when server configurations change +- **Manual Refresh**: Developers can explicitly refresh tools using CLI commands + +### Team-Aware Caching +- **Isolated Storage**: Each team's discovered tools are cached separately +- **Fast Startup**: Gateway starts instantly using cached tool information +- **Fallback Support**: Cached tools remain available even when servers are temporarily unavailable + +For comprehensive details about the tool discovery and caching system, see [Gateway Caching System](/development/gateway/caching-system). + +## Developer Commands + +### Configuration Management +- `deploystack mcp --status` - Show current configuration status +- `deploystack mcp --refresh` - Force refresh from API +- `deploystack mcp --clear` - Clear stored configuration + +### Tool Discovery +- `deploystack mcp --tools <server-name>` - Discover and display tools from a specific MCP server (requires running gateway) +- `deploystack teams --switch <team-name>` - Switch teams with automatic tool discovery + +**Note**: The `--tools` command only works when the gateway is running (`deploystack start`), as it communicates with already-running MCP server processes rather than spawning them on-demand. 
+ +## Integration Points + +The stored MCP configurations are consumed by: + +- **Process Manager** - Spawns MCP server processes using stored configs as described in [Process Management](/development/gateway/process-management) +- **HTTP Proxy** - Routes requests to appropriate MCP servers using cached tool information +- **Environment Injection** - Securely provides credentials to spawned processes +- **Tool Discovery System** - Uses configurations to discover and cache available tools as detailed in [Gateway Caching System](/development/gateway/caching-system) + +This system ensures that the Gateway has immediate access to team-specific MCP server configurations while maintaining security and team isolation throughout the development workflow. diff --git a/docs/development/gateway/meta.json b/docs/development/gateway/meta.json new file mode 100644 index 0000000..b37fbce --- /dev/null +++ b/docs/development/gateway/meta.json @@ -0,0 +1,9 @@ +{ + "title": "Gateway Development", + "description": "Documentation for DeployStack Gateway Development", + "icon": "Plug", + "root": true, + "pages": [ + "..." + ] +} diff --git a/docs/development/gateway/oauth.mdx b/docs/development/gateway/oauth.mdx new file mode 100644 index 0000000..86a98b0 --- /dev/null +++ b/docs/development/gateway/oauth.mdx @@ -0,0 +1,225 @@ +--- +title: Gateway OAuth Implementation +description: OAuth2 client implementation for CLI authentication with DeployStack backend +sidebar: OAuth +icon: Shield +--- + +# Gateway OAuth Implementation + +The DeployStack Gateway implements an OAuth2 client for secure CLI authentication with the DeployStack backend. This enables users to authenticate via their browser and use the CLI with proper access tokens. + +## Architecture Overview + +The gateway acts as an OAuth2 client implementing the **Authorization Code flow with PKCE** (Proof Key for Code Exchange) for enhanced security. 
The implementation consists of: + +- **OAuth2 Client** - Handles the complete authorization flow +- **Callback Server** - Temporary HTTP server for receiving authorization codes +- **API Client** - Makes authenticated requests to backend APIs +- **Credential Storage** - Secure token storage and retrieval + +## OAuth2 Flow Process + +### 1. Authorization Request + +When a user runs the login command, the CLI: + +- Generates a cryptographically secure PKCE code verifier (128 random bytes) +- Creates a SHA256 code challenge from the verifier +- Generates a random state parameter for CSRF protection +- Builds the authorization URL with all required OAuth2 parameters +- Opens the user's default browser to the authorization endpoint +- Starts a temporary callback server on localhost port 8976 + +The authorization URL includes: +- `response_type=code` for authorization code flow +- `client_id=deploystack-gateway-cli` for client identification +- `redirect_uri=http://localhost:8976/oauth/callback` for callback handling +- Requested scopes: `mcp:read account:read user:read teams:read offline_access` +- PKCE parameters: `code_challenge` and `code_challenge_method=S256` +- Random `state` parameter for security + +### 2. User Authorization + +The browser opens to the backend's consent page where the user: + +- Reviews the requested permissions and scopes +- Sees security warnings about CLI access +- Can approve or deny the authorization request +- Is redirected back to the CLI's callback server upon decision + +### 3. Callback Handling + +The temporary callback server: + +- Listens only on localhost for security +- Validates the callback path (`/oauth/callback`) +- Extracts the authorization code and state parameters +- Validates the state parameter matches the original request +- Displays a success or error page to the user +- Automatically shuts down after receiving the callback + +### 4. 
Token Exchange + +After receiving the authorization code, the CLI: + +- Exchanges the code for access and refresh tokens +- Includes the PKCE code verifier for verification +- Validates the token response from the backend +- Fetches user information using the new access token +- Stores credentials securely for future use + +## PKCE Security Implementation + +The gateway implements PKCE (Proof Key for Code Exchange) following RFC 7636: + +- **Code Verifier**: 128 random bytes encoded as base64url +- **Code Challenge**: SHA256 hash of the verifier, base64url encoded +- **Challenge Method**: Always uses `S256` (SHA256) +- **State Validation**: Cryptographically secure random state parameter + +PKCE provides security benefits: +- Prevents authorization code interception attacks +- No client secret required (suitable for public clients) +- Protects against malicious applications + +## Client Configuration + +The gateway is pre-registered with the backend as: + +- **Client ID**: `deploystack-gateway-cli` +- **Client Type**: Public client (no secret required) +- **Redirect URIs**: `http://localhost:8976/oauth/callback`, `http://127.0.0.1:8976/oauth/callback` +- **Allowed Scopes**: `mcp:read`, `account:read`, `user:read`, `teams:read`, `offline_access` +- **PKCE**: Required with SHA256 method +- **Token Lifetime**: 1 week access tokens, 30 day refresh tokens + +## Command Integration + +### Login Command + +The login command orchestrates the complete OAuth2 flow: + +- Checks if the user is already authenticated +- Displays "already logged in" message if credentials are valid +- Initiates the OAuth2 flow if authentication is needed +- Handles browser opening and callback server management +- Stores credentials securely upon successful authentication +- Provides clear success confirmation with user email + +### Authenticated Commands + +Commands like `whoami`, `teams`, and `start` use stored credentials: + +- Check authentication status before proceeding +- Display helpful 
error messages if not authenticated +- Use Bearer token authentication for API requests +- Automatically refresh expired tokens when possible +- Handle token expiration gracefully + +## Error Handling + +The OAuth implementation includes comprehensive error handling: + +### Error Types + +- **TIMEOUT**: OAuth callback not received within time limit +- **ACCESS_DENIED**: User denied the authorization request +- **BROWSER_ERROR**: Failed to open browser automatically +- **NETWORK_ERROR**: Network connectivity issues +- **STORAGE_ERROR**: Failed to store credentials securely +- **TOKEN_EXPIRED**: Access token has expired +- **INVALID_TOKEN**: Token format or signature invalid +- **INVALID_GRANT**: Authorization code or refresh token invalid + +### User Guidance + +Each error type provides specific user guidance: +- Timeout errors suggest retrying the command +- Access denied errors explain the approval requirement +- Browser errors offer manual URL opening +- Network errors suggest connectivity checks +- Storage errors indicate keychain permission issues + +## Browser Integration + +The CLI provides seamless browser integration: + +- **Automatic Opening**: Uses the system's default browser +- **Cross-Platform**: Works on Windows, macOS, and Linux +- **Fallback Handling**: Displays manual URL if auto-open fails +- **User Feedback**: Clear messages about browser actions +- **Security Warnings**: Alerts for development server usage + +## Token Management + +### Token Refresh + +The gateway automatically handles token refresh: + +- Monitors token expiration with 5-minute buffer +- Attempts refresh before tokens expire +- Uses refresh tokens for seamless re-authentication +- Falls back to full re-authentication if refresh fails +- Updates stored credentials with new tokens + +### Token Validation + +Before each API request, the gateway: + +- Checks token expiration locally +- Validates token format and structure +- Handles 401 responses with automatic refresh +- Provides 
clear error messages for invalid tokens + +## Development vs Production + +The OAuth client adapts to different environments: + +### Development Mode +- Uses HTTP for localhost callback server +- Accepts self-signed certificates for development +- Displays security warnings for non-production servers +- Provides detailed error information for debugging + +### Production Mode +- Enforces HTTPS for all communications +- Validates SSL certificates strictly +- Uses secure callback URLs +- Limits error information exposure + +## Integration with Backend + +The gateway OAuth client integrates with the [backend OAuth2 server](/development/backend/oauth#oauth2-server-implementation): + +- **Client Registration**: Pre-registered with known client ID +- **PKCE Support**: Uses SHA256 method as required by backend +- **Scope Validation**: Requests only backend-supported scopes +- **Token Format**: Handles backend's custom JWT-like token format +- **Error Responses**: Processes standard OAuth2 error responses +- **Endpoint Discovery**: Uses standard OAuth2 endpoint paths + +## Security Considerations + +The OAuth implementation follows security best practices: + +- **PKCE Required**: All authorization requests use PKCE +- **State Validation**: Prevents CSRF attacks +- **Localhost Binding**: Callback server only accepts local connections +- **Timeout Protection**: All operations have reasonable timeouts +- **Secure Storage**: Credentials stored using OS keychain +- **No Secrets**: Public client design eliminates secret management + +For detailed security implementation including credential storage, token expiration, and local file security, see the [Gateway Security Guide](/development/gateway/security). + +## Testing OAuth Flow + +During development, the OAuth flow can be tested: + +1. Start the backend in development mode +2. Build the gateway CLI +3. Run the login command with development URL +4. Complete the browser authorization flow +5. 
Verify authentication with the whoami command + +The OAuth implementation provides a secure, user-friendly authentication experience that follows industry standards while integrating seamlessly with the DeployStack backend. diff --git a/docs/development/gateway/process-management.mdx b/docs/development/gateway/process-management.mdx new file mode 100644 index 0000000..4c49410 --- /dev/null +++ b/docs/development/gateway/process-management.mdx @@ -0,0 +1,202 @@ +--- +title: Gateway Process Management +description: How the DeployStack Gateway manages MCP server processes with persistent background processes, secure credential injection, and enterprise governance +sidebar: Process Management +icon: Cpu +--- + +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Zap, Shield, Monitor, RefreshCw, AlertTriangle, Users } from 'lucide-react'; + +# Gateway Process Management + +The DeployStack Gateway implements sophisticated process management to handle MCP server lifecycles with enterprise-grade security, performance, and governance. Each MCP server runs as a persistent background process with secure credential injection and continuous availability. + +## Architecture Overview + +The Gateway's process management system operates on a **persistent background process** model, similar to Claude Desktop, where all configured MCP server processes are started when the gateway launches and run continuously until shutdown. This approach provides instant tool availability and eliminates the latency associated with process spawning during development workflows. 
+ +## Core Concepts + + + } + title="Persistent Background Processes" + > + All configured MCP servers start with the gateway and run continuously, providing instant tool availability + + + } + title="Secure Credential Injection" + > + API tokens and credentials are injected directly into process environments without developer exposure + + + } + title="Runtime State Management" + > + Comprehensive tracking of running processes with health monitoring and team isolation + + + } + title="Graceful Lifecycle Management" + > + Proper MCP shutdown sequence following protocol specifications for clean termination + + + } + title="State Comparison & Recovery" + > + Compares expected vs actual running processes with automatic recovery mechanisms + + + } + title="Team Context Switching" + > + Seamless switching between teams with complete process lifecycle management + + + +## Process Lifecycle + +### Gateway Startup Phase +When the Gateway starts (`deploystack start`), all configured MCP servers for the selected team are launched simultaneously: + +- **Team Configuration Loading**: Downloads and validates team MCP server configurations +- **Bulk Process Spawning**: Starts all configured MCP servers as background processes +- **Runtime Detection**: Automatic detection of Node.js, Python, Go, or custom runtime requirements +- **Environment Preparation**: Secure injection of team-specific credentials and configuration +- **MCP Protocol Handshake**: Establishes JSON-RPC communication with 30-second timeout for package downloads +- **Runtime State Registration**: Adds all successfully started processes to the runtime state manager + +### Continuous Operation Phase +During normal operation, all MCP servers run continuously in the background: + +- **Persistent Availability**: All tools are immediately available without process spawning delays +- **Request Routing**: Direct routing of tool calls to already-running MCP server processes +- **Health Monitoring**: Continuous monitoring 
of process status, uptime, and responsiveness +- **State Comparison**: Regular comparison of expected vs actual running processes +- **Error Logging**: Proper distinction between informational stderr output and actual errors + +### Team Context Switching +When switching teams, the Gateway performs complete process lifecycle management: + +- **Graceful Shutdown**: Stops all MCP servers for the current team following MCP protocol +- **Configuration Refresh**: Downloads new team's MCP server configurations +- **Process Restart**: Starts all MCP servers for the new team +- **State Synchronization**: Updates runtime state to reflect the new team context + +### Gateway Shutdown Phase +When the Gateway stops (`deploystack stop` or Ctrl+C), processes are terminated gracefully: + +- **MCP Protocol Compliance**: Follows proper MCP shutdown sequence (close stdin → wait → SIGTERM → wait → SIGKILL) +- **Parallel Shutdown**: All processes are stopped concurrently for faster shutdown +- **Resource Cleanup**: Ensures all file descriptors and system resources are properly released +- **State Cleanup**: Clears runtime state and removes process tracking information + +## Security Model + +### Credential Isolation +The Gateway implements a **zero-exposure credential model** where: + +- Credentials are never written to disk in plain text +- Environment variables are injected directly into spawned processes +- No credential access from the developer's shell environment +- Automatic credential rotation when team configurations change + +### Process Isolation +Each MCP server runs in complete isolation with: + +- **Separate Process Space**: No shared memory or resources between MCP servers +- **Independent Environments**: Each process has its own environment variable set +- **Resource Boundaries**: CPU and memory limits to prevent resource exhaustion +- **Network Isolation**: Controlled network access based on server requirements + +## Enterprise Governance + +### Tool-Level Management 
+The Gateway transforms traditional MCP servers into enterprise-manageable tools by presenting each server as: + +- **Enable/Disable Controls**: Administrators can control which MCP servers are available +- **Status Monitoring**: Real-time visibility into process health and performance +- **Usage Analytics**: Tracking of tool usage patterns and resource consumption +- **Access Policies**: Team-based access control enforcement + +### Operational Controls +Enterprise administrators gain operational control through: + +- **Centralized Configuration**: All MCP server configurations managed through the cloud control plane +- **Policy Enforcement**: Automatic enforcement of team-based access policies +- **Audit Logging**: Comprehensive logging of all process management activities +- **Resource Management**: Monitoring and control of system resource usage + +## Performance Optimization + +### Resource Efficiency +The Gateway optimizes resource usage through the persistent background process model: + +- **Continuous Operation**: All processes run continuously, eliminating spawn/cleanup overhead +- **Shared Process Pool**: Multiple tool requests reuse the same persistent MCP server processes +- **Memory Stability**: Consistent memory usage patterns with no spawn/cleanup cycles +- **CPU Optimization**: Direct request routing to running processes minimizes CPU overhead + +### Response Time Optimization +Instant response times are achieved through: + +- **Zero Latency**: Tools are immediately available from already-running processes +- **Parallel Processing**: Concurrent handling of multiple tool requests across persistent processes +- **Persistent Connections**: Maintained stdio connections eliminate connection establishment overhead +- **Cache-as-Manifest**: Cached tool information serves as configuration manifest for instant startup + +## Error Handling and Recovery + +### Failure Detection +The Gateway monitors for various failure scenarios: + +- **Process Crashes**: 
Automatic detection of terminated or crashed processes +- **Communication Failures**: Identification of broken stdio communication channels +- **Timeout Conditions**: Detection of unresponsive processes +- **Resource Exhaustion**: Monitoring for memory or CPU limit violations + +### Recovery Strategies +When failures are detected, the Gateway implements: + +- **Automatic Restart**: Immediate restart of crashed processes with exponential backoff +- **Fallback Mechanisms**: Graceful degradation when processes are unavailable +- **Error Reporting**: Detailed error reporting to developers and administrators +- **Circuit Breaker**: Temporary disabling of problematic processes to prevent cascading failures + +## Integration Points + +The process management system integrates with other Gateway components: + +- **[MCP Configuration Management](/development/gateway/mcp)**: Uses team configurations to determine spawn parameters +- **[Caching System](/development/gateway/caching-system)**: Coordinates with tool discovery and caching mechanisms +- **[Project Structure](/development/gateway/structure)**: Implements the architecture defined in the core modules +- **HTTP Proxy Server**: Provides process information for request routing decisions + +## Monitoring and Observability + +### Process Metrics +The Gateway tracks comprehensive metrics including: + +- **Process Count**: Number of active MCP server processes +- **Resource Usage**: CPU, memory, and file descriptor consumption +- **Request Throughput**: Number of requests processed per process +- **Error Rates**: Frequency and types of process errors +- **Response Times**: Latency metrics for tool requests + +### Health Indicators +Key health indicators monitored include: + +- **Process Responsiveness**: Time to respond to health check requests +- **Memory Growth**: Detection of memory leaks or excessive memory usage +- **Error Patterns**: Identification of recurring error conditions +- **Resource Limits**: Proximity to 
configured resource boundaries + +This process management system ensures that the DeployStack Gateway can reliably handle enterprise workloads while maintaining the security, performance, and governance requirements of modern development teams. diff --git a/docs/development/gateway/security.mdx b/docs/development/gateway/security.mdx new file mode 100644 index 0000000..2148544 --- /dev/null +++ b/docs/development/gateway/security.mdx @@ -0,0 +1,374 @@ +--- +title: Gateway Security +description: Security implementation and best practices for the DeployStack Gateway CLI +sidebar: Security +icon: Lock +--- + +# Gateway Security + +The DeployStack Gateway implements multiple layers of security to protect user credentials, ensure secure communication, and maintain system integrity. This document covers the security architecture and implementation details. + +## Credential Storage Security + +### OS Keychain Integration + +The gateway uses the **Zowe Secrets SDK** for cross-platform secure credential storage, providing native integration with each operating system's secure storage mechanism: + +**Platform-specific storage:** +- **macOS**: Keychain Access using the Security.framework +- **Windows**: Credential Manager using CredWrite/CredRead APIs +- **Linux**: Secret Service API using libsecret + +The keychain integration stores credentials with the service name `deploystack-gateway` and uses the user's email address as the account identifier. 
This approach leverages the operating system's built-in security features including: + +- Hardware-backed encryption where available +- User authentication requirements for access +- Automatic credential isolation between users +- Integration with system security policies + +### Encrypted File Fallback + +When OS keychain access is unavailable or fails, credentials are stored in encrypted files as a secure fallback: + +**Encryption Details:** +- **Algorithm**: AES-256-CBC encryption +- **Key Derivation**: Fixed key with padding (development approach) +- **Initialization Vector**: Random 16-byte IV generated per encryption +- **Storage Format**: `IV:encrypted_data` in hexadecimal encoding + +**File Security:** +- **Location**: `~/.deploystack/credentials.enc` +- **Permissions**: `0o600` (owner read/write only) +- **Directory Permissions**: `0o700` (owner access only) + +### Account Management + +The gateway maintains a secure account tracking system: + +**Account List:** +- **Location**: `~/.deploystack/accounts.json` +- **Content**: Array of user email addresses (no sensitive data) +- **Purpose**: Enables credential discovery from keychain storage +- **Format**: JSON array with most recent accounts first + +**Security Considerations:** +- Contains only email addresses, no tokens or passwords +- Used for keychain credential lookup +- Automatically maintained during login/logout operations +- Cleaned up when credentials are cleared + +## Token Security + +### Access Token Format + +Access tokens use a custom JWT-like format designed for the DeployStack backend: + +**Token Structure:** +``` +<random_token>.<base64_payload> 
+``` + +**Components:** +- **Random Token**: 512-bit cryptographically secure random value +- **Payload**: Base64-encoded JSON containing user info, scopes, and expiration +- **Database Storage**: Argon2 hash of the complete token for verification + +**Security Features:** +- No client-side signature verification required +- Embedded user information reduces database lookups +- Cryptographically secure random component +- Server-side hash verification prevents tampering + +### Token Expiration + +**Access Tokens**: 1 week (604,800 seconds) +- Provides reasonable balance between security and usability +- Reduces frequent re-authentication during development +- Long enough for typical CLI usage patterns +- Short enough to limit exposure if compromised + +**Refresh Tokens**: 30 days +- Enables seamless token renewal +- Longer lifetime for better user experience +- Stored securely alongside access tokens +- Automatically used for token refresh + +### Token Validation + +The gateway implements comprehensive token validation: + +**Local Validation:** +- Checks token expiration with 5-minute buffer +- Validates token format and structure +- Prevents unnecessary API calls with expired tokens + +**Server Validation:** +- Backend verifies token hash using Argon2 +- Checks database expiration timestamps +- Validates user permissions and scopes + +## Network Security + +### HTTPS Enforcement + +The gateway enforces secure communication: + +**Production Requirements:** +- All API communications must use HTTPS +- SSL certificate validation is strictly enforced +- Self-signed certificates are rejected +- Insecure HTTP connections are blocked + +**Development Flexibility:** +- Localhost connections allow HTTP for development +- Self-signed certificates accepted for local testing +- Security warnings displayed for non-production servers +- Clear distinction between development and production modes + +### Request Security + +All API requests include comprehensive security headers: + 
+**Standard Headers:** +- **Authorization**: Bearer token authentication +- **Content-Type**: Proper content type specification +- **User-Agent**: Identifies the CLI client and version + +**Security Measures:** +- Bearer token authentication for all authenticated requests +- Proper content type validation +- Request timeout protection +- Automatic retry logic with exponential backoff + +### Callback Server Security + +The temporary OAuth callback server implements multiple security layers: + +**Network Security:** +- **Binding**: Only accepts connections from localhost/127.0.0.1 +- **Port**: Fixed port 8976 for consistency +- **Protocol**: HTTP (acceptable for localhost) + +**Request Validation:** +- **Path Validation**: Only `/oauth/callback` path is handled +- **Parameter Validation**: Required OAuth parameters are verified +- **State Validation**: CSRF protection through state parameter + +**Lifecycle Management:** +- **Auto-cleanup**: Server automatically shuts down after callback +- **Timeout Protection**: Configurable timeout (default: 5 minutes) +- **Resource Cleanup**: Proper cleanup of server resources + +## OAuth2 Security (PKCE) + +The gateway implements PKCE (Proof Key for Code Exchange) following RFC 7636: + +### Code Verifier Generation + +**Specifications:** +- **Length**: 128 characters (96 random bytes base64url encoded) +- **Entropy**: Cryptographically secure random generation +- **Format**: Base64url encoding without padding +- **Uniqueness**: New verifier generated for each authentication + +### Code Challenge Generation + +**Process:** +- **Input**: Code verifier string +- **Hashing**: SHA256 hash of the verifier +- **Encoding**: Base64url encoding of the hash +- **Method**: Always uses `S256` (SHA256) + +### State Parameter Security + +**Generation:** +- **Length**: 32 random bytes base64url encoded +- **Purpose**: CSRF protection +- **Validation**: Strict comparison with received state +- **Storage**: Temporarily stored during OAuth flow + 
+**PKCE Security Benefits:** +- Prevents authorization code interception attacks +- Eliminates need for client secrets in public clients +- Provides cryptographic proof of authorization request origin +- Protects against malicious applications + +## Error Handling Security + +### Secure Error Messages + +The gateway implements secure error handling principles: + +**User-Facing Messages:** +- Generic error descriptions to avoid information disclosure +- Helpful guidance without revealing system internals +- No exposure of tokens, credentials, or sensitive data +- Clear action items for users to resolve issues + +**Error Categories:** +- **Authentication Errors**: Login and token-related issues +- **Network Errors**: Connectivity and communication problems +- **Storage Errors**: Credential storage and retrieval issues +- **Authorization Errors**: Permission and scope-related problems + +### Timeout Protection + +All network operations include timeout protection: + +**Timeout Types:** +- **OAuth Callback**: 5-minute default timeout for user authorization +- **API Requests**: Reasonable timeouts for backend communication +- **Token Refresh**: Quick timeout for refresh operations +- **Browser Opening**: Timeout for automatic browser launch + +**Security Benefits:** +- Prevents indefinite resource consumption +- Limits exposure time for temporary servers +- Provides clear failure modes +- Enables graceful error recovery + +## File System Security + +### Directory Permissions + +The gateway creates secure directories for credential storage: + +**Directory Structure:** +- **Base Directory**: `~/.deploystack/` +- **Permissions**: `0o700` (owner read/write/execute only) +- **Creation**: Automatic creation with secure permissions +- **Platform Compatibility**: Works across Windows, macOS, and Linux + +### File Permissions + +**Credential Files:** +- **Encrypted Credentials**: `0o600` (owner read/write only) +- **Account List**: `0o644` (owner write, others read - no sensitive 
data) +- **Temporary Files**: Secure permissions and automatic cleanup + +### Secure File Cleanup + +Credential removal includes comprehensive cleanup: + +**Cleanup Process:** +- **Keychain Removal**: Credentials removed from OS keychain +- **File Deletion**: Encrypted files securely deleted +- **Account List**: Account entries removed from tracking +- **Directory Cleanup**: Empty directories removed when appropriate + +**Security Considerations:** +- Multiple cleanup attempts for reliability +- Graceful handling of partial failures +- No sensitive data left in temporary files +- Proper error handling during cleanup + +## Development vs Production Security + +### Environment Detection + +The gateway automatically detects and adapts to different environments: + +**Development Mode Indicators:** +- URLs containing `localhost` +- Non-HTTPS protocols for local servers +- Development-specific configuration options + +**Production Mode Requirements:** +- HTTPS enforcement for all communications +- Strict SSL certificate validation +- Limited error information exposure +- Enhanced security warnings + +### Security Warnings + +The CLI provides appropriate security warnings: + +**Development Warnings:** +- Alerts when connecting to non-production servers +- Warnings about HTTP usage in development +- Reminders about development-only features + +**Production Safeguards:** +- Blocks insecure connections +- Validates server certificates +- Limits debug information exposure + +## Security Best Practices + +### 1. Credential Protection +- Never log credentials or tokens in plain text +- Use OS keychain as primary storage mechanism +- Encrypt fallback storage with strong encryption +- Restrict file permissions to owner-only access +- Implement secure credential cleanup + +### 2. 
Network Security
+- Enforce HTTPS in production environments
+- Validate SSL certificates strictly
+- Use secure headers in all requests
+- Implement proper request timeouts
+- Handle network errors gracefully
+
+### 3. OAuth2 Security
+- Always use PKCE for authorization code flow
+- Validate state parameters to prevent CSRF attacks
+- Use cryptographically secure random values
+- Implement proper token refresh logic
+- Handle authorization errors appropriately
+
+### 4. Error Handling
+- Avoid exposing sensitive data in error messages
+- Log detailed errors for debugging (server-side only)
+- Provide helpful user guidance without revealing internals
+- Implement proper timeout handling
+- Use structured error codes for programmatic handling
+
+### 5. Process Security
+- Exit cleanly after operations complete
+- Clean up temporary resources properly
+- Handle interruption signals gracefully
+- Validate all user inputs
+- Implement proper resource management
+
+For OAuth2 flow details and implementation specifics, see the [Gateway OAuth Guide](/docs/development/gateway/oauth).
+ +## Security Auditing + +### Credential Audit + +**File System Checks:** +- Verify credential directory permissions (`~/.deploystack/`) +- Check encrypted file permissions (`credentials.enc`) +- Validate account list format (`accounts.json`) + +**Keychain Verification:** +- Check for stored credentials in OS keychain +- Verify service name and account identifiers +- Validate keychain access permissions + +### Network Security Audit + +**Connection Monitoring:** +- Monitor HTTPS usage in production +- Verify SSL certificate validation +- Check for secure header usage + +**Certificate Validation:** +- Verify SSL certificate chains +- Check certificate expiration dates +- Validate certificate authority trust + +### Security Monitoring + +**Authentication Events:** +- Monitor login success and failure rates +- Track token refresh patterns +- Identify unusual authentication behavior + +**Error Analysis:** +- Review authentication error patterns +- Monitor network connectivity issues +- Analyze credential storage problems + +The gateway's security implementation follows industry best practices and provides multiple layers of protection for user credentials and system integrity. diff --git a/docs/development/gateway/session-management.mdx b/docs/development/gateway/session-management.mdx new file mode 100644 index 0000000..b90a86a --- /dev/null +++ b/docs/development/gateway/session-management.mdx @@ -0,0 +1,275 @@ +--- +title: Session Management +description: Cryptographically secure session lifecycle management for SSE connections +sidebar: Session Management +icon: Key +--- + +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Key, Clock, Shield, Trash2 } from 'lucide-react'; + +# Session Management + +The DeployStack Gateway implements a robust session management system that provides cryptographically secure session handling for persistent SSE connections while ensuring automatic cleanup and resource management. 
+ +## Architecture Overview + +The session management system consists of two primary components working together to provide secure, persistent connections: + +- **SessionManager**: Handles session lifecycle, validation, and SSE stream management +- **SSEHandler**: Manages Server-Sent Events connections and message routing + +## Core Components + + + } + title="Cryptographic Security" + > + 256-bit entropy session IDs with base64url encoding for maximum security + + + } + title="Lifecycle Management" + > + Automatic session creation, validation, activity tracking, and timeout handling + + + } + title="Connection Validation" + > + Session-bound SSE streams with comprehensive validation and error handling + + + } + title="Automatic Cleanup" + > + Resource cleanup on disconnect, timeout, or error conditions + + + +## Session ID Generation + +### Cryptographic Properties +- **Algorithm**: Node.js `crypto.randomBytes(32)` +- **Entropy**: 256 bits (32 bytes) of cryptographically secure randomness +- **Encoding**: Base64url for URL safety and compatibility +- **Format**: `L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA` + +### Security Features +- **Unpredictability**: Cryptographically secure random number generation +- **Collision Resistance**: 2^256 possible values make collisions virtually impossible +- **URL Safety**: Base64url encoding ensures compatibility in query parameters +- **No Sequential Patterns**: Each session ID is completely independent + +### Validation Process +```typescript +private validateSessionId(sessionId: string): boolean { + if (!sessionId || typeof sessionId !== 'string') return false; + if (sessionId.length < 32) return false; + if (!/^[A-Za-z0-9_-]+$/.test(sessionId)) return false; + return true; +} +``` + +## Session Lifecycle + +### 1. Creation Phase +**Trigger**: SSE connection establishment via `GET /sse` + +**Process:** +1. Generate cryptographically secure session ID +2. Create session object with metadata +3. Associate with SSE stream +4. 
Schedule automatic cleanup timer +5. Send endpoint event to client + +**Session Object:** +```typescript +interface SessionInfo { + id: string; + createdAt: number; + lastActivity: number; + sseStream: ServerResponse; + clientInfo?: { name: string; version: string }; + mcpInitialized: boolean; + requestCount: number; + errorCount: number; +} +``` + +### 2. Active Phase +**Duration**: Until timeout or disconnect + +**Activities:** +- **Activity Tracking**: Updated on every JSON-RPC request +- **Request Counting**: Incremented for each message processed +- **Error Tracking**: Incremented on processing failures +- **Client Info Storage**: MCP client metadata stored during initialization + +### 3. Cleanup Phase +**Triggers:** +- Client disconnect (`close` event) +- Connection error (`error` event) +- Stream finish (`finish` event) +- 30-minute inactivity timeout + +**Process:** +1. Close SSE stream if still open +2. Remove session from active sessions map +3. Log cleanup completion +4. Free associated resources + +## Connection Management + +### SSE Stream Handling +The session manager maintains direct references to SSE streams for efficient message delivery: + +```typescript +sendToSession(sessionId: string, event: { id?: string; event?: string; data: string }): boolean { + const session = this.sessions.get(sessionId); + if (!session || session.sseStream.destroyed) { + return false; + } + + try { + let sseData = ''; + if (event.id) sseData += `id: ${event.id}\n`; + if (event.event) sseData += `event: ${event.event}\n`; + sseData += `data: ${event.data}\n\n`; + + session.sseStream.write(sseData); + return true; + } catch (error) { + this.cleanupSession(sessionId); + return false; + } +} +``` + +### Connection State Tracking +- **Stream Health**: Monitors SSE stream status and handles disconnects +- **Activity Monitoring**: Tracks last activity timestamp for timeout detection +- **Error Handling**: Graceful handling of connection failures and cleanup +- **Resource 
Management**: Prevents memory leaks through automatic cleanup + +## Security Considerations + +### Session Security +- **Unpredictable IDs**: Impossible to guess or enumerate session IDs +- **Time-Limited**: Automatic expiration prevents indefinite access +- **Connection-Bound**: Sessions tied to specific SSE connections +- **Validation**: Comprehensive validation on every request + +### Timeout Management +- **Inactivity Timeout**: 30 minutes of inactivity triggers cleanup +- **Automatic Scheduling**: Cleanup scheduled at session creation +- **Activity Extension**: Timeout reset on each valid request +- **Resource Protection**: Prevents accumulation of stale sessions + +### Error Handling +- **Graceful Degradation**: Connection errors don't crash the system +- **Automatic Recovery**: Failed connections cleaned up automatically +- **Error Isolation**: Session errors don't affect other sessions +- **Logging**: Comprehensive error logging for debugging + +## Performance Optimization + +### Memory Management +- **Efficient Storage**: Sessions stored in Map for O(1) lookup +- **Automatic Cleanup**: Prevents memory leaks through timeout handling +- **Resource Tracking**: Monitors session count and resource usage +- **Garbage Collection**: Proper cleanup enables efficient garbage collection + +### Connection Efficiency +- **Persistent Connections**: SSE streams maintained for duration of session +- **Minimal Overhead**: Lightweight session objects with essential data only +- **Fast Lookup**: Session validation and retrieval optimized for speed +- **Batch Operations**: Efficient handling of multiple concurrent sessions + +## Monitoring and Debugging + +### Session Statistics +The session manager provides comprehensive statistics for monitoring: + +```typescript +getStatus() { + return { + activeCount: this.sessions.size, + sessions: Array.from(this.sessions.values()).map(session => ({ + id: session.id, + createdAt: session.createdAt, + lastActivity: session.lastActivity, 
+ uptime: Date.now() - session.createdAt, + requestCount: session.requestCount, + errorCount: session.errorCount, + clientInfo: session.clientInfo, + mcpInitialized: session.mcpInitialized + })) + }; +} +``` + +### Logging and Observability +- **Session Creation**: Logged with session ID for tracking +- **Activity Updates**: Request and error counts tracked +- **Cleanup Events**: Cleanup reasons and timing logged +- **Error Conditions**: Detailed error logging for troubleshooting + +## Integration Points + +### SSE Handler Integration +The session manager works closely with the SSE handler: + +```typescript +// Session creation during SSE establishment +const sessionId = this.sessionManager.createSession(reply.raw); + +// Message routing through sessions +this.sseHandler.sendMessage(sessionId, response); + +// Error handling via sessions +this.sseHandler.sendError(sessionId, errorResponse); +``` + +### HTTP Proxy Integration +Session validation in the HTTP proxy: + +```typescript +// Session validation on each request +const session = this.sessionManager.getSession(sessionId); +if (!session) { + // Handle invalid session +} + +// Activity tracking +this.sessionManager.updateActivity(sessionId); + +// Error counting +this.sessionManager.incrementErrorCount(sessionId); +``` + +## Best Practices + +### Session Lifecycle +- **Immediate Creation**: Sessions created immediately on SSE connection +- **Activity Tracking**: Update activity on every valid request +- **Graceful Cleanup**: Always clean up resources on session end +- **Error Handling**: Handle all error conditions gracefully + +### Security Practices +- **Validate Always**: Validate session ID on every request +- **Time Limits**: Enforce reasonable session timeouts +- **Resource Limits**: Monitor and limit concurrent sessions if needed +- **Audit Trail**: Log session activities for security monitoring + +### Performance Practices +- **Efficient Lookup**: Use Map for O(1) session lookup +- **Minimal Data**: 
Store only essential session data +- **Cleanup Scheduling**: Schedule cleanup to prevent resource leaks +- **Error Recovery**: Implement robust error recovery mechanisms + +The session management system provides a secure, efficient, and robust foundation for persistent SSE connections while maintaining enterprise-grade security and operational requirements. diff --git a/docs/development/gateway/sse-transport.mdx b/docs/development/gateway/sse-transport.mdx new file mode 100644 index 0000000..64f7ec6 --- /dev/null +++ b/docs/development/gateway/sse-transport.mdx @@ -0,0 +1,219 @@ +--- +title: SSE Transport Implementation +description: Server-Sent Events transport layer for VS Code compatibility and dual-endpoint architecture +sidebar: SSE Transport +icon: Radio +--- + +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Radio, MessageSquare, Shield, Zap } from 'lucide-react'; + +# SSE Transport Implementation + +The DeployStack Gateway implements Server-Sent Events (SSE) transport to provide VS Code compatibility through a clean dual-endpoint architecture. + +## Architecture Overview + +The Gateway uses a **dual-endpoint architecture** for SSE-based communication: + +- **GET /sse**: Establishes SSE connection and returns session endpoint +- **POST /message**: Handles JSON-RPC requests with session context + +## Core Components + + + } + title="SSE Handler" + > + Manages Server-Sent Events connections, event formatting, and message routing + + + } + title="Session Manager" + > + Handles cryptographically secure session lifecycle with automatic cleanup + + + } + title="Dual Endpoints" + > + Supports both SSE and traditional HTTP clients with intelligent routing + + + } + title="Real-time Communication" + > + Persistent connections enable real-time bidirectional communication + + + +## Connection Flow + +### 1. 
SSE Connection Establishment +```http +GET /sse HTTP/1.1 +Accept: text/event-stream +``` + +**Response:** +``` +HTTP/1.1 200 OK +Content-Type: text/event-stream +Cache-Control: no-cache +Connection: keep-alive + +event: endpoint +data: /message?session=L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA +``` + +### 2. Session-Based JSON-RPC +```http +POST /message?session=L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA +Content-Type: application/json + +{ + "jsonrpc": "2.0", + "id": 1, + "method": "initialize", + "params": { + "clientInfo": {"name": "vscode", "version": "1.0.0"}, + "protocolVersion": "2025-03-26" + } +} +``` + +**HTTP Response:** +```json +{"status": "accepted", "messageId": 1} +``` + +**SSE Response:** +``` +id: msg-1753710728979-95czkmmq8 +event: message +data: {"jsonrpc":"2.0","id":1,"result":{"serverInfo":{"name":"deploystack-gateway","version":"1.0.0"},"protocolVersion":"2025-03-26","capabilities":{"tools":{"listChanged":false}}}} +``` + +## Session Management + +### Session ID Generation +- **Algorithm**: Cryptographically secure random bytes (32 bytes = 256 bits) +- **Encoding**: Base64url for URL safety +- **Format**: `L8B-xaw3HBZEftyo-JCrHoGWb_iikRZiwGfp9B71-GA` +- **Validation**: Length and character set validation + +### Session Lifecycle +1. **Creation**: Generated on SSE connection establishment +2. **Validation**: Verified on each JSON-RPC request +3. **Activity Tracking**: Updated on every message +4. **Timeout**: 30-minute inactivity timeout +5. 
**Cleanup**: Automatic resource cleanup on disconnect + +### Security Features +- **Cryptographic Security**: 256-bit entropy prevents session prediction +- **Automatic Expiration**: Sessions expire after 30 minutes of inactivity +- **Connection Validation**: Session tied to specific SSE stream +- **Resource Cleanup**: Automatic cleanup prevents memory leaks + +## Message Routing + +### Supported Methods +The SSE transport handles all standard MCP protocol methods: + +- **initialize**: Gateway initialization with capabilities +- **notifications/initialized**: Client initialization confirmation +- **tools/list**: Returns available MCP servers as toggleable tools +- **tools/call**: Executes MCP server management actions +- **resources/list**: Returns empty resources (handled locally) +- **resources/templates/list**: Returns empty templates (handled locally) +- **prompts/list**: Returns empty prompts (handled locally) + +### Error Handling +Errors are sent via SSE with proper JSON-RPC error format: + +``` +id: err-1753710744580-061x9gi8x +event: error +data: {"jsonrpc":"2.0","error":{"code":-32603,"message":"Internal server error","data":"Server not available"},"id":2} +``` + +## VS Code Integration + +### Expected Client Behavior +1. **Connection**: Client connects to `http://localhost:9095/sse` via SSE +2. **Endpoint Discovery**: Receives session endpoint via `endpoint` event +3. **Initialization**: Sends `initialize` request to session endpoint +4. **Tool Discovery**: Calls `tools/list` to discover available MCP servers +5. 
**Tool Management**: Uses `tools/call` to enable/disable/status MCP servers + +### Configuration +VS Code MCP client configuration: +```json +{ + "mcpServers": { + "deploystack": { + "url": "http://localhost:9095/sse" + } + } +} +``` + +## Performance Considerations + +### Connection Management +- **Keep-Alive**: Persistent SSE connections reduce connection overhead +- **Heartbeat**: Optional heartbeat messages maintain connection health +- **Timeout Handling**: Automatic cleanup prevents resource exhaustion + +### Memory Management +- **Session Cleanup**: Automatic cleanup on disconnect or timeout +- **Stream Management**: Proper SSE stream lifecycle management +- **Error Recovery**: Graceful handling of connection failures + +### Client Detection +The Gateway detects SSE clients based on: +- **Accept Header**: `text/event-stream` indicates SSE client +- **User-Agent**: VS Code, Cursor, or other MCP clients +- **Request Method**: GET for SSE establishment, POST for session-based messaging + +## Implementation Details + +### SSE Event Format +All SSE events follow this structure: +``` +id: +event: +data: + +``` + +### Event Types +- **endpoint**: Session endpoint URL +- **message**: JSON-RPC response +- **error**: JSON-RPC error response +- **notification**: Server notifications + +### Connection Cleanup +Cleanup triggers include: +- Client disconnect (`close` event) +- Connection error (`error` event) +- Stream finish (`finish` event) +- Session timeout (30 minutes) + +## Security Considerations + +### Session Security +- **Unpredictable IDs**: Cryptographically secure generation +- **Time-Limited**: Automatic expiration prevents indefinite access +- **Connection-Bound**: Sessions tied to specific SSE connections + +### Network Security +- **Localhost Only**: Server binds only to localhost interface +- **No External Access**: No exposure to external networks +- **CORS Configuration**: Restricted to authorized origins + +The SSE transport implementation provides a 
robust, secure, and performant foundation for VS Code integration with clean dual-endpoint architecture. diff --git a/docs/development/gateway/structure.mdx b/docs/development/gateway/structure.mdx new file mode 100644 index 0000000..e9be8aa --- /dev/null +++ b/docs/development/gateway/structure.mdx @@ -0,0 +1,160 @@ +--- +title: Gateway Project Structure +description: Directory structure and architecture of the DeployStack Gateway CLI +sidebar: Project Structure +icon: FolderTree +--- + +# Gateway Project Structure + +The DeployStack Gateway is structured as a TypeScript CLI application using Commander.js with a modular architecture designed for maintainability and extensibility. + +## Directory Overview + +```bash +services/gateway/ +├── src/ # Source code +│ ├── index.ts # CLI entry point and command registration +│ ├── commands/ # Command implementations +│ │ ├── index.ts # Command exports +│ │ ├── login.ts # Authentication with cloud.deploystack.io +│ │ ├── logout.ts # Clear local credentials +│ │ ├── start.ts # Start the gateway server +│ │ ├── stop.ts # Stop the gateway server +│ │ ├── status.ts # Show gateway status +│ │ ├── mcp.ts # MCP server management and tool discovery +│ │ └── config.ts # Manage local configuration +│ ├── core/ # Core business logic +│ │ ├── auth/ # Authentication handling +│ │ │ ├── client.ts # OAuth and API client +│ │ │ └── storage.ts # Secure credential storage +│ │ ├── server/ # HTTP proxy server +│ │ │ ├── proxy.ts # Request routing and dual-endpoint logic +│ │ │ ├── session-manager.ts # SSE session lifecycle management +│ │ │ └── sse-handler.ts # Server-Sent Events implementation +│ │ ├── process/ # MCP process management +│ │ │ ├── manager.ts # Process lifecycle and stdio communication +│ │ │ └── runtime-state.ts # In-memory process state tracking +│ │ ├── mcp/ # MCP configuration management +│ │ │ ├── config-service.ts # Team config sync and processing +│ │ │ ├── config-processor.ts # Installation method processing +│ │ │ ├── 
tool-discovery.ts # MCP server tool discovery +│ │ │ ├── tool-cache.ts # Team-aware tool caching system +│ │ │ └── team-context-manager.ts # Team switching with process lifecycle +│ │ └── config/ # Configuration utilities +│ │ └── defaults.ts # Default gateway settings +│ ├── utils/ # Shared utilities +│ │ ├── tool-discovery-manager.ts # Centralized tool discovery and caching +│ │ ├── state-comparator.ts # Compare expected vs actual running processes +│ │ ├── logger.ts # Centralized logging with chalk +│ │ ├── spinner.ts # Progress indicators with ora +│ │ ├── config.ts # Configuration management +│ │ ├── errors.ts # Custom error types +│ │ └── crypto.ts # Encryption utilities +│ └── types/ # TypeScript type definitions +│ ├── index.ts # Main type exports +│ ├── mcp.ts # MCP protocol types +│ └── config.ts # Configuration types +├── bin/ # Executable entry +│ └── gateway.js # Node.js shebang wrapper +├── dist/ # Compiled JavaScript (gitignored) +├── tests/ # Test suite +│ ├── unit/ # Unit tests +│ ├── integration/ # Integration tests +│ └── fixtures/ # Test data +├── scripts/ # Development scripts +│ ├── build.ts # Build script +│ └── release.ts # Release automation +├── .config/ # Default configurations +│ └── defaults.json # Default gateway settings +├── package.json # Dependencies and scripts +├── tsconfig.json # TypeScript configuration +├── tsup.config.ts # Build configuration +├── .env.example # Environment variables template +└── README.md # Gateway-specific documentation +``` + +## Key Design Decisions + +### Modular Architecture +The codebase is organized into distinct modules: +- **Commands**: User-facing CLI commands +- **Core**: Business logic separated by domain +- **Utils**: Reusable utilities and helpers + +### Process Management +The `process/` module handles the complexity of: +- Managing persistent background MCP server processes +- Runtime state tracking and team isolation +- Managing stdio communication with running processes +- Injecting 
environment variables securely at startup +- Graceful process lifecycle management following MCP protocol + +### Security First +- Credentials are never stored in plain text +- All sensitive data is encrypted at rest +- Environment injection happens at runtime only + +### Developer Experience +- Intuitive command structure (`deploystack login`, `deploystack start`, `deploystack mcp`) +- Rich CLI feedback with colors and progress indicators +- Clear error messages with actionable solutions +- MCP server management and tool discovery capabilities + +## Module Responsibilities + +### Commands Layer +Each command file exports a function that registers itself with Commander.js: +```typescript +export function registerLoginCommand(program: Command) { + program + .command('login') + .description('Authenticate with DeployStack cloud') + .action(async () => { + // Implementation + }); +} +``` + +### Core Modules + +**auth/**: Handles OAuth flow and token management +- Secure storage of access tokens +- Automatic token refresh +- Session management + +**server/**: HTTP proxy server with dual transport support +- **proxy.ts**: Dual-endpoint routing (GET /sse for SSE connections, POST /message for session-based JSON-RPC) +- **session-manager.ts**: Cryptographically secure session lifecycle management +- **sse-handler.ts**: Server-Sent Events implementation for VS Code compatibility + +**process/**: MCP server process lifecycle +- Persistent background process management +- Runtime state tracking with team isolation +- Stdio transport implementation for continuous communication +- Graceful lifecycle management following MCP protocol +- Enterprise management layer (MCP servers as toggleable tools) + +**mcp/**: Configuration management and processing +- Team configuration synchronization with cloud control plane +- Raw API data storage and processed config generation +- Secure credential injection and environment variable management +- MCP server tool discovery and capability 
exploration +- Team-aware tool caching system as detailed in [Caching System](/development/gateway/caching-system) +- Installation method processing for correct server spawning + +**utils/**: Shared utilities and centralized services +- **tool-discovery-manager.ts**: Centralized tool discovery eliminating code duplication across commands +- Logging, configuration, and encryption utilities +- Progress indicators and error handling + +**config/**: Configuration utilities and defaults +- Default gateway settings and validation +- Configuration file management +- Environment-specific overrides + +### Build Output +The TypeScript code is compiled to CommonJS for maximum compatibility: +- Source maps for debugging +- Minified for production +- External dependencies preserved diff --git a/docs/development/gateway/teams.mdx b/docs/development/gateway/teams.mdx new file mode 100644 index 0000000..b31e18a --- /dev/null +++ b/docs/development/gateway/teams.mdx @@ -0,0 +1,140 @@ +--- +title: Team Context in Gateway CLI +description: Understanding team-scoped operations and MCP server installations in the DeployStack Gateway CLI +sidebar: Team Context +icon: Users +--- + +# Team Context in Gateway CLI + +The DeployStack Gateway CLI is fundamentally **team-centric**. All MCP server installations and operations are scoped to the currently selected team, reflecting the architectural design where teams serve as isolated workspaces for deployment resources. + +## Team Selection Architecture + +### Secure Storage Location + +Team selection is stored securely alongside authentication credentials using: +- **Primary**: OS keychain (macOS Keychain, Windows Credential Manager, Linux Secret Service) +- **Fallback**: Encrypted file at `~/.deploystack/credentials.enc` + +The selected team information is part of the `StoredCredentials` interface: + +```typescript +interface StoredCredentials { + // ... 
other credential fields + selectedTeam?: { + id: string; // Team ID for API operations + name: string; // Team name for display + }; +} +``` + +### Automatic Default Selection + +When users authenticate via `deploystack login`, the CLI automatically: + +1. Fetches user's teams from `/api/teams/me` +2. Identifies the default team (`is_default: true`) +3. Sets it as the selected team in secure storage +4. Confirms selection to the user + +### Team Switching + +Users can change their active team context using: + +```bash +deploystack teams --switch +``` + +This updates the stored team selection, affecting all subsequent CLI operations. + +## MCP Server Installation Scope + +### Database Architecture + +MCP server installations are stored in the `mcpServerInstallations` table with team-based foreign keys: + +```sql +mcpServerInstallations: + - team_id (FK to teams.id) -- Scopes installation to specific team + - server_id (FK to mcpServers.id) -- References the MCP server definition + - user_environment_variables -- Team-specific encrypted credentials +``` + +### Team-Scoped Operations + +All MCP-related CLI operations operate within the selected team context: + +- **Credential Injection**: Environment variables are team-specific +- **Server Availability**: Only team's installed servers are accessible +- **Configuration Sync**: Gateway downloads only selected team's configurations +- **Process Management**: Spawned MCP processes use team-scoped credentials + +> **MCP Configuration Management**: For detailed information about how the Gateway downloads, processes, and stores MCP server configurations from the backend API, see the [Gateway MCP Configuration documentation](/development/gateway/mcp). 
+ +### Cross-Team Isolation + +The architecture ensures complete isolation between teams: + +- Team A cannot access Team B's MCP server installations +- Credentials are encrypted per team context +- No cross-team data leakage in local processes + +## CLI Implementation Details + +### Storage Methods + +The `CredentialStorage` class provides team selection methods: + +- `updateSelectedTeam(teamId, teamName)` - Updates selected team +- `getSelectedTeam()` - Retrieves current selection +- Team data is persisted with other authentication credentials + +### Team-Aware Commands + +Key commands that depend on team context: + +- `deploystack start` - Starts gateway for selected team's MCP servers +- `deploystack teams` - Shows selection status and switching options +- Future MCP management commands will operate on selected team + +### API Integration + +Team context affects backend communication: + +- All MCP-related API calls include team context +- Configuration sync requests are team-scoped +- Credential retrieval is filtered by team membership + +## Developer Guidelines + +### Working with Team Context + +When developing CLI features that interact with MCP servers: + +1. **Always check team selection** before MCP operations +2. **Use team ID for API calls** (not just team name) +3. **Scope local storage** by team when caching configurations +4. **Validate team access** before exposing functionality + +### Future Considerations + +The team context system is designed to support: + +- Multi-team development workflows +- Team-specific MCP server catalogs +- Role-based access to different tool sets +- Enterprise governance and audit trails + +For complete team management information, see the [Teams documentation](/teams). 
+ +## Error Handling + +CLI commands should gracefully handle team context issues: + +- **No team selected**: Prompt user to select a team +- **Invalid team**: Guide user to available teams +- **Team access revoked**: Require re-authentication +- **Team deleted**: Clear selection and prompt for new team + +This team-centric design ensures that the Gateway CLI operates as a secure, isolated workspace aligned with organizational boundaries while maintaining a smooth developer experience. diff --git a/docs/development/gateway/tech-stack.mdx b/docs/development/gateway/tech-stack.mdx new file mode 100644 index 0000000..1564320 --- /dev/null +++ b/docs/development/gateway/tech-stack.mdx @@ -0,0 +1,264 @@ +--- +title: Gateway Tech Stack +description: CLI framework and npm packages used in the DeployStack Gateway +sidebar: Tech Stack +icon: Package +--- + +# Gateway Tech Stack + +The DeployStack Gateway is built with a carefully selected set of Node.js packages that prioritize developer experience, security, and performance. + +## Core Framework + +### Commander.js +Our CLI framework of choice for building the gateway's command-line interface. + +**Why Commander?** +- Battle-tested by major CLIs (Vue CLI, Create React App) +- Excellent TypeScript support +- Simple yet powerful API +- Extensive documentation and community + +### Fastify +High-performance HTTP server framework for the proxy server implementation. + +**Why Fastify?** +- Excellent TypeScript support with built-in type definitions +- High performance with low overhead +- Rich plugin ecosystem for middleware +- Built-in JSON schema validation +- Comprehensive logging and error handling + +## UI and Feedback + +### Chalk +Terminal string styling for colorful and readable output. 
+ +**Features:** +- Semantic color methods for different message types +- Support for 256 colors and Truecolor +- Auto-detects color support +- Respects NO_COLOR environment variable + +### Ora +Elegant terminal spinners for long-running operations. + +**Use Cases:** +- Authentication flows +- Configuration syncing +- Process spawning feedback +- Network operations + +### CLI-Progress +Customizable progress bars for detailed operation feedback. + +**Features:** +- Single and multi-bar support +- Customizable formats and styles +- Ideal for file operations and bulk processing + +## Interactive Components + +### Inquirer.js +Interactive command line prompts for user input. + +**Prompt Types:** +- Text input for credentials +- Password input with masking +- Selection lists for configuration options +- Confirmations for destructive operations + +## Development Tools + +### TypeScript +Full TypeScript support for type safety and better developer experience. + +**Benefits:** +- Type safety catches errors at compile time +- Better IDE support with autocomplete +- Self-documenting code through types +- Easier refactoring + +### tsx +Run TypeScript files directly without compilation during development. + +### Build Tool - tsup +Fast TypeScript bundler powered by esbuild. + +**Why tsup?** +- Lightning fast builds using esbuild +- Zero config with sensible defaults +- Built-in TypeScript support +- Generates CommonJS and ESM outputs + +**Configuration Example:** +```typescript +export default defineConfig({ + entry: ['src/index.ts'], + format: ['cjs'], + target: 'node16', + clean: true, + sourcemap: true, +}); +``` + +## Utility Libraries + +### File System Operations + +**fs-extra** +Enhanced file system module with promise support and extra methods. 
+- Includes all standard fs methods +- Adds useful methods like `copy`, `remove`, `ensureDir` +- Promise-based API for cleaner async code +- Essential for team-aware tool caching system + +**glob** +File pattern matching using shell-style wildcards. +- Find files matching patterns like `*.ts` or `src/**/*.js` +- Essential for batch operations + +### Process Management + +**execa** +Better child process execution for spawning MCP servers. +- Improved error handling +- Promise-based interface +- Better Windows support +- Automatic escaping of arguments + +**ps-tree** +Process tree management for proper cleanup. +- Find all child processes of a parent +- Ensure clean shutdown of spawned MCP servers + +### Configuration + +**cosmiconfig** +Flexible configuration file loader. +- Searches for config in multiple formats (.json, .yml, .js) +- Supports `.deploystackrc`, `deploystack.config.js`, package.json +- Follows common patterns used by ESLint, Prettier, etc. + +**dotenv** +Environment variable loading from .env files. +- Load configuration from `.env` files +- Support for different environments (.env.local, .env.production) + +### Security + +**keytar** +Native OS keychain integration for secure credential storage. +- macOS: Keychain Access +- Windows: Credential Manager +- Linux: Secret Service API +- No plain text passwords on disk + +**crypto-js** +Additional encryption for sensitive data. +- AES encryption for config files +- Secure hashing for verification + +**crypto (Node.js built-in)** +Native cryptographic functionality for session management. +- Cryptographically secure random bytes generation +- Session ID generation with 256-bit entropy +- Base64url encoding for URL-safe session identifiers + +### Networking + +**axios** +Feature-rich HTTP client for cloud API communication. +- Interceptors for auth token injection +- Automatic retry logic +- Request/response transformation + +**http-proxy** +HTTP proxy for routing MCP requests to appropriate servers. 
+- Route requests based on MCP server name +- Inject authentication headers +- Handle stdio-to-HTTP translation + +## Testing Stack + +**vitest** +Fast unit testing framework with native TypeScript support. +- Compatible with Jest API +- Built-in TypeScript support +- Extremely fast execution + +**supertest** +HTTP assertion library for testing the proxy server. +- Test HTTP endpoints +- Assert response status, headers, and body +- Works seamlessly with vitest + +**msw (Mock Service Worker)** +API mocking for integration tests. +- Mock cloud API responses +- Test error scenarios +- Intercept HTTP requests + +## Why This Stack? + +### 1. **Developer Experience** +- Commander provides intuitive command structure +- Chalk + Ora + CLI-Progress create rich, informative output +- TypeScript ensures type safety and better IDE support + +### 2. **Security First** +- Keytar integrates with OS keychains (macOS Keychain, Windows Credential Manager, Linux Secret Service) +- Crypto-js for additional encryption layers +- No plain text credential storage + +### 3. **Performance** +- tsup/esbuild for fast builds +- Minimal dependencies for quick startup +- Lazy loading of heavy operations + +### 4. **Cross-Platform** +- All packages support Windows, macOS, and Linux +- Platform-specific features handled gracefully + +### 5. **Enterprise Ready** +- Comprehensive error handling +- Detailed logging capabilities +- Extensible architecture + +## Installation + +All dependencies are managed through npm: + +```bash +cd services/gateway +npm install +``` + +## Development Workflow + +```bash +# Development with hot reload +npm run dev + +# Run TypeScript directly +npm run start:dev + +# Build for production +npm run build + +# Run tests +npm test +``` + +## Package Selection Criteria + +When adding new packages, we consider: + +1. **Security**: Regular updates, no known vulnerabilities +2. **Maintenance**: Active development, responsive maintainers +3. 
**Size**: Minimal impact on CLI startup time +4. **Compatibility**: Works across all target platforms +5. **TypeScript**: First-class TypeScript support preferred + +This tech stack provides a solid foundation for building a secure, performant, and user-friendly CLI that meets enterprise requirements while maintaining excellent developer experience. diff --git a/docs/development/gateway/testing.mdx b/docs/development/gateway/testing.mdx new file mode 100644 index 0000000..ef12d8b --- /dev/null +++ b/docs/development/gateway/testing.mdx @@ -0,0 +1,110 @@ +--- +title: Gateway Testing +description: Testing commands and workflows for the DeployStack Gateway +sidebar: Testing +icon: TestTube +--- + +# Gateway Testing + +The DeployStack Gateway includes testing infrastructure for ensuring reliability and quality of the CLI application. + +## Test Commands + +### Unit Tests +```bash +npm run test:unit +``` +Currently displays a placeholder message as tests are not yet implemented. + +### Linting +```bash +npm run lint +``` +Runs ESLint with automatic fixing of common issues. Essential for maintaining code quality. + +### Build Verification +```bash +npm run build +``` +Compiles TypeScript to JavaScript and verifies the build process. + +## Development Workflow + +### Local Development +```bash +npm run dev +``` +Starts the gateway in development mode with hot reload using `ts-node-dev`. + +### Manual Testing +```bash +npm run link +``` +Links the local gateway for testing CLI commands globally. + +After linking, test commands directly: +```bash +deploystack version +deploystack status +deploystack --help +``` + +## Release Testing + +### Pre-release Checks +```bash +npm run release +``` +Runs linting checks before creating a release through `release-it`. 
+ +### CI/CD Testing +The GitHub Actions workflow automatically runs: +- Build verification +- Linting checks +- Unit tests (when implemented) + +## Testing Strategy + +### CLI-Specific Testing +- **Command validation**: Ensure all commands parse correctly +- **Output formatting**: Verify chalk styling and user messages +- **Error handling**: Test failure scenarios and exit codes +- **Cross-platform**: Validate behavior on Windows, macOS, and Linux + +### Integration Points +- **Authentication flows**: Test login/logout workflows +- **Configuration management**: Verify config file operations +- **Process management**: Test MCP server spawning and cleanup +- **Proxy functionality**: Validate HTTP proxy routing + +## Future Testing Implementation + +The gateway will include comprehensive testing using: +- **vitest** for unit testing +- **supertest** for HTTP endpoint testing +- **msw** for API mocking +- Cross-platform testing in CI/CD + +## Development Tips + +### Quick Validation +```bash +# Check command structure +deploystack --help + +# Verify version info +deploystack version + +# Test error handling +deploystack invalid-command +``` + +### Build and Test Cycle +```bash +npm run lint # Fix code style issues +npm run build # Verify compilation +npm run link # Test locally +``` + +This testing approach ensures the gateway maintains high quality while remaining focused on the essential CLI functionality. \ No newline at end of file diff --git a/docs/development/index.mdx b/docs/development/index.mdx new file mode 100644 index 0000000..5af8b9e --- /dev/null +++ b/docs/development/index.mdx @@ -0,0 +1,175 @@ +--- +title: Development Guide +description: Complete development documentation for DeployStack - covering frontend, backend, and contribution guidelines for the MCP server deployment platform. 
+icon: FileCode +--- + +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Code2, Server, GitBranch, Users, Shield } from 'lucide-react'; + +# DeployStack Development + +Welcome to the DeployStack development documentation! DeployStack is a comprehensive enterprise platform for deploying and managing Model Context Protocol (MCP) servers, featuring a cloud control plane, local gateway proxy, and modern web interface for team-based MCP server orchestration. + +## Architecture Overview + +DeployStack implements a sophisticated Control Plane / Data Plane architecture for enterprise MCP server management: + +- **Frontend**: Vue 3 + TypeScript web application providing the management interface for MCP server deployments +- **Backend**: Fastify-based cloud control plane handling authentication, team management, and configuration distribution +- **Gateway**: Local secure proxy that runs on developer machines, managing MCP server processes and credential injection +- **Shared**: Common utilities and TypeScript types used across all services +- **Dual Transport**: Supports both stdio (CLI tools) and SSE (VS Code) protocols for maximum compatibility + +## Development Areas + + + } + href="/development/frontend" + title="Frontend Development" + > + Vue 3 web application with TypeScript, Vite, and shadcn-vue components. Direct fetch API patterns, SFC components, and internationalization. + + + } + href="/development/backend" + title="Backend Development" + > + Fastify cloud control plane with Drizzle ORM, plugin architecture, role-based access control, and OpenAPI documentation generation. + + + } + href="/development/gateway" + title="Gateway Development" + > + Local secure proxy managing MCP server processes, credential injection, dual transport protocols (stdio/SSE), and team-based access control. 
+ + + +## Getting Started + +### Prerequisites + +- Node.js 18 or higher +- npm 8 or higher +- Git for version control +- DeployStack account at [cloud.deploystack.io](https://cloud.deploystack.io) (for gateway development) + +### Quick Setup + +```bash +# Clone the repository +git clone https://github.com/deploystackio/deploystack.git +cd deploystack + +# Install dependencies for all services +cd services/frontend && npm install +cd ../backend && npm install +cd ../gateway && npm install + +# Start development servers (in separate terminals) +# Terminal 1 - Backend +cd services/backend && npm run dev # http://localhost:3000 + +# Terminal 2 - Frontend +cd services/frontend && npm run dev # http://localhost:5173 + +# Terminal 3 - Gateway (optional, for local MCP testing) +cd services/gateway && npm run dev # http://localhost:9095 +``` + +## Development Workflow + +1. **Choose Your Service**: Select frontend, backend, or gateway based on your contribution area +2. **Set Up Environment**: Follow the specific setup guides for your chosen service +3. **Understand Architecture**: Review how services interact (Frontend ↔ Backend ↔ Gateway) +4. **Make Changes**: Implement features following established patterns (Vue SFC for frontend, plugins for backend, process management for gateway) +5. **Test**: Run comprehensive test suites for your service +6. 
**Submit**: Create pull requests following our contribution guidelines + +## Project Structure + +```bash +deploystack/ +├── services/ +│ ├── frontend/ # Vue 3 frontend application +│ │ ├── src/ +│ │ ├── public/ +│ │ └── package.json +│ ├── backend/ # Fastify backend API +│ │ ├── src/ +│ │ ├── plugins/ +│ │ └── package.json +│ ├── gateway/ # API Gateway service +│ │ ├── src/ +│ │ ├── config/ +│ │ └── package.json +│ └── shared/ # Shared utilities and types +│ └── package.json +├── docs/ # Documentation +└── docker-compose.yml # Development environment +``` + +## Key Technologies + +### Frontend Stack +- **Vue 3** with Composition API and Single File Components (SFC) +- **TypeScript** for type safety throughout the application +- **Vite** for fast HMR and optimized builds +- **TailwindCSS** with shadcn-vue component library +- **Direct fetch()** API calls (no axios or API client libraries) +- **Vue I18n** for multi-language support + +### Backend Stack +- **Fastify** for high-performance cloud control plane +- **TypeScript** with full type safety +- **Drizzle ORM** supporting SQLite and PostgreSQL +- **Zod** validation with automatic OpenAPI generation +- **Plugin System** with isolated routes (`/api/plugin/<plugin-id>/`) +- **Role-Based Access Control** with session management + +### Gateway Stack +- **Node.js** process management runtime +- **Dual Transport** stdio for CLI tools, SSE for VS Code +- **Secure Credential Injection** without developer exposure +- **Process Manager** for persistent MCP server processes +- **Session Management** with cryptographic security +- **Team-Based Caching** for instant startup and tool discovery + +## Development Philosophy + +### Enterprise MCP Management +DeployStack provides enterprise-grade MCP server orchestration through: + +- **Control Plane Architecture**: Cloud-based configuration management with local gateway execution +- **Security-First Design**: Credential injection without exposure, team-based access control +- **Universal
Compatibility**: Supports MCP servers in any language (Node.js, Python, Go, Rust) +- **Developer Experience**: Seamless integration with VS Code, CLI tools, and development workflows +- **Process Persistence**: MCP servers run as managed background services with automatic lifecycle management + +### Code Quality +- **Type Safety**: TypeScript throughout the stack +- **Testing**: Comprehensive test coverage +- **Documentation**: Clear, up-to-date documentation +- **Security**: Built-in security best practices + +## Contributing + +We welcome contributions to DeployStack! Key areas include: + +- **Frontend**: Vue components, UI/UX improvements, new management features +- **Backend**: API endpoints, plugin development, database optimizations +- **Gateway**: Process management, transport protocols, credential handling +- **Documentation**: Guides, examples, API documentation +- **MCP Servers**: Support for new MCP server types and configurations +- **Security**: Enhanced credential management, access control improvements + +## Community + +- **GitHub**: [deploystackio/deploystack](https://github.com/deploystackio/deploystack) +- **Issues**: Report bugs and request features + +For detailed development guides, choose your area of interest from the cards above. Each section contains comprehensive documentation for getting started, best practices, and advanced topics. 
diff --git a/docs/development/meta.json b/docs/development/meta.json new file mode 100644 index 0000000..e7b4ccb --- /dev/null +++ b/docs/development/meta.json @@ -0,0 +1,12 @@ +{ + "title": "Development", + "description": "Development documentation for DeployStack", + "icon": "Code", + "pages": [ + "index", + "---Sections---", + "frontend", + "backend", + "gateway" + ] +} \ No newline at end of file diff --git a/docs/docker-deployment/application-logo-configuration.mdx b/docs/docker-deployment/application-logo-configuration.mdx deleted file mode 100644 index 11bbcd8..0000000 --- a/docs/docker-deployment/application-logo-configuration.mdx +++ /dev/null @@ -1,60 +0,0 @@ ---- -title: Application Logo Configuration -description: DeployStack logo configuration guide. Add a custom logo to your application with automatic WebP conversion, CDN delivery, and square format optimization. ---- - -# Application Logo Configuration - -Add a custom logo to make your application stand out in the DeployStack catalog. Your logo will be automatically optimized and served through our CDN for the best performance. - -## Adding Your Logo - -Configure your logo in `.deploystack/config.yml` - [DeployStack Configuration File Reference](/docker-deployment/deploystack-config-file): - -```yaml -application: - logo: "https://example.com/path/to/logo.png" -``` - -## Logo Requirements - -### Supported Formats - -- PNG (`.png`) -- JPEG (`.jpg`, `.jpeg`) -- WebP (`.webp`) -- SVG (`.svg`) - -### Size Guidelines - -- Maximum input: 2 MB. -- Final output will be: - - Square format (1:1 aspect ratio) - - Maximum 500x500 pixels -- Input images will be: - - Resized if larger than 500px in either dimension - - Centered and cropped to maintain 1:1 aspect ratio - - Padded with transparency if needed to achieve square format - -## Image Processing - -When you configure a logo, DeployStack automatically: - -1. Downloads your original image -2. Optimizes it for web delivery -3. Converts it to WebP format -4. 
Stores it on our CDN -5. Serves it through our global CDN network - -### Optimization Process - -- Images larger than 500px in either dimension are resized -- Conversion to WebP for optimal compression -- Automatic quality optimization for web delivery - -## Notes - -- The logo configuration is optional -- You can update your logo at any time by modifying the config file -- Logo changes are processed only when made on the default branch -- Previous logo versions are automatically cleaned up diff --git a/docs/docker-deployment/deploystack-config-file.mdx b/docs/docker-deployment/deploystack-config-file.mdx deleted file mode 100644 index 5623309..0000000 --- a/docs/docker-deployment/deploystack-config-file.mdx +++ /dev/null @@ -1,259 +0,0 @@ ---- -title: DeployStack Config File Reference -description: Documentation for DeployStack's config.yml schema. Customize your application's presentation with automatic IDE validation and flexible repository metadata overrides. ---- - -# DeployStack Configuration File Reference - -The `.deploystack/config.yml` file allows you to customize how your application appears in the DeployStack catalog and deployment interfaces. This configuration file supports automatic validation in popular IDEs through SchemaStore integration. 
- -## Configuration File Location - -Create a `config.yml` file in your repository's `.deploystack` directory: - -```bash -your-repository/ -├── .deploystack/ -│ ├── config.yml -│ └── docker-compose.yml (example, or docker-run.txt) -└── README.md -``` - -## Basic Configuration - -Here's a minimal configuration example: - -```yaml -application: - name: "My Application" - description: "A scalable web application with Redis caching" - logo: "https://example.com/path/to/logo.png" -``` - -## Configuration Options - -### Application Settings - -When you submit a repository to DeployStack, we automatically use: - -- Repository name as the application name -- Repository description as the application description - -You can override these values using the `config.yml` (only on your main branch) file: - -| Property | Type | Description | Constraints | -|----------|------|-------------|-------------| -| `mappings` | Array | Defines relationships between services for connection configuration | Required | -| `mappings[].fromService` | String | Service that needs to connect to another service | Required | -| `mappings[].toService` | String | Service being connected to | Required | -| `mappings[].environmentVariables` | Array of Strings | Environment variable names that reference the target service | Required | -| `mappings[].property` | String | Type of connection property to reference (e.g., 'connectionString', 'hostport') | Optional, defaults to 'hostport' | - -The override process follows this order: - -1. DeployStack first uses your repository name and description -2. If present, values in `.deploystack/config.yml` override the repository metadata - -### Branch Deployment Settings - -Before configuring multiple branch deployments, ensure you have installed the [DeployStack Repository Sync GitHub App](/docker-deployment/github-application), as it's required for branch monitoring and template updates. 
- -You can configure multiple branch deployments using the `deployment.branches` section: - -| Property | Type | Description | Constraints | -|----------|------|-------------|-------------| -| `label` | String | Display name for the branch | Maximum 20 characters | -| `description` | String | Explain the branch's purpose or version | Maximum 100 characters | -| `active` | Boolean | Whether this branch is available for deployment | Optional, defaults to true | -| `priority` | Number | Order in which branches appear (lower numbers first) | Minimum value: 1 | -| `exclude_providers` | Array | Optional list of cloud providers to exclude from template generation for this branch | Values must be valid provider codes: "aws", "rnd", "dop" | - -The default branch always has `priority: 0` and appears first in the deployment options, regardless of other branch priorities. - -Example configuration for multiple branches: - -```yaml -application: - name: "My Application" - description: "A scalable web application" - -deployment: - branches: - v2: - label: "Beta (v2.x)" - description: "Preview of upcoming v2.x release" - priority: 1 - exclude_providers: - - "aws" # Exclude AWS CloudFormation for this branch - v3: - label: "Alpha (v3.x)" - description: "Early preview of v3.x" - priority: 2 -``` - -Each branch configuration allows you to: - -- Provide a user-friendly label for the version -- Include a description explaining the branch's purpose -- Control deployment availability with the `active` flag -- Set display order using `priority` - -When multiple branches are configured: - -- DeployStack generates separate deployment templates for each branch -- Users can choose which version to deploy -- The GitHub App monitors all configured branches for updates -- The default branch is always listed first with implicit `priority: 0` - -This is especially useful for projects that maintain multiple active versions simultaneously, such as stable and beta releases. 
- -The optional `exclude_providers` array allows you to specify which cloud providers should be excluded from template generation for particular branches. This is useful when certain features in a branch version may not be compatible with specific cloud providers. Valid provider codes are: - -Please check our [current supported provider list here](/docker-to-iac/parser/index). - -For example, if your beta version uses features only supported in DigitalOcean, you might exclude the other providers: - -```yaml -v2-beta: - label: "Beta" - description: "Beta version with DigitalOcean-specific features" - exclude_providers: - - "aws" - - "rnd" -``` - -If no providers are excluded, templates will be generated for all supported cloud providers. - -### Service Connections - -You can configure service-to-service communication for multi-container applications using the `serviceConnections` property within each branch configuration. This feature is particularly useful for applications where services need to communicate with each other (e.g., web apps connecting to databases). 
- -| Property | Type | Description | Constraints | -|----------|------|-------------|-------------| -| `mappings` | Array | Defines relationships between services for connection configuration | Required | -| `mappings[].fromService` | String | Service that needs to connect to another service | Required | -| `mappings[].toService` | String | Service being connected to | Required | -| `mappings[].environmentVariables` | Array of Strings | Environment variable names that reference the target service | Required | - -Example configuration for service connections: - -```yaml -deployment: - branches: - main: - label: "Production" - description: "Production release" - serviceConnections: - mappings: - - fromService: "app" - toService: "db" - environmentVariables: - - "DATABASE_HOST" - - "DATABASE_URL" - property: "connectionString" - - fromService: "frontend" - toService: "api" - environmentVariables: - - "API_URL" - property: "hostport" -``` - -This configuration tells DeployStack how to properly configure communication between: - -- The "app" service and the "db" service through the DATABASE_HOST and DATABASE_URL environment variables -- The "frontend" service and the "api" service through the API_URL environment variable - -When templates are generated, DeployStack will transform these environment variables according to each cloud provider's specific service discovery mechanism: - -- For Render.com: Uses Blueprint's `fromService` syntax -- For DigitalOcean App Platform: Uses direct service name references - -For example, if your docker-compose.yml contains: - -```yaml -services: - app: - image: node:alpine - environment: - DATABASE_HOST: db - db: - image: mariadb:latest -``` - -The generated Render.com template would transform DATABASE_HOST to use their service discovery syntax: - -```yaml -services: - - name: app - # ...other configuration... 
- envVars: - - key: DATABASE_HOST - fromService: - name: db - type: pserv - property: hostport -``` - -## Schema Validation - -The configuration file is automatically validated against our JSON Schema when using supported IDEs (VS Code, IntelliJ, etc.). The schema is available at: - -```bash -https://cdnx.deploystack.io/schema/config.yml.json -``` - -## Notes - -- Changes to the configuration file are only processed when made on your repository's default branch -- The logo URL must be accessible and point to a valid image file -- All configuration properties are optional, but providing them improves your application's visibility in the DeployStack catalog - -## Usage Examples - -### Override Repository Metadata - -```yaml -application: - name: "Redis Cache Manager" # Overrides repository name - description: "A more detailed description that better explains your application" # Overrides repository description - logo: "https://example.com/logos/redis-manager.png" -``` - -### Configure Multiple Branch Deployments with Service Connections - -```yaml -deployment: - branches: - stable: - label: "Stable" - description: "Production-ready version" - priority: 1 - serviceConnections: - mappings: - - fromService: "web" - toService: "api" - environmentVariables: - - "API_ENDPOINT" - - fromService: "api" - toService: "db" - environmentVariables: - - "DB_HOST" - - "DB_CONNECTION" - - beta: - label: "Beta" - description: "Preview of upcoming features" - priority: 2 - exclude_providers: - - "aws" # Beta version not supported on AWS - serviceConnections: - mappings: - - fromService: "web" - toService: "api" - environmentVariables: - - "API_ENDPOINT" -``` - -### Minimal Configuration example for logo update - -Please visit our [Application Logo Configuration](/docker-deployment/application-logo-configuration) page. 
diff --git a/docs/docker-deployment/deploystack-configuration-directory.mdx b/docs/docker-deployment/deploystack-configuration-directory.mdx deleted file mode 100644 index 7cd58b9..0000000 --- a/docs/docker-deployment/deploystack-configuration-directory.mdx +++ /dev/null @@ -1,71 +0,0 @@ ---- -title: .deploystack Directory Reference -description: Technical guide for setting up the .deploystack directory to manage Infrastructure as Code template generation and updates across your repository. ---- - -# .deploystack Directory Reference - -The `.deploystack` directory in your repository contains configuration files that DeployStack uses to generate and maintain your Infrastructure as Code templates. Creating this repo allows you to enable the [lifecycle of IaC](/docker-deployment/iac-lifecycle). The deploystack configurations repo only makes sense if you also [install DeployStack GitHub app](/docker-deployment/github-application). Otherwise, changes to DeployStack backend will not be recognized. - - -`.deploystack` directory is optional. You don't need to create it to submit your repository to deploystack.io. - - -## Directory Structure - -```bash -.deploystack/ -├── docker-compose.yml # Docker Compose configuration -├── docker-run.txt # Docker run command -├── env # Environment variables (optional) -``` - -## Configuration Files - -### DeployStack Configuration File - -Please read more at [DeployStack Configuration File Reference](/docker-deployment/deploystack-config-file). 
- -### Docker Configuration - -Choose one of the following: - -- `docker-compose.yml` - Standard Docker Compose configuration -- `docker-run.txt` - Single Docker run command - -Example `docker-compose.yml`: - -```yaml -version: '3' -services: - web: - image: nginx:alpine - ports: - - "80:80" -``` - -Example `docker-run.txt`: - -```bash -docker run -d -p 80:80 nginx:alpine -``` - -### Environment Variables - -Please read more from our [environment variables](/docker-deployment/docker-environment-variables) page. - -## Automatic Updates - -When the [DeployStack GitHub App](/docker-deployment/github-application) is installed: - -1. Changes to specific (`docker-compose.yml` & `docker-run.txt`) file in `.deploystack/` trigger template updates -2. Updates only process when changes occur on the default branch -3. New templates are generated and stored in the [deploy-templates](https://github.com/deploystackio/deploy-templates) repository - -## Important Notes - -- The `.deploystack` directory is **optional** -- Without this directory, automatic template updates are **not** available -- You can add the directory and install the [DeployStack GitHub Sync App](/docker-deployment/github-application) at any time -- [Environment variables](/docker-deployment/docker-environment-variables) and [DeployStack config](/docker-deployment/deploystack-config-file) are optional components -- Only one Docker configuration file should be used (either compose or run) diff --git a/docs/docker-deployment/docker-compose-requirements.mdx b/docs/docker-deployment/docker-compose-requirements.mdx deleted file mode 100644 index 141ce64..0000000 --- a/docs/docker-deployment/docker-compose-requirements.mdx +++ /dev/null @@ -1,102 +0,0 @@ ---- -title: Docker Compose Requirements -description: Technical requirements for using Docker Compose with DeployStack's cloud deployment automation. Includes supported properties, registry options, and validation rules. 
---- - -# Docker Compose Requirements - -DeployStack is designed to work with Docker Compose files that meet specific requirements. This page outlines what we support and what limitations you need to be aware of. - -## Core Requirements - -Your `docker-compose.yml` file must: - -1. Use pre-built Docker images -2. Reference public images from Docker Hub or another registries -> check [Supported Registries](/docker-to-iac/supported-registries) -3. Be a valid Docker Compose file (version 3 and above) - -Your docker-compose file does not necessarily have to be called `docker-compose.yml` and does not have to be located in the root directory. You can rename your docker compose file and store it in any sub directory. - -If your docker compose file is not located in the root directory and has not `docker-compose.yml` as the filename, you must submit the full path name to us by using the submit form [deploystack.io/submit](https://deploystack.io/submit) i.e.: `https://github.com/vuejs/vitepress/tree/main/deployment/docker-compose.yaml`. - -## Image Requirements - -### Must Use Pre-built Images - -Your services must specify the `image` property. For example: - -```yaml title="docker-compose.yml" -# ✅ Supported -services: - app: - image: nginx:latest - ports: - - "80:80" -``` - -### Build Instructions Not Supported - -We do not support services that use the `build` directive: - -```yaml title="docker-compose.yml" -# ❌ Not Supported -services: - app: - build: - context: ./build/app - dockerfile: Dockerfile -``` - -## Why These Requirements? - -The infrastructure templates we generate require specific, immutable container images to ensure consistent deployments. Cloud providers need to know exactly which image to pull, which is why we require pre-built images. - -## Supported Docker Compose Properties - -We currently support these Docker Compose properties -> please check [Supported Docker Compose Variables](/docker-to-iac/supported-docker-compose-variables). 
- -### Kubernetes/Helm - -When generating Helm charts for Kubernetes: - -- Database services (MySQL, PostgreSQL, Redis, etc.) are converted to Bitnami Helm chart dependencies -- Environment variables are split between ConfigMaps (regular variables) and Secrets (sensitive data) -- Each service in your Docker Compose becomes a separate Deployment and Service -- Volume mounts are supported and configured as needed - -This allows for better security practices and easier management of your application on Kubernetes. - -## Multiple Services Support - -DeployStack can handle Docker Compose files with multiple services, but support varies by cloud provider: - -- Some providers support deploying all services at once -- Others will only deploy the first service in your compose file -- Kubernetes (Helm) supports multi-service deployments with each service becoming a separate Deployment - -Check the specific [Multi Services Support](/docker-to-iac/multi-services-support) for details about multi-service support. - -## Working with Private Images - -Currently, DeployStack only supports public images from Docker Hub. If you need to use private images: - -1. Make your images public on Docker Hub or [other supported registries](/docker-to-iac/supported-registries) -2. Update your docker-compose.yml to reference the public images -3. Submit your repository to DeployStack - -## Environment Variables - -Please read more from our [environment variables](/docker-deployment/docker-environment-variables) page. - -## Validation - -When you submit your repository, we perform these checks: - -1. Valid Docker Compose syntax -2. Presence of required `image` property -3. 
Absence of unsupported features - -## Next Steps - -- See how [One-Click Deploy](/docker-deployment/one-click-deploy) works -- Check the [Troubleshooting](/docker-deployment/troubleshooting) guide if you run into issues diff --git a/docs/docker-deployment/docker-environment-variables.mdx b/docs/docker-deployment/docker-environment-variables.mdx deleted file mode 100644 index 716b79b..0000000 --- a/docs/docker-deployment/docker-environment-variables.mdx +++ /dev/null @@ -1,113 +0,0 @@ ---- -title: Environment Variables in DeployStack -description: Learn how to manage environment variables in DeployStack using the .deploystack/env file. Support for Docker Compose, Docker run commands, and default values. -sidebar: Environment Variables ---- - -# Environment Variables in DeployStack - -DeployStack supports environment variables for your Docker configurations through the `.deploystack/env` file. This allows you to manage configuration values separately from your Docker files and maintain consistency across deployments. 
- -## Adding Environment Variables - -Create an `env` file in your `.deploystack` directory: - -```bash -your-repository/ -├── .deploystack/ -│ ├── env -└── README.md -``` - -Your `env` file should follow the standard environment file format: - -```bash -# .deploystack/env -DB_USERNAME=myuser -DB_PASSWORD=mysecretpassword -DB_DATABASE=mydatabase -``` - -## Using Environment Variables - -### In Docker Compose - -Reference environment variables in your `docker-compose.yml` using the `${VARIABLE_NAME}` syntax: - -```yaml -services: - db: - image: mariadb:11.2 - environment: - MYSQL_ROOT_PASSWORD: ${DB_PASSWORD} - MYSQL_USER: ${DB_USERNAME} - MYSQL_DATABASE: ${DB_DATABASE} -``` - -### In Docker Run Commands - -For `docker-run.txt`, use the same variable syntax: - -```bash -docker run -d \ - -e MYSQL_ROOT_PASSWORD=${DB_PASSWORD} \ - -e MYSQL_USER=${DB_USERNAME} \ - -e MYSQL_DATABASE=${DB_DATABASE} \ - mariadb:11.2 -``` - -## Default Values - -DeployStack supports Docker's default value syntax for environment variables. This provides fallback values when variables are not defined. - -### Syntax - -Use `${VARIABLE:-default}` where: - -- `VARIABLE` is your environment variable name -- `default` is the fallback value - -### Examples - -```yaml -# docker-compose.yml -services: - web: - image: nginx:alpine - environment: - PORT: ${PORT:-8080} - NODE_ENV: ${NODE_ENV:-development} -``` - -```bash -# docker-run.txt -docker run -d \ - -e PORT=${PORT:-8080} \ - -e NODE_ENV=${NODE_ENV:-development} \ - nginx:alpine -``` - -## Environment Variable Processing - -When DeployStack processes your repository: - -1. Variables defined in `.deploystack/env` are read -2. These values replace matching `${VARIABLE}` placeholders -3. 
For undefined variables: - - If a default value is specified (`${VARIABLE:-default}`), it's used - - Otherwise, an empty string is used - -## Important Notes - -- The `env` file is optional -- Keep sensitive information out of version control -- Variable names are case-sensitive -- Default values provide fallbacks but don't expose sensitive data -- Environment variables in your Docker configuration must use the `${VARIABLE}` syntax - -## Limitations - -- Basic environment variable substitution only -- No variable expansion or shell-style manipulation -- Cannot reference other variables within values -- No built-in encryption for sensitive values diff --git a/docs/docker-deployment/getting-started.mdx b/docs/docker-deployment/getting-started.mdx deleted file mode 100644 index de76781..0000000 --- a/docs/docker-deployment/getting-started.mdx +++ /dev/null @@ -1,196 +0,0 @@ ---- -title: Getting Started with DeployStack -description: Start deploying Docker applications across cloud providers with DeployStack. Step-by-step guide to generating infrastructure templates from Docker configurations. -sidebar: Getting Started -icon: Album ---- - -# Getting Started with DeployStack - -DeployStack offers two distinct paths to transform your Docker projects into cloud-ready deployments: a Quick Start path for immediate results, and our Recommended path for enhanced control and automation. Let's explore both approaches in detail. - -## Understanding the Two Paths - -### 🚀 Quick Start Path - -The Quick Start path is designed for developers who want to immediately make their Docker projects deployable, with minimal setup required. This approach works with your existing repository structure. - -### Recommended Path - -The Recommended path provides additional features through a `.deploystack` configuration directory and GitHub app integration. This approach enables automatic updates, environment variable management, and project customization. 
- -## Quick Start Path: Detailed Guide - -### For Docker Compose Projects - -#### Requirements - -- A public GitHub repository -- A `docker-compose.yml` or `docker-compose.yaml` file in your repository's root directory -- Container images must be: - - Pre-built and available in supported registries - - Publicly accessible - - Referenced using the `image` directive - -#### Step-by-Step Process - -1. **Repository Preparation** - - Ensure your `docker-compose.yml` is in the root directory - - Verify all images are publicly accessible - - Check that your compose file uses supported configuration options - -2. **Submission** - - Visit [deploystack.io/submit](https://deploystack.io/submit) - - Enter your GitHub repository URL - - Our system automatically detects your compose file - - Review the detected configuration - -3. **Template Generation** - - Infrastructure templates are generated for each supported cloud provider - - Templates are stored in our public repository - - You receive deployment button code for your README.md - -### For Docker Run Commands - -#### Requirements - -- A public GitHub repository -- A valid Docker run command that includes: - - Image name and tag - - Port mappings (if required) - - Environment variables (if needed) - - Volume mounts (if necessary) - -#### Step-by-Step Process - -1. **Command Preparation** - - Ensure your Docker run command is complete and valid - - Verify all referenced images are publicly available - - Test the command locally if possible - -2. **Submission** - - Visit [deploystack.io/submit](https://deploystack.io/submit) - - Enter your GitHub repository URL - - Paste your Docker run command - - Review the parsed configuration - -3. 
**Template Generation** - - Infrastructure templates are generated automatically - - Templates are optimized for each cloud provider - - You receive deployment button code - -## Recommended Path: Comprehensive Setup - -### The `.deploystack` Directory Structure - -Create a `.deploystack` directory in your repository with these components: - -```bash -.deploystack/ -├── docker-compose.yml # Your Docker Compose configuration -├── docker-run.txt # Or your Docker run command -├── env # Environment variables (optional) -└── logo.webp # Your project logo (optional) -``` - -#### Component Details - -**Docker Configuration Files**: - -- `docker-compose.yml`: Your complete Docker Compose configuration -- `docker-run.txt`: Alternative to compose file, contains your Docker run command -- Only one of these files should be present - -For more configuration options please check our [.deploystack Directory Reference](/docker-deployment/deploystack-configuration-directory). - -### GitHub App Integration - -The [DeployStack Repository Sync](https://github.com/apps/deploystack-repository-sync) app enables: - -1. **Automatic Updates** - - Monitors changes to your Docker configurations - - Updates templates when configurations change - - Ensures templates stay in sync with your project - -2. **Installation Steps** - - Visit the GitHub app installation page - - Select your repositories - - Configure access permissions - - Verify installation - -3. 
**Monitoring and Updates** - - Changes to `.deploystack` directory trigger updates - - Only default branch changes are processed - - Templates are automatically regenerated - -## Behind the Scenes: How It Works - -### The docker-to-iac Module - -Our open-source [docker-to-iac](https://github.com/deploystackio/docker-to-iac) module: - -- Parses your Docker configurations -- Handles multiple cloud provider translations -- Supports various infrastructure patterns -- Maintains provider-specific optimizations - -### Template Generation Process - -1. **Configuration Analysis** - - Docker configurations are parsed - - Dependencies are identified - -2. **Provider-Specific Translation** - - Templates generated for each provider - - Provider best practices applied - - Resource mappings optimized - -3. **Template Storage** - - Templates stored in [deploy-templates](https://github.com/deploystackio/deploy-templates) - - Version controlled for tracking - - Publicly accessible - -### Deployment Button Integration - -After template generation: - -1. Visit [deploystack.io/deploy-button](https://deploystack.io/deploy-button) -2. Select your preferred button style -3. Copy the generated code -4. 
Add to your README.md - -## Best Practices - -### Repository Organization - -- Keep Docker configurations clean and well-documented -- Use specific version tags for images -- Document environment variable requirements -- Include clear deployment instructions - -### Configuration Management - -- Use the `.deploystack` directory for better organization -- Keep environment variables separate -- Test configurations locally - -### Deployment Strategy - -- Start with the Quick Start path if needed -- Migrate to Recommended path for better control -- Use GitHub app for automatic updates - -## Troubleshooting Common Issues - -### Template Generation - -- Verify image accessibility -- Check Docker configuration syntax -- Ensure all required ports are exposed -- Validate environment variables - -## Need Additional Help? - -- Review our detailed [Troubleshooting Guide](/docker-deployment/troubleshooting) -- Join our active [Discord Community](https://discord.gg/UjFWwByB) -- Submit issues on GitHub to our [Feedback repository](https://github.com/deploystackio/feedback) diff --git a/docs/docker-deployment/github-application.mdx b/docs/docker-deployment/github-application.mdx deleted file mode 100644 index f5a8a8f..0000000 --- a/docs/docker-deployment/github-application.mdx +++ /dev/null @@ -1,69 +0,0 @@ ---- -title: GitHub Application -description: Keep Docker configurations and deployment templates in sync with DeployStack's GitHub App. Updates templates automatically when files change. ---- - -# GitHub App Integration - -The DeployStack GitHub App ensures your Infrastructure as Code (IaC) templates remain synchronized with your Docker configurations. After submitting your repository to deploystack.io, install our GitHub App to enable automatic updates. 
- -## How It Works - -When you install the [DeployStack Repository Sync](https://github.com/apps/deploystack-repository-sync) app, it monitors specific files in your repository: - -- `.deploystack/` directory - [Contains your Docker configurations and assets](/docker-deployment/deploystack-configuration-directory) -- `README.md` - For README.md updates - -When changes are detected in these files, the app automatically triggers an update of your IaC templates in our [deploy-templates](https://github.com/deploystackio/deploy-templates) repository. - - -Changes are only processed when they occur on your repository's **default branch**. Modifications in other branches will not trigger template, logo, config or any other updates. - - -## Installation - -1. Visit the [installation page](https://github.com/apps/deploystack-repository-sync/installations/new) -2. Select the repositories you want to monitor -3. Approve the requested permissions - -## Security & Permissions - -The app follows the principle of least privilege and requires only: - -- Read access to repository contents -- Read access to repository metadata - -These minimal permissions ensure the app can only: - -- Monitor changes to your Docker configurations -- Access basic repository information needed for template generation - -## What Gets Updated? - -When the app detects changes, it automatically updates: - -- Repository metadata in our catalog: - - Topics - - Repository Homepage - - Description -- IaC templates - - Depends on which technique (docker compose or docker run command) you choose, you can upload the `docker-compose.yml` or `docker-run.txt` in the `.deploystack` directory. Every time you update the files on your main branch (or additional branch), IaC templates will be updated automatically - [Automatic Updates](/docker-deployment/deploystack-configuration-directory#automatic-updates). 
-- Environment variables - - To make it easier for a user to deploy IaC templates, it is recommended to work with environment variables. For this purpose, you can upload an `env` file and add your appropriate variables - [Environment Variables](/docker-deployment/deploystack-configuration-directory#environment-variables). -- DeployStack Configuration -- Project / Applicaton Logo - - It is possible to upload your own logo to DeployStack catalog. To do this you need to upload a file to our directory `.deploystack`. Read more about it here: [Repository Logo](/docker-deployment/deploystack-configuration-directory#repository-logo) - -## Managing the Integration - -You can manage or remove the integration at any time through your [GitHub Applications Settings](https://github.com/settings/installations). The app installation can be configured for specific repositories or your entire organization. - -## Next Steps - -After installing the app: - -1. Make changes to your Docker configurations in the `.deploystack` directory -2. Commit and push your changes -3. DeployStack will automatically update your deployment templates - -For details about the `.deploystack` directory structure, check our [.deploystack Directory Reference](/docker-deployment/deploystack-configuration-directory). diff --git a/docs/docker-deployment/iac-lifecycle.mdx b/docs/docker-deployment/iac-lifecycle.mdx deleted file mode 100644 index a7c2568..0000000 --- a/docs/docker-deployment/iac-lifecycle.mdx +++ /dev/null @@ -1,88 +0,0 @@ ---- -title: Infrastructure as Code Lifecycle -description: Guide to how DeployStack manages Infrastructure as Code template updates, including automatic synchronization, update triggers, and version control. ---- - -# Infrastructure as Code Lifecycle - -This guide explains how DeployStack manages and updates your Infrastructure as Code (IaC) templates throughout their lifecycle. - -## Template Generation Process - -### Initial Setup - -1. 
Create a `.deploystack` [configuration directory](/docker-deployment/deploystack-configuration-directory) in your repository -2. Add your Docker configuration files: - - `docker-compose.yml` for Compose configurations - - `docker-run.txt` for Docker run commands -3. Submit your repository to [deploystack.io/submit](https://deploystack.io/submit) -4. Initial IaC templates are generated and stored in our [deploy-templates](https://github.com/deploystackio/deploy-templates) repository - -### Enabling Automatic Updates - -Install the [DeployStack Repository Sync](/docker-deployment/github-application) GitHub App to keep your templates up to date when: - -- You modify Docker configurations in the `.deploystack` directory -- Cloud providers update their IaC specifications -- DeployStack improves its template generation - -![DeployStack IaC Lifecycle](../assets/images/deploystack/iac-lifecycle.webp) - -## Update Flow - -As the app GitHub repository owner, an update flow gives you control over the generation of Infrastructure as Code (IaC) templates. The flow allows you to regenerate IaC templates by changing, for example, the `.deploystack/docker-compose.yml` file. - -All IaC templates are stored in public and open-source repository: [https://github.com/deploystackio/deploy-templates](https://github.com/deploystackio/deploy-templates). - -### Prerequisites for activating the flow - -1. You have installed the [DeployStack GitHub app](/docker-deployment/github-application). -2. You have created the `.deploystack/docker-run.txt` or `.deploystack/docker-compose.yml` file. - -The choice between `docker-run.txt` or `docker-compose.yml` depends on the submission process used to DeployStack. When submitting to DeployStack, you can choose two methods -> Docker Run or Docker Compose. - -### Example flow - -Let's say you want to change your image tag from "deploystack/awesomeapp:v1" to "deploystack/awesomeapp:v2-next". 
![DeployStack IaC Lifecycle](../assets/images/deploystack/deploystack-iac-flow-via-github-app.webp) - -1. To do this, you will edit the file `.deploystack/docker-run.txt` and change your new docker tag -2. GitHub will send an event to the DeployStack backend with the change of the file `.deploystack/docker-run.txt` because you have the DeployStack GitHub app installed. -3. DeployStack backend validates the change, and if all tests pass -4. By using [docker-to-iac module](https://github.com/deploystackio/docker-to-iac), DeployStack backend will generate the new IaC templates for your application and store them in our repository [https://github.com/deploystackio/deploy-templates](https://github.com/deploystackio/deploy-templates) - -## Update Triggers - -Your IaC templates are automatically updated in these scenarios: - -### Repository Changes - -When you modify files in your repository's default branch: - -- Changes to `docker-compose.yml` or `docker-run.txt` in `.deploystack` directory -- Updates to repository metadata - -### Provider Updates - -Templates are regenerated when: - -- Cloud providers modify their IaC specifications -- New provider features become available -- Provider API requirements change - -### System Updates - -DeployStack initiates template updates when: - -- The docker-to-iac module receives improvements -- New template optimizations are available -- Bug fixes are released - -## Template Versioning - -All template updates are version controlled in our [deploy-templates repository](https://github.com/deploystackio/deploy-templates), allowing you to: - -- Track template changes over time -- Review modification history -- Understand update triggers diff --git a/docs/docker-deployment/index.mdx b/docs/docker-deployment/index.mdx deleted file mode 100644 index 499e268..0000000 --- a/docs/docker-deployment/index.mdx +++ /dev/null @@ -1,23 +0,0 @@ ---- -title: DeployStack Documentation -description: Official DeployStack documentation - 
Learn how to automate Docker Compose and run deployments across cloud providers. Clear guides and technical references for effective deployment automation. -menuTitle: DeployStack ---- - -# DeployStack - Open Source Cloud Deployment Guide - -DeployStack helps you deploy Docker Compose and Docker Run applications across different cloud providers by automatically generating Infrastructure as Code templates. Our documentation guides you through using DeployStack effectively. - -## Documentation Sections - -- [Getting Started](/docker-deployment/getting-started) - Quick introduction and first steps -- [Docker Compose Requirements](/docker-deployment/docker-compose-requirements) - Learn about supported configurations -- [One-Click Deploy](/docker-deployment/one-click-deploy) - Learn about deployment automation -- [Troubleshooting](/docker-deployment/troubleshooting) - Resolve common issues - -## Additional Resources - -- [Docker-to-IaC Module Documentation](/docker-to-iac/index) -- [Join our Discord](https://discord.gg/UjFWwByB) -- [Visit DeployStack](https://deploystack.io) - diff --git a/docs/docker-deployment/meta.json b/docs/docker-deployment/meta.json deleted file mode 100644 index e905f80..0000000 --- a/docs/docker-deployment/meta.json +++ /dev/null @@ -1,15 +0,0 @@ -{ - "title": "Docker Deployment", - "description": "Docker Deployment", - "icon": "Container", - "root": true, - "pages": [ - "!index.mdx", - "getting-started.mdx", - "---General---", - "one-click-deploy.mdx", - "deploystack-configuration-directory.mdx", - "deploystack-config-file.mdx", - "..." 
- ] -} diff --git a/docs/docker-deployment/multiple-branches.mdx b/docs/docker-deployment/multiple-branches.mdx deleted file mode 100644 index d5237a8..0000000 --- a/docs/docker-deployment/multiple-branches.mdx +++ /dev/null @@ -1,150 +0,0 @@ ---- -title: Branch Strategy -description: Guide to implementing multi-branch deployment strategies with DeployStack, enabling version-specific deployments and automated template management. ---- - -# Branch Strategy - -DeployStack's branch strategy allows you to maintain and deploy multiple versions of your application simultaneously. This is particularly valuable when you're evolving your application while maintaining stable versions for production users. - -## The Multi-Version Journey - -### Starting Point: Default Branch - -Every repository starts with its default branch (typically `main` or `master`). This branch: - -- Always generates Infrastructure as Code templates -- Cannot be excluded from template generation -- Has implicit `priority: 0` in deployment options -- Can be changed through your repository settings - -When you change your default branch in GitHub: - -- DeployStack automatically detects the change - you need to install [DeployStack GitHub App](/docker-deployment/github-application) -- Regenerates templates for the new default branch -- Updates all deployment buttons - -### Growing Your Application: Adding New Versions - -As your application evolves, you might want to: - -- Develop a new major version with breaking changes -- Maintain an LTS (Long Term Support) version -- Test new features with early adopters - -This is where branch configurations become powerful. 
You can maintain up to 5 active branches, each with: - -- Its own Docker configuration -- Separate environment variables -- Independent deployment options - -For example, when developing version 2 of your application: - -```yaml -deployment: - branches: - v2: - label: "Version 2 Beta" - description: "Next generation features" - priority: 1 -``` - -### Branch-Specific Configurations - -Each branch can have its own `.deploystack` directory with standard configuration files. First, create the directories on your default branch: - -```bash -# Default branch configuration - -your-repository/ -└── .deploystack/ - ├── config.yml - ├── docker-compose.yml - └── env -``` - -```bash -# v2 branch configuration - -your-repository/ -└── .deploystack/ - ├── config.yml - ├── docker-compose.yml - └── env -``` - -This structure allows you to: - -- Use different Docker configurations per branch -- Maintain separate environment variables -- Modify service configurations independently -- Keep each version's deployment parameters isolated - -Remember: The DeployStack GitHub App only monitors the standard filenames: check [.deploystack Directory Reference for more info](/docker-deployment/deploystack-configuration-directory) - -## Real-World Example - -Let's say you're maintaining a web application: - -```yaml -application: - name: "MyWebApp" - description: "Modern web application stack" - -deployment: - branches: - v1-lts: - label: "v1 LTS" - description: "Stable v1.x release with long-term support" - priority: 1 - v2-beta: - label: "v2 Beta" - description: "New architecture with enhanced features" - priority: 2 - experimental: - label: "Edge" - description: "Latest experimental features" - priority: 3 -``` - -Each branch can have different Docker configurations: - -- `main` branch (v1.x stable): - - ```yaml - # docker-compose.yml - services: - web: - image: myapp:1.5 - ``` - -- `v2-beta` branch: - - ```yaml - # docker-compose.yml - services: - web: - image: myapp:2.0-beta - ``` - -## 
Benefits for Your Users - -This strategy allows your users to: - -- Choose the version that best fits their needs -- Test new versions while maintaining production deployments -- Safely transition between versions at their own pace -- Deploy LTS versions for stability -- Try experimental features in isolation - -## Important Notes - -- Maximum of 5 active branches supported -- Each branch can have unique Docker configurations -- Default branch can be changed (switch to another branch and make it default) but not excluded -- Branch configurations ([DeployStack config file](/docker-deployment/deploystack-config-file)) must be in the default branch -- All branches are automatically monitored for changes -- Template regeneration happens automatically when: - - Branch content changes - - Default branch is changed - - Configuration is updated diff --git a/docs/docker-deployment/one-click-deploy.mdx b/docs/docker-deployment/one-click-deploy.mdx deleted file mode 100644 index 3f42931..0000000 --- a/docs/docker-deployment/one-click-deploy.mdx +++ /dev/null @@ -1,157 +0,0 @@ ---- -title: One-Click Deploy -description: Technical docs for DeployStack's one-click deployment feature. Covers infrastructure template generation, cloud provider integration, and button configuration. ---- - -# One-Click Deploy - -DeployStack leverages existing deployment technologies from cloud providers to make application deployment as straightforward as possible. - -## How One-Click Deploy Works - -When you submit your repository to [deploystack.io/submit](https://deploystack.io/submit), we: - -1. Generate Infrastructure as Code (IaC) templates for [supported cloud providers](/docker-to-iac/index) -2. Store these templates in our [deploy-templates repository](https://github.com/deploystackio/deploy-templates) -3. 
Create provider-specific deployment buttons for your README.md -> by [deploystack.io/deploy-button](https://deploystack.io/deploy-button) - -## Template Generation and Storage - -### Repository Structure - -All generated templates are stored in the [deploystackio/deploy-templates](https://github.com/deploystackio/deploy-templates) repository using this organization: - -- Each project gets its own subfolder and branch -- Naming convention: `-` (lowercase) -- Example: `microsoft-vscode` for Microsoft's VS Code repository - -### Branch Strategy - -We create a dedicated branch for each project to support one-click deployment functionality: - -- Branch name matches the subfolder name -- Contains all necessary IaC templates and configurations -- Enables direct integration with cloud provider deployment systems - -## Cloud Provider Integration - -### Current Providers - -We integrate with cloud providers' native deployment systems. For example: - -- **DigitalOcean**: Uses the "Deploy to DigitalOcean" functionality as documented in their [official guide](https://docs.digitalocean.com/products/app-platform/how-to/add-deploy-do-button/) -- **Kubernetes**: Generates Helm charts that can be deployed to any Kubernetes cluster -- Check [supported cloud providers](/docker-to-iac/index) for full list - -### Provider-Specific Templates - -Each cloud provider may require specific template formats: - -- AWS CloudFormation templates -- DigitalOcean App Spec -- Render Blueprints -- And more based on provider requirements -- Kubernetes Helm charts with all necessary templates - -## Using Deploy Buttons - -After template generation, you'll receive HTML/Markdown code for deployment buttons by visitig the [deploystack.io/deploy-button](https://deploystack.io/deploy-button) page. - -There are two options you can chose from. The main difference is the deploy url. 
For the mode "Deploy via DeployStack" the deploy address points to the deploystack.io/deploy endpoint, where HTTP status code 302 redirects to the cloud provider one-click deploy endpoint. - -In the example below with render: - -```markdown -1. -> https://deploystack.io/deploy/microsoft-vscode?provider=rnd&language=rnd -2. -> HTTP 302 REDIRECT -3. -> https://render.com/deploy?repo=https://github.com/deploystackio/deploy-templates/tree/microsoft-vscode -``` - -### Deploy via DeployStack - -Link via deploystack deploy endpoint. - -- auto update on One-Click deploy links: if the provider's deploy url changes, we will update. -- future cloud provider support out of the box: The endpoint makes it easier for us to integrate with additional providers, which benefits your users. -- deploy statistics for your app: We collect anonymized statistics to show the number of deployments your application has had, similar to npm download statistics. - -#### Example Markdown Deploy via DeployStack - -- static deploy links: if the cloud provider changes the one-click deploy url, the functionality will also be broken. You have to update your `README.md` manually. -- no statistics collection possible: you will never know how many people use your project :) - -```markdown title="README.md" -## ⚡ One-Click Deploy - -| Cloud Provider | Deploy Button | -|---------------|---------------| -| Render | | -``` - -### Deploy Standalone - -Direct link to Cloud Provider to enable One-Click deploy. 
- -#### Example Markdown Deploy Standalone - -```markdown title="README.md" -## ⚡ One-Click Deploy - -| Cloud Provider | Deploy Button | -|---------------|---------------| -| Render | | -``` - -## License and Usage - -All generated templates are available under the MIT License: - -- Full license text available in the [deploy-templates repository](https://github.com/deploystackio/deploy-templates/blob/main/LICENSE) -- Free to use, modify, and distribute -- No warranty provided - -## Extending Provider Support - -Want to add support for another cloud provider? You can: - -1. Contribute to the [docker-to-iac module](https://github.com/deploystackio/docker-to-iac) -2. Add a new provider parser -3. Implement the necessary template generation logic - -Benefits of contributing: - -- All existing projects in our catalog automatically get templates for the new provider -- The open-source community benefits from broader deployment options -- Your provider becomes part of the one-click deployment ecosystem - -## Technical Implementation Details - -The one-click deployment process: - -1. User clicks deploy button -2. Cloud provider loads template from our repository's specific branch -3. Provider's deployment system processes the template -4. 
Application gets deployed according to specifications - -## Validation and Security - -For each deployment: - -- Templates are version controlled -- Source code is publicly accessible -- Infrastructure specifications are transparent -- No sensitive data is stored in templates - -## Future Enhancements - -We're continuously working to: - -- Add more cloud providers -- Improve template generation -- Enhance deployment options -- Support more complex configurations - -## Next Steps - -- Visit our [Discord community](https://discord.gg/UjFWwByB) for help -- Consider [contributing](https://github.com/deploystackio/docker-to-iac) to add more providers diff --git a/docs/docker-deployment/troubleshooting.mdx b/docs/docker-deployment/troubleshooting.mdx deleted file mode 100644 index 3ec950b..0000000 --- a/docs/docker-deployment/troubleshooting.mdx +++ /dev/null @@ -1,152 +0,0 @@ ---- -title: Troubleshooting DeployStack Issues -description: Technical solutions for common DeployStack deployment issues. Find answers to repository submission errors, license restrictions, and Docker Compose validation problems. -sidebar: Troubleshooting ---- - -# Troubleshooting - -This guide helps you resolve common issues that might occur when submitting your repository to DeployStack. - -## Invalid GitHub Repository URL - -This error occurs when the submitted URL doesn't match the expected GitHub repository URL format. - -**Common causes:** - -- Missing 'github.com' in the URL -- Including query parameters - -**Solution:** -Use the standard GitHub repository URL format: - -```bash -https://github.com/username/repository -``` - -## Repository License Not Permitted - -We don't support certain licenses to protect both our users and our service. 
Here's what we currently don't support: - -| License Type | Reason for Restriction | -|-------------|------------------------| -| No License | Without a license, the code defaults to exclusive copyright, making it legally risky to use or deploy | -| Proprietary License | These licenses typically restrict redistribution and modification rights | -| Commons Clause | This addition to licenses restricts commercial use of the software | -| Shared Source | These licenses often include terms that limit deployment and distribution | -| Custom License | Custom licenses require individual legal review and may contain problematic terms | - -**Other Restrictions:** - -- Licenses containing "proprietary" terms -- Licenses with Commons Clause additions -- Licenses marked as "shared source" -- Any repository without clear license information - -**Solution:** - -Ensure your repository: - -- Uses a widely accepted open-source license (MIT, Apache, GPL, etc.) -- Has clear license information in the repository -- If you can, change your repository license to a supported one -- For special cases, whitelist requests can be submitted via [Discord](https://discord.gg/UjFWwByB) or Twitter DM - -## Invalid Base64 Encoding for Docker File - -This error occurs during our internal processing of your docker-compose file. - -**Common causes:** - -- File encoding issues -- Special characters in the file -- File corruption - -**Solution:** - -- Verify your docker-compose file uses UTF-8 encoding -- Remove any special characters -- Try re-creating the file if issues persist - -## Invalid Docker Compose File - -The submitted docker-compose file doesn't meet the required format or contains unsupported features. 
- -**Common causes:** - -- Invalid YAML syntax - -- Unsupported Docker Compose version - -- Missing required fields - -- Using unsupported features - -**Solution:** - -- Validate your docker-compose file syntax - -- Check our [Docker Compose Requirements](/docker-deployment/docker-compose-requirements) page - -- Ensure you're using supported features only - -## Error Converting Docker Compose to IaC - -This error occurs when our system cannot convert your docker-compose configuration to Infrastructure as Code templates. - -**Common causes:** - -- Unsupported service configurations - -- Complex dependencies - -- Resource definitions that can't be mapped to cloud provider services - -**Solution:** - -- Simplify your docker-compose configuration - -- Review our [supported features documentation](/docker-to-iac/supported-docker-compose-variables) - -- Ensure all services use supported configurations - -## Error Listing Services from Docker Compose - -The system couldn't properly parse the services defined in your docker-compose file. - -**Common causes:** - -- Malformed service definitions - -- Missing required service properties - -- Invalid service configurations - -**Solution:** - -- Verify each service has the required `image` property - -- Check service definitions follow the correct format - -- Remove any unsupported service configurations - -## Internal Server Error - -This indicates an unexpected error in our validation process. - -**What it means:** - -- The error is on our end - -- The issue isn't related to your repository or configuration - -- We need to investigate - -**What to do:** - -1. Try your submission again after a few minutes -2. If the error persists, join our [Discord community](https://discord.gg/UjFWwByB) -3. Report the issue with: - - Your repository URL - - Timestamp of the error - - Any error messages you received - -## General Troubleshooting Tips - -1. Validate your docker-compose file locally before submission -2. 
Ensure your repository meets all [requirements](/docker-deployment/docker-compose-requirements) -3. Check that all services use supported configurations -4. Verify your repository is public and accessible - -## Need More Help? - -If you're still experiencing issues: - -- Join our [Discord community](https://discord.gg/UjFWwByB) -- Check our [Docker Compose Requirements](/docker-deployment/docker-compose-requirements) -- Review [supported features](/docker-to-iac/supported-docker-compose-variables) diff --git a/docs/docker-to-iac/api.mdx b/docs/docker-to-iac/api.mdx deleted file mode 100644 index dd242cc..0000000 --- a/docs/docker-to-iac/api.mdx +++ /dev/null @@ -1,791 +0,0 @@ ---- -title: docker-to-iac module API -description: Here's everything you need to know about our docker-to-iac module - from listing available cloud providers to converting your Docker setup into deployable code. -sidebar: Module API ---- - -# docker-to-iac module API list - -In this page you will find all possible APIs for package docker-to-iac. - -## List all Parser - -To list all available parsers, please use the `listAllParsers()` method. 
- -### Example - -```typescript -import { listAllParsers } from '@deploystack/docker-to-iac'; - -const parsers = listAllParsers(); - -console.log('Available Parsers:'); -console.log(parsers); -``` - -#### Output - -```json -[ - { - providerWebsite: 'https://aws.amazon.com/cloudformation/', - providerName: 'Amazon Web Services', - providerNameAbbreviation: 'AWS', - languageOfficialDocs: 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html', - languageAbbreviation: 'CFN', - languageName: 'AWS CloudFormation', - defaultParserConfig: { files: [Array], cpu: 512, memory: '1GB' } - }, - { - providerWebsite: 'https://render.com/docs', - providerName: 'Render', - providerNameAbbreviation: 'RND', - languageOfficialDocs: 'https://docs.render.com/infrastructure-as-code', - languageAbbreviation: 'RND', - languageName: 'Render Blue Print', - defaultParserConfig: { - files: [Array], - subscriptionName: 'starter', - region: 'oregon', - diskSizeGB: 10 - } - }, - { - providerWebsite: 'https://www.digitalocean.com/', - providerName: 'DigitalOcean', - providerNameAbbreviation: 'DO', - languageOfficialDocs: 'https://docs.digitalocean.com/products/app-platform/', - languageAbbreviation: 'DOP', - languageName: 'DigitalOcean App Spec', - defaultParserConfig: { files: [Array], region: 'nyc', subscriptionName: 'basic-xxs' } - }, - { - providerWebsite: 'https://helm.sh/', - providerName: 'Kubernetes', - providerNameAbbreviation: 'K8S', - languageOfficialDocs: 'https://helm.sh/docs/', - languageAbbreviation: 'HELM', - languageName: 'Helm Chart', - defaultParserConfig: { - files: [Array], - cpu: '100m', - memory: '128Mi' - } - }, - { - providerWebsite: 'https://www.digitalocean.com/', - providerName: 'DigitalOcean', - providerNameAbbreviation: 'DO', - languageOfficialDocs: 'https://docs.digitalocean.com/products/app-platform/', - languageAbbreviation: 'DOP', - languageName: 'DigitalOcean App Spec', - defaultParserConfig: { files: [Array], region: 'nyc', 
subscriptionName: 'basic-xxs' } - } -] -``` - -**Note the files array**: that's because we have a [multi file strategy](/docker-to-iac/multi-file-configuration). - -### Type - -```typescript -listAllParsers(): ParserInfo[] -``` - -## Get Parser Info - -If you want to extract the `defaultParserConfig` object from a parser, the `getParserInfo` method is the most suitable for this. - -### Example - -```typescript -import { getParserInfo } from '@deploystack/docker-to-iac'; - -const awsInfo = getParserInfo('CFN'); - -console.log('Available Parsers:'); -console.log(awsInfo); -``` - -#### Output - -```json -{ - providerWebsite: 'https://aws.amazon.com/cloudformation/', - providerName: 'Amazon Web Services', - providerNameAbbreviation: 'AWS', - languageOfficialDocs: 'https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/Welcome.html', - languageAbbreviation: 'CFN', - languageName: 'AWS CloudFormation', - defaultParserConfig: { - files: [ - { - path: 'aws-cloudformation.cf.yml', - templateFormat: 'yaml', - isMain: true, - description: 'AWS CloudFormation template' - } - ], - cpu: 512, - memory: '1GB' - } -} -``` - -### Type - -```typescript -getParserInfo(languageAbbreviation: string): ParserInfo -``` - -## Translate API - -Translate Docker configurations (both Docker run commands and docker-compose.yml files) into your chosen Infrastructure as Code language. 
- -### Function Signature - -```typescript -translate(input: string, options: { - source: 'run' | 'compose', - target: string, - templateFormat?: TemplateFormat, - environmentVariableGeneration?: EnvironmentVariableGenerationConfig; - environmentVariables?: Record; - persistenceKey?: string; - serviceConnections?: ServiceConnectionsConfig; -}): TranslationResult -``` - -Where `TranslationResult` has the structure: - -```typescript -interface TranslationResult { - files: { - [path: string]: FileOutput - }; - serviceConnections?: ResolvedServiceConnection[]; -} - -interface FileOutput { - content: string; - format: TemplateFormat; - isMain?: boolean; -} -``` - -### Examples - -#### Translating Docker Compose - -```javascript -import { readFileSync, writeFileSync, mkdirSync, existsSync } from 'fs'; -import { join, dirname } from 'path'; -import { translate } from '@deploystack/docker-to-iac'; - -const dockerComposeContent = readFileSync('path/to/docker-compose.yml', 'utf8'); - -const result = translate(dockerComposeContent, { - source: 'compose', - target: 'CFN', - templateFormat: 'yaml' -}); - -// Access individual file contents -console.log(`Generated ${Object.keys(result.files).length} files:`); -Object.keys(result.files).forEach(path => { - console.log(`- ${path}`); -}); - -// Write files to disk preserving directory structure -Object.entries(result.files).forEach(([path, fileData]) => { - const fullPath = join('output', path); - const dir = dirname(fullPath); - - if (!existsSync(dir)) { - mkdirSync(dir, { recursive: true }); - } - - writeFileSync(fullPath, fileData.content); -}); -``` - -#### Translating Docker Run Command - -```javascript -import { translate } from '@deploystack/docker-to-iac'; -import { writeFileSync, mkdirSync, existsSync } from 'fs'; -import { join, dirname } from 'path'; - -const dockerRunCommand = 'docker run -d -p 8080:80 nginx:latest'; - -const result = translate(dockerRunCommand, { - source: 'run', - target: 'RND', - templateFormat: 
'yaml' -}); - -console.log(result) - -// Access and save all generated files -Object.entries(result.files).forEach(([path, fileData]) => { - const fullPath = join('output', path); - const dir = dirname(fullPath); - - if (!existsSync(dir)) { - mkdirSync(dir, { recursive: true }); - } - - writeFileSync(fullPath, fileData.content); - console.log(`Created: ${path}`); -}); -``` - -#### Translating Docker Compose to Helm Chart - -```javascript -import { translate } from '@deploystack/docker-to-iac'; -import { writeFileSync, mkdirSync, existsSync } from 'fs'; -import { join, dirname } from 'path'; - -const dockerComposeContent = ` -version: '3' -services: - web: - image: nginx:latest - ports: - - "80:80" - db: - image: postgres:13 - environment: - POSTGRES_USER: myuser - POSTGRES_PASSWORD: mypassword - POSTGRES_DB: myapp -`; - -const result = translate(dockerComposeContent, { - source: 'compose', - target: 'HELM', - templateFormat: 'yaml' -}); - -// Access and save all generated files to create a complete Helm Chart -Object.entries(result.files).forEach(([path, fileData]) => { - const fullPath = join('helm-chart', path); - const dir = dirname(fullPath); - - if (!existsSync(dir)) { - mkdirSync(dir, { recursive: true }); - } - - writeFileSync(fullPath, fileData.content); - console.log(`Created: ${path}`); -}); -``` - -#### Example Output (Partial - Chart.yaml) - -```yaml -apiVersion: v2 -name: deploystack-app -description: A Helm chart for DeployStack application generated from Docker configuration -type: application -version: 0.1.0 -appVersion: 1.0.0 -maintainers: - - name: DeployStack - email: hello@deploystack.io -dependencies: - - name: db - repository: https://charts.bitnami.com/bitnami - version: ^12.0.0 - condition: dependencies.db.enabled -``` - -#### Configuring Service Connections - -```javascript -import { translate } from '@deploystack/docker-to-iac'; - -const dockerComposeContent = ` -version: "3" -services: - db: - image: mariadb:latest - environment: - - 
MYSQL_ROOT_PASSWORD=rootpass - app: - image: node:alpine - environment: - - DATABASE_HOST=db # This will be transformed -`; - -const result = translate(dockerComposeContent, { - source: 'compose', - target: 'DOP', // DigitalOcean App Platform - templateFormat: 'yaml', - serviceConnections: { - mappings: [ - { - fromService: 'app', // Service that needs to connect - toService: 'db', // Service to connect to - environmentVariables: [ // Env vars that reference the service - 'DATABASE_HOST' - ] - } - ] - } -}); - -// The result will include transformed service references: -console.log(result.serviceConnections); -``` - -### Example Output (Render) - -```yaml -{ - files: { - 'render.yaml': { - content: 'services:\n' + - ' - name: default\n' + - ' type: web\n' + - ' env: docker\n' + - ' runtime: image\n' + - ' image:\n' + - ' url: docker.io/library/nginx:latest\n' + - ' startCommand: ""\n' + - ' plan: starter\n' + - ' region: oregon\n' + - ' envVars:\n' + - ' - key: PORT\n' + - ' value: "80"\n', - format: 'yaml', - isMain: true - } - } -} -Created: render.yaml -``` - -#### Translation with Environment Variable Generation - -```typescript -import { translate } from '@deploystack/docker-to-iac'; - -// Environment variable configuration -const envConfig = { - 'library/mariadb': { - versions: { - '*': { - environment: { - 'MYSQL_ROOT_PASSWORD': { - type: 'password', - length: 16 - }, - 'MYSQL_DATABASE': { - type: 'string', - length: 12, - pattern: 'lowercase' - } - } - } - } - } -}; - -const translatedConfig = translate(dockerComposeContent, { - source: 'compose', - target: 'CFN', - templateFormat: 'yaml', - environmentVariableGeneration: envConfig -}); -``` - -### Parameters - -#### `input: string` - -For Docker Compose: The contents of your docker-compose.yml file -For Docker run: The complete docker run command - -#### `options.source: 'run' | 'compose'` - -Specifies the input type: - -- `'run'` - For Docker run commands -- `'compose'` - For Docker Compose 
files - -#### `options.target: string` - -The IaC language to translate to. Currently supported targets: -Please see the sidebar on the left, section Parsers. - -#### `options.templateFormat?: TemplateFormat` - -Optional. The desired output format: - -- `'json'` - JavaScript Object Notation -- `'yaml'` - YAML format -- `'text'` - Plain text - -> [!IMPORTANT] -> Not all template formats are valid for every IaC language. For example, AWS CloudFormation only accepts YAML or JSON formats. Choose a format compatible with your target IaC language. - -#### `options.environmentVariableGeneration?: EnvironmentVariableGenerationConfig` - -Optional. Configuration for generating environment variable values. Structure: - -```typescript -type EnvironmentVariableGenerationConfig = { - [imageName: string]: { - versions: { - [version: string]: { - environment: { - [variableName: string]: { - type: 'password' | 'string' | 'number'; - length?: number; - pattern?: 'uppercase' | 'lowercase' | 'normal'; - min?: number; // For number type - max?: number; // For number type - } - } - } - } - } -} -``` - -Generation types: - -- `password`: Generates a secure random password -- `string`: Generates a random string -- `number`: Generates a random number within specified range - -Patterns (for string type): - -- `uppercase`: Only uppercase characters -- `lowercase`: Only lowercase characters -- `normal`: Mixed case with numbers - -Version matching: - -- Use exact versions (e.g., "10.5") -- Use "*" for all versions -- Use "latest" for latest version - -> [!IMPORTANT] -> Environment variables in your docker-compose.yml must use the `${VARIABLE_NAME}` syntax to be processed by the generator. - -#### `environmentVariables?: Record` - -Optional. The docker-to-iac module supports passing environment variables from `.env` files to your Infrastructure as Code templates. 
This feature allows you to manage configuration values separately from your Docker configurations and maintain consistency across deployments. - -```typescript -import { translate, parseEnvFile } from '@deploystack/docker-to-iac'; -import { readFileSync } from 'fs'; - -// Read and parse the .env file -const envContent = readFileSync('.env', 'utf-8'); -const envVariables = parseEnvFile(envContent); - -const result = translate(dockerConfig, { - source: 'run', // or 'compose' - target: 'RND', // or other supported targets - templateFormat: 'yaml', - environmentVariables: envVariables -}); -``` - -#### `options.persistenceKey?: string` - -Optional. The `persistenceKey` parameter allows you to maintain consistent variable values across multiple template generations. - -#### `options.serviceConnections?: ServiceConnectionsConfig` - -Optional. Configure service-to-service communications by defining which environment variables reference other services. - -```typescript -type ServiceConnectionsConfig = { - mappings: Array<{ - fromService: string; // Service that needs to connect - toService: string; // Service to connect to - environmentVariables: string[]; // Environment variables that reference the service - property?: string; // Connection property type (connectionString, hostport, etc.) 
- }> -}; -``` - -This option is currently supported by: - -- Render.com (RND): Uses Blueprint's `fromService` syntax -- DigitalOcean App Platform (DOP): Uses direct service names -- Kubernetes Helm Charts (HELM): Uses Kubernetes DNS service discovery - -Example: - -```javascript -serviceConnections: { - mappings: [ - { - fromService: 'frontend', - toService: 'api', - environmentVariables: ['API_URL'], - property: 'hostport' - }, - { - fromService: 'app', - toService: 'db', - environmentVariables: ['DATABASE_URL'], - property: 'connectionString' - } - ] -} -``` - -### Return Value - -Returns the translated Infrastructure as Code template and any resolved service connections: - -```typescript -{ - files: { - // Generated IaC template files with paths as keys - 'render.yaml': { content: '...', format: 'yaml', isMain: true } - }, - serviceConnections: [ - { - fromService: 'app', - toService: 'db', - variables: { - 'DATABASE_HOST': { - originalValue: 'db', - transformedValue: 'db' // Transformed as appropriate for the provider - } - } - } - ] -} -``` - -## List Services API - -Extract service configurations from either Docker run commands or docker-compose.yml files as structured JSON objects. 
- -### Function Signature - -```typescript -listServices(content: string, options: ListServicesOptions): { [key: string]: ServiceConfig } - -type ListServicesOptions = { - source: 'compose' | 'run'; - environmentVariableGeneration?: EnvironmentVariableGenerationConfig; - environmentVariables?: Record; - persistenceKey?: string; -}; -``` - -### Examples - -#### Listing Docker Compose Services with Environment Variables - -```javascript -import { readFileSync } from 'fs'; -import { listServices, parseEnvFile } from '@deploystack/docker-to-iac'; - -const dockerComposeContent = readFileSync('path/to/docker-compose.yml', 'utf8'); -const envContent = readFileSync('.env', 'utf-8'); -const envVariables = parseEnvFile(envContent); - -const services = listServices(dockerComposeContent, { - source: 'compose', - environmentVariables: envVariables -}); - -console.log(services); -``` - -##### Output with Environment Variables - -```json -{ - "db": { - "image": "mariadb:11.2", - "ports": [], - "command": "mariadbd --character-set-server=utf8mb4 --collation-server=utf8mb4_bin", - "restart": "unless-stopped", - "volumes": [{"host": "db", "container": "/var/lib/mysql"}], - "environment": { - "MYSQL_ROOT_PASSWORD": "mysecretpassword", - "MYSQL_USER": "myuser", - "MYSQL_PASSWORD": "mysecretpassword", - "MYSQL_DATABASE": "mydatabase" - } - } -} -``` - -#### Listing Docker Run Services - -```javascript -import { listServices } from '@deploystack/docker-to-iac'; - -const dockerRunCommand = 'docker run -d -p 8080:80 -e NODE_ENV=production nginx:latest'; - -const services = listServices(dockerRunCommand, { - source: 'run' -}); - -console.log(services); -``` - -##### Output - -```json -{ - "service": { - "image": "nginx:latest", - "ports": ["8080:80"], - "environment": { - "NODE_ENV": "production" - } - } -} -``` - -### Options - -#### `content: string` - -The input content to parse: - -- For Docker Compose: The contents of your docker-compose.yml file -- For Docker run: The complete docker 
run command - -#### `options.source: 'run' | 'compose'` - -Specifies the input type: - -- `'run'` - For Docker run commands -- `'compose'` - For Docker Compose files - -#### `options.environmentVariables?: Record` - -Optional. Environment variables from a `.env` file or other source. Used to substitute variables in the format `${VARIABLE_NAME}` in your Docker configuration. - -Example: - -```javascript -const envVariables = { - 'DB_PASSWORD': 'mysecretpassword', - 'DB_USERNAME': 'myuser', - 'DB_DATABASE': 'mydatabase' -}; -``` - -#### `options.environmentVariableGeneration?: EnvironmentVariableGenerationConfig` - -Optional. Configuration for automatically generating environment variable values. Structure: - -```typescript -type EnvironmentVariableGenerationConfig = { - [imageName: string]: { - versions: { - [version: string]: { - environment: { - [variableName: string]: { - type: 'password' | 'string' | 'number'; - length?: number; - pattern?: 'uppercase' | 'lowercase' | 'normal'; - min?: number; // For number type - max?: number; // For number type - } - } - } - } - } -} -``` - -Example: - -```javascript -const envGeneration = { - 'library/mariadb': { - versions: { - '*': { - environment: { - 'MYSQL_ROOT_PASSWORD': { - type: 'password', - length: 16 - }, - 'MYSQL_DATABASE': { - type: 'string', - length: 12, - pattern: 'lowercase' - } - } - } - } - } -}; -``` - -#### `options.persistenceKey?: string` - -Optional. A unique key to maintain consistent generated environment variables across multiple calls to `listServices` or `translate`. 
- -### Return Value - -Returns an object where: - -- Keys are service names -- Values are service configurations containing: - - `image`: Docker image name and tag - - `ports`: Array of port mappings - - `command`: Custom command (if specified) - - `restart`: Restart policy (if specified) - - `volumes`: Array of volume mappings (if specified) - - `environment`: Object of environment variables - -## Parse Environment File - -Parse a `.env` file content into a key-value object using the `parseEnvFile()` method. The method handles basic environment file syntax including comments and quoted values. - -### Example - -```typescript -import { parseEnvFile } from '@deploystack/docker-to-iac'; - -const envContent = ` -# Database settings -DB_HOST=localhost -DB_USER="admin" -DB_PASS='secretpass' -# Comment line -NUMBERS=123456 -QUOTED="value=with=equals" -`; - -const envVars = parseEnvFile(envContent); - -console.log('Parsed Environment Variables:'); -console.log(envVars); -``` - -#### Output - -```json -{ - "DB_HOST": "localhost", - "DB_USER": "admin", - "DB_PASS": "secretpass", - "NUMBERS": "123456", - "QUOTED": "value=with=equals" -} -``` - -### Type - -```typescript -parseEnvFile(content: string): Record -``` diff --git a/docs/docker-to-iac/available-commands.mdx b/docs/docker-to-iac/available-commands.mdx deleted file mode 100644 index 39548fd..0000000 --- a/docs/docker-to-iac/available-commands.mdx +++ /dev/null @@ -1,77 +0,0 @@ ---- -title: Available Commands -description: List of all available commands that you can use to help us with development and testing ---- - -# docker-to-iac Available Commands - -The following commands are currently supported: - -## Build Commands - -- `npm run build` - - Builds the module using TypeScript compiler and creates output files inside the `dist/` directory. - -## Code Quality Commands - -- `npm run lint` - - Runs ESLint to check code quality. 
ESLint is also run as part of GitHub action test for new pull requests on the default `main` branch. - -## Testing Commands - -- `npm run test` - - Runs the complete test suite including both unit tests and end-to-end tests. -- `npm run test:unit` - - Runs only the unit tests to validate individual components. -- `npm run test:e2e` - - Runs only the end-to-end tests which validate the entire translation process from Docker run commands or Docker Compose files to infrastructure as code. -- `npm run test:watch` - - Runs tests in watch mode, which automatically re-runs tests when files change. -- `npm run test:coverage` - - Runs tests with coverage reporting to identify untested code paths. - -## Release Commands - -- `npm run release` - - Runs the release-it command which is part of the release process of [docker-to-iac](https://www.npmjs.com/package/@deploystack/docker-to-iac) modules to npm registry. The release is executed through configurations defined in `.release-it.js`. - -## Other Commands - -- `npm run pretest:e2e` - - Automatically run before e2e tests to clean the output directory. - -You can view all commands and their configurations in the [package.json](https://github.com/deploystackio/docker-to-iac/blob/main/package.json) file. - -## Examples - -### Running Unit Tests Only - -```bash -npm run test:unit -``` - -### Running End-to-End Tests Only - -```bash -npm run test:e2e -``` - -### Running All Tests with Coverage - -```bash -npm run test:coverage -``` - -### Building the Module - -```bash -npm run build -``` - -### Checking Code Quality - -```bash -npm run lint -``` - -Each command is configured to provide the most relevant feedback for its purpose. For example, unit tests provide detailed output about each individual function, while end-to-end tests show a summary of the complete translation process from Docker configurations to infrastructure as code. 
diff --git a/docs/docker-to-iac/before-you-start.mdx deleted file mode 100644 index 0c58b36..0000000 --- a/docs/docker-to-iac/before-you-start.mdx +++ /dev/null @@ -1,30 +0,0 @@ ---- -title: Before you Start contributing -description: Contribute to DeployStack by adding cloud providers that support Docker runtime and infrastructure as code. Your work benefits the entire open-source community. ---- - -# Before you start contributing to docker-to-iac module - -Thank you for your interest in extending the module. Please remember that [docker-to-iac](https://github.com/deploystackio/docker-to-iac) open-source module is the heart of the DeployStack platform. - -## Adding a new cloud provider - -This means: if your pull request will get approved, all existing applications in our catalog [https://deploystack.io/c/](https://deploystack.io/c) will be expanded to include another cloud provider that you want to add. - -That sounds great, you are helping all open source applications inside our catalog. - -There is one thing to keep in mind: - -### Docker Container Runtime - -The platform you want to add to the docker-to-iac module must have a docker runtime like Render.com or DigitalOcean. - -Currently, our catalog only includes dockerized applications. Therefore, they also need a runtime environment where the Docker container can be executed. - -### Infrastructure as Code - -Our platform's advantage is that visitors to an open source repository can deploy an application using the one-click deploy button. - -Therefore, the new cloud provider must supply the option and allow infrastructure as code template. The template can be something custom like CloudFormation from AWS or something generic like Terraform (although Terraform is not generic, that's a bad example 😄). - -However, one-click deployment or a similar mechanism/automation __must__ be available. 
diff --git a/docs/docker-to-iac/environment-variable-generation.mdx b/docs/docker-to-iac/environment-variable-generation.mdx deleted file mode 100644 index 6af6dbf..0000000 --- a/docs/docker-to-iac/environment-variable-generation.mdx +++ /dev/null @@ -1,208 +0,0 @@ ---- -title: Environment Variable Generation -description: Use docker-to-iac to automatically create environment variables for your Docker containers. Define rules for variable generation and maintain consistency across multiple deployments. ---- - -# Environment Variable Generation - -The docker-to-iac module includes a system for handling environment variables, with a focus on database images and services that require secure credentials. This feature automatically generates appropriate values for environment variables and can maintain consistency across multiple template generations. - -## Overview - -When working with databases and other services that require credentials, you often need to generate secure passwords and consistent configuration values. 
The environment variable generation system: - -- Automatically generates credentials and configuration values -- Maintains consistency across multiple template generations -- Handles version-specific variable names -- Supports different variable types (passwords, strings, numbers) - -## Configuration Structure - -Environment variable configurations are defined using a JSON structure that maps Docker images to their version-specific environment requirements: - -```typescript -type EnvironmentVariableConfig = { - [imageName: string]: { - versions: { - [version: string]: { - environment: { - [variableName: string]: { - type: 'password' | 'string' | 'number'; - length?: number; - pattern?: 'uppercase' | 'lowercase' | 'normal'; - min?: number; // For number type - max?: number; // For number type - } - } - } - } - } -} -``` - -## Usage Example - -Here's how to use environment variable generation with a MariaDB container: - -```javascript -import { translate } from '@deploystack/docker-to-iac'; - -// Define environment variable generation configuration -const envConfig = { - 'library/mariadb': { - versions: { - '>=11.0': { - environment: { - 'MYSQL_ROOT_PASSWORD': { - type: 'password', - length: 16 - }, - 'MYSQL_USER': { - type: 'string', - length: 8, - pattern: 'lowercase' - }, - 'MYSQL_PASSWORD': { - type: 'password', - length: 16 - }, - 'MYSQL_DATABASE': { - type: 'string', - length: 12, - pattern: 'lowercase' - } - } - } - } - } -}; - -// Your docker-compose.yml content with variable placeholders -const dockerComposeContent = ` -version: '3' -services: - db: - image: mariadb:latest - environment: - MYSQL_ROOT_PASSWORD: ${MYSQL_ROOT_PASSWORD} - MYSQL_USER: ${MYSQL_USER} - MYSQL_PASSWORD: ${MYSQL_PASSWORD} - MYSQL_DATABASE: ${MYSQL_DATABASE} -`; - -// Generate templates with persistent variables -const result1 = translate(dockerComposeContent, { - source: 'compose', - target: 'CFN', - templateFormat: 'yaml', - environmentVariableGeneration: envConfig, - persistenceKey: 
'my-unique-key' // Use this to maintain consistent values -}); - -// Generate for another provider, reusing the same variables -const result2 = translate(dockerComposeContent, { - source: 'compose', - target: 'RND', - templateFormat: 'yaml', - environmentVariableGeneration: envConfig, - persistenceKey: 'my-unique-key' // Same key ensures same values are used -}); -``` - -## Variable Types - -### Password Type - -Generates secure random passwords: - -```json -{ - "type": "password", - "length": 16 // Optional, defaults to 16 -} -``` - -### String Type - -Generates random strings with specified patterns: - -```json -{ - "type": "string", - "length": 8, // Optional, defaults to 8 - "pattern": "lowercase" // Optional: "uppercase", "lowercase", or "normal" -} -``` - -### Number Type - -Generates random numbers within a specified range: - -```json -{ - "type": "number", - "min": 1000, // Optional, defaults to 1 - "max": 9999 // Optional, defaults to 1000000 -} -``` - -## Version Matching - -The system supports semantic version matching: - -- Version ranges: `">=11.0"`, `"<=10.5"` -- Exact versions: `"10.5"`, `"11.2"` -- Wildcard: `"*"` (matches any version) -- Latest: `"latest"` (matches latest version) - -Example with version-specific configurations: - -```json -{ - "library/mariadb": { - "versions": { - ">=11.0": { - "environment": { - "MYSQL_ROOT_PASSWORD": { - "type": "password", - "length": 16 - } - } - }, - "<=10.5": { - "environment": { - "MARIADB_ROOT_PASSWORD": { - "type": "password", - "length": 20 - } - } - } - } - } -} -``` - -## Persistence Key - -The `persistenceKey` parameter allows you to maintain consistent variable values across multiple template generations: - -- Use the same key when generating templates for different providers -- Generated values are cached and reused when the same key is provided -- Different keys will generate new sets of values -- Keys should be unique to your specific use case - -## Important Notes - -- Only variables using the 
`${VARIABLE_NAME}` syntax will be processed -- Variables without corresponding config entries retain their original values -- Generated passwords meet common security requirements (mixed case, numbers, special characters) -- String patterns affect the character set used in generation -- Version matching uses semantic versioning for comparison -- The `persistenceKey` should be unique to your specific deployment scenario - -## Limitations - -- Only supports public Docker Hub images currently -- Cannot generate values for build-time variables -- No support for complex variable dependencies -- Version matching requires valid semantic versions diff --git a/docs/docker-to-iac/environment-variable.mdx b/docs/docker-to-iac/environment-variable.mdx deleted file mode 100644 index 10ccf59..0000000 --- a/docs/docker-to-iac/environment-variable.mdx +++ /dev/null @@ -1,199 +0,0 @@ ---- -title: Environment Variables in docker-to-iac -description: Learn how to manage environment variables in docker-to-iac. Pass configuration values from .env files to your Infrastructure as Code templates and keep your sensitive data secure. -sidebar: Environment Variables ---- - -# Environment Variables - -The docker-to-iac module supports passing environment variables from `.env` files to your Infrastructure as Code templates. This feature allows you to manage configuration values separately from your Docker configurations and maintain consistency across deployments. - -## Overview - -When translating Docker configurations to Infrastructure as Code templates, you can provide environment variables that will be used to replace placeholders in your Docker configurations. 
This is particularly useful for: - -- Managing configuration values separately from Docker files -- Providing credentials and sensitive information -- Maintaining consistent values across different deployment environments - -## Usage - -### Reading Environment Variables - -The module provides a utility function `parseEnvFile` to read and parse `.env` files: - -```javascript -import { translate, parseEnvFile } from '@deploystack/docker-to-iac'; -import { readFileSync } from 'fs'; - -// Read and parse the .env file -const envContent = readFileSync('.env', 'utf-8'); -const envVariables = parseEnvFile(envContent); -``` - -### Using Environment Variables in Translation - -Pass the environment variables to the `translate` function using the `environmentVariables` option: - -```javascript -const result = translate(dockerConfig, { - source: 'run', // or 'compose' - target: 'RND', // or other supported targets - templateFormat: 'yaml', - environmentVariables: envVariables -}); -``` - -## Default Values - -The docker-to-iac module supports Docker's default value syntax for environment variables. This allows you to specify fallback values that are used when environment variables are not provided or are undefined. - -### Syntax - -Default values can be specified using the `${VARIABLE:-default}` syntax, where: - -- `VARIABLE` is the environment variable name -- `default` is the value used if `VARIABLE` is not set - -### Docker Compose Example - -```yaml -services: - db: - image: postgres:15-alpine - environment: - POSTGRES_USER: ${DB_USER:-defaultuser} - POSTGRES_PASSWORD: ${DB_PASSWORD:-secret123} - POSTGRES_DB: ${DB_NAME:-myapp} -``` - -### Docker Run Example - -```bash -docker run -d \ - --name db \ - -e POSTGRES_USER=${DB_USER:-defaultuser} \ - -e POSTGRES_PASSWORD=${DB_PASSWORD:-secret123} \ - -e POSTGRES_DB=${DB_NAME:-myapp} \ - postgres:15-alpine -``` - -### How Default Values Work - -The module processes default values in this order: - -1. 
If an environment variable is provided via `.env` file or `environmentVariables` option, that value is used -2. If no environment variable is found, the default value after `:-` is used -3. If neither exists, an empty string is used - -For example: - -```javascript -// With this .env file: -DB_USER=johndoe -// DB_PASSWORD is not set - -// And this docker-compose.yml: -environment: - POSTGRES_USER: ${DB_USER:-defaultuser} - POSTGRES_PASSWORD: ${DB_PASSWORD:-secret123} - -// The resolved values will be: -POSTGRES_USER: "johndoe" // From .env file -POSTGRES_PASSWORD: "secret123" // Default value used -``` - -Default values provide a way to: - -- Make your configurations more robust by handling missing variables -- Set sensible defaults for development environments -- Ensure required values always have a fallback - -## Complete Example - -Here's a complete example showing how to use environment variables with a MariaDB container: - -```javascript -import { translate, parseEnvFile } from '@deploystack/docker-to-iac'; -import { readFileSync } from 'fs'; - -// Read the .env file -const envContent = readFileSync('.env', 'utf-8'); -const envVariables = parseEnvFile(envContent); - -// Docker run command with environment variable placeholders -const dockerRunCommand = `docker run -d \ - --name mariadb \ - -e MYSQL_ROOT_PASSWORD=\${DB_PASSWORD} \ - -e MYSQL_USER=\${DB_USERNAME} \ - -e MYSQL_PASSWORD=\${DB_PASSWORD} \ - -e MYSQL_DATABASE=\${DB_DATABASE} \ - -v db:/var/lib/mysql \ - docker.io/library/mariadb:11.2`; - -// Translate with environment variables -const result = translate(dockerRunCommand, { - source: 'run', - target: 'RND', - templateFormat: 'yaml', - environmentVariables: envVariables -}); -``` - -### .env File Format - -Your `.env` file should follow standard environment file format: - -```bash -# .env -DB_USERNAME=myuser -DB_PASSWORD=mysecretpassword -DB_DATABASE=mydatabase -``` - -### Variable Substitution - -Environment variables are substituted using the 
`${VARIABLE_NAME}` syntax in your Docker configurations: - -```yaml -# In docker-compose.yml -services: - db: - image: mariadb:11.2 - environment: - MYSQL_ROOT_PASSWORD: ${DB_PASSWORD} - MYSQL_USER: ${DB_USERNAME} - MYSQL_PASSWORD: ${DB_PASSWORD} - MYSQL_DATABASE: ${DB_DATABASE} -``` - -## Working with Docker Compose - -The environment variables feature works with both Docker run commands and docker-compose.yml files: - -```javascript -// For docker-compose.yml -const dockerComposeContent = readFileSync('docker-compose.yml', 'utf-8'); - -const result = translate(dockerComposeContent, { - source: 'compose', - target: 'RND', - templateFormat: 'yaml', - environmentVariables: envVariables -}); -``` - -## Important Notes - -- Environment variables take precedence over default values in Docker configurations -- Missing environment variables will result in empty values in the output templates -- The module does not validate environment variable values -- Sensitive information should be handled securely -- Variable names are case-sensitive - -## Limitations - -- Only supports basic environment variable substitution -- No support for variable expansion or shell-style variable manipulation -- Cannot reference other environment variables within values -- No built-in encryption for sensitive values diff --git a/docs/docker-to-iac/example-of-a-new-parser.mdx b/docs/docker-to-iac/example-of-a-new-parser.mdx deleted file mode 100644 index 85242da..0000000 --- a/docs/docker-to-iac/example-of-a-new-parser.mdx +++ /dev/null @@ -1,383 +0,0 @@ ---- -title: Example of a New Parser -description: Example code for adding a new parser to docker-to-iac, supporting both Docker run commands and Docker Compose files, with multi-file output and service connections ---- - -# Adding a New Parser - -> [!TIP] -> Thank you for your interest in collaborating! The docker-to-iac module will remain open source forever, helping simplify deployments across cloud providers without vendor lock-in. 
- -## Parser Implementation - -Create a new file inside `src/parsers/new-provider.ts`: - -```typescript -import { - BaseParser, - ParserInfo, - TemplateFormat, - ParserConfig, - FileOutput, - DockerImageInfo -} from './base-parser'; -import { ApplicationConfig } from '../types/container-config'; -import { parsePort } from '../utils/parsePort'; -import { parseCommand } from '../utils/parseCommand'; - -// Define default configuration for your parser -const defaultParserConfig: ParserConfig = { - files: [ - { - path: 'awesome-iac.yaml', - templateFormat: TemplateFormat.yaml, - isMain: true, - description: 'Main IaC configuration file' - }, - { - path: 'templates/resources.yaml', - templateFormat: TemplateFormat.yaml, - description: 'Additional resources configuration' - } - ], - cpu: 512, - memory: '1GB', - region: 'default-region', - subscriptionName: 'basic-tier' -}; - -// Optional: Add helper functions for your specific provider -function getNewProviderServiceType(imageInfo: DockerImageInfo): string { - // Logic to determine service type based on image - // Example: Check if it's a database, web service, etc. 
- return 'web-service'; // Default type -} - -// Optional: Add function to determine if an image is a managed service -function isNewProviderManagedService(imageInfo: DockerImageInfo): boolean { - // Check if this image should be handled as a managed service - const imageUrl = `${imageInfo.repository}:${imageInfo.tag || 'latest'}`; - return imageUrl.includes('postgres') || imageUrl.includes('redis'); -} - -class NewProviderParser extends BaseParser { - // Multi-file implementation - required by BaseParser - parseFiles(config: ApplicationConfig): { [path: string]: FileOutput } { - // Initialize result containers - const services: Array = []; - const managedServices: Array = []; - - // Track service mappings for managed services - const managedServiceMap = new Map(); - - // First pass: identify and register managed services - for (const [serviceName, serviceConfig] of Object.entries(config.services)) { - if (isNewProviderManagedService(serviceConfig.image)) { - // Create a managed service instead of a regular service - const managedName = `${serviceName}-managed`; - - // Track the mapping for service connections later - managedServiceMap.set(serviceName, managedName); - - // Add to managed services collection - managedServices.push({ - name: managedName, - type: getNewProviderServiceType(serviceConfig.image), - // Add provider-specific managed service properties - plan: defaultParserConfig.subscriptionName - }); - - // Skip further processing of this service - continue; - } - - // Regular services will be processed in the second pass - } - - // Second pass: process regular services with their connections - for (const [serviceName, serviceConfig] of Object.entries(config.services)) { - // Skip managed services already processed - if (managedServiceMap.has(serviceName)) { - continue; - } - - // Extract ports from service configuration - const ports = new Set(); - if (serviceConfig.ports) { - serviceConfig.ports.forEach(port => { - if (typeof port === 'object' && port 
!== null) { - ports.add(port.container); - } else { - const parsedPort = parsePort(port); - if (parsedPort) { - ports.add(parsedPort); - } - } - }); - } - - // Prepare basic service definition - const service: any = { - name: serviceName, - type: getNewProviderServiceType(serviceConfig.image), - image: serviceConfig.image, - command: parseCommand(serviceConfig.command), - environment: [] - }; - - // Add ports if available - if (ports.size > 0) { - service.ports = Array.from(ports); - } - - // Process service connections if available - if (config.serviceConnections) { - // First add regular environment variables - for (const [key, value] of Object.entries(serviceConfig.environment)) { - // Check if this variable is handled by service connections - const isHandledByConnection = config.serviceConnections.some(conn => - conn.fromService === serviceName && - Object.keys(conn.variables).includes(key) - ); - - if (!isHandledByConnection) { - // Regular environment variable - service.environment.push({ - key, - value: value.toString() - }); - } - } - - // Then add service connection variables with provider-specific syntax - for (const connection of config.serviceConnections) { - if (connection.fromService === serviceName) { - for (const [varName, varInfo] of Object.entries(connection.variables)) { - // Check if target is a managed service - if (managedServiceMap.has(connection.toService)) { - const targetName = managedServiceMap.get(connection.toService); - - // Use provider-specific reference syntax - service.environment.push({ - key: varName, - // Example: ${resources.MANAGED_SERVICE_NAME.CONNECTION_STRING} - value: `\${resources.${targetName}.${connection.property || 'connectionString'}}` - }); - } else { - // Regular service connection - service.environment.push({ - key: varName, - // Example: ${services.SERVICE_NAME.HOST_PORT} - value: `\${services.${connection.toService}.${connection.property || 'hostport'}}` - }); - } - } - } - } - } else { - // No service 
connections, just add all environment variables - service.environment = Object.entries(serviceConfig.environment).map(([key, value]) => ({ - key, - value: value.toString() - })); - } - - // Add service to collection - services.push(service); - } - - // Create main configuration - const mainConfig = { - version: '1.0', - provider: 'new-provider', - region: defaultParserConfig.region, - services - }; - - // Create resources configuration if we have managed services - const resourcesConfig = managedServices.length > 0 ? { - version: '1.0', - managedResources: managedServices - } : {}; - - // Return file mappings - the main file is required - const result: { [path: string]: FileOutput } = { - 'awesome-iac.yaml': { - content: this.formatFileContent(mainConfig, TemplateFormat.yaml), - format: TemplateFormat.yaml, - isMain: true - } - }; - - // Add resources file if we have managed services - if (managedServices.length > 0) { - result['templates/resources.yaml'] = { - content: this.formatFileContent(resourcesConfig, TemplateFormat.yaml), - format: TemplateFormat.yaml - }; - } - - return result; - } - - getInfo(): ParserInfo { - return { - providerWebsite: "https://newprovider.example.com", - providerName: "New Provider Cloud", - providerNameAbbreviation: "NP", - languageOfficialDocs: "https://docs.newprovider.example.com/iac", - languageAbbreviation: "NP", - languageName: "New Provider IaC", - defaultParserConfig - }; - } -} - -export default new NewProviderParser(); -``` - -## Configuration and Provider-Specific Logic - -### Service Type Detection - -Create a file for service type configuration in `src/config/newprovider/service-types.ts`: - -```typescript -interface NewProviderServiceTypeConfig { - type: string; - description: string; - versions: string; - isManaged?: boolean; -} - -interface NewProviderServiceTypesConfig { - serviceTypes: { - [key: string]: NewProviderServiceTypeConfig; - }; -} - -export const newProviderServiceTypesConfig: 
NewProviderServiceTypesConfig = { - serviceTypes: { - 'docker.io/library/mariadb': { - type: 'database', - description: 'MariaDB database service', - versions: '*' - }, - 'docker.io/library/postgres': { - type: 'database', - description: 'PostgreSQL database', - versions: '*', - isManaged: true - }, - 'docker.io/library/redis': { - type: 'cache', - description: 'Redis cache', - versions: '*', - isManaged: true - } - } -}; - -export function getNewProviderServiceType(imageString: string): string { - const baseImage = imageString.split(':')[0]; - return newProviderServiceTypesConfig.serviceTypes[baseImage]?.type || 'web'; -} - -export function isNewProviderManagedService(imageString: string): boolean { - const baseImage = imageString.split(':')[0]; - return !!newProviderServiceTypesConfig.serviceTypes[baseImage]?.isManaged; -} - -export type { NewProviderServiceTypeConfig, NewProviderServiceTypesConfig }; -``` - -### Service Connection Properties - -Update the service connection properties in `src/config/connection-properties.ts`: - -```typescript -export const servicePropertyMappings: Record = { - 'host': { - render: 'host', - digitalOcean: 'PRIVATE_DOMAIN', - newProvider: 'HOST' // Add your provider mapping - }, - 'port': { - render: 'port', - digitalOcean: 'PRIVATE_PORT', - newProvider: 'PORT' // Add your provider mapping - }, - 'hostport': { - render: 'hostport', - digitalOcean: 'PRIVATE_URL', - newProvider: 'ENDPOINT' // Add your provider mapping - } -}; - -export const databasePropertyMappings: Record = { - 'connectionString': { - render: 'connectionString', - digitalOcean: 'DATABASE_URL', - newProvider: 'CONNECTION_STRING' // Add your provider mapping - }, - // Add other mappings... 
-}; -``` - -## Adding Your Parser to the System - -Update `src/index.ts` to include your new parser: - -```typescript -// Import your new parser -import newProviderParserInstance from './parsers/new-provider'; - -// Add it to the parsers array -const parsers: BaseParser[] = [ - cloudFormationParserInstance, - renderParserInstance, - digitalOceanParserInstance, - newProviderParserInstance // Add your parser here -]; -``` - -## Testing - -Please read our guidelines for testing parsers in the [Testing section](/docker-to-iac/testing). - -## New parser documentation - -Please update documentation in the [github.com/deploystackio/documentation](https://github.com/deploystackio/documentation) repository. - -## Checklist - -1. Support both input types: - - Docker run commands - - Docker Compose files - -2. Handle all service types: - - Regular web/application services - - Managed database services - - Cache services - -3. Handle resource mappings consistently: - - Container ports - - Environment variables - - Volume mounts - - Resource limits - - Service connections - -4. Process service connections correctly: - - Service-to-service references - - Service-to-managed-service references - - Use provider-specific connection syntax - -5. Provide clear error messages for: - - Unsupported features - - Invalid configurations - - Missing required fields - -6. 
Test edge cases: - - Multiple services with interdependencies - - Complex configurations with service connections - - Various image formats and service types diff --git a/docs/docker-to-iac/index.mdx b/docs/docker-to-iac/index.mdx deleted file mode 100644 index 2fea713..0000000 --- a/docs/docker-to-iac/index.mdx +++ /dev/null @@ -1,50 +0,0 @@ ---- -title: Docker to Infrastructure 2 -description: Introduction to the node module docker-to-iac which allows you to transfer docker-compose into IaC templates -sidebar: docker-to-iac ---- - -# Docker to Infrastructure as Code Module - -Docker to IaC is a Node.js module that translates the `docker-compose.yml` file into various types of Infrastructure as Code (IaC) templates. The goal of the module is to make GitHub repositories with docker-compose more easily accessible to various cloud providers such as Amazon Web Services (AWS), Google Cloud, DigitalOcean, and so on. - -- GitHub repository: [github.com/deploystackio/docker-to-iac](https://github.com/deploystackio/docker-to-iac) -- npm registry: [npmjs.com/package/@deploystack/docker-to-iac](https://www.npmjs.com/package/@deploystack/docker-to-iac) - -## Motivation - -The project's motivation comes from the fact that there are so many cloud providers on the free market that it is impossible to know all of them. - -How the project came about: you found an open source project that you want to deploy to your cloud provider, you found the `docker-compose.yml` file, and now you have to extract all the variables by hand or write the Infrastructure as Code template yourself because your company IT policy at work does not allow deployments without IaC. 😀 - -That's how it can work! That's why we want to simplify deployments and minimize vendor lock-in. - -The focus of this project, however, is on container applications with `docker-compose.yml` or applications that can be containerized. 
- -## Highlights - -- List of all available parsers from module -- Support for docker-compose multiple services -- Setup for default settings for each cloud provider (i.e. CPU, RAM) -- Docker Compose services variables supported: - - image, command, port, environment - -## How does it work? - -The principle of the translation is straightforward. You need a `docker-compose.yml` file and the desired cloud provider where you want to deploy your container. The docker-to-iac module translates `docker-compose.yml` into an IaC or one-click deploy template. - -After the successful translation, you can deploy your containers to your cloud provider. - -## Limitations - -- Only pre-build container possible - -Please read more at the [limitations page](/docker-to-iac/limitations) - -## Help wanted - -We would be very happy if you could help us to extend the docker-to-iac module to include additional cloud providers (parsers). All open source repositories listed on our [deploystack.io](https://deploystack.io) website would benefit from this. - -If the docker-to-iac module is extended with another parser, our backend automatically creates an update for the repository [github.com/deploystackio/deploy-templates](https://github.com/deploystackio/deploy-templates). Baiscally: if you add a new parser for the provider "foo-cloud" that has its own IaC language, or one-click deployment supported, all open source projects listed on deploystack.io will be extended with the IaC template for cloud provider "foo-cloud". - -Thank you! 
diff --git a/docs/docker-to-iac/limitations.mdx b/docs/docker-to-iac/limitations.mdx deleted file mode 100644 index 8072dea..0000000 --- a/docs/docker-to-iac/limitations.mdx +++ /dev/null @@ -1,89 +0,0 @@ ---- -title: Limitations -description: Current limitations and constraints of the docker-to-iac module ---- - -# Limitations for docker-to-iac module - -## Registry Support - -The module currently supports Docker images from -> please check [Supported Registries for docker-to-iac module](/docker-to-iac/supported-registries) - -## Docker Image Requirement - -The `docker-to-iac` module is designed to work exclusively with pre-built Docker images. This means that each service in your `docker-compose.yml` file must specify an `image` property. - -## Volume Support - -When working with volume mappings in your Docker configuration, be aware that volume support varies among cloud providers: - -- Some providers fully support multiple volume mappings -- Some providers only support the first volume mapping defined in your configuration -- Some providers support ephemeral files only, meaning no persistent volume storage is available -- Volume mapping implementation details can differ between providers - -Please check the specific provider's documentation to understand their volume mapping capabilities and limitations before deployment. - -For example, if your Docker configuration includes multiple volumes: - -```yaml -services: - app: - image: nginx:latest - volumes: - - ./config:/etc/nginx/conf.d - - ./logs:/var/log/nginx - - ./data:/usr/share/nginx/html -``` - -Depending on your chosen provider: - -- All volume mappings might be supported -- Only the first volume mapping (`./config:/etc/nginx/conf.d`) might be implemented -- No volumes might be supported, with only ephemeral storage available - -We recommend reviewing your target provider's documentation for detailed information about their volume support capabilities. 
- -### Build Instructions Not Supported - -The module does not support services that use the `build` directive. For example: - -```yaml [docker-compose.yml] -# ❌ Not Supported -services: - app: - build: - context: ./build/app - dockerfile: Dockerfile -``` - -Instead, you must use pre-built images: - -```yaml [docker-compose.yml] -# ✅ Supported -services: - app: - image: nginx:latest -``` - -#### Rationale - -This limitation exists because Infrastructure as Code (IaC) templates require specific, immutable container images to ensure consistent deployments. The infrastructure and the selection of cloud providers for this docker-to-iac module only allow pre-build container images. It is technically not possible to create a build with the preconfigured infrastructure. This is why the pre-build check was built in. This happens also because the scope of this module is only pre-build container. - -### Workaround - -If you need to use custom Docker images: - -Build your Docker images locally or in your CI/CD pipeline -Push them to a container registry (like Docker Hub, GitHub Container Registry, or AWS ECR) -Reference the pushed image in your docker-compose file using the image property - -For example: - -```yaml -services: - app: - image: ghcr.io/your-org/your-app:1.0.0 -``` - -This ensures that your IaC templates will have access to the exact same container image across all deployments. 
diff --git a/docs/docker-to-iac/meta.json b/docs/docker-to-iac/meta.json deleted file mode 100644 index 69fa5dd..0000000 --- a/docs/docker-to-iac/meta.json +++ /dev/null @@ -1,34 +0,0 @@ -{ - "title": "docker-to-iac", - "description": "docker-to-iac module", - "icon": "Container", - "root": true, - "pages": [ - "!index.mdx", - "quickstart.mdx", - "---General---", - "supported-docker-compose-variables.mdx", - "supported-registries.mdx", - "limitations.mdx", - "api.mdx", - "parser-explanation.mdx", - "environment-variable.mdx", - "environment-variable-generation.mdx", - "multi-services-support.mdx", - "multi-file-configuration.mdx", - "service-connections.mdx", - "---Available Parsers---", - "parser/digitalocean.mdx", - "parser/render.com.mdx", - "parser/helm.mdx", - "parser/aws-cloudformation.mdx", - "---Contributing---", - "before-you-start.mdx", - "project-structure.mdx", - "available-commands.mdx", - "example-of-a-new-parser.mdx", - "testing.mdx", - "publishing-to-npm.mdx", - "render-contributing-to-service-types.mdx" - ] -} diff --git a/docs/docker-to-iac/multi-file-configuration.mdx b/docs/docker-to-iac/multi-file-configuration.mdx deleted file mode 100644 index 2d1d6ab..0000000 --- a/docs/docker-to-iac/multi-file-configuration.mdx +++ /dev/null @@ -1,192 +0,0 @@ ---- -title: Multi-File Configuration in docker-to-iac -description: Learn how docker-to-iac supports complex Infrastructure as Code templates with multiple interconnected files, including Helm Charts and other multi-file IaC formats. -sidebar: Multi-File Configuration ---- - -# Multi-File Configuration in docker-to-iac - -## Introduction to Multi-File Support - -Starting with version 1.20.0, [docker-to-iac](https://github.com/deploystackio/docker-to-iac) supports generating multiple interconnected files for more complex Infrastructure as Code (IaC) templates. 
This feature was introduced primarily to support Helm Charts and other sophisticated IaC formats that require multiple files with specific directory structures. - -## Why Multi-File Templates? - -Modern IaC solutions often require multiple files that work together: - -- **Helm Charts** need Chart.yaml, values.yaml, and template files -- **Terraform modules** use main.tf, variables.tf, outputs.tf, and more -- **Kubernetes manifests** are typically split into multiple YAML files -- **Multi-tier applications** may need separate configurations for each tier - -These complex deployments would be difficult or impossible to represent in a single file, which led to the introduction of multi-file support. - -## The Main File Concept - -Each parser must designate one file as the "main" file using the `isMain: true` property. This maintains backward compatibility with existing code and provides a clear entry point for deployment tools. - -```typescript -parseFiles(config: ApplicationConfig): { [path: string]: FileOutput } { - return { - 'Chart.yaml': { - content: this.formatFileContent(chartConfig, TemplateFormat.yaml), - format: TemplateFormat.yaml, - isMain: true // This is the main file - }, - 'values.yaml': { - content: this.formatFileContent(valuesConfig, TemplateFormat.yaml), - format: TemplateFormat.yaml - } - }; -} -``` - -When a parser is invoked through the legacy `parse()` method, only the content of the main file is returned. However, when using the `parseFiles()` method, all files are included in the response. - -## Example: Helm Chart Structure - -Helm Charts are a perfect example of why multi-file support is needed. 
A basic Helm Chart requires at least the following files: - -```bash -mychart/ -├── Chart.yaml # Chart metadata -├── values.yaml # Default configuration values -└── templates/ - ├── deployment.yaml # Kubernetes Deployment - ├── service.yaml # Kubernetes Service - └── _helpers.tpl # Template helpers -``` - -The docker-to-iac module now includes a Helm parser that generates this exact structure when translating Docker configurations to Helm Charts: - -```javascript -const result = translate(dockerComposeContent, { - source: 'compose', - target: 'HELM', - templateFormat: 'yaml' -}); - -// Result contains all files needed for a complete Helm Chart -console.log(Object.keys(result.files)); -// [ -// 'Chart.yaml', -// 'values.yaml', -// 'templates/deployment.yaml', -// 'templates/service.yaml', -// 'templates/configmap.yaml', -// 'templates/secret.yaml', -// 'templates/NOTES.txt', -// 'templates/_helpers.tpl' -// ] -``` - -With the multi-file support, a Helm Chart parser configuration might look like: - -```typescript -const defaultParserConfig: ParserConfig = { - files: [ - { - path: 'Chart.yaml', - templateFormat: TemplateFormat.yaml, - isMain: true, - description: 'Chart metadata file' - }, - { - path: 'values.yaml', - templateFormat: TemplateFormat.yaml, - description: 'Default configuration values' - }, - { - path: 'templates/deployment.yaml', - templateFormat: TemplateFormat.yaml, - description: 'Kubernetes Deployment template' - }, - { - path: 'templates/service.yaml', - templateFormat: TemplateFormat.yaml, - description: 'Kubernetes Service template' - }, - { - path: 'templates/_helpers.tpl', - templateFormat: TemplateFormat.text, - description: 'Template helper functions' - } - ] -}; -``` - -## Implementation Details - -### File Structure - -Each parser must implement the `parseFiles` method, which returns an object mapping file paths to their content: - -```typescript -interface FileOutput { - content: string; // File content as a string - format: 
TemplateFormat; // Format (yaml, json, text) - isMain?: boolean; // Whether this is the main file -} - -parseFiles(config: ApplicationConfig): { [path: string]: FileOutput }; -``` - -### Directory Support - -The file paths can include directories, which will be created automatically when the templates are saved: - -```typescript -return { - 'templates/deployment.yaml': { - content: deploymentContent, - format: TemplateFormat.yaml - }, - 'templates/ingress.yaml': { - content: ingressContent, - format: TemplateFormat.yaml - } -}; -``` - -### Content Formatting - -The `formatFileContent` helper method ensures that content is properly formatted according to the specified template format. - -## Backward Compatibility - -To maintain backward compatibility, the `BaseParser` class implements a default `parse` method that: - -1. Calls the new `parseFiles` method -2. Finds the file marked with `isMain: true` -3. Returns only that file's content - -```typescript -parse(config: ApplicationConfig, templateFormat?: TemplateFormat): any { - const files = this.parseFiles(config); - const mainFile = Object.values(files).find(file => file.isMain); - - if (!mainFile) { - throw new Error('No main file defined in parser output'); - } - - return typeof mainFile.content === 'string' - ? mainFile.content - : formatResponse(JSON.stringify(mainFile.content, null, 2), templateFormat || mainFile.format); -} -``` - -This ensures that existing code that calls `parse()` will continue to work as expected. - -## Best Practices - -When implementing multi-file parsers: - -1. **Always mark one file as main**: Designate exactly one file as `isMain: true` to maintain backward compatibility. - -2. **Use consistent directory structure**: Follow the conventions of your target IaC format (e.g., Helm Chart layout). - -3. **Use appropriate formats**: Choose the right format for each file (YAML for Kubernetes manifests, text for template helpers, etc.). - -4. 
**Include descriptive comments**: Add descriptions to help users understand the purpose of each file. - -5. **Handle file dependencies**: Ensure that files reference each other correctly using relative paths. diff --git a/docs/docker-to-iac/multi-services-support.mdx b/docs/docker-to-iac/multi-services-support.mdx deleted file mode 100644 index 209a897..0000000 --- a/docs/docker-to-iac/multi-services-support.mdx +++ /dev/null @@ -1,78 +0,0 @@ ---- -title: Multi-Service Support -description: Learn about multi-service deployment support - See how docker-to-iac handles multiple services in your container configurations. ---- - -# Multi-Service Support - -Multi-service support refers to the ability of a [parser](/docker-to-iac/parser-explanation) to handle multiple container configurations when translating to Infrastructure as Code (IaC) templates. - -## Docker Run vs Docker Compose - -### Docker Run Commands - -By nature, Docker run commands define a single container. When you have multiple Docker run commands, each represents a separate service: - -```bash -# Service 1 -docker run -d -p 8080:80 nginx:alpine - -# Service 2 -docker run -d -p 6379:6379 redis:latest -``` - -### Docker Compose - -Docker Compose files can define multiple services within a single file: - -```yaml title="docker-compose.yml" -version: '3.2' - -services: - web: - image: nginx:alpine - ports: - - '8080:80' - - cache: - image: redis:latest - ports: - - '6379:6379' -``` - -## Parser Support for Multiple Services - -The ability to deploy multiple services simultaneously varies by cloud provider: - -### Full Multi-Service Support - -Some cloud providers can deploy multiple containers as part of a single deployment. 
In these cases, docker-to-iac will translate all services to the target IaC template: - -```javascript -// All services will be included in the translation -const translation = translate(dockerComposeContent, { - source: 'compose', - target: 'CFN' // AWS CloudFormation supports multiple services -}); -``` - -### Limited Service Support - -Some providers don't support deploying multiple containers simultaneously. For these providers: - -- For Docker Compose input: Only the first service from the file will be translated -- For Docker run commands: Each command must be translated separately - -```javascript -// Only the first service will be translated -const translation = translate(dockerComposeContent, { - source: 'compose', - target: 'RND' // Render.com currently supports single service deployments -}); -``` - -## Provider-Specific Behavior - -Before using a specific parser, check its multi-service capabilities in the [parser documentation](/docker-to-iac/parser-explanation). This helps ensure your deployment strategy aligns with the provider's capabilities. - -Note that some providers may have different service limits or deployment patterns even when they support multiple services. Always consult the target provider's documentation for specific limitations. diff --git a/docs/docker-to-iac/parser-explanation.mdx b/docs/docker-to-iac/parser-explanation.mdx deleted file mode 100644 index c97a29c..0000000 --- a/docs/docker-to-iac/parser-explanation.mdx +++ /dev/null @@ -1,61 +0,0 @@ ---- -title: Parser Explanation -description: Understand how parsers translate Docker run commands and Docker Compose files into cloud-specific Infrastructure as Code templates. Learn about provider configurations and language support. ---- - -# Parser Explanation in docker-to-iac - -A parser in docker-to-iac translates Docker configurations (either Docker run commands or docker-compose.yml files) into Infrastructure as Code (IaC) or One-Click Deploy templates. 
Each parser is designed to target a specific IaC language or cloud provider template format. - -## Input Types - -docker-to-iac can process two types of input: - -### Docker Run Commands - -```bash -docker run -d -p 8080:80 -e NODE_ENV=production nginx:latest -``` - -### Docker Compose Files - -```yaml -version: '3' -services: - web: - image: nginx:latest - ports: - - "8080:80" - environment: - NODE_ENV: production -``` - -## API - -For detailed API documentation, see the [parser API reference](/docker-to-iac/api). - -## Default Parser Config - -Each parser includes default configurations specific to its target cloud provider. These defaults are necessary because providers have different compute specifications and limitations. - -Example: AWS Fargate has a minimum CPU allocation of 256, while DigitalOcean's [minimum setting is 1 vCPU](https://www.digitalocean.com/pricing/app-platform). The default parser config handles these provider-specific requirements. - -To retrieve default parser configurations through the API, see the [parser info documentation](/docker-to-iac/api#get-parser-info). - -## Parser vs. Language - -The [ParserInfo type](https://github.com/deploystackio/docker-to-iac/blob/main/src/parsers/base-parser.ts) separates variables between `Provider` and `Language`. This separation exists because some cloud providers support multiple IaC languages. - -For example, AWS infrastructure can be defined using: - -- CloudFormation -- AWS CDK (for TypeScript, Python, etc.) -- Terraform - -When adding new parsers, consider whether multiple IaC languages are possible for your target provider. This affects how you name your parser file in `src/parsers/.ts`. It's why the [`translate()`](/docker-to-iac/api#translate-api) method requires the target IaC language name (e.g., `CFN`) rather than the provider name (e.g., `AWS`). - -## Parser Implementation Notes - -Creating parsers for multi-cloud IaC tools like Terraform presents additional challenges. 
Terraform's [extensive provider ecosystem](https://registry.terraform.io/browse/providers) means a Terraform parser would need complex logic to handle various provider-specific implementations, making maintenance more difficult. - -In contrast, single-provider languages like AWS CloudFormation have a one-to-one relationship with their cloud provider, simplifying parser implementation and maintenance. diff --git a/docs/docker-to-iac/parser/aws-cloudformation.mdx b/docs/docker-to-iac/parser/aws-cloudformation.mdx deleted file mode 100644 index 408e380..0000000 --- a/docs/docker-to-iac/parser/aws-cloudformation.mdx +++ /dev/null @@ -1,97 +0,0 @@ ---- -title: AWS CloudFormation Parser -description: Translate docker docker-compose.yml file into AWS Cloud Formation with DeployStack ---- - -# AWS CloudFormation - Parser Full Documentation - -The parser for CloudFormation translates the `docker-compose.yml` file into CloudFormation. The parser logic can be found in GitHub inside [docker-to-iac repo](https://github.com/deploystackio/docker-to-iac/blob/main/src/parsers/aws-cloudformation.ts). - -## Parser language abbreviation for API - -- `languageAbbreviation`: `CFN`. - -## Prerequisite to deploy CloudFormation Template - -To deploy the CloudFormation template in your AWS account, you need a VPC with internet access. It should also be possible to create ENI ([AWS Elastic Network Interface](https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-eni.html)) with public IP. The template uses __AWS Fargate__ without an Application Load Balancer to save costs. - -If you have the [default VPC](https://docs.aws.amazon.com/vpc/latest/userguide/default-vpc.html) in your AWS account that should be sufficient. - -## Architecture - -The architecture deploys an ECS service into a serverless [AWS Fargate](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/AWS_Fargate.html) cluster. An ECS service = service from `docker-compose.yml`. 
This means if you have two services in your docker-compose file, you will end up deploying two ECS services into your Fargate cluster. - -![AWS Architecture](../../assets/images/docker-to-iac/aws-fargate.drawio.png) - -The tasks within ECS services create an ENI that has a public IP address. Since we do not use an ALB ([Application Load Balancer](https://docs.aws.amazon.com/elasticloadbalancing/latest/application/introduction.html)), you can only access the tasks via the port and the public IP address. - -When creating CloudFormation template, we decided not to use ALB to save costs. You can of course modify the CloudFormation template and add your ALB if needed. - -## Security Configuration - -### Container Security Groups - -For development and testing purposes, the template configures security groups with open TCP ports (0-65535). This configuration enables easy testing but is not recommended for production use. If you plan to use this template in production, modify the security group rules to restrict access to specific ports. - -```yaml -SecurityGroupIngress: - - IpProtocol: tcp - FromPort: 0 - ToPort: 65535 - CidrIp: 0.0.0.0/0 -``` - -### Container Root Filesystem - -To enable writes to ephemeral ECS storage, containers are configured with: - -```yaml -ReadonlyRootFilesystem: false -``` - -## Default output format - -- The default output format for this parser: `YAML`. - -## File Configuration - -The AWS CloudFormation parser generates a single consolidated template: - -- `aws-cloudformation.cf.yml` - The comprehensive CloudFormation template that defines all resources including ECS clusters, services, tasks, security groups, and IAM roles - -This single-file approach encapsulates the entire infrastructure definition in YAML format, making it ready for immediate deployment through the AWS CloudFormation console, CLI, or other AWS deployment tools. 
- -## Supported Docker Compose Variables - -The current version supports the following Docker Compose variables: - -For __services__: - -- image -- environment -- ports -- command - - -The supported variables that are not on this list are ignored. This means that they are not translated by the parser in Infrastructure as Code from `docker-compose.yml` or docker run command. - - -## Storage Support - -The current implementation uses ephemeral storage provided by AWS Fargate. Persistent storage solutions like EFS (Elastic File System) or EBS (Elastic Block Store) are not automatically configured due to complexity with multiple mount points and automated deployment requirements. - -For applications requiring persistent storage, consider: - -- Using external storage services (e.g., Amazon RDS for databases) -- Manually configuring EBS volumes -- Implementing a custom storage solution - -## Multi Services Support - -Multi `services` support for CloudFormation: __yes__ - -Please read more about [multi service support here](/docker-to-iac/multi-services-support). - - -This CloudFormation template is designed for development and testing environments. For production deployments, review and adjust security groups, storage configuration, and other security settings according to your requirements. - diff --git a/docs/docker-to-iac/parser/digitalocean.mdx b/docs/docker-to-iac/parser/digitalocean.mdx deleted file mode 100644 index c5b9eaf..0000000 --- a/docs/docker-to-iac/parser/digitalocean.mdx +++ /dev/null @@ -1,189 +0,0 @@ ---- -title: DigitalOcean Parser -description: Translate docker docker-compose.yml file into DigitalOcean Infrastructure as Code with DeployStack ---- - -# DigitalOcean - Parser Full Documentation - -The parser for DigitalOcean translates the `docker-compose.yml` file into a DigitalOcean [App Spec](https://docs.digitalocean.com/products/app-platform/) template. 
The parser logic can be found in GitHub inside the [docker-to-iac repo](https://github.com/deploystackio/docker-to-iac/blob/main/src/parsers/digitalocean.ts). - -## Parser language abbreviation for API - -- `languageAbbreviation`: `DOP`. - -## Prerequisite to deploy DigitalOcean App Spec - -To use the DigitalOcean App Spec, you need a valid DigitalOcean account with access to the App Platform and sufficient credits. - -## Architecture - -The DigitalOcean App Spec will deploy your application entirely within App Platform using containerized services: - -### App Platform Services - -Services in your App Platform deployment fall into two categories: - -#### HTTP Services - -- Web-facing containers that serve HTTP traffic -- Automatically configured with HTTPS routing: - - First service gets the root path `/` - - Additional services receive paths based on their names, e.g., `/servicename` -- Ideal for web applications, APIs, and frontend services - -#### TCP Services - -- Database containers (MySQL, PostgreSQL, Redis, etc.) run as internal TCP services -- Configured with appropriate health checks and internal ports -- No external HTTP routing - only accessible by other services within the app -- Suitable for databases, caches, and message queues - -### Important Note About Databases - -While DigitalOcean offers managed database services, these cannot be automatically provisioned through one-click deployment. Instead, database containers (like MySQL, PostgreSQL, Redis) are deployed as TCP services within App Platform, allowing: - -- Immediate deployment without pre-existing infrastructure -- Internal communication between application components -- Simplified configuration for development and testing - -For production use cases where you need managed databases, you should: - -1. Manually create managed databases in your DigitalOcean account -2. 
Update the application configuration to use these managed instances - -After deployment, all services can be monitored and managed through your DigitalOcean App Platform dashboard. - -## Default output format - -- The default output format for this parser: `YAML`. - -## File Configuration - -The DigitalOcean parser generates a structured output with a specific file organization: - -- `.do/deploy.template.yaml` - The main App Platform specification file that defines all services, environment variables, and configuration options for deployment - -This single-file structure follows DigitalOcean's App Platform requirements, where all deployment configurations are contained within the standard location expected by the DigitalOcean CLI and deployment tools. - -## Supported Docker Compose Variables - -This parser supports the following Docker Compose variables for services: - -- image -- environment -- ports -- command - - -Supported variables not listed above will be ignored. They will not be translated into the Infrastructure as Code from `docker-compose.yml` or docker run command. - - -## Database Support - -DigitalOcean App Platform supports running database containers as internal TCP services. The parser automatically configures these services with appropriate health checks and port settings to ensure proper communication within your application. - -### Supported Databases - -The parser recognizes and configures the following database types: - -- MySQL/MariaDB (port 3306) -- PostgreSQL (port 5432) -- Redis (port 6379) -- MongoDB (port 27017) - -### Configuration Details - -Database service configurations are defined in `src/config/digitalocean/database-types.ts`. This configuration maps Docker images to their corresponding TCP port and health check settings. - -To add or modify database configurations: - -1. Locate the `database-types.ts` file -2. Edit the `digitalOceanDatabaseConfig` object -3. 
Define the mapping using this structure: - -```typescript -'docker.io/library/mariadb': { - engine: 'MYSQL', - description: 'MariaDB database service - maps to MySQL managed database due to compatibility', - portNumber: 3306 -} -``` - -### Example Transformation - -Original docker-compose.yml: - -```yaml -services: - db: - image: mariadb:11.2 - environment: - MYSQL_DATABASE: myapp - app: - image: nginx:alpine - ports: - - "80:80" -``` - -Generated App Spec: - -```yaml -spec: - services: - - name: db - image: - registry_type: DOCKER_HUB - registry: library - repository: mariadb - tag: "11.2" - health_check: - port: 3306 - internal_ports: - - 3306 - - name: app - image: - registry_type: DOCKER_HUB - registry: library - repository: nginx - tag: alpine - http_port: 80 - routes: - - path: / -``` - - -While running databases as App Platform services works well for development and testing, for production workloads consider using DigitalOcean's managed database offerings for better reliability and maintenance. - - -### Understanding TCP Services - -When a database image is detected, the parser: - -1. Configures the service without HTTP routing -2. Sets up appropriate internal ports for database communication -3. Adds health checks on the database's standard port -4. Ensures the service can communicate with other containers in your app - -This approach allows immediate deployment while maintaining proper isolation and communication between your application components. - -## Volume Support - -DigitalOcean App Platform supports ephemeral files only. 
This means: - -- No persistent volume storage is available -- Local filesystem is limited to 2GB -- Files are temporary and will be deleted after deployments or container replacements -- Each container instance has its own separate filesystem -- Changes to the filesystem are lost when instances are scaled or redeployed - - -Any `volumes` directives in your docker-compose.yml or docker run command will be ignored during the translation to App Platform specifications. - - -## Multi Services Support - -Multi `services` support for DigitalOcean: __yes__ - -DigitalOcean supports multiple services in a single App Spec file. - -Please read more about [multi service support here](/docker-to-iac/multi-services-support). diff --git a/docs/docker-to-iac/parser/helm.mdx b/docs/docker-to-iac/parser/helm.mdx deleted file mode 100644 index b61ff17..0000000 --- a/docs/docker-to-iac/parser/helm.mdx +++ /dev/null @@ -1,211 +0,0 @@ ---- -title: Helm Parser Documentation -description: Translate Docker Compose files into Kubernetes Helm Charts with DeployStack docker-to-iac module -sidebar: Helm Parser ---- - -# Helm - Parser Full Documentation - -The parser for Helm translates Docker configurations into Kubernetes Helm Charts. The parser logic can be found in GitHub inside the [docker-to-iac repo](https://github.com/deploystackio/docker-to-iac/blob/main/src/parsers/helm.ts). - -## Parser language abbreviation for API - -- `languageAbbreviation`: `HELM`. 
- -## Prerequisite to deploy Helm Charts - -To deploy the generated Helm Charts, you need: - -- A Kubernetes cluster (local or cloud-based) -- Helm CLI installed (version 3.x recommended) -- Appropriate RBAC permissions to deploy resources in your target namespace - -### Kubernetes Resources - -The generated Helm Chart creates the following Kubernetes resources for each service in your Docker configuration: - -- **Deployments**: Container specifications, replica count, resource limits -- **Services**: Network access to your pods with appropriate ports -- **ConfigMaps**: Non-sensitive environment variables -- **Secrets**: Sensitive environment variables (passwords, tokens, etc.) - -### Database Support - -For database services, the parser leverages Helm's dependency management to incorporate official Bitnami charts: - -- **MySQL/MariaDB**: Uses Bitnami's MySQL/MariaDB chart -- **PostgreSQL**: Uses Bitnami's PostgreSQL chart -- **Redis**: Uses Bitnami's Redis chart -- **MongoDB**: Uses Bitnami's MongoDB chart - -Each database dependency is configured with appropriate defaults and includes persistent storage for data. - -## Default output format - -- The default output format for this parser: `YAML`. 
- -## File Configuration - -The Helm parser generates a complete Helm Chart directory structure: - -- `Chart.yaml` - The main chart definition with metadata and dependencies -- `values.yaml` - Configuration values that can be customized at deployment time -- `templates/` - Directory containing Kubernetes YAML templates: - - `deployment.yaml` - Deployment specifications for each service - - `service.yaml` - Service definitions for network access - - `configmap.yaml` - ConfigMap for non-sensitive environment variables - - `secret.yaml` - Secret for sensitive environment variables - - `_helpers.tpl` - Helper functions for template generation - - `NOTES.txt` - Usage instructions displayed after installation - -This multi-file approach follows the standard Helm Chart structure and allows for maximum flexibility when deploying to Kubernetes. - -## Supported Docker Compose Variables - -This parser supports the following Docker Compose variables: - -- `image` -- `environment` -- `ports` -- `command` -- `volumes` - - -The parser automatically detects sensitive environment variables (containing keywords like "password", "secret", "key", "token", or "auth") and places them in Kubernetes Secrets instead of ConfigMaps. - - -## Volume Support - -The parser supports Docker volume mappings by converting them to Kubernetes volume mounts: - -- Each volume is converted to a hostPath volume by default -- Volume names are sanitized to conform to Kubernetes naming conventions -- For production use, you should modify the generated templates to use more appropriate volume types (PersistentVolumeClaims, etc.) - -## Database Integration - -When a database service is detected (MySQL, PostgreSQL, Redis, MongoDB), the parser: - -1. Adds the corresponding Bitnami Helm chart as a dependency in `Chart.yaml` -2. Configures database settings in `values.yaml` -3. Maps environment variables to the expected format for the database chart -4. 
Sets up appropriate persistence configurations - -### Example Database Configuration - -For a PostgreSQL database in your Docker Compose file: - -```yaml -services: - db: - image: postgres:13 - environment: - POSTGRES_USER: myuser - POSTGRES_PASSWORD: mypassword - POSTGRES_DB: myapp -``` - -The parser will create: - -```yaml -# In Chart.yaml -dependencies: - - name: db - repository: https://charts.bitnami.com/bitnami - version: ^12.0.0 - condition: dependencies.db.enabled - -# In values.yaml -dependencies: - db: - enabled: true - auth: - postgres: - password: mypassword - database: myapp - username: myuser - password: mypassword - primary: - service: - ports: - postgresql: 5432 - persistence: - enabled: true - size: 8Gi -``` - -## Service Connections - -The parser supports service-to-service connections by leveraging Kubernetes DNS for service discovery. When a service refers to another service in an environment variable, the parser automatically configures the appropriate DNS references. - -For example, if your `app` service connects to a `db` service: - -```yaml -# Docker Compose -services: - app: - image: myapp - environment: - DATABASE_URL: postgresql://postgres:password@db:5432/mydb - - db: - image: postgres - environment: - POSTGRES_PASSWORD: password - POSTGRES_DB: mydb -``` - -The parser will create: - -```yaml -# In ConfigMap template -data: - DATABASE_URL: {{ include "deploystack.serviceReference" (dict "service" (index $.Values.services "db") "serviceKey" "db") }} -``` - -Which resolves to the Kubernetes DNS name: `db.{{ .Release.Namespace }}.svc.cluster.local:5432` - -## Multi Services Support - -Multi `services` support for Helm: **yes** - -Helm Charts are designed to handle multiple services and dependencies in a single deployment, making them ideal for complex applications. The parser transforms all services from your Docker Compose file into corresponding Kubernetes resources. 
- -Please read more about [multi service support here](/docker-to-iac/multi-services-support). - -## Deployment Instructions - -To deploy the generated Helm Chart: - -1. Navigate to the directory containing the generated chart -2. Install dependencies: - - ```bash - helm dependency update - ``` - -3. Install the chart: - - ```bash - helm install my-release . - ``` - -4. For custom configurations: - - ```bash - helm install my-release . --set services.app.replicaCount=2 - ``` - -## Production Considerations - -For production deployments, consider the following modifications to the generated chart: - -1. Replace hostPath volumes with appropriate persistent volume claims -2. Adjust resource limits in `values.yaml` -3. Configure proper ingress settings for external access -4. Enable and configure horizontal pod autoscaling -5. Set up proper liveness and readiness probes - - -The generated Helm Chart is a starting point that you should review and customize to match your production requirements and security best practices. - diff --git a/docs/docker-to-iac/parser/index.mdx b/docs/docker-to-iac/parser/index.mdx deleted file mode 100644 index f00e214..0000000 --- a/docs/docker-to-iac/parser/index.mdx +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Available parsers -description: View all available parsers in docker-to-iac for converting Docker Compose to cloud infrastructure templates. Supports major cloud providers. 
-menuTitle: Available parser ---- - -# Available parser list for module docker-to-iac - -Here you can find the list of available [parsers](/docker-to-iac/parser-explanation): - -- [AWS CloudFormation](/docker-to-iac/parser/aws-cloudformation) -- [Render.com](/docker-to-iac/parser/render.com) -- [DigitalOcean](/docker-to-iac/parser/digitalocean) -- [Helm (Kubernetes)](/docker-to-iac/parser/helm) diff --git a/docs/docker-to-iac/parser/render.com.mdx b/docs/docker-to-iac/parser/render.com.mdx deleted file mode 100644 index ecd67b9..0000000 --- a/docs/docker-to-iac/parser/render.com.mdx +++ /dev/null @@ -1,134 +0,0 @@ ---- -title: Render.com - Parser Full Documentation -description: Translate docker docker-compose.yml file into Render.com Infrastructure as Code with DeployStack -sidebar: Render.com Parser ---- - -# Render.com - Parser Full Documentation - -The parser for Render.com translates the `docker-compose.yml` file into Render [BluePrint](https://render.com/docs/infrastructure-as-code). The parser logic can be found in GitHub inside [docker-to-iac repo](https://github.com/deploystackio/docker-to-iac/blob/main/src/parsers/render.ts). - -## Parser language abbreviation for API - -- `languageAbbreviation`: `RND`. - -## Prerequisite to deploy Render BluePrint - -There are no special requirements for using the Render.com blueprint. However, you need a valid render.com account with sufficient credits. - -## Architecture - -The BluePrint will create a render "web" service. - -Type = "[Web Service](https://render.com/docs/blueprint-spec#type)". - -Render dashboard will list all your web services. At the top, you can switch between Dashboard and BluePrints. - -![Render BluePrints](../../assets/images/docker-to-iac/render.com-dashboard-blueprints.png) - -After the BluePrint has been created through one-click deployment, the BluePrint will be visible in the BluePrint menu. - -In contrast to other cloud providers, Render.com's usability is very trivial. 
There is no VPC / VNet or anything else. After successful deployment, you can open your service via a URL. - -## Default output format - -- The default output format for this parser: `YAML`. - -## File Configuration - -The Render.com parser generates a single file output: - -- `render.yaml` - The main Blueprint configuration file that defines all services, environment variables, and disk configurations - -This straightforward single-file approach aligns with Render's Blueprint specification, which requires all service definitions to be contained within a single YAML file. The file is structured according to Render's requirements with services, environment variables, and disk configurations properly organized for immediate deployment. - -## Supported Docker Compose Variables - -The current version supports the following Docker Compose variables: - -For __services__: - -- image -- environment -- ports -- command - - -The supported variables that are not on this list are ignored. This means that they are not translated by the parser in Infrastructure as Code from `docker-compose.yml` or docker run command. 
- - -## Volume Support - -Render.com offers two types of storage options: - -### Default: Ephemeral Filesystem - -By default, Render services use an ephemeral filesystem where: - -- Changes to the filesystem are lost after deployments or restarts -- Each service instance has its own separate filesystem -- No data persists between deployments - -### Persistent Disk Option - -The parser supports adding persistent disk storage through the `volumes` directive: - -- Persistent disks are automatically configured with 10GB size -- Only one disk per service is supported -- Files are preserved across deployments and restarts -- Only filesystem changes under the disk's mount path are preserved - -Important limitations for persistent disks: - -- A disk can only be accessed by a single service instance -- Services with persistent disks cannot scale to multiple instances - -Read more here: [render.com/docs/disks](https://render.com/docs/disks) - -## Service Types - -The parser automatically determines the appropriate service type for each container in your Docker configuration: - -### Web Services (Default) - -By default, services are created as `type: web`, which is suitable for: - -- HTTP-based applications -- Frontend applications -- API servers -- Any service that needs to be publicly accessible - -### Private Services - -For databases and other TCP-based services, the parser automatically sets `type: pserv`. These services: - -- Are not publicly accessible -- Can communicate with other services over TCP -- Are ideal for databases and backend services - -Read more here: [render.com/docs/private-services](https://render.com/docs/private-services). - -The service type is determined based on the Docker image being used. 
For example: - -```yaml -services: - web: - image: nginx:latest - # Automatically set to type: web - - db: - image: mariadb:11.2 - # Automatically set to type: pserv -``` - -### Adding New Service Types - -If you're using a service that should be private but isn't automatically detected, please visit our [Render: Contributing to Render Service Types docs page](/docker-to-iac/render-contributing-to-service-types). - -## Multi Services Support - -Multi `services` support for Render.com: __yes__ - -Since [multi services](https://render.com/docs/blueprint-spec#root-level-fields) feature is supported. - -Please read more about [multi service support here](/docker-to-iac/multi-services-support). diff --git a/docs/docker-to-iac/project-structure.mdx b/docs/docker-to-iac/project-structure.mdx deleted file mode 100644 index d794149..0000000 --- a/docs/docker-to-iac/project-structure.mdx +++ /dev/null @@ -1,175 +0,0 @@ ---- -title: Project Structure -description: Directory structure and organization of the docker-to-iac module, including guidance for adding new parsers, source handlers, and tests. ---- - -# Project Structure of docker-to-iac Module - -The project follows standard npm module organization with a well-defined structure to handle both Docker run commands and Docker Compose files, supporting multiple output formats and comprehensive testing. 
- -## Directory Structure - -```bash -docker-to-iac/ -|-- src/ # Source code -| |-- index.ts # Main entry point -| |-- config/ # Provider-specific configurations -| | |-- connection-properties.ts -| | |-- digitalocean/ -| | | |-- database-types.ts -| | |-- render/ -| | |-- service-types.ts -| |-- parsers/ # IaC parsers for different cloud providers -| | |-- aws-cloudformation.ts -| | |-- base-parser.ts -| | |-- digitalocean.ts -| | |-- render.ts -| |-- sources/ # Input source handlers -| | |-- base.ts -| | |-- factory.ts -| | |-- compose/ # Docker Compose handling -| | | |-- index.ts -| | | |-- validate.ts -| | |-- run/ # Docker run command handling -| | |-- index.ts -| |-- types/ # TypeScript type definitions -| | |-- container-config.ts -| | |-- environment-config.ts -| | |-- service-connections.ts -| |-- utils/ # Helper utilities -| |-- constructImageString.ts -| |-- detectDatabaseEnvVars.ts -| |-- digitalOceanParserServiceName.ts -| |-- getDigitalOceanDatabaseType.ts -| |-- getImageUrl.ts -| |-- parseCommand.ts -| |-- parseDockerImage.ts -| |-- parseEnvFile.ts -| |-- processEnvironmentVariablesGeneration.ts -| |-- resolveEnvironmentValue.ts -| |-- (... 
and many more) -|-- test/ # Test files -| |-- e2e/ # End-to-end tests -| | |-- assertions/ # Test assertions -| | | |-- digitalocean.ts -| | | |-- do-port-assertions.ts -| | | |-- port-assertions.ts -| | | |-- render.ts -| | |-- docker-compose-files/ # Test Docker Compose files -| | |-- docker-run-files/ # Test Docker run commands -| | |-- output/ # Test output directory -| | |-- utils/ # Test utilities -| | |-- index.ts # Main E2E test executor -| | |-- test1.ts # Environment variables and volume mapping tests -| | |-- test2.ts # Port mapping tests -| | |-- test3.ts # Environment variable substitution tests -| | |-- test4.ts # Schema validation tests -| |-- unit/ # Unit tests -| | |-- config/ # Configuration tests -| | |-- parsers/ # Parser tests -| | |-- sources/ # Source handler tests -| | |-- utils/ # Utility function tests -| |-- test.ts # Main test entry point -|-- eslint.config.mjs # ESLint configuration -|-- tsconfig.json # TypeScript configuration -|-- vitest.config.ts # Vitest configuration -|-- package.json # Package configuration -|-- README.md # Project documentation -``` - -## Directory Purposes - -### Core Directories - -- `src/` - Source code for the module -- `test/` - Test files organized by test type (unit and end-to-end) -- `dist/` - Compiled output (generated during build) - -### Source Code Organization - -#### Config (`src/config/`) - -Contains provider-specific configurations: - -- `connection-properties.ts` - Cross-provider connection property mappings -- `digitalocean/` - DigitalOcean App Platform specific configurations - - `database-types.ts` - Database type mappings for DigitalOcean -- `render/` - Render.com specific configurations - - `service-types.ts` - Service type mappings for Render deployments - -#### Parsers (`src/parsers/`) - -Contains IaC-specific parsers for different cloud providers: - -- `base-parser.ts` - Base parser class that defines common functionality -- `aws-cloudformation.ts` - AWS CloudFormation parser -- 
`digitalocean.ts` - DigitalOcean App Platform parser -- `render.ts` - Render Blueprint parser -- ... additional parsers for other providers - -#### Source Handlers (`src/sources/`) - -Handles different input types: - -- `base.ts` - Base source handler interface -- `factory.ts` - Factory for creating appropriate source handlers -- `compose/` - Docker Compose file processing - - `index.ts` - Main Compose parser - - `validate.ts` - Compose file validation -- `run/` - Docker run command processing - - `index.ts` - Docker run command parser - -#### Types (`src/types/`) - -TypeScript type definitions: - -- `container-config.ts` - Container and service configuration types -- `environment-config.ts` - Environment variable configuration types -- `service-connections.ts` - Service connection configuration types - -#### Utilities (`src/utils/`) - -Helper functions for parsing and processing: - -- `constructImageString.ts` - Docker image string construction -- `detectDatabaseEnvVars.ts` - Database environment variable detection -- `digitalOceanParserServiceName.ts` - Name formatting for DigitalOcean -- `getDigitalOceanDatabaseType.ts` - Database type detection for DigitalOcean -- `parseDockerImage.ts` - Docker image parsing -- `parseEnvFile.ts` - Environment file parsing -- `resolveEnvironmentValue.ts` - Environment variable resolution -- And many more utility functions for specific operations - -### Test Organization - -#### End-to-End Tests (`test/e2e/`) - -Integration tests that validate the complete workflow: - -- `assertions/` - Validation functions for test output -- `docker-compose-files/` - Test Docker Compose files -- `docker-run-files/` - Test Docker run commands -- `output/` - Generated test outputs -- `utils/` - Test helper utilities -- `test1.ts` through `test4.ts` - Specific test scenarios: - 1. Environment variables and volume mapping - 2. Port mappings - 3. Environment variable substitution - 4. 
Schema validation - -#### Unit Tests (`test/unit/`) - -Tests for individual components: - -- `config/` - Tests for configuration modules -- `parsers/` - Tests for IaC parsers -- `sources/` - Tests for source handlers -- `utils/` - Tests for utility functions - -## Adding New Parser - -Please check our [Adding a New Parser](/docker-to-iac/example-of-a-new-parser) documentation for detailed instructions on how to add a new parser to the project. This includes creating a new parser file, implementing the parsing logic, and ensuring compatibility with existing configurations. - -### Adding New Tests - -Please refer to the [Testing](/docker-to-iac/testing) documentation for guidelines on adding new tests, including unit and end-to-end tests. diff --git a/docs/docker-to-iac/publishing-to-npm.mdx b/docs/docker-to-iac/publishing-to-npm.mdx deleted file mode 100644 index b04f135..0000000 --- a/docs/docker-to-iac/publishing-to-npm.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Publishing docker-to-iac to NPM -description: Explore our automated NPM publishing workflow for docker-to-iac. From preparing a release branch to package deployment, understand our conventional commits-based process. ---- - -# Publishing docker-to-iac module to NPM - -We have created an organization @deploystack for NPM. Publishing in NPM happens automatically through `semantic-release`. Config: [https://github.com/deploystackio/docker-to-iac/blob/main/.github/workflows/release-pr.yml](https://github.com/deploystackio/docker-to-iac/blob/main/.github/workflows/release-pr.yml) - -## Release Process Overview - -The release process follows these steps: - -1. Initiate a release preparation using the GitHub workflow -2. Review and merge the release pull request -3. Automatic publishing to NPM when the release PR is merged - -## Starting a Release - -Releases can be initiated through the GitHub Actions UI: - -1. Navigate to the "Actions" tab in the repository -2. Select the "Release Process" workflow -3. 
Click "Run workflow" -4. Choose the release type: - - `patch` (bug fixes) - - `minor` (new features) - - `major` (breaking changes) -5. Optionally select "Prerelease" for beta versions -6. Click "Run workflow" - -## What Happens During Release Preparation - -The workflow performs the following steps: - -1. Updates the version in package.json based on conventional commits -2. Updates the CHANGELOG.md file with details of changes since the last release -3. Creates a new branch with these changes (named `release-v{version}`) -4. Provides a link to create a pull request - -## Creating the Pull Request - -After the workflow completes: - -1. Follow the link provided in the workflow output to create a pull request -2. **Important**: Add the `release` label to your pull request -3. Request a review of the PR - -## Publishing Process - -When the pull request with the `release` label is merged: - -1. The GitHub Action automatically creates a Git tag for the new version -2. A GitHub release is created with the changelog contents -3. The package is built using `npm run build` -4. The package is published to NPM with public access - -## Npm Package - -The published package is available at: [https://www.npmjs.com/package/@deploystack/docker-to-iac](https://www.npmjs.com/package/@deploystack/docker-to-iac) - -## Conventional Commits - -The project uses conventional commits to determine version bumps and generate changelogs. Commit messages should follow this pattern: - -- `feat: ...` - A new feature (minor version bump) -- `fix: ...` - A bug fix (patch version bump) -- `chore: ...` - Maintenance changes -- `docs: ...` - Documentation changes -- `style: ...` - Code style changes -- `refactor: ...` - Code refactoring -- `perf: ...` - Performance improvements -- `test: ...` - Test updates - -Breaking changes should include `BREAKING CHANGE:` in the commit message body or footer. 
- -## Configuration Files - -The release process is configured through several files: - -- `.github/workflows/release-pr.yml` - GitHub Actions workflow -- `.release-it.js` - Configuration for release-it -- `package.json` - NPM scripts for the release process diff --git a/docs/docker-to-iac/quickstart.mdx b/docs/docker-to-iac/quickstart.mdx deleted file mode 100644 index 602824c..0000000 --- a/docs/docker-to-iac/quickstart.mdx +++ /dev/null @@ -1,65 +0,0 @@ ---- -title: Quickstart Guide -description: Quickstart guide for using docker-to-iac to translate Docker run commands and Docker Compose files into infrastructure as code templates -icon: Album ---- - -# Quickstart Guide to docker-to-iac - -## Installation - -First, install the module and its dependencies: - -```bash -npm i @deploystack/docker-to-iac -``` - -## Usage Examples - -### Translating Docker Compose - -```javascript -import { translate } from '@deploystack/docker-to-iac'; -import { readFileSync, writeFileSync } from 'fs'; - -// Read Docker Compose file content -const dockerComposeContent = readFileSync('path/to/docker-compose.yml', 'utf8'); - -const translatedConfig = translate(dockerComposeContent, { - source: 'compose', - target: 'CFN', - templateFormat: 'yaml' -}); - -// Write the translated config to a file -writeFileSync('output-aws.yml', translatedConfig); -``` - -### Translating Docker Run Commands - -```javascript -import { translate } from '@deploystack/docker-to-iac'; -import { writeFileSync } from 'fs'; - -// Your docker run command -const dockerRunCommand = 'docker run -d -p 8080:80 -e NODE_ENV=production nginx:latest'; - -const translatedConfig = translate(dockerRunCommand, { - source: 'run', - target: 'CFN', - templateFormat: 'yaml' -}); - -// Write the translated config to a file -writeFileSync('output-aws.yml', translatedConfig); -``` - -### Translation Options - -When using the `translate` function, you can specify: - -- `source`: Either 'compose' or 'run' depending on your input -- 
`target`: The IaC language to translate to (e.g., 'CFN' for AWS CloudFormation) -- `templateFormat`: Output format - 'json', 'yaml', or 'text' - -For a complete list of supported parsers and formats, visit the [API documentation](/docker-to-iac/api). diff --git a/docs/docker-to-iac/render-contributing-to-service-types.mdx b/docs/docker-to-iac/render-contributing-to-service-types.mdx deleted file mode 100644 index 5da72ee..0000000 --- a/docs/docker-to-iac/render-contributing-to-service-types.mdx +++ /dev/null @@ -1,146 +0,0 @@ ---- -title: Contributing to Render Service Types -description: Guide for contributing to Render.com service type configurations in docker-to-iac ---- - -# Render: Contributing to Render Service Types - -This guide explains how to contribute to the Render.com service type configurations in docker-to-iac. The service types configuration determines whether a Docker service should be deployed as a web service, private service, or Redis service on Render.com. - -## Configuration Location - -The service types configuration is stored in: - -```bash -src/config/render/service-types.ts -``` - -This configuration is specific to Render.com and is not used by other cloud provider parsers. 
- -## TypeScript Configuration Structure - -The configuration uses the following structure: - -```typescript -interface ServiceTypeConfig { - type: string; - description: string; - versions: string; -} - -interface RenderServiceTypesConfig { - serviceTypes: { - [key: string]: ServiceTypeConfig; - }; -} - -export const renderServiceTypesConfig: RenderServiceTypesConfig = { - serviceTypes: { - 'docker.io/library/mariadb': { - type: 'pserv', - description: 'MariaDB database service - requires private service type due to TCP protocol', - versions: '*' - } - } -}; -``` - -### Field Definitions - -- `serviceTypes`: Root object containing all service type mappings -- Image key (e.g., `docker.io/library/mariadb`): The fully qualified Docker image name - - Should match the normalized format from `getImageUrl` utility - - For official Docker Hub images, use `docker.io/library/[name]` - - For user repositories, use `docker.io/[user]/[repo]` - - For GHCR, use `ghcr.io/[owner]/[repo]` -- `type`: The Render service type - - `pserv` for private services (databases, message queues, etc.) - - `web` is the default and doesn't need to be specified -- `description`: A clear explanation of why this service type is needed -- `versions`: Version matching pattern - - Use `"*"` for all versions - - Future: Will support semantic version ranges - -## Contributing Guidelines - -1. Identify the Need - - Service fails when deployed as `web` type - - Service requires TCP/private networking - - Service is a database or backend service - -2. Determine the Correct Image Name - - ```typescript - // Use the getImageUrl utility to find the correct name format - const imageUrl = getImageUrl('mysql:5.7'); - // Returns: docker.io/library/mysql - ``` - -3. Add Your Configuration - - Add a new entry to the `renderServiceTypesConfig` object - - Include a descriptive comment explaining the service type choice - - Follow the TypeScript interface structure shown above - -4. 
Submit a Pull Request - - Fork the repository - - Add your changes to `service-types.ts` - - Create a pull request with: - - Clear description of the service - - Why it needs a specific service type - - Any relevant documentation links - -## Example Addition - -Here's an example of a good service type addition: - -```typescript -export const renderServiceTypesConfig: RenderServiceTypesConfig = { - serviceTypes: { - // Existing configurations... - 'docker.io/library/postgresql': { - type: 'pserv', - description: 'PostgreSQL database service - requires private networking for security', - versions: '*' - }, - 'docker.io/library/rabbitmq': { - type: 'pserv', - description: 'RabbitMQ message broker - requires private TCP communication', - versions: '*' - } - } -}; -``` - -## Service Type Categories - -### Private Services (`pserv`) - -Common services that should use `pserv`: - -- Databases (MySQL, PostgreSQL, MongoDB) -- Message queues (RabbitMQ, Apache Kafka) -- Cache services (except Redis) -- Backend services that don't serve HTTP traffic - -### Web Services (`web`) - -Services that should remain as default `web` type: - -- HTTP APIs -- Web applications -- Frontend services -- Application servers - -## Getting Help - -If you're unsure about: - -- Which service type to use -- How to format the image name -- Whether a service needs a specific type - -Please: - -- Check Render's [service types documentation](https://render.com/docs/blueprint-spec#type) -- Open a discussion in our feedback GitHub repository [github.com/deploystackio/feedback](https://github.com/deploystackio/feedback) -- Join our [Discord community](https://discord.gg/UjFWwByB) for help diff --git a/docs/docker-to-iac/service-connections.mdx b/docs/docker-to-iac/service-connections.mdx deleted file mode 100644 index 2fa4b06..0000000 --- a/docs/docker-to-iac/service-connections.mdx +++ /dev/null @@ -1,227 +0,0 @@ ---- -title: Service Connections in docker-to-iac -description: Configure service-to-service 
communication in multi-container applications with docker-to-iac's service connections feature. Transform Docker Compose service references to cloud provider formats. -sidebar: Service Connections ---- - -# Service Connections - -The `docker-to-iac` module supports configuring service-to-service communication when translating Docker Compose files to cloud provider IaC templates. This feature is essential for multi-container applications where services need to communicate with each other (e.g., web applications connecting to databases). - -## The Challenge - -In Docker Compose, services can communicate with each other using the service name as the hostname: - -```yaml -# docker-compose.yml -services: - db: - image: mariadb:latest - # ... - - app: - image: myapp:latest - environment: - - DATABASE_HOST=db # Reference to the db service - - API_URL=http://api:3000 # Reference within a URL - # ... -``` - -However, when deploying to cloud providers, each has its own format for service discovery: - -- **Render.com**: Uses Blueprint's `fromService` syntax for service references -- **DigitalOcean App Platform**: Services connect using the service name directly - -The Service Connections feature automatically configures these references based on the target cloud provider. - -## Supported Providers - -Currently, service connections are supported for: - -| Provider | Implementation | Example Reference | -|----------|---------------|------------------| -| Render.com | Blueprint's `fromService` | `fromService: { name: "db", type: "web", property: "hostport" }` | -| DigitalOcean App Platform | Direct service name | `db` or `http://api:3000` | - -> **Note**: AWS CloudFormation is not supported for service connections as it does not provide a direct way to reference services by name across tasks in the generated architecture. 
- -## Usage - -To configure service connections, use the `serviceConnections` option in the `translate` function: - -```javascript -import { translate } from '@deploystack/docker-to-iac'; - -const result = translate(dockerComposeContent, { - source: 'compose', - target: 'RND', // Or 'DOP' - templateFormat: 'yaml', - serviceConnections: { - mappings: [ - { - fromService: 'app', // Service that needs to connect - toService: 'db', // Service to connect to - environmentVariables: [ // List of env vars that reference the service - 'DATABASE_HOST', - 'API_URL' - ], - property: 'connectionString' // The type of connection (connectionString, hostport, etc.) - } - ] - } -}); -``` - -### Configuration Options - -For each service connection mapping: - -- `fromService`: The service that needs to connect to another service -- `toService`: The service being connected to -- `environmentVariables`: Array of environment variable names that reference the target service -- `property`: The type of connection property to reference (e.g., 'connectionString', 'hostport', etc.) - -## Provider-Specific Implementations - -### Render.com - -For Render.com, the module generates Blueprint configurations using the native `fromService` syntax: - -```yaml -# Generated Render Blueprint -services: - - name: app - # ...other configuration... - envVars: - # Regular environment variables - - key: NODE_ENV - value: production - - # Service reference using fromService - - key: DATABASE_HOST - fromService: - name: db - type: pserv - property: hostport # This property is derived from the 'property' parameter in your mapping -``` - -This approach leverages Render's built-in service discovery capabilities for reliable inter-service communication. 
### DigitalOcean App Platform - -For DigitalOcean App Platform, the module maintains the direct service name references, as services can communicate directly using the service name: - -```yaml -# Generated DigitalOcean App Spec -services: - - name: app - # ...other configuration... - envs: - - key: DATABASE_HOST - value: db - - key: API_URL - value: http://api:3000 -``` - -## Complete Example - -Here's a complete example showing Node.js microservices communicating with each other, and a more detailed database connection example: - -```javascript -const dockerComposeContent = ` -version: "3" - -services: - api: - image: node:18-alpine - command: node /app/server.js - ports: - - "3000:3000" - - frontend: - image: node:18-alpine - environment: - - API_URL=http://api:3000 # This will be transformed appropriately - ports: - - "8080:8080" -`; - -const serviceConnections = { - mappings: [ - { - fromService: 'frontend', - toService: 'api', - environmentVariables: ['API_URL'], - property: 'hostport' - } - ] -}; - -// For DigitalOcean - Service name stays as "api" in http://api:3000 -// For Render - Will use fromService syntax instead of string replacement - -// Database Connection Example -const databaseComposeContent = ` -services: - app: - image: node:18-alpine - environment: - - DATABASE_URL=postgresql://postgres:secret@postgres:5432/mydb - ports: - - "3000:3000" - - postgres: - image: postgres:latest - environment: - - POSTGRES_DB=mydb - - POSTGRES_USER=postgres - - POSTGRES_PASSWORD=secret -`; - -const dbServiceConnections = { - mappings: [ - { - fromService: 'app', - toService: 'postgres', - environmentVariables: ['DATABASE_URL'], - property: 'connectionString' // Use connectionString for full database URL - } - ] -}; - -// Result will include proper connection format for each provider -// DigitalOcean: DATABASE_URL=${postgres.DATABASE_URL} -// Render: fromDatabase: { name: "postgres", property: "connectionString" } -``` - -## Response Format - -The `translate` 
function returns information about the resolved service connections: - -```javascript -{ - files: { - // Generated IaC template files - }, - serviceConnections: [ - { - fromService: 'app', - toService: 'db', - variables: { - 'DATABASE_HOST': { - originalValue: 'db', - transformedValue: 'db' // For DigitalOcean or Render - } - } - } - ] -} -``` - -This information can be useful for debugging or understanding how service connections were processed. - -## Limitations - -- The feature only transforms environment variable values that exactly match the service name -- More complex connection strings must be handled separately -- The feature doesn't adjust ports or protocols, only service hostnames diff --git a/docs/docker-to-iac/supported-docker-compose-variables.mdx b/docs/docker-to-iac/supported-docker-compose-variables.mdx deleted file mode 100644 index 091f808..0000000 --- a/docs/docker-to-iac/supported-docker-compose-variables.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Supported Docker Compose Variables -description: Detailed reference for Docker Compose variables compatible with docker-to-iac translations. Find supported service properties, registry options, and configuration examples for successful IaC deployments. ---- - -# Supported Docker Compose Variables - -This document outlines all the Docker Compose variables that are currently supported by the docker-to-iac module when translating your docker-compose.yml file into Infrastructure as Code templates. - -## Core Requirements - -The docker-to-iac module requires that your docker-compose.yml file meets these basic requirements: - -1. The file must contain at least one service -2. 
Each service must specify an `image` property (build instructions are not supported) - -## Supported Service Properties - -For each service in your docker-compose.yml file, the following properties are supported: - -### Required Properties - -| Property | Description | Example | -|----------|-------------|----------| -| `image` | The Docker image to use for the service. Must be a pre-built image available in a public registry. | `image: nginx:latest` | - -### Optional Properties - -| Property | Description | Example | -|----------|-------------|----------| -| `ports` | List of ports to expose. Supports port mapping in standard Docker format. | `ports:`<br/>`- "8080:80"`<br/>`- "443:443"` | -| `command` | Override the default command of the Docker image. | `command: "npm start"` | -| `restart` | Container restart policy. | `restart: always` | -| `volumes` | List of volume mappings. | `volumes:`<br/>`- "data:/var/lib/mysql"`<br/>`- "./config:/etc/nginx/conf.d:ro"` | -| `environment` | Environment variables as key-value pairs. | `environment:`<br/>`NODE_ENV: production`<br/>`PORT: 3000` | - -## Example Configuration - -Here's a complete example showing all supported variables: - -```yaml -services: - web: - image: nginx:alpine - ports: - - "80:80" - - "443:443" - command: "nginx -g 'daemon off;'" - restart: always - volumes: - - ./nginx.conf:/etc/nginx/nginx.conf:ro - environment: - NGINX_HOST: example.com - NGINX_PORT: 80 - - api: - image: node:16-alpine - ports: - - "3000:3000" - command: "npm start" - restart: always - environment: - NODE_ENV: production - DATABASE_URL: mongodb://db:27017/myapp -``` - -## Important Notes - -1. Build instructions (`build:`) are not supported - you must use pre-built images -2. Services must use images that are publicly accessible -3. Each service must have an `image` property specified -4. Environment variables should not contain sensitive information as they will be included in the generated IaC templates -5. 
Volume definitions are supported but their implementation may vary depending on the target cloud platform - -## Validation - -The module performs validation checks to ensure: - -- The docker-compose.yml file contains at least one service -- Each service has an `image` property specified -- The Docker image reference is valid and follows the expected format - -If validation fails, the module will throw a `DockerComposeValidationError` with a descriptive message explaining the issue. diff --git a/docs/docker-to-iac/supported-registries.mdx b/docs/docker-to-iac/supported-registries.mdx deleted file mode 100644 index 5ddb3d0..0000000 --- a/docs/docker-to-iac/supported-registries.mdx +++ /dev/null @@ -1,83 +0,0 @@ ---- -title: Supported Registries -description: Complete guide to Docker registries in docker-to-iac - From official Docker Hub images to GHCR, learn supported formats with practical examples. ---- - -# Supported Registries for docker-to-iac module - -docker-to-iac supports multiple Docker image registries. Below you'll find details about supported registries and examples of how they are handled. - -## Docker Hub Registry - -Docker Hub is the default and most common registry for Docker images. - -### Official Images - -Docker Hub official images are maintained by Docker and don't include a username/organization prefix. - -**Docker Compose Example:** - -```yaml -services: - db: - image: redis:latest -``` - -**Translated Image URL:** - -```text -docker.io/library/redis:latest -``` - -### User/Organization Images - -Docker Hub images that belong to specific users or organizations include the username/organization as prefix. 
- -**Docker Compose Example:** - -```yaml -services: - app: - image: nginx/nginx-prometheus-exporter:0.10.0 -``` - -**Translated Image URL:** - -```text -docker.io/nginx/nginx-prometheus-exporter:0.10.0 -``` - -## GitHub Container Registry (GHCR) - -GitHub Container Registry is GitHub's container registry service that allows you to host and manage Docker container images. - -**Docker Compose Example:** - -```yaml -services: - monitor: - image: ghcr.io/dgtlmoon/changedetection.io -``` - -**Translated Image URL:** - -```text -ghcr.io/dgtlmoon/changedetection.io -``` - -## Registry URL Formats - -Here's how different registry types are handled: - -| Registry Type | Format | Example | -|--------------|--------|---------| -| Docker Hub (Official) | docker.io/library/[image]:[tag] | docker.io/library/redis:latest | -| Docker Hub (User) | docker.io/[user]/[image]:[tag] | docker.io/nginx/nginx-prometheus-exporter:0.10.0 | -| GitHub Container Registry | ghcr.io/[user]/[image]:[tag] | ghcr.io/dgtlmoon/changedetection.io | - -## Notes - -- If no tag is specified, `latest` is used as the default tag -- The module preserves the original registry URL format for custom registries -- SHA256 digests are supported for all registry types -- All supported registries work with all parsers, including AWS CloudFormation, Render.com, DigitalOcean App Platform, and Kubernetes Helm Charts diff --git a/docs/docker-to-iac/testing.mdx b/docs/docker-to-iac/testing.mdx deleted file mode 100644 index ea2126f..0000000 --- a/docs/docker-to-iac/testing.mdx +++ /dev/null @@ -1,327 +0,0 @@ ---- -title: Testing docker-to-iac Module -description: Learn how to test the docker-to-iac module including Docker run commands and Docker Compose files, with support for integration and end-to-end testing. ---- - -# Testing docker-to-iac Module - -Before submitting a pull request, test your code locally. 
Testing covers both code quality and functional aspects for Docker run commands and Docker Compose files, including multi-file output capabilities and cross-platform compatibility. - -## Running Tests - -### Code Quality Check - -First, run ESLint to ensure code quality: - -```bash -npm run lint -``` - -ESLint must pass locally before submitting your PR, as GitHub Actions CI/CD will block any PR that fails the lint check. - -### Unit Testing - -To run unit tests only: - -```bash -npm run test:unit -``` - -Unit tests verify individual functions and components work correctly in isolation. - -### End-to-End Testing - -To run end-to-end tests only: - -```bash -npm run test:e2e -``` - -End-to-end tests validate the entire translation process with real inputs and outputs. - -### All Tests - -Run the complete test suite: - -```bash -npm run test -``` - -This will execute both unit tests and end-to-end tests to ensure comprehensive coverage. - -## Test Suite Structure - -The test suite is organized in a hierarchical structure: - -```bash -test/ -├── e2e/ # End-to-end tests -│ ├── assertions/ # Testing assertions for output validation -│ │ ├── digitalocean.ts # DigitalOcean-specific assertions -│ │ ├── do-port-assertions.ts -│ │ ├── port-assertions.ts -│ │ └── render.ts # Render-specific assertions -│ ├── docker-compose-files/ # Test docker-compose files -│ │ ├── test1.yml -│ │ ├── test2.yml -│ │ └── ... -│ ├── docker-run-files/ # Test docker run commands -│ │ ├── test1.txt -│ │ ├── test2.txt -│ │ └── ... 
-│ ├── output/ # Generated test outputs -│ │ └── README.md # Explanation of the output directory -│ ├── utils/ # Testing utilities -│ ├── index.ts # Main e2e test executor -│ ├── test1.ts # Environment variables and volume mapping tests -│ ├── test2.ts # Port mapping tests -│ ├── test3.ts # Environment variable substitution tests -│ └── test4.ts # Render-specific validation -├── unit/ # Unit tests -│ ├── config/ # Tests for configuration -│ ├── parsers/ # Tests for parsers -│ ├── sources/ # Tests for sources -│ └── utils/ # Tests for utility functions -└── test.ts # Main test file -``` - -## End-to-End Test Scenarios - -The end-to-end tests cover four main scenarios: - -### Test 1: Environment Variables and Volume Mapping - -Tests the translation of environment variables and volume mappings for both Docker run commands and Docker Compose files. It verifies: - -- Environment variables are correctly passed through to the output -- Environment variables with defaults are handled properly -- Volume mappings are correctly configured in the output - -### Test 2: Port Mappings - -Tests port mapping functionality for both Docker run commands and Docker Compose files. It verifies: - -- Basic port mappings are correctly translated -- Multiple port mappings are handled properly -- Database service port configurations are correctly set -- PORT environment variables are properly set - -### Test 3: Environment Variable Substitution - -Tests the functionality to substitute environment variables from a .env file. It verifies: - -- Environment variables can be substituted with values from a .env file -- Default values are used when variables are not defined -- Substitution works in both Docker run and Docker Compose scenarios - -### Test 4: Schema Validation - -Tests that the generated Render.com YAML output conforms to the official Render.com schema. 
It verifies: - -- Output is valid according to the Render.com schema -- Required fields are present and correctly formatted -- Service configurations are properly structured - -## Test Output Structure - -End-to-end tests generate organized output directories: - -```bash -test/e2e/output/ -├── test1/ # Environment variables and volume mapping test -│ ├── docker-compose/ # Docker Compose test outputs -│ │ ├── dop/ # DigitalOcean outputs -│ │ │ └── .do/ -│ │ │ └── deploy.template.yaml -│ │ └── rnd/ # Render outputs -│ │ └── render.yaml -│ └── docker-run/ # Docker run test outputs -│ ├── dop/ -│ │ └── .do/ -│ │ └── deploy.template.yaml -│ └── rnd/ -│ └── render.yaml -├── test2/ # Port mapping test -│ ├── docker-compose/ -│ │ ├── dop/ -│ │ │ └── .do/ -│ │ │ └── deploy.template.yaml -│ │ └── rnd/ -│ │ └── render.yaml -│ └── docker-run/ -│ ├── dop/ -│ │ └── .do/ -│ │ └── deploy.template.yaml -│ └── rnd/ -│ └── render.yaml -└── ... -``` - -## Adding Test Cases - -### For Docker Compose Tests - -Add your test files to `test/e2e/docker-compose-files/` with `.yml` or `.yaml` extension. Each file should represent a specific test scenario. - -### For Docker Run Commands - -Add your test commands to `test/e2e/docker-run-files/` with `.txt` extension. Each file should contain a single Docker run command. You can use line continuations with `\` for readability: - -```bash -docker run -d \ - --name nginx-proxy \ - -p 80:80 \ - -v /etc/nginx/conf.d:/etc/nginx/conf.d:ro \ - nginx:alpine -``` - -### Adding New End-to-End Tests - -To add a new end-to-end test: - -1. Create a new test file (e.g., `test5.ts`) in `test/e2e/` -2. Follow the pattern of existing test files: - - Define the test scenario - - Create test functions for Docker run and Docker Compose - - Use assertions to validate the output -3. Add your test to `test/e2e/index.ts` to ensure it gets executed - -### Adding Assertions - -For new validation requirements: - -1. 
Add assertion functions to the appropriate file in `test/e2e/assertions/` -2. Use these assertions in your test functions -3. For provider-specific assertions, create new files if needed - -## Unit Tests - -Unit tests validate individual components of the codebase: - -- **Config tests**: Verify configuration files and settings -- **Parser tests**: Check that parsers handle input correctly -- **Source tests**: Validate source handling (Docker run, Docker Compose) -- **Utility tests**: Ensure utility functions work as expected - -To add a new unit test: - -1. Create a new test file in the appropriate directory under `test/unit/` -2. Use the Vitest framework for testing (similar to Jest) -3. Follow the naming convention: `*.test.ts` - -## Local Testing with `npm link` - -Test locally using `npm link`. Development environment setup: - -```bash -some-root-dir/ -|-- docker-to-iac/ -|-- my-dev-env/ -| |-- index.js -| |-- docker-compose.yml -| |-- docker-run.txt -| |-- node_modules/ -| |-- package.json -``` - -Setup steps: - -1. In `docker-to-iac/`: `npm link` -2. In `my-dev-env`: `npm link @deploystack/docker-to-iac` - -### Setting up my-dev-env - -1. Initialize: `npm init` - -2. 
Create `index.js`: - -```javascript -import { translate } from '@deploystack/docker-to-iac'; -import { readFileSync, writeFileSync, mkdirSync, existsSync } from 'fs'; -import { join, dirname } from 'path'; - -// Test Docker Compose -const dockerComposeContent = readFileSync('docker-compose.yml', 'utf8'); -const composeResult = translate(dockerComposeContent, { - source: 'compose', - target: 'RND', // Render.com output - templateFormat: 'yaml' -}); - -console.log('Docker Compose Translation - Files:', Object.keys(composeResult.files)); - -// Write output files preserving directory structure -Object.entries(composeResult.files).forEach(([path, fileData]) => { - const fullPath = join('output', 'compose', path); - const dir = dirname(fullPath); - - if (!existsSync(dir)) { - mkdirSync(dir, { recursive: true }); - } - - writeFileSync(fullPath, fileData.content); -}); - -// Test Docker Run -const dockerRunContent = readFileSync('docker-run.txt', 'utf8'); -const runResult = translate(dockerRunContent, { - source: 'run', - target: 'DOP', // DigitalOcean output - templateFormat: 'yaml' -}); - -console.log('Docker Run Translation - Files:', Object.keys(runResult.files)); - -// Write output files preserving directory structure -Object.entries(runResult.files).forEach(([path, fileData]) => { - const fullPath = join('output', 'run', path); - const dir = dirname(fullPath); - - if (!existsSync(dir)) { - mkdirSync(dir, { recursive: true }); - } - - writeFileSync(fullPath, fileData.content); -}); -``` - -3. 
Test changes: - - Make changes in `docker-to-iac/` - - Run `npm run build` in docker-to-iac - - Test in `my-dev-env/` with `node index.js` - - Check the output directory for generated files - -## Test Results - -The test suite shows detailed results for each test: - -- Unit tests show individual function validation results -- E2E tests provide a summary of passed and failed tests -- Test failures include specific error messages for debugging - -On failure: - -- Error details are logged -- Process exits with code 1 -- GitHub Actions fails PR check - -## Code Coverage - -To generate code coverage reports: - -```bash -npm run test:coverage -``` - -This will create coverage reports in the `coverage/` directory, including HTML reports you can view in a browser. - -## Troubleshooting Test Failures - -If tests fail: - -1. Check the test output for specific error messages -2. Review the actual and expected values in assertion failures -3. Check the generated files in `test/e2e/output/` to see what was produced -4. For schema validation failures, check the error details against the provider's schema documentation - -By following these steps, you can ensure your changes are fully tested and compatible with all supported output formats. diff --git a/docs/deploystack/github-application.mdx b/docs/github-application.mdx similarity index 100% rename from docs/deploystack/github-application.mdx rename to docs/github-application.mdx diff --git a/docs/github-oauth-setup.mdx b/docs/github-oauth-setup.mdx new file mode 100644 index 0000000..6b593cf --- /dev/null +++ b/docs/github-oauth-setup.mdx @@ -0,0 +1,163 @@ +--- +title: GitHub OAuth Setup +description: Configure GitHub OAuth for user authentication and single sign-on in DeployStack. +sidebar: GitHub OAuth +--- + +# GitHub OAuth Setup + +DeployStack provides GitHub OAuth authentication that enables users to sign in with their GitHub accounts. 
This OAuth integration provides a seamless login experience while maintaining security and team boundaries. + +## Overview + +The GitHub OAuth system offers: + +- **Single Sign-On**: Users can sign in with their GitHub accounts +- **Automatic User Creation**: New users are automatically provisioned on first login +- **Account Linking**: Existing email-based accounts can be linked to GitHub accounts +- **Team Creation**: Default teams are automatically created for new GitHub users + +- **Secure Authentication**: Industry-standard OAuth 2.0 flow with state validation + +## GitHub OAuth Configuration + +### Setting Up GitHub OAuth + +To enable GitHub OAuth authentication, you need to configure GitHub OAuth in your global settings: + +#### 1. Create GitHub OAuth App + +1. **Go to GitHub**: Navigate to GitHub.com → Settings → Developer settings → OAuth Apps +2. **Create New App**: Click "New OAuth App" +3. **Configure Application**: + - **Application Name**: `DeployStack - [Your Instance]` + - **Homepage URL**: `https://your-deploystack-domain.com` + - **Authorization Callback URL**: `https://your-api-domain.com/api/auth/github/callback` + ⚠️ **Important**: Use your API domain (e.g., `backend.deploystack.io`), NOT your frontend domain + - **Application Description**: Optional description of your DeployStack instance + +#### 2. Configure in DeployStack + +1. **Access Global Settings**: Go to Admin → Global Settings → GitHub OAuth Configuration +2. **Enter Credentials**: + - **Client ID**: From your GitHub OAuth app + - **Client Secret**: From your GitHub OAuth app + - **Callback URL**: Should match your GitHub app (e.g., `https://your-api-domain.com/api/auth/github/callback`) + - **Scope**: Default is `user:email` (required for email access) + - **Enable GitHub OAuth**: Toggle to activate +3. 
**Configure Page URL**: Go to Admin → Global Settings → Global Settings + - **Base URL for the application frontend**: Set to your frontend URL (e.g., `https://cloud.deploystack.io`) + - This URL is used for redirects after successful authentication +4. **Save Configuration**: Apply the settings + +#### 3. Test Integration + +1. **Test Login**: Log out and try the "Sign in with GitHub" button on the login page +2. **Verify Redirect**: Ensure you're redirected to GitHub for authorization +3. **Check Callback**: After authorizing, you should be redirected back to your DeployStack frontend +4. **Validate Session**: Confirm you're logged in with your GitHub account + +### GitHub App vs OAuth App + +DeployStack supports both GitHub OAuth Apps and GitHub Apps: + +#### GitHub OAuth App (For User Authentication) +- **Purpose**: User authentication and login +- **Simpler Setup**: Easier to configure and manage +- **User-Based Access**: Uses individual user permissions +- **Email Access**: Can retrieve user email for account creation +- **Account Linking**: Links GitHub accounts to existing users + +#### GitHub App (For Repository Access) +- **Purpose**: Repository data access for MCP server creation +- **Enhanced Security**: App-level permissions and authentication +- **Higher Rate Limits**: Better rate limiting for high-volume usage +- **Private Repositories**: Can access private repositories when installed +- **Installation-Based**: Installed per organization/repository +- **Note**: Configured separately in GitHub App settings (see GitHub Application documentation) + +## How GitHub OAuth Works + +### Authentication Flow + +1. **User Initiates Login**: User clicks "Sign in with GitHub" on the login page +2. **Redirect to GitHub**: User is redirected to GitHub's authorization page +3. **User Authorizes**: User grants permissions to the DeployStack OAuth app +4. 
**Callback to API**: GitHub redirects to `https://your-api-domain.com/api/auth/github/callback` with an authorization code +5. **Token Exchange**: Backend exchanges the code for an access token +6. **User Lookup/Creation**: Backend fetches user info from GitHub and creates or links the account +7. **Session Creation**: A session is created and a cookie is set +8. **Frontend Redirect**: User is redirected to the frontend URL configured in `global.page_url` + +### User Provisioning + +#### New Users +When a user signs in with GitHub for the first time: +- A new user account is automatically created +- Username is set to their GitHub username +- Email is retrieved from GitHub (primary verified email preferred) +- First and last names are extracted from GitHub profile (if available) +- User is assigned the `global_user` role (not admin) +- A default team is created with the username +- Email is marked as verified + +#### Existing Users +If a user with the same email already exists: +- The existing account is linked to the GitHub account +- GitHub ID is added to the user record +- User can now sign in with either method (email or GitHub) + +#### First User Restriction +- The first user in the system MUST be created via email registration +- This ensures the first user becomes the global administrator +- GitHub OAuth cannot be used to create the initial admin account + +### Security Considerations + +- **State Parameter**: Used for CSRF protection +- **Secure Cookies**: Session cookies use httpOnly and secure flags +- **Token Storage**: GitHub tokens are not stored permanently +- **Email Verification**: GitHub emails are considered pre-verified +- **Role Assignment**: GitHub users receive standard user role, not admin + + +## Security Considerations + +### OAuth Scope and Permissions + +#### What GitHub OAuth Accesses +The GitHub OAuth integration ONLY requests the `user:email` scope, which provides: +- **GitHub Username**: The user's GitHub login name +- **Email 
Address**: The user's primary verified email from GitHub +- **Display Name**: The user's full name from their GitHub profile (if provided) +- **GitHub User ID**: A unique identifier for account linking + +#### What GitHub OAuth Does NOT Access +- **No Repository Access**: OAuth login does not access any repositories +- **No Code Access**: Cannot read, write, or view any source code +- **No Organization Data**: Does not access organization information +- **No Private Data**: Only basic public profile information is retrieved + +### Data Handling + +#### Information Stored +When a user logs in via GitHub, DeployStack stores: +- **Username**: From GitHub profile +- **Email**: Primary verified email from GitHub +- **First/Last Name**: Parsed from GitHub display name (if available) +- **GitHub ID**: For account linking purposes +- **Authentication Type**: Marked as "github" authentication + +#### Security Measures +- **Temporary Tokens**: GitHub access tokens are used only during login and not stored +- **State Parameter**: CSRF protection via state parameter validation +- **Secure Cookies**: Session cookies use httpOnly and secure flags +- **Encrypted Storage**: Any sensitive data is encrypted in the database +- **Minimal Scope**: Only requests the minimum required `user:email` scope + +## Related Documentation + +- [GitHub Application](/github-application) - For repository data access and MCP server creation +- [Global Settings](/global-settings) - Complete global settings reference + +GitHub OAuth provides a seamless authentication experience for your users while maintaining security and proper access control within DeployStack. 
diff --git a/docs/deploystack/global-settings.mdx b/docs/global-settings.mdx similarity index 100% rename from docs/deploystack/global-settings.mdx rename to docs/global-settings.mdx diff --git a/docs/index.mdx b/docs/index.mdx index e51ec17..c971938 100644 --- a/docs/index.mdx +++ b/docs/index.mdx @@ -1,94 +1,116 @@ --- title: DeployStack Documentation -description: Welcome to DeployStack documentation. Learn how to deploy MCP servers to any cloud provider with one-click deployments, secure credential management, and team collaboration features. -icon: CircleHelp +description: Official DeployStack documentation - The Enterprise Control Plane for MCP servers. Secure, centralized management of your organization's AI tool landscape with the DeployStack Gateway. +sidebar: Introduction +icon: Star --- -import { CloudUpload, Rocket, LockKeyhole, Terminal, FileText, Container, BookOpenText, MessageCircleHeart } from 'lucide-react'; +import { Card, Cards } from 'fumadocs-ui/components/card'; +import { Plug, Settings, Users, Code2, Server, Zap, Shield, Wrench, Terminal, Container, BookOpenText, FileText, MessageCircleHeart } from 'lucide-react'; -# DeployStack Documentation +# DeployStack - Enterprise Control Plane for MCP -DeployStack is the first **CI/CD platform specifically built for MCP servers**. Deploy Model Context Protocol servers to any cloud provider with one click, manage credentials securely, and collaborate with your team. Whether you're building MCP servers for Claude Desktop, VS Code, or other AI applications, DeployStack eliminates the deployment complexity that currently limits MCP adoption. +DeployStack is the **Enterprise Control Plane for the Model Context Protocol (MCP) ecosystem**. We provide a secure, centralized platform to manage your company's entire AI tool landscape, eliminating credential sprawl and enabling developers to move faster and more securely through our local DeployStack Gateway. 
-## Get Started - -DeployStack simplifies MCP server deployment through three key steps: browse our curated MCP server catalog, configure your credentials securely, and deploy to your preferred cloud provider instantly. Start by understanding the core concepts and how MCP servers integrate with AI applications. - -### Core Concepts +## User Guides -} title="Quick-Start Guide" href="/deploystack/quick-start"> -Deploy Model Context Protocol (MCP) servers to selected cloud provider with a single click, eliminating the need for complex setup. - -} title="MCP Server Catalog" href="https://deploystack.io/mcp"> -Browse our curated collection of ready-to-use MCP servers and deploy them instantly. - -} title="Security" href="/deploystack/security"> -Learn how to deploy MCP servers to selected cloud provider with secure credential management. - -} title="Team Management" href="/deploystack/team-collaboration"> -Manage MCP server deployments across your organization with role-based access control. - + } + href="/quick-start" + title="DeployStack Quick Start" + > + Get started with DeployStack in minutes. Set up your first MCP server and connect to the DeployStack Gateway. 
+ + + } + href="/onboard-new-team-members" + title="Onboard New Team Members" + > + Learn how to onboard new developers to your DeployStack team and manage their access + + + } + href="/self-hosted/" + title="Self-Hosted DeployStack" + external + > + Host DeployStack on your own infrastructure for complete control and security + + + } + href="/security" + title="Security & Governance" + > + Understand how DeployStack eliminates credential sprawl and provides enterprise security + -### MCP Server Categories +## Developer Documentation -Our catalog spans essential MCP server categories that connect AI agents to real-world systems and data sources: +**For developers** extending or contributing to DeployStack: -} title="Database Connectors" href="/deploystack/categories/databases"> -Connect AI agents to PostgreSQL, MySQL, MongoDB, and other database systems - -} title="API Integrations" href="/deploystack/categories/apis"> -Access REST APIs, GraphQL endpoints, and third-party services from AI applications - -} title="File Systems" href="/deploystack/categories/filesystems"> -Enable AI agents to read, write, and manage files across local and cloud storage - -} title="Productivity Tools" href="/deploystack/categories/productivity"> -Integrate with Slack, GitHub, Jira, and other tools your team uses daily - + } + href="/development/frontend" + title="Frontend Development" + > + Guides for building and extending the DeployStack web interface + + + } + href="/development/backend" + title="Backend Development" + > + Learn how to extend the DeployStack backend services and APIs + + + } + href="/development/gateway" + title="Gateway Development" + > + Build and extend the DeployStack Gateway for secure MCP server management + -### DeployStack Ecosystem +## User Guides + +**For administrators and team members** using DeployStack: -DeployStack consists of several integrated components that work together to enable comprehensive MCP server deployment and management: + + +## Our 
Repository Structure + +DeployStack consists of several integrated components that work together to provide enterprise-grade MCP server management: -} title="deploystack" href="https://github.com/deploystackio/deploystack"> -The main platform providing MCP server CI/CD, team collaboration, and multi-cloud deployment +} title="deploystack" href="https://github.com/deploystackio/deploystack"> +The main platform providing centralized MCP server management, team collaboration, and the DeployStack Gateway -} title="awesome-mcp-server" href="https://github.com/deploystackio/awesome-mcp-server"> +} title="awesome-mcp-server" href="https://github.com/deploystackio/awesome-mcp-server"> Community-curated catalog of production-ready MCP servers with standardized configurations -} title="documentation" href="https://github.com/deploystackio/documentation"> -Comprehensive guides for MCP server development, deployment, and integration - -} title="feedback" href="https://github.com/deploystackio/feedback"> -Public repository for feature requests, bug reports, and roadmap discussions +} title="documentation" href="https://github.com/deploystackio/documentation"> +Comprehensive guides for MCP server management, security, and team collaboration -## MCP Server Development - -### Creating MCP Servers - -- Follow MCP specification standards for maximum compatibility -- Implement proper authentication and security practices -- Test across different transport protocols (stdio, HTTP, SSE) -- Document environment variables and configuration requirements +## Community & Resources -### Contributing to the Catalog - -- Submit your MCP server to [awesome-mcp-server](https://github.com/deploystackio/awesome-mcp-server) -- Follow our contribution guidelines for standardized formatting -- Include comprehensive documentation and usage examples -- Ensure production-ready deployment configurations - -## Community and Support - -- Join our [Discord community](https://discord.gg/42Ce3S7b3b) for 
real-time discussions -- Explore the [MCP Server Catalog](https://deploystack.io/mcp) for deployment-ready servers -- Check our [troubleshooting guide](/deploystack/troubleshooting) for common issues + + } + href="https://discord.gg/UjFWwByB" + title="Join our Discord" + external + > + Get help and connect with the DeployStack community + -Ready to eliminate MCP server deployment complexity? [Get started for free →](https://cloud.deploystack.io) + diff --git a/docs/deploystack/local-setup.mdx b/docs/local-setup.mdx similarity index 98% rename from docs/deploystack/local-setup.mdx rename to docs/local-setup.mdx index 3527bb0..4f57137 100644 --- a/docs/deploystack/local-setup.mdx +++ b/docs/local-setup.mdx @@ -11,10 +11,10 @@ import { Steps, Step } from 'fumadocs-ui/components/steps'; # Local Development Setup -This guide is for contributors and developers who want to run DeployStack locally for development purposes. If you want to deploy DeployStack for production use, see our [Self-Hosted Documentation](/deploystack/self-hosted). +This guide is for contributors and developers who want to run DeployStack locally for development purposes. If you want to deploy DeployStack for production use, see our [Self-Hosted Documentation](/self-hosted). - For production deployments, use our [Docker Compose setup](/deploystack/self-hosted/docker-compose) instead. + For production deployments, use our [Docker Compose setup](/self-hosted/docker-compose) instead. ## Prerequisites diff --git a/docs/deploystack/mcp-catalog.mdx b/docs/mcp-catalog.mdx similarity index 95% rename from docs/deploystack/mcp-catalog.mdx rename to docs/mcp-catalog.mdx index cb330c9..143df48 100644 --- a/docs/deploystack/mcp-catalog.mdx +++ b/docs/mcp-catalog.mdx @@ -38,14 +38,9 @@ The catalog supports two types of servers: ### Categories -Servers are organized into categories for easy discovery: +Servers are organized into categories for easy discovery and filtering. 
Categories are simple organizational labels that group servers by their purpose or functionality. -- **Development Tools**: Code analysis, Git integration, CI/CD tools -- **Data Sources**: Database connectors, API integrations, file systems -- **AI & ML**: Machine learning models, AI services, data processing -- **Communication**: Chat platforms, email services, notification systems -- **Productivity**: Task management, calendars, document processing -- **Custom**: User-defined categories for specialized use cases +For a complete explanation of how MCP categories work, see the [MCP Categories Guide](/mcp-categories). **Note**: Only Global Administrators can create and manage categories. @@ -166,7 +161,7 @@ The catalog supports comprehensive version tracking: ### GitHub Integration -Seamless integration with GitHub repositories for automatic synchronization and metadata extraction. For complete details on setting up and using GitHub integration, see the [GitHub Integration Guide](./github-integration.mdx). +Integration with GitHub repositories for automatic synchronization and metadata extraction. For complete details on setting up and using GitHub integration, see the [GitHub App Integration Guide](/github-application). **Key Features:** - **Automatic Repository Sync**: Pull server metadata from GitHub repositories diff --git a/docs/mcp-categories.mdx b/docs/mcp-categories.mdx new file mode 100644 index 0000000..d2ab2d3 --- /dev/null +++ b/docs/mcp-categories.mdx @@ -0,0 +1,31 @@ +--- +title: MCP Categories +description: Understanding MCP server categories in DeployStack - simple organizational labels for finding the right MCP servers. +sidebar: MCP Categories +--- + +# MCP Categories + +MCP categories are simple organizational labels that help you find the right MCP servers in the catalog. Think of them like folders or tags that group similar servers together. + +## What are MCP Categories? 
+ +Categories are just labels that organize MCP servers by their purpose or functionality. For example: + +- **Development Tools** - Code analysis, Git integration, CI/CD tools +- **Data Sources** - Database connectors, API integrations, file systems +- **AI & ML** - Machine learning models, AI services, data processing +- **Communication** - Chat platforms, email services, notification systems +- **Productivity** - Task management, calendars, document processing + +When you browse the MCP catalog, you can filter by category to quickly find servers that match what you're looking for, instead of scrolling through everything. + +## Who Can Manage Categories? + +Only Global Administrators can create, edit, or delete categories. Regular users can view and use categories to filter servers, but cannot modify the category system itself. + +## How Categories Work + +When someone adds an MCP server to the catalog, they assign it to a category. This makes it easier for everyone to discover relevant servers. Categories are shared across the entire platform - both global servers and team servers use the same category system. + +That's it! Categories are simply an organizational tool to make finding MCP servers easier. diff --git a/docs/mcp-installation.mdx b/docs/mcp-installation.mdx new file mode 100644 index 0000000..e51f6ad --- /dev/null +++ b/docs/mcp-installation.mdx @@ -0,0 +1,137 @@ +--- +title: MCP Server Installation +description: Learn how to install and manage MCP servers within your team workspace, including credentials, configuration, and team-scoped access. +sidebar: MCP Installation +--- + +# MCP Server Installation + +MCP server installations are how your team actually uses MCP servers from the catalog. Think of the MCP catalog as a "store" where you browse available servers, and installations as the "purchased items" that your team can actually use with your own credentials and settings. 
+ +## Understanding the Connection + +### The Three-Layer System + +DeployStack uses a three-layer system to manage MCP servers: + +1. **Global MCP Catalog**: A centralized library of all available MCP servers +2. **Team Access**: Your team can browse and select servers you have permission to use +3. **Team Installations**: Your team's actual configured instances of MCP servers + +### How It Works + +```mermaid +graph TD + A[Global MCP Catalog] --> B[Team Browsing] + B --> C[Team Installation] + C --> D[Team Workspace] + + A1[GitHub MCP Server] --> B1[Your Team Sees It] + B1 --> C1[Install with Your Credentials] + C1 --> D1[Ready to Use in Your Team] +``` + +## MCP Server Installation Scope + +### Team-Centered Installations + +**Every MCP server installation belongs to a specific team.** This is a fundamental principle of DeployStack: + +- **Team Ownership**: Each installation is owned by one team and isolated from other teams +- **Team Credentials**: Your team provides and manages its own API keys, tokens, and configuration +- **Team Configuration**: Customize server settings specific to your team's needs +- **Team Privacy**: Other teams cannot see or access your installations + +### Why Team-Scoped? 
+ +This design provides several important benefits: + +#### Security Isolation +- **Credential Separation**: Your team's API keys are completely separate from other teams +- **Access Control**: Only your team members can use your team's installations +- **Data Privacy**: No cross-team access to configurations or usage data + +#### Flexibility +- **Custom Names**: Give installations meaningful names for your team context +- **Team-Specific Settings**: Configure servers differently for your team's workflow +- **Independent Updates**: Update or modify installations without affecting other teams + +#### Organization +- **Clear Ownership**: Every installation has a clear team owner +- **Team Management**: Team administrators control all installations +- **Workspace Isolation**: Each team has its own complete workspace + +## Team Workspace Context + +### How Installations Fit in Your Team + +Your team's MCP server installations are part of your complete team workspace: + +#### Team Resources +- **MCP Installations**: Configured MCP servers ready to use +- **Cloud Credentials**: Authentication for deployment platforms +- **Environment Variables**: Global team settings +- **Team Members**: Users who can access these resources + +#### Workspace Benefits +- **Unified Management**: All team resources in one place +- **Consistent Access**: Same permissions across all installations +- **Shared Configuration**: Common settings available to all installations +- **Team Collaboration**: All team members work with the same tools + +### Database Storage + +Behind the scenes, your team's installations are stored securely: + +#### Team-Level Storage +- **Team Workspace**: All installations belong to your team's workspace +- **Encrypted Credentials**: Your API keys and tokens are encrypted at rest +- **Access Control**: Only your team members can access the data +- **Audit Trail**: Complete history of installation changes + +#### Data Isolation +- **Team Boundaries**: Your data is 
completely separate from other teams +- **Secure Access**: Only authorized team members can view or modify installations +- **Privacy Protection**: No cross-team data sharing or access + +## Installation Types + +### Local Installations + +**Local MCP servers** run on your team's infrastructure: + +- **Your Environment**: Runs in your team's deployment environment +- **Direct Control**: Full control over the server instance +- **Custom Configuration**: Unlimited customization options +- **Team Credentials**: Uses your team's API keys and authentication + +### Cloud Installations (Future) + +**Cloud MCP servers** will run on managed infrastructure: + +- **Managed Service**: DeployStack handles the infrastructure +- **Simplified Setup**: Easier installation and maintenance +- **Automatic Updates**: Managed updates and scaling +- **Team Isolation**: Still team-scoped with your credentials + +## Security Considerations + +### Credential Protection + +Your team's installation credentials are protected through multiple layers: + +- **Encryption at Rest**: All credentials are encrypted in the database +- **Access Control**: Only team members can access credentials +- **Secure Transmission**: Credentials are encrypted during transmission +- **Audit Logging**: All credential access is logged for security + +### Team Boundaries + +The team-scoped installation system provides strong security boundaries: + +- **Complete Isolation**: Teams cannot access each other's installations +- **Separate Credentials**: Each team uses completely separate authentication +- **Independent Configuration**: No shared configuration between teams +- **Secure Defaults**: Installations use secure default settings + +MCP server installations provide the bridge between the global catalog of available servers and your team's actual working environment. 
By understanding how installations work within your team workspace, you can effectively manage your team's MCP server landscape while maintaining security and organization. diff --git a/docs/meta.json b/docs/meta.json index eeb3e14..224bbdc 100644 --- a/docs/meta.json +++ b/docs/meta.json @@ -1,7 +1,24 @@ { + "title": "DeployStack", + "description": "Documentation for DeployStack", + "icon": "DeployStackLogo", "pages": [ - "deploystack", - "docker-to-iac", - "docker-deployment" + "quick-start", + "---General---", + "teams", + "roles", + "onboard-new-team-members", + "global-settings", + "security", + "---MCP Server---", + "mcp-catalog", + "mcp-installation", + "mcp-categories", + "---Administration---", + "auth", + "github-application", + "github-oauth-setup", + "troubleshooting", + "local-setup" ] } \ No newline at end of file diff --git a/docs/onboard-new-team-members.mdx b/docs/onboard-new-team-members.mdx new file mode 100644 index 0000000..68a8460 --- /dev/null +++ b/docs/onboard-new-team-members.mdx @@ -0,0 +1,8 @@ +--- +title: Onboard New Team Members +description: Step-by-step guide to onboard new team members to DeployStack, ensuring they have the necessary access and understanding of the platform. 
+--- + +# Onboard New Team Members + +lorem ipsum \ No newline at end of file diff --git a/docs/deploystack/quick-start.mdx b/docs/quick-start.mdx similarity index 95% rename from docs/deploystack/quick-start.mdx rename to docs/quick-start.mdx index 71ef5fd..c486a77 100644 --- a/docs/deploystack/quick-start.mdx +++ b/docs/quick-start.mdx @@ -303,7 +303,6 @@ free -h # Check memory If you need assistance: -- **Documentation**: Check our [comprehensive guides](/deploystack) - **Community**: Join our [Discord](https://discord.gg/UjFWwByB) - **Issues**: Report problems on [GitHub](https://github.com/deploystackio/deploystack/issues) - **Support**: Contact us for enterprise support options @@ -312,10 +311,10 @@ If you need assistance: ### Learn More -- **[Self-Hosted Documentation](/deploystack/self-hosted)**: Comprehensive deployment guides -- **[Local Development](/deploystack/local-setup)**: Set up development environment -- **[Global Settings](/deploystack/global-settings)**: Configure email, auth, and more -- **[User Roles](/deploystack/roles)**: Manage team permissions +- **[Self-Hosted Documentation](/self-hosted)**: Comprehensive deployment guides +- **[Local Development](/local-setup)**: Set up development environment +- **[Global Settings](/global-settings)**: Configure email, auth, and more +- **[User Roles](/roles)**: Manage team permissions ### Deploy MCP Servers diff --git a/docs/deploystack/roles.mdx b/docs/roles.mdx similarity index 100% rename from docs/deploystack/roles.mdx rename to docs/roles.mdx diff --git a/docs/deploystack/security.mdx b/docs/security.mdx similarity index 100% rename from docs/deploystack/security.mdx rename to docs/security.mdx diff --git a/docs/deploystack/self-hosted/database-setup.mdx b/docs/self-hosted/database-setup.mdx similarity index 100% rename from docs/deploystack/self-hosted/database-setup.mdx rename to docs/self-hosted/database-setup.mdx diff --git a/docs/deploystack/self-hosted/docker-compose.mdx 
b/docs/self-hosted/docker-compose.mdx similarity index 96% rename from docs/deploystack/self-hosted/docker-compose.mdx rename to docs/self-hosted/docker-compose.mdx index 7dd9c68..40ca439 100644 --- a/docs/deploystack/self-hosted/docker-compose.mdx +++ b/docs/self-hosted/docker-compose.mdx @@ -13,7 +13,7 @@ import { Tabs, Tab } from 'fumadocs-ui/components/tabs'; Deploy DeployStack using Docker Compose for a production-ready, self-hosted installation. This method is recommended for most users as it provides a reliable, scalable setup with minimal configuration. - Docker containers are for production hosting or self-hosting. For development contributions, check the [Local Setup](/deploystack/local-setup) guide. + Docker containers are for production hosting or self-hosting. For development contributions, check the [Local Setup](/local-setup) guide. ## Overview @@ -293,7 +293,7 @@ docker stats If you encounter issues not covered here: -1. Check the [Troubleshooting](/deploystack/troubleshooting) guide +1. Check the [Troubleshooting](/troubleshooting) guide 2. Search existing [GitHub Issues](https://github.com/deploystackio/deploystack/issues) 3. Join our [Discord community](https://discord.gg/UjFWwByB) 4. Create a new issue with detailed logs and system information @@ -309,4 +309,4 @@ Once DeployStack is running: --- -**Need to upgrade?** Check our [Upgrade Guide](/deploystack/self-hosted/upgrade-guide) for step-by-step instructions. +**Need to upgrade?** Check our [Upgrade Guide](/self-hosted/upgrade-guide) for step-by-step instructions. 
diff --git a/docs/deploystack/self-hosted/index.mdx b/docs/self-hosted/index.mdx similarity index 88% rename from docs/deploystack/self-hosted/index.mdx rename to docs/self-hosted/index.mdx index c5a7dbb..158ca5f 100644 --- a/docs/deploystack/self-hosted/index.mdx +++ b/docs/self-hosted/index.mdx @@ -1,7 +1,7 @@ --- title: Self-Hosted DeployStack description: Deploy and manage DeployStack on your own infrastructure with complete control and customization. -sidebar: Self-Hosted +sidebar: Self-Hosted Quickstart icon: Server --- @@ -17,7 +17,7 @@ Run DeployStack on your own infrastructure for maximum control, security, and cu } - href="/deploystack/self-hosted/docker-compose" + href="/self-hosted/docker-compose" title="Docker Compose Setup" > Quick and easy deployment using Docker Compose - recommended for most users @@ -25,7 +25,7 @@ Run DeployStack on your own infrastructure for maximum control, security, and cu } - href="/deploystack/self-hosted/setup" + href="/self-hosted/setup" title="Advanced Setup" > Manual installation and configuration for custom environments @@ -37,7 +37,7 @@ Run DeployStack on your own infrastructure for maximum control, security, and cu } - href="/deploystack/self-hosted/upgrade-guide" + href="/self-hosted/upgrade-guide" title="Upgrade Guide" > Keep your DeployStack instance up-to-date with the latest features and security patches @@ -45,7 +45,7 @@ Run DeployStack on your own infrastructure for maximum control, security, and cu } - href="/deploystack/troubleshooting" + href="/troubleshooting" title="Troubleshooting" > Common issues and solutions for self-hosted deployments @@ -85,7 +85,7 @@ Run DeployStack on your own infrastructure for maximum control, security, and cu ## Getting Started -1. **Choose Your Deployment Method**: Start with [Docker Compose](/deploystack/self-hosted/docker-compose) for the quickest setup +1. **Choose Your Deployment Method**: Start with [Docker Compose](/self-hosted/docker-compose) for the quickest setup 2. 
**Configure Environment**: Set up your environment variables and secrets 3. **Deploy Services**: Launch your DeployStack instance 4. **Access Interface**: Connect to your self-hosted DeployStack at your configured URL @@ -100,4 +100,4 @@ Need help with your self-hosted deployment? --- -**Ready to deploy?** Start with our [Docker Compose guide](/deploystack/self-hosted/docker-compose) for the fastest setup experience. +**Ready to deploy?** Start with our [Docker Compose guide](/self-hosted/docker-compose) for the fastest setup experience. diff --git a/docs/self-hosted/meta.json b/docs/self-hosted/meta.json new file mode 100644 index 0000000..1eed4b1 --- /dev/null +++ b/docs/self-hosted/meta.json @@ -0,0 +1,8 @@ +{ + "title": "Self-Hosted", + "description": "Self-hosted DeployStack", + "icon": "DeployStackLogo", + "pages": [ + "..." + ] +} diff --git a/docs/deploystack/self-hosted/setup.mdx b/docs/self-hosted/setup.mdx similarity index 72% rename from docs/deploystack/self-hosted/setup.mdx rename to docs/self-hosted/setup.mdx index 06fb18a..5d87c7f 100644 --- a/docs/deploystack/self-hosted/setup.mdx +++ b/docs/self-hosted/setup.mdx @@ -17,13 +17,35 @@ Configure your self-hosted DeployStack instance with essential settings to custo Platform setup is performed through the web interface after your DeployStack instance is running. All settings are optional but recommended for production deployments. +## Initial Setup Process + + + **For Developers**: The initial setup consists of two parts: + 1. **Database Setup**: Completed via the frontend wizard at `/setup` which calls `POST /api/db/setup` + 2. **Platform Configuration**: Done through the Settings interface after database initialization + + ## Accessing Platform Settings + + **Complete Initial Setup** + + If this is a fresh installation, first visit `https://<your-instance>/setup` to complete the database initialization wizard.
This creates: + + **For Docker deployments:** + - Database configuration stored in the Docker volume `deploystack_backend_persistent` + - Access the setup wizard at `http://localhost:8080/setup` (or your configured frontend URL) + + **For local development:** + - `services/backend/persistent_data/db.selection.json` (database type configuration) + - `services/backend/persistent_data/database/deploystack.db` (if using SQLite) + + **Log in as Administrator** - Access your DeployStack instance and log in with an administrator account. + After database setup, access your DeployStack instance and log in with an administrator account. @@ -189,9 +211,11 @@ Follow this recommended setup workflow for new DeployStack instances: - **Initial Access** + **Initial Database Setup** - - Complete the initial setup wizard + - Navigate to `https://<your-instance>/setup` (Docker: `http://localhost:8080/setup` by default) + - Complete the database setup wizard (SQLite or Turso) + - This initializes the database and saves configuration - Create your admin account - Log in to the platform @@ -254,9 +278,13 @@ Follow this recommended setup workflow for new DeployStack instances: **Problem**: Settings page not accessible or returns errors **Solutions**: +- Ensure the initial database setup at `/setup` has been completed +- **For Docker**: Check that the `deploystack_backend_persistent` volume exists and contains data +- **For local development**: Check for `services/backend/persistent_data/db.selection.json` file existence - Ensure you're logged in as an administrator - Check that your user has the `global_admin` role - Verify the backend service is running properly +- Check that database migrations have been applied (happens automatically after setup) #### Email Not Working @@ -282,7 +310,6 @@ Follow this recommended setup workflow for new DeployStack instances: If you encounter issues during setup: - **Check logs**: Review Docker container logs for error messages -- **Documentation**: Consult our
[troubleshooting guide](/deploystack/troubleshooting) - **Community**: Join our [Discord](https://discord.gg/UjFWwByB) for support - **GitHub**: Report issues on our [GitHub repository](https://github.com/deploystackio/deploystack/issues) @@ -310,12 +337,65 @@ Configure different settings for different environments: ### Backup Configuration -Your platform settings are stored in the database. Ensure you: + + + **Docker Volume Backup:** + + Your data is stored in the Docker volume `deploystack_backend_persistent`. To backup: + + ```bash + # Create a backup of the Docker volume + docker run --rm -v deploystack_backend_persistent:/data \ + -v $(pwd):/backup alpine \ + tar czf /backup/deploystack-backup-$(date +%Y%m%d).tar.gz /data + ``` + + **Restore from backup:** + ```bash + # Restore the Docker volume from backup + docker run --rm -v deploystack_backend_persistent:/data \ + -v $(pwd):/backup alpine \ + tar xzf /backup/deploystack-backup-20250108.tar.gz -C / + ``` + + The volume contains: + - `database/deploystack.db` - SQLite database (if using SQLite) + - `db.selection.json` - Database type configuration + - Any other persistent application data + + + + **File System Backup:** + + Your data is stored in `services/backend/persistent_data/`. To backup: + + ```bash + # Create a backup archive + tar czf deploystack-backup-$(date +%Y%m%d).tar.gz \ + services/backend/persistent_data/ + ``` + + **Restore from backup:** + ```bash + # Restore from backup archive + tar xzf deploystack-backup-20250108.tar.gz + ``` + + The directory contains: + - `database/deploystack.db` - SQLite database (if using SQLite) + - `db.selection.json` - Database type configuration + - Any other persistent application data + + + + + **Important**: Always backup the complete data directory/volume, not just the database file, as it contains critical configuration like database type selection. 
+ -- **Backup the database** regularly (included in persistent volume) - **Document custom settings** for disaster recovery - **Test restore procedures** in non-production environments +- **Schedule regular backups** using cron or your preferred scheduling tool --- -**Next Steps**: After completing platform setup, configure [user roles and permissions](/deploystack/roles) and set up your first [team workspaces](/deploystack/teams). +**Next Steps**: After completing platform setup, configure [user roles and permissions](/roles) and set up your first [team workspaces](/teams). diff --git a/docs/deploystack/self-hosted/upgrade-guide.mdx b/docs/self-hosted/upgrade-guide.mdx similarity index 100% rename from docs/deploystack/self-hosted/upgrade-guide.mdx rename to docs/self-hosted/upgrade-guide.mdx diff --git a/docs/deploystack/teams.mdx b/docs/teams.mdx similarity index 98% rename from docs/deploystack/teams.mdx rename to docs/teams.mdx index 411fd43..fb0c8c2 100644 --- a/docs/deploystack/teams.mdx +++ b/docs/teams.mdx @@ -47,14 +47,13 @@ Teams serve as comprehensive containers for all your deployment resources: - **Global MCP Server Access**: Browse and deploy community-wide MCP servers - **Server Management**: Team administrators can create, edit, and delete team servers - **Version Control**: Track different versions of your team's MCP servers -- **GitHub Integration**: Automatic synchronization with your team's repositories (see [GitHub Integration Guide](./github-integration.mdx)) +- **GitHub Integration**: Automatic synchronization with GitHub repositories (see [GitHub App Integration Guide](/github-application)) - **Custom Configurations**: Team-specific server settings and parameters - **Deployment History**: Complete logs and monitoring data for team deployments ### Cloud Provider Credentials -- **Render.com**: API tokens and service configurations -- **Fly.io**: Authentication tokens and app settings -- **Other Providers**: Credentials for additional 
supported platforms +- **local MCP Servers**: Credentials for local MCP environments +- **Google Cloud**: API tokens and service configurations ### Global Environment Variables - **Node.js Environment Variables**: Custom env vars for Node.js-based MCP servers diff --git a/docs/deploystack/troubleshooting.mdx b/docs/troubleshooting.mdx similarity index 100% rename from docs/deploystack/troubleshooting.mdx rename to docs/troubleshooting.mdx diff --git a/lib/components/DeployStackLogo.tsx b/lib/components/DeployStackLogo.tsx index dc65c8e..ff812c1 100644 --- a/lib/components/DeployStackLogo.tsx +++ b/lib/components/DeployStackLogo.tsx @@ -4,7 +4,7 @@ import Image from 'next/image'; export const DeployStackLogo: React.FC<{ className?: string }> = ({ className = "w-6 h-6" }) => { return ( DeployStack Logo { + console.log('Path:', doc._file.path, 'Flattened:', doc._file.flattenedPath); +}); + +console.log('\n=== DEVELOPMENT DOCS ==='); +const developmentDocs = allDocs.filter((doc: any) => + doc._file.path.startsWith('development/') +); +developmentDocs.forEach((doc: any) => { + console.log('Path:', doc._file.path, 'Flattened:', doc._file.flattenedPath); +}); + +console.log('\n=== SELF-HOSTED DOCS ==='); +const selfHostedDocs = allDocs.filter((doc: any) => + doc._file.path.startsWith('self-hosted/') +); +selfHostedDocs.forEach((doc: any) => { + console.log('Path:', doc._file.path, 'Flattened:', doc._file.flattenedPath); +}); diff --git a/lib/source.ts b/lib/source.ts index 18e4e4e..f5c18c1 100644 --- a/lib/source.ts +++ b/lib/source.ts @@ -7,52 +7,128 @@ import { loader } from 'fumadocs-core/source'; import { icons } from 'lucide-react'; import { createElement } from 'react'; import { DeployStackLogo } from './components/DeployStackLogo'; +import { createMDXSource } from 'fumadocs-mdx'; +import type { PageTree } from 'fumadocs-core/server'; -export const source = loader({ - // Base URL for the documentation pages. 
- // Since we've moved docs to the root level, baseUrl should be '/' - // This means docs/index.mdx will be at / and other docs at their direct paths - baseUrl: '/', - // The source of the documents, converted to Fumadocs format. - source: docs.toFumadocsSource(), - - - // Icon handler to support both lucide-react icons and custom icons - icon(icon) { - if (!icon) { - return; // No icon specified - } +// Helper function for icon handling +function createIconHandler() { + return (icon?: string) => { + if (!icon) return; - // Handle custom icons if (icon === 'DeployStackLogo') { return createElement(DeployStackLogo); } - // Handle lucide-react icons if (icon in icons) { return createElement(icons[icon as keyof typeof icons]); } - // If icon not found, return undefined return undefined; - }, + }; +} - // Customize page tree to support custom sidebar titles - pageTree: { - attachFile(node, file) { - // If the file has a custom sidebar title, use it instead of the title - if (file?.data && 'sidebar' in file.data && file.data.sidebar) { - node.name = file.data.sidebar as string; - } - return node; - }, - }, +// Filter docs into separate sections +const allDocs = docs.docs; +const allMeta = docs.meta; + +// Main docs (root level - exclude development and self-hosted) +const mainDocs = allDocs.filter((doc: any) => { + const path = doc._file.path; + return !path.startsWith('development/') && !path.startsWith('self-hosted/'); +}); + +const mainMeta = allMeta.filter((meta: any) => { + const path = meta._file.path; + return !path.startsWith('development/') && !path.startsWith('self-hosted/'); +}); + +// Development docs - keep the full path +const developmentDocs = allDocs.filter((doc: any) => + doc._file.path.startsWith('development/') +); + +const developmentMeta = allMeta.filter((meta: any) => + meta._file.path.startsWith('development/') +); + +// Self-hosted docs - keep the full path +const selfHostedDocs = allDocs.filter((doc: any) => + 
doc._file.path.startsWith('self-hosted/') +); + +const selfHostedMeta = allMeta.filter((meta: any) => + meta._file.path.startsWith('self-hosted/') +); + +// Create separate sources for each section +export const mainSource = loader({ + baseUrl: '/', + source: createMDXSource(mainDocs, mainMeta), + icon: createIconHandler(), +}); - // Optional: You can define global MDX components here if not done elsewhere - // globalMdxComponents: getMDXComponents(), +// For development and self-hosted, we keep the full path structure +export const developmentSource = loader({ + baseUrl: '/', // Changed from '/development' to '/' + source: createMDXSource(developmentDocs, developmentMeta), + icon: createIconHandler(), }); -// You might also want to export page tree and other utilities if needed directly -// export const pageTree = source.pageTree; -// export const getPage = source.getPage; -// export const generateParams = source.generateParams; +export const selfHostedSource = loader({ + baseUrl: '/', // Changed from '/self-hosted' to '/' + source: createMDXSource(selfHostedDocs, selfHostedMeta), + icon: createIconHandler(), +}); + +// Unified source for backward compatibility and dynamic usage +export const source = { + getPage(slug?: string[], locale?: string) { + if (!slug || slug.length === 0) return mainSource.getPage(slug, locale); + + const firstSegment = slug[0]; + + // For development section, use the full slug path + if (firstSegment === 'development') { + return developmentSource.getPage(slug, locale); + } + + // For self-hosted section, use the full slug path + if (firstSegment === 'self-hosted') { + return selfHostedSource.getPage(slug, locale); + } + + // For main docs, use as-is + return mainSource.getPage(slug, locale); + }, + + getPages(locale?: string) { + return [ + ...mainSource.getPages(locale), + ...developmentSource.getPages(locale), + ...selfHostedSource.getPages(locale), + ]; + }, + + generateParams() { + // Get params from all sources + const mainParams 
= mainSource.generateParams(); + const devParams = developmentSource.generateParams(); + const selfParams = selfHostedSource.generateParams(); + + return [ + ...mainParams, + ...devParams, + ...selfParams, + ]; + }, + + // Get appropriate page tree based on current path + getPageTree(path?: string) { + if (path?.startsWith('/development')) return developmentSource.pageTree; + if (path?.startsWith('/self-hosted')) return selfHostedSource.pageTree; + return mainSource.pageTree; + }, + + // Default page tree for compatibility + pageTree: mainSource.pageTree, +}; diff --git a/public/favicon.ico b/public/favicon.ico index a8822ad..c947817 100644 Binary files a/public/favicon.ico and b/public/favicon.ico differ diff --git a/public/favicon.png b/public/favicon.png index 4b50717..a07ee7e 100644 Binary files a/public/favicon.png and b/public/favicon.png differ diff --git a/public/logo-deploystack.png b/public/logo-deploystack.png new file mode 100644 index 0000000..f28c534 Binary files /dev/null and b/public/logo-deploystack.png differ diff --git a/public/logo-deploystack.svg b/public/logo-deploystack.svg deleted file mode 100644 index 9a7776d..0000000 --- a/public/logo-deploystack.svg +++ /dev/null @@ -1,29 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/tailwind.config.cjs b/tailwind.config.cjs index 241927e..15599ed 100644 --- a/tailwind.config.cjs +++ b/tailwind.config.cjs @@ -1,14 +1,17 @@ /** @type {import('tailwindcss').Config} */ +// Note: With Tailwind CSS v4, this config file is deprecated. +// All configuration is now done in CSS using @theme directive. +// This file is kept for compatibility but can be removed. 
module.exports = { content: [ "./app/**/*.{js,ts,jsx,tsx,mdx}", "./pages/**/*.{js,ts,jsx,tsx,mdx}", "./components/**/*.{js,ts,jsx,tsx,mdx}", - "./docs/**/*.{md,mdx}", // Added for user's docs - "./node_modules/fumadocs-ui/**/*.{js,ts,jsx,tsx}" // For Fumadocs UI components + "./docs/**/*.{md,mdx}", + "./node_modules/fumadocs-ui/**/*.{js,ts,jsx,tsx}" ], theme: { - extend: {}, + extend: {} }, plugins: [], }