diff --git a/.github/copilot-instructions.md b/.github/copilot-instructions.md
deleted file mode 100644
index f6a5d32..0000000
--- a/.github/copilot-instructions.md
+++ /dev/null
@@ -1,146 +0,0 @@
-# Docker DB Manager - AI Agent Instructions
-
-## Project Overview
-A Tauri v2 desktop app for managing Docker database containers (PostgreSQL, MySQL, Redis, MongoDB). Built with React + TypeScript frontend and Rust backend, using a multi-window architecture.
-
-## Architecture
-
-### Multi-Window System
-The app uses **separate HTML entry points** for different windows:
-- `index.html` → MainPage (container list)
-- `create-container.html` → CreateContainerPage (wizard)
-- `edit-container.html` → EditContainerPage (edit existing)
-
-Windows communicate via Tauri events (`container-created`, `container-updated`) rather than shared state. See `src-tauri/src/commands/window.rs` for window management.
-
-### Frontend-Backend Bridge
-- **Typed wrapper**: All Tauri commands go through `src/core/tauri/invoke.ts` for centralized error handling
-- **Command pattern**: Rust commands in `src-tauri/src/commands/` are invoked by name string from TypeScript
-- **State management**: `useApp()` orchestrates container list + Docker status; hooks are split by responsibility (see `src/features/`)
-
-### Data Flow
-1. **Storage**: `tauri-plugin-store` persists containers to `databases.json` (key-value store)
-2. **Synchronization**: Auto-sync every 5s via `useContainerList` to reconcile with actual Docker state
-3. **Docker commands**: Shell execution through `tauri-plugin-shell` (see `DockerService::run_container`)
-4. **Error handling**: Rust returns JSON-encoded errors; TypeScript parses via `core/errors/error-handler.ts`
-
-## Critical Patterns
-
-### Container Lifecycle
-When updating containers (port/name changes):
-1. Container is **recreated** (removed and re-run with new config)
-2. Volume migration happens if `persist_data=true` and name changed
-3. Always cleanup volumes on errors (see `create_database_container` cleanup logic)
-
-### Database-Specific Configuration
-Each DB type has unique settings stored in `CreateDatabaseRequest`:
-- PostgreSQL: `postgres_settings` (host_auth_method, initdb_args)
-- MySQL: `mysql_settings` (character_set, collation)
-- Redis: `redis_settings` (max_memory, append_only)
-- MongoDB: `mongo_settings` (auth_source, enable_sharding)
-
-Default ports are defined in `DockerService::get_default_port()`.
- -### Form Validation -Multi-step wizard uses `react-hook-form` + `zod` schemas: -- Schemas in `src/pages/create-container/schemas/` -- Step validation via `isCurrentStepValid` in wizard hook -- Form state persists across steps (single form instance) - -## Development Commands - -```bash -# Development (starts both Vite + Tauri) -npm run dev - -# Linting (uses Biome, not ESLint) -npm run lint # check only -npm run lint:fix # auto-fix - -# Testing -npm test # watch mode -npm run test:run # CI mode -npm run test:ui # Vitest UI - -# Rust tests -cd src-tauri && cargo test -``` - -### Build System -- **Vite**: Multi-entry build in `vite.config.ts` (rollupOptions.input) -- **Alias**: `@/` maps to `src/` in both Vite and tsconfig -- **Tauri dev**: Runs on port 1420 (strict port enforcement) - -## Code Conventions - -### File Naming -- **Enforced**: `kebab-case` for all files (Biome rule `useFilenamingConvention`) -- Hooks: `use-*.ts` (e.g., `use-container-list.ts`) -- Components: `PascalCase.tsx` (e.g., `MainPage.tsx`) - -### Hook Composition -- **Separation of concerns**: Split hooks by responsibility - - `use-container-list.ts`: State + sync - - `use-container-actions.ts`: CRUD operations - - `use-app.ts`: Orchestration layer -- Avoid monolithic hooks; prefer composition - -### Error Handling -```typescript -// Frontend: Always use error handler -import { handleContainerError } from '@/core/errors/error-handler'; -try { - await containersApi.create(data); -} catch (error) { - handleContainerError(error); // Parses JSON errors, shows toast -} - -// Backend: Return Result with JSON error structs -return Err(serde_json::to_string(&CreateContainerError { - error_type: "PORT_IN_USE".to_string(), - message: format!("Port {} is already in use", port), - details: Some("Change the port...".to_string()), -}).unwrap()); -``` - -## Testing Strategy - -### Frontend -- **Vitest** with jsdom environment (`src/test/setup.ts`) -- Test files: `**/*.{test,spec}.{ts,tsx}` (not in `src-tauri/`) -- Coverage excludes: `src/test/`, `*.config.*`, type definitions - -### Backend -- Unit tests: `src-tauri/tests/unit_services.rs` -- Integration tests: `src-tauri/tests/integration_tests.rs` -- Mock Docker commands when testing services - -## Common Gotchas - -1. **Window events**: Main window needs listeners for `container-created`/`container-updated` to refresh list -2. **Volume naming**: Always `{container-name}-data` format; handle renames carefully -3. **Docker status overlay**: Shown when Docker is unavailable; check `useDockerStatus` hook -4. **Rust mutex locks**: Always clone before async operations to avoid holding locks across await points -5. **Port validation**: Check both Docker availability AND port conflicts before creation - -## Dependencies Notes - -- **Biome**: Used instead of ESLint/Prettier (single config in `biome.json`) -- **Shadcn/ui**: Components in `src/shared/components/ui/` (Radix UI + Tailwind) -- **Framer Motion**: Animations in wizard steps (see `pageVariants` in `DatabaseSelectionForm`) -- **tauri-plugin-store**: Key-value persistence (NOT a database) - -## Adding New Database Types - -1. Add type to `DatabaseType` in `src/shared/types/container.ts` -2. Update `DockerService::build_docker_command()` with env vars -3. Add default port in `get_default_port()` -4. Add data path in `get_data_path()` -5. Create settings interface in `CreateContainerRequest` -6. 
Update wizard form validation schemas - -## When in Doubt - -- **Type mismatches**: Check both Rust types (`src-tauri/src/types/`) and TypeScript types (`src/shared/types/`) -- **Docker issues**: Run `docker version` and `docker info` commands used by `check_docker_status` -- **State sync**: Containers auto-sync every 5s; force reload with `loadContainers()` diff --git a/package-lock.json b/package-lock.json index f0b0555..d40825c 100644 --- a/package-lock.json +++ b/package-lock.json @@ -11,6 +11,7 @@ "dependencies": { "@hookform/resolvers": "5.2.2", "@radix-ui/react-accordion": "1.2.12", + "@radix-ui/react-checkbox": "^1.3.3", "@radix-ui/react-dialog": "1.1.15", "@radix-ui/react-dropdown-menu": "2.1.16", "@radix-ui/react-label": "2.1.7", @@ -1297,6 +1298,36 @@ } } }, + "node_modules/@radix-ui/react-checkbox": { + "version": "1.3.3", + "resolved": "https://registry.npmjs.org/@radix-ui/react-checkbox/-/react-checkbox-1.3.3.tgz", + "integrity": "sha512-wBbpv+NQftHDdG86Qc0pIyXk5IR3tM8Vd0nWLKDcX8nNn4nXFOFwsKuqw2okA/1D/mpaAkmuyndrPJTYDNZtFw==", + "license": "MIT", + "dependencies": { + "@radix-ui/primitive": "1.1.3", + "@radix-ui/react-compose-refs": "1.1.2", + "@radix-ui/react-context": "1.1.2", + "@radix-ui/react-presence": "1.1.5", + "@radix-ui/react-primitive": "2.1.3", + "@radix-ui/react-use-controllable-state": "1.2.2", + "@radix-ui/react-use-previous": "1.1.1", + "@radix-ui/react-use-size": "1.1.1" + }, + "peerDependencies": { + "@types/react": "*", + "@types/react-dom": "*", + "react": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc", + "react-dom": "^16.8 || ^17.0 || ^18.0 || ^19.0 || ^19.0.0-rc" + }, + "peerDependenciesMeta": { + "@types/react": { + "optional": true + }, + "@types/react-dom": { + "optional": true + } + } + }, "node_modules/@radix-ui/react-collapsible": { "version": "1.1.12", "resolved": "https://registry.npmjs.org/@radix-ui/react-collapsible/-/react-collapsible-1.1.12.tgz", diff --git a/package.json b/package.json index 9fddfde..83f3a02 100644 --- a/package.json +++ b/package.json @@ -36,6 +36,7 @@ "dependencies": { "@hookform/resolvers": "5.2.2", "@radix-ui/react-accordion": "1.2.12", + "@radix-ui/react-checkbox": "^1.3.3", "@radix-ui/react-dialog": "1.1.15", "@radix-ui/react-dropdown-menu": "2.1.16", "@radix-ui/react-label": "2.1.7", diff --git a/src-tauri/src/commands/app.rs b/src-tauri/src/commands/app.rs index b6c8e1f..a8f5082 100644 --- a/src-tauri/src/commands/app.rs +++ b/src-tauri/src/commands/app.rs @@ -2,8 +2,3 @@ pub fn get_app_version() -> String { env!("CARGO_PKG_VERSION").to_string() } - -#[tauri::command] -pub fn greet(name: &str) -> String { - format!("Hello, {}! 
You've been greeted from Rust!", name) -} diff --git a/src-tauri/src/commands/database.rs b/src-tauri/src/commands/database.rs index 6d73519..1a9294e 100644 --- a/src-tauri/src/commands/database.rs +++ b/src-tauri/src/commands/database.rs @@ -1,55 +1,54 @@ use crate::services::*; use crate::types::*; use tauri::{AppHandle, State}; -use uuid::Uuid; +/// Create database container from generic Docker run request +/// This command is database-agnostic and uses the docker args built by the frontend provider #[tauri::command] -pub async fn create_database_container( - request: CreateDatabaseRequest, +pub async fn create_container_from_docker_args( + request: DockerRunRequest, app: AppHandle, databases: State<'_, DatabaseStore>, ) -> Result { let docker_service = DockerService::new(); let storage_service = StorageService::new(); - // Generate container ID - let container_id = Uuid::new_v4().to_string(); - let volume_name = if request.persist_data { - Some(format!("{}-data", request.name)) - } else { - None - }; - - // Create volume if needed - if let Some(vol_name) = &volume_name { + // Create volumes if needed + for volume in &request.docker_args.volumes { docker_service - .create_volume_if_needed(&app, vol_name) + .create_volume_if_needed(&app, &volume.name) .await?; } - // Build Docker command - let docker_args = docker_service.build_docker_command(&request, &volume_name)?; + // Build Docker command from generic args + let docker_args = + docker_service.build_docker_command_from_args(&request.name, &request.docker_args); // Execute Docker run command let real_container_id = match docker_service.run_container(&app, &docker_args).await { Ok(container_id) => container_id, Err(error) => { - // Cleanup resources on error - do this synchronously to ensure cleanup before returning + // Cleanup resources on error let _ = docker_service .force_remove_container_by_name(&app, &request.name) .await; - if let Some(vol_name) = &volume_name { - let _ = docker_service.remove_volume_if_exists(&app, vol_name).await; + // Cleanup volumes + for volume in &request.docker_args.volumes { + let _ = docker_service + .remove_volume_if_exists(&app, &volume.name) + .await; } // Check if it's a port already in use error if error.contains("port is already allocated") || error.contains("Bind for") { let port_error = CreateContainerError { error_type: "PORT_IN_USE".to_string(), - message: format!("Port {} is already in use", request.port), - port: Some(request.port), - details: Some("You can change the port in the configuration and try again. 
The port can be modified later if needed.".to_string()), + message: format!("Port {} is already in use", request.metadata.port), + port: Some(request.metadata.port), + details: Some( + "You can change the port in the configuration and try again.".to_string(), + ), }; return Err(serde_json::to_string(&port_error) .unwrap_or_else(|_| "Port in use error".to_string())); @@ -82,29 +81,29 @@ pub async fn create_database_container( } }; - // Create database object + // Create database object using metadata let database = DatabaseContainer { - id: container_id.clone(), + id: request.metadata.id.clone(), name: request.name.clone(), - db_type: request.db_type, - version: request.version, + db_type: request.metadata.db_type, + version: request.metadata.version, status: "running".to_string(), - port: request.port, + port: request.metadata.port, created_at: chrono::Utc::now().format("%Y-%m-%d").to_string(), - max_connections: request.max_connections.unwrap_or(100), + max_connections: request.metadata.max_connections.unwrap_or(100), container_id: Some(real_container_id.clone()), - stored_password: Some(request.password.clone()), - stored_username: request.username.clone(), - stored_database_name: request.database_name.clone(), - stored_persist_data: request.persist_data, - stored_enable_auth: request.enable_auth, + stored_password: Some(request.metadata.password.clone()), + stored_username: request.metadata.username.clone(), + stored_database_name: request.metadata.database_name.clone(), + stored_persist_data: request.metadata.persist_data, + stored_enable_auth: request.metadata.enable_auth, }; // Store in memory databases .lock() .unwrap() - .insert(container_id.clone(), database.clone()); + .insert(request.metadata.id.clone(), database.clone()); // Persist to store let db_map = { @@ -115,15 +114,18 @@ pub async fn create_database_container( // If saving to store fails, cleanup the created container if let Err(store_error) = storage_service.save_databases_to_store(&app, &db_map).await { // Remove from memory - databases.lock().unwrap().remove(&container_id); + databases.lock().unwrap().remove(&request.metadata.id); // Cleanup Docker resources let _ = docker_service .remove_container(&app, &real_container_id) .await; - if let Some(vol_name) = &volume_name { - let _ = docker_service.remove_volume_if_exists(&app, vol_name).await; + // Cleanup volumes + for volume in &request.docker_args.volumes { + let _ = docker_service + .remove_volume_if_exists(&app, &volume.name) + .await; } return Err(format!("Error saving configuration: {}", store_error)); @@ -132,6 +134,242 @@ pub async fn create_database_container( Ok(database) } +/// Update database container from generic Docker run request +/// This command is database-agnostic and uses the docker args built by the frontend provider +#[tauri::command] +pub async fn update_container_from_docker_args( + container_id: String, + request: DockerRunRequest, + app: AppHandle, + databases: State<'_, DatabaseStore>, +) -> Result { + let docker_service = DockerService::new(); + let storage_service = StorageService::new(); + + // Get current container info + let mut container = { + let db_map = databases.lock().unwrap(); + db_map + .get(&container_id) + .cloned() + .ok_or("Container not found")? 
+ }; + + // Capture previous name for later cleanup + let previous_name = container.name.clone(); + + // Determine if we need to recreate the container + let name_changed = request.name != container.name; + let port_changed = request.metadata.port != container.port; + let persist_data_changed = request.metadata.persist_data != container.stored_persist_data; + let needs_recreation = name_changed || port_changed || persist_data_changed; + + // Track volumes for cleanup - define outside the if block for later access + let old_volumes: Vec = if container.stored_persist_data { + vec![format!("{}-data", container.name)] + } else { + vec![] + }; + + // Track if we need to cleanup old volumes after successful update + let should_cleanup_old_volumes = container.stored_persist_data && !request.metadata.persist_data; + + if needs_recreation { + // Remove old container + if let Some(old_id) = &container.container_id { + docker_service.remove_container(&app, old_id).await?; + } + + // Handle volume migration if needed + let new_volumes = &request.docker_args.volumes; + + // Track if migration occurred for cleanup purposes + let volume_migrated = + name_changed && container.stored_persist_data && request.metadata.persist_data; + + // Case 1: Name changed AND has persistent data -> migrate volume + if volume_migrated { + let old_volume_name = format!("{}-data", container.name); + let new_volume_name = format!("{}-data", request.name); + + // Get data path from the provider's volume configuration + let data_path = if let Some(vol) = new_volumes.first() { + vol.path.as_str() + } else { + "/data" // fallback + }; + + docker_service + .migrate_volume_data(&app, &old_volume_name, &new_volume_name, data_path) + .await?; + } + // Case 2: Enabling persistent data -> create new volume + else if !container.stored_persist_data && request.metadata.persist_data { + for volume in new_volumes { + docker_service + .create_volume_if_needed(&app, &volume.name) + .await?; + } + } + // Case 3: Disabling persistent data -> defer cleanup until after success + // (old volumes will be cleaned up after successful store save to prevent data loss) + // Case 4: Name changed but NO persistent data -> just ensure new volumes exist if needed + else if name_changed && request.metadata.persist_data { + for volume in new_volumes { + docker_service + .create_volume_if_needed(&app, &volume.name) + .await?; + } + } + + // Build Docker command from generic args + let docker_args = + docker_service.build_docker_command_from_args(&request.name, &request.docker_args); + + // Execute Docker run command + let real_container_id = match docker_service.run_container(&app, &docker_args).await { + Ok(container_id) => container_id, + Err(error) => { + // Cleanup resources on error + let _ = docker_service + .force_remove_container_by_name(&app, &request.name) + .await; + + // Cleanup new volumes if they were created + // Note: If volume migration occurred, the old volume still exists with original data + for volume in new_volumes { + let _ = docker_service + .remove_volume_if_exists(&app, &volume.name) + .await; + } + + // If migration occurred, note that old volume is preserved with original data + // User can retry the update operation without data loss + + // Check if it's a port already in use error + if error.contains("port is already allocated") || error.contains("Bind for") { + let port_error = CreateContainerError { + error_type: "PORT_IN_USE".to_string(), + message: format!("Port {} is already in use", request.metadata.port), + port: 
Some(request.metadata.port), + details: Some( + "You can change the port in the configuration and try again." + .to_string(), + ), + }; + return Err(serde_json::to_string(&port_error) + .unwrap_or_else(|_| "Port in use error".to_string())); + } + + // Check if it's a container name already exists error + if error.contains("name is already in use") || error.contains("already exists") { + let name_error = CreateContainerError { + error_type: "NAME_IN_USE".to_string(), + message: format!( + "A container with the name '{}' already exists", + request.name + ), + port: None, + details: Some("Change the container name and try again.".to_string()), + }; + return Err(serde_json::to_string(&name_error) + .unwrap_or_else(|_| "Name in use error".to_string())); + } + + // Generic Docker error + let generic_error = CreateContainerError { + error_type: "DOCKER_ERROR".to_string(), + message: "Error updating container".to_string(), + port: None, + details: Some(error.to_string()), + }; + return Err(serde_json::to_string(&generic_error) + .unwrap_or_else(|_| format!("Docker command failed: {}", error))); + } + }; + + // Update container info with new values + container.name = request.name.clone(); + container.port = request.metadata.port; + container.version = request.metadata.version; + container.container_id = Some(real_container_id); + container.status = "running".to_string(); + container.stored_persist_data = request.metadata.persist_data; + container.stored_enable_auth = request.metadata.enable_auth; + + // Only update password if a non-empty value is provided + if !request.metadata.password.is_empty() { + container.stored_password = Some(request.metadata.password.clone()); + } + + container.stored_username = request.metadata.username; + container.stored_database_name = request.metadata.database_name; + + if let Some(max_conn) = request.metadata.max_connections { + container.max_connections = max_conn; + } + } else { + // For non-recreating changes, just update the metadata + // (currently only max_connections would fall here) + if let Some(max_conn) = request.metadata.max_connections { + container.max_connections = max_conn; + } + } + + // Update in memory store + { + let mut db_map = databases.lock().unwrap(); + db_map.insert(container.id.clone(), container.clone()); + } + + // Save to persistent store + let db_map = { + let map = databases.lock().unwrap(); + map.clone() + }; + + // If saving to store fails, rollback the changes (align with create_container behavior) + if let Err(store_error) = storage_service.save_databases_to_store(&app, &db_map).await { + // Remove from memory store + databases.lock().unwrap().remove(&container_id); + + // Cleanup new Docker resources if container was recreated + if needs_recreation { + if let Some(new_id) = &container.container_id { + let _ = docker_service.remove_container(&app, new_id).await; + } + + // Cleanup new volumes + for volume in &request.docker_args.volumes { + let _ = docker_service + .remove_volume_if_exists(&app, &volume.name) + .await; + } + } + + return Err(format!("Error saving configuration: {}", store_error)); + } + + // After successfully saving to store, cleanup old volume if migration occurred + if name_changed && container.stored_persist_data && request.metadata.persist_data { + let old_volume_name = format!("{}-data", previous_name); + let _ = docker_service + .remove_volume_if_exists(&app, &old_volume_name) + .await; + } + + // Cleanup old volumes if persistent data was disabled (deferred to prevent data loss on error) + if 
should_cleanup_old_volumes { + for old_volume in &old_volumes { + let _ = docker_service + .remove_volume_if_exists(&app, old_volume) + .await; + } + } + + Ok(container) +} + #[tauri::command] pub async fn get_all_databases( app: AppHandle, @@ -312,161 +550,3 @@ pub async fn remove_container( Ok(()) } - -#[tauri::command] -pub async fn update_container_config( - request: UpdateContainerRequest, - app: AppHandle, - databases: State<'_, DatabaseStore>, -) -> Result { - let docker_service = DockerService::new(); - let storage_service = StorageService::new(); - - // Store values we need to check later - let password_provided = request.password.is_some(); - let username_provided = request.username.is_some(); - let database_name_provided = request.database_name.is_some(); - - // Get current container info - let mut container = { - let db_map = databases.lock().unwrap(); - db_map - .get(&request.container_id) - .cloned() - .ok_or("Container not found")? - }; - - // Determine if we need to recreate the container - let needs_recreation = request.port.is_some() && request.port != Some(container.port) - || request.name.is_some() && request.name != Some(container.name.clone()) - || request.persist_data.is_some(); - - if needs_recreation { - // Remove old container - if let Some(old_id) = &container.container_id { - docker_service.remove_container(&app, old_id).await?; - } - - // Create new container request with updated values - let new_name = request.name.unwrap_or(container.name.clone()); - let new_port = request.port.unwrap_or(container.port); - let persist_data = request - .persist_data - .unwrap_or(container.stored_persist_data); - let enable_auth = request.enable_auth.unwrap_or(container.stored_enable_auth); - - let password = request.password.unwrap_or_else(|| { - container - .stored_password - .clone() - .unwrap_or_else(|| "password".to_string()) - }); - let username = request - .username - .or_else(|| container.stored_username.clone()); - let database_name = request - .database_name - .or_else(|| container.stored_database_name.clone()); - - let create_request = CreateDatabaseRequest { - name: new_name.clone(), - db_type: container.db_type.clone(), - version: container.version.clone(), - port: new_port, - username: username.clone(), - password: password.clone(), - database_name: database_name.clone(), - persist_data, - enable_auth, - max_connections: request.max_connections.or(Some(container.max_connections)), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, - }; - - // Handle volume migration if needed - let volume_name = if persist_data { - let old_volume_name = format!("{}-data", container.name); - let new_volume_name = format!("{}-data", new_name); - - // If the container name is changing and we have persistent data, - // we need to migrate the volume data - if container.name != new_name && container.stored_persist_data { - let data_path = docker_service.get_data_path(&container.db_type); - docker_service - .migrate_volume_data(&app, &old_volume_name, &new_volume_name, data_path) - .await?; - - // Remove old volume after successful migration - docker_service - .remove_volume_if_exists(&app, &old_volume_name) - .await?; - } else { - // Just create the new volume if needed - docker_service - .create_volume_if_needed(&app, &new_volume_name) - .await?; - } - - Some(new_volume_name) - } else { - // If we're changing from persistent to non-persistent, clean up old volume - if container.stored_persist_data && container.name != new_name { - let 
old_volume_name = format!("{}-data", container.name); - docker_service - .remove_volume_if_exists(&app, &old_volume_name) - .await?; - } - None - }; - - // Build and run Docker command - let docker_args = docker_service.build_docker_command(&create_request, &volume_name)?; - let real_container_id = docker_service.run_container(&app, &docker_args).await?; - - // Update container info - container.name = new_name; - container.port = new_port; - container.container_id = Some(real_container_id); - container.status = "running".to_string(); - container.stored_persist_data = persist_data; - container.stored_enable_auth = enable_auth; - - if password_provided { - container.stored_password = Some(password); - } - if username_provided { - container.stored_username = username; - } - if database_name_provided { - container.stored_database_name = database_name; - } - - if let Some(max_conn) = request.max_connections { - container.max_connections = max_conn; - } - } else { - // For non-recreating changes, just update the metadata - if let Some(max_conn) = request.max_connections { - container.max_connections = max_conn; - } - } - - // Update in memory store - { - let mut db_map = databases.lock().unwrap(); - db_map.insert(container.id.clone(), container.clone()); - } - - // Save to persistent store - let db_map = { - let map = databases.lock().unwrap(); - map.clone() - }; - storage_service - .save_databases_to_store(&app, &db_map) - .await?; - - Ok(container) -} diff --git a/src-tauri/src/lib.rs b/src-tauri/src/lib.rs index 34852ae..07f7a52 100644 --- a/src-tauri/src/lib.rs +++ b/src-tauri/src/lib.rs @@ -15,14 +15,13 @@ pub fn run() { .plugin(tauri_plugin_store::Builder::default().build()) .manage(DatabaseStore::default()) .invoke_handler(tauri::generate_handler![ - greet, get_app_version, - create_database_container, + create_container_from_docker_args, + update_container_from_docker_args, get_all_databases, start_container, stop_container, remove_container, - update_container_config, get_docker_status, sync_containers_with_docker, open_container_creation_window, diff --git a/src-tauri/src/services/docker.rs b/src-tauri/src/services/docker.rs index 74eba88..6fe70e0 100644 --- a/src-tauri/src/services/docker.rs +++ b/src-tauri/src/services/docker.rs @@ -62,166 +62,47 @@ impl DockerService { std::env::var("PATH").unwrap_or_else(|_| String::new()) } - pub fn build_docker_command( + /// Build Docker command from generic DockerRunArgs + /// This method is database-agnostic and doesn't need to know about specific database types + pub fn build_docker_command_from_args( &self, - request: &CreateDatabaseRequest, - volume_name: &Option, - ) -> Result, String> { + container_name: &str, + docker_args: &DockerRunArgs, + ) -> Vec { let mut args = vec![ "run".to_string(), "-d".to_string(), "--name".to_string(), - request.name.clone(), - "-p".to_string(), - format!( - "{}:{}", - request.port, - self.get_default_port(&request.db_type) - ), + container_name.to_string(), ]; - // Add volume if persist_data is true - if let Some(vol_name) = volume_name { - args.push("-v".to_string()); - args.push(format!( - "{}:{}", - vol_name, - self.get_data_path(&request.db_type) - )); + // Add port mappings + for port in &docker_args.ports { + args.push("-p".to_string()); + args.push(format!("{}:{}", port.host, port.container)); } - // Add environment variables based on database type - match request.db_type.as_str() { - "PostgreSQL" => { - args.push("-e".to_string()); - args.push(format!("POSTGRES_PASSWORD={}", request.password)); - - 
if let Some(username) = &request.username { - if username != "postgres" { - args.push("-e".to_string()); - args.push(format!("POSTGRES_USER={}", username)); - } - } - - if let Some(db_name) = &request.database_name { - if db_name != "postgres" { - args.push("-e".to_string()); - args.push(format!("POSTGRES_DB={}", db_name)); - } - } - - if let Some(pg_settings) = &request.postgres_settings { - if !pg_settings.host_auth_method.is_empty() { - args.push("-e".to_string()); - args.push(format!( - "POSTGRES_HOST_AUTH_METHOD={}", - pg_settings.host_auth_method - )); - } - - if let Some(initdb_args) = &pg_settings.initdb_args { - if !initdb_args.is_empty() { - args.push("-e".to_string()); - args.push(format!("POSTGRES_INITDB_ARGS={}", initdb_args)); - } - } - } - - args.push(format!("postgres:{}", request.version)); - } - "MySQL" => { - args.push("-e".to_string()); - args.push(format!("MYSQL_ROOT_PASSWORD={}", request.password)); - - if let Some(db_name) = &request.database_name { - args.push("-e".to_string()); - args.push(format!("MYSQL_DATABASE={}", db_name)); - } - - if let Some(mysql_settings) = &request.mysql_settings { - args.push("-e".to_string()); - args.push(format!( - "MYSQL_CHARACTER_SET_SERVER={}", - mysql_settings.character_set - )); - - args.push("-e".to_string()); - args.push(format!( - "MYSQL_COLLATION_SERVER={}", - mysql_settings.collation - )); - } - - args.push(format!("mysql:{}", request.version)); - } - "Redis" => { - args.push(format!("redis:{}", request.version)); - - if request.enable_auth || request.redis_settings.is_some() { - args.push("redis-server".to_string()); - - if request.enable_auth { - args.push("--requirepass".to_string()); - args.push(request.password.clone()); - } - - if let Some(redis_settings) = &request.redis_settings { - if !redis_settings.max_memory.is_empty() { - args.push("--maxmemory".to_string()); - args.push(redis_settings.max_memory.clone()); - } - - args.push("--maxmemory-policy".to_string()); - args.push(redis_settings.max_memory_policy.clone()); - - if redis_settings.append_only { - args.push("--appendonly".to_string()); - args.push("yes".to_string()); - } - } - } - } - "MongoDB" => { - args.push("-e".to_string()); - args.push(format!( - "MONGO_INITDB_ROOT_USERNAME={}", - request.username.as_ref().unwrap_or(&"admin".to_string()) - )); - - args.push("-e".to_string()); - args.push(format!("MONGO_INITDB_ROOT_PASSWORD={}", request.password)); - - if let Some(db_name) = &request.database_name { - args.push("-e".to_string()); - args.push(format!("MONGO_INITDB_DATABASE={}", db_name)); - } + // Add volume mounts + for volume in &docker_args.volumes { + args.push("-v".to_string()); + args.push(format!("{}:{}", volume.name, volume.path)); + } - args.push(format!("mongo:{}", request.version)); - } - _ => return Err("Unsupported database type".to_string()), + // Add environment variables + for (key, value) in &docker_args.env_vars { + args.push("-e".to_string()); + args.push(format!("{}={}", key, value)); } - Ok(args) - } + // Add image + args.push(docker_args.image.clone()); - pub fn get_default_port(&self, db_type: &str) -> i32 { - match db_type { - "PostgreSQL" => 5432, - "MySQL" => 3306, - "Redis" => 6379, - "MongoDB" => 27017, - _ => 5432, + // Add additional command arguments (e.g., for Redis) + if !docker_args.command.is_empty() { + args.extend(docker_args.command.clone()); } - } - pub fn get_data_path(&self, db_type: &str) -> &str { - match db_type { - "PostgreSQL" => "/var/lib/postgresql/data", - "MySQL" => "/var/lib/mysql", - "Redis" => "/data", - 
"MongoDB" => "/data/db", - _ => "/data", - } + args } pub async fn check_docker_status(&self, app: &AppHandle) -> Result { diff --git a/src-tauri/src/types/docker.rs b/src-tauri/src/types/docker.rs new file mode 100644 index 0000000..bdf87e9 --- /dev/null +++ b/src-tauri/src/types/docker.rs @@ -0,0 +1,56 @@ +use serde::{Deserialize, Serialize}; +use std::collections::HashMap; + +/// Port mapping for Docker containers +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct PortMapping { + pub host: i32, + pub container: i32, +} + +/// Volume mount configuration +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct VolumeMount { + pub name: String, + pub path: String, +} + +/// Generic Docker run arguments (database-agnostic) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DockerRunArgs { + pub image: String, + #[serde(rename = "envVars")] + pub env_vars: HashMap, + pub ports: Vec, + pub volumes: Vec, + pub command: Vec, +} + +/// Container metadata (for storage and tracking) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct ContainerMetadata { + pub id: String, + #[serde(rename = "dbType")] + pub db_type: String, + pub version: String, + pub port: i32, + pub username: Option, + pub password: String, + #[serde(rename = "databaseName")] + pub database_name: Option, + #[serde(rename = "persistData")] + pub persist_data: bool, + #[serde(rename = "enableAuth")] + pub enable_auth: bool, + #[serde(rename = "maxConnections")] + pub max_connections: Option, +} + +/// Complete Docker run request from frontend +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct DockerRunRequest { + pub name: String, + #[serde(rename = "dockerArgs")] + pub docker_args: DockerRunArgs, + pub metadata: ContainerMetadata, +} diff --git a/src-tauri/src/types/mod.rs b/src-tauri/src/types/mod.rs index 2fcccc8..27ae4bd 100644 --- a/src-tauri/src/types/mod.rs +++ b/src-tauri/src/types/mod.rs @@ -1,8 +1,7 @@ pub mod database; +pub mod docker; pub mod errors; -pub mod requests; -pub mod settings; pub use database::*; +pub use docker::*; pub use errors::*; -pub use requests::*; diff --git a/src-tauri/src/types/requests.rs b/src-tauri/src/types/requests.rs deleted file mode 100644 index 1f2e6e3..0000000 --- a/src-tauri/src/types/requests.rs +++ /dev/null @@ -1,35 +0,0 @@ -use super::settings::*; -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct CreateDatabaseRequest { - pub name: String, - pub db_type: String, - pub version: String, - pub port: i32, - pub username: Option, - pub password: String, - pub database_name: Option, - pub persist_data: bool, - pub enable_auth: bool, - pub max_connections: Option, - pub postgres_settings: Option, - pub mysql_settings: Option, - pub redis_settings: Option, - pub mongo_settings: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct UpdateContainerRequest { - pub container_id: String, - pub name: Option, - pub port: Option, - pub username: Option, - pub password: Option, - pub database_name: Option, - pub max_connections: Option, - pub enable_auth: Option, - pub persist_data: Option, - pub restart_policy: Option, - pub auto_start: Option, -} diff --git a/src-tauri/src/types/settings.rs b/src-tauri/src/types/settings.rs deleted file mode 100644 index 2bf10c7..0000000 --- a/src-tauri/src/types/settings.rs +++ /dev/null @@ -1,31 +0,0 @@ -use serde::{Deserialize, Serialize}; - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct PostgresSettings { - pub initdb_args: Option, - 
pub host_auth_method: String, - pub shared_preload_libraries: Option, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MysqlSettings { - pub root_host: String, - pub character_set: String, - pub collation: String, - pub sql_mode: String, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct RedisSettings { - pub max_memory: String, - pub max_memory_policy: String, - pub append_only: bool, - pub require_pass: bool, -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct MongoSettings { - pub auth_source: String, - pub enable_sharding: bool, - pub oplog_size: String, -} diff --git a/src-tauri/tests/integration/container_update_test.rs b/src-tauri/tests/integration/container_update_test.rs deleted file mode 100644 index 3de5575..0000000 --- a/src-tauri/tests/integration/container_update_test.rs +++ /dev/null @@ -1,336 +0,0 @@ -use docker_db_manager_lib::types::*; - -/// Integration tests for container updates -/// -/// These tests verify the complete container update flow, -/// especially the correct handling of volumes when the name changes. -mod container_update_integration_tests { - use super::*; - - /// Helper to create a test DatabaseContainer - fn create_test_container(name: &str, persistent: bool) -> DatabaseContainer { - DatabaseContainer { - id: uuid::Uuid::new_v4().to_string(), - name: name.to_string(), - db_type: "PostgreSQL".to_string(), - version: "15".to_string(), - status: "running".to_string(), - port: 5432, - created_at: chrono::Utc::now().format("%Y-%m-%d").to_string(), - max_connections: 100, - container_id: Some(format!("docker-{}", name)), - stored_password: Some("testpass".to_string()), - stored_username: Some("testuser".to_string()), - stored_database_name: Some("testdb".to_string()), - stored_persist_data: persistent, - stored_enable_auth: true, - } - } - - #[tokio::test] - async fn should_detect_recreation_needed_for_name_change() { - // Arrange - let original_container = create_test_container("postgres-old", true); - let container_id = original_container.id.clone(); - - let update_request = UpdateContainerRequest { - container_id: container_id.clone(), - name: Some("postgres-new".to_string()), - port: None, - username: None, - password: None, - database_name: None, - max_connections: None, - enable_auth: None, - persist_data: None, - restart_policy: None, - auto_start: None, - }; - - // Act - let needs_recreation = update_request.name.is_some() - && update_request.name != Some(original_container.name.clone()); - - // Assert - assert!( - needs_recreation, - "Should detect that recreation is needed when name changes" - ); - } - - #[tokio::test] - async fn should_detect_recreation_needed_for_port_change() { - // Arrange - let original_container = create_test_container("postgres-test", true); - let container_id = original_container.id.clone(); - - let update_request = UpdateContainerRequest { - container_id: container_id.clone(), - name: None, - port: Some(5433), - username: None, - password: None, - database_name: None, - max_connections: None, - enable_auth: None, - persist_data: None, - restart_policy: None, - auto_start: None, - }; - - // Act - let needs_recreation = - update_request.port.is_some() && update_request.port != Some(original_container.port); - - // Assert - assert!( - needs_recreation, - "Should detect that recreation is needed when port changes" - ); - } - - #[tokio::test] - async fn should_not_recreate_when_only_metadata_changes() { - // Arrange - let original_container = create_test_container("postgres-test", 
true); - let container_id = original_container.id.clone(); - - let update_request = UpdateContainerRequest { - container_id: container_id.clone(), - name: None, - port: None, - username: Some("newuser".to_string()), - password: Some("newpass".to_string()), - database_name: Some("newdb".to_string()), - max_connections: Some(200), - enable_auth: None, - persist_data: None, - restart_policy: None, - auto_start: None, - }; - - // Act - let needs_recreation = update_request.port.is_some() - && update_request.port != Some(original_container.port) - || update_request.name.is_some() - && update_request.name != Some(original_container.name.clone()) - || update_request.persist_data.is_some(); - - // Assert - assert!( - !needs_recreation, - "Should not recreate when only metadata changes (username, password, etc.)" - ); - } - - #[tokio::test] - async fn should_preserve_unchanged_data_in_update() { - // Arrange - let original_container = create_test_container("postgres-test", true); - - let update_request = UpdateContainerRequest { - container_id: original_container.id.clone(), - name: Some("postgres-new".to_string()), - port: None, - username: None, - password: None, - database_name: None, - max_connections: None, - enable_auth: None, - persist_data: None, - restart_policy: None, - auto_start: None, - }; - - // Act - Simulate update logic - let new_name = update_request - .name - .unwrap_or(original_container.name.clone()); - let new_port = update_request.port.unwrap_or(original_container.port); - let persist_data = update_request - .persist_data - .unwrap_or(original_container.stored_persist_data); - let enable_auth = update_request - .enable_auth - .unwrap_or(original_container.stored_enable_auth); - - // Assert - assert_eq!(new_name, "postgres-new"); - assert_eq!(new_port, 5432); // Should keep original port - assert_eq!(persist_data, true); // Should keep original persistence - assert_eq!(enable_auth, true); // Should keep original auth - } - - #[tokio::test] - async fn should_generate_correct_volume_names_for_migration() { - // Arrange - let old_name = "postgres-old"; - let new_name = "postgres-new"; - - // Act - let old_volume_name = format!("{}-data", old_name); - let new_volume_name = format!("{}-data", new_name); - - // Assert - assert_eq!(old_volume_name, "postgres-old-data"); - assert_eq!(new_volume_name, "postgres-new-data"); - assert_ne!( - old_volume_name, new_volume_name, - "Volume names should be different when container name changes" - ); - } - - #[tokio::test] - async fn should_handle_persistence_change_correctly() { - // Arrange - let original_container = create_test_container("postgres-test", true); - - // Change from persistent to non-persistent - let update_request = UpdateContainerRequest { - container_id: original_container.id.clone(), - name: Some("postgres-new".to_string()), - port: None, - username: None, - password: None, - database_name: None, - max_connections: None, - enable_auth: None, - persist_data: Some(false), - restart_policy: None, - auto_start: None, - }; - - // Act - let old_persistent = original_container.stored_persist_data; - let new_persistent = update_request.persist_data.unwrap_or(old_persistent); - let name_changed = update_request.name.is_some() - && update_request.name != Some(original_container.name.clone()); - - let should_cleanup_old = old_persistent && !new_persistent && name_changed; - - // Assert - assert!( - should_cleanup_old, - "Should cleanup old volume when changing from persistent to non-persistent" - ); - } - - #[tokio::test] - async fn 
should_validate_create_request_structure_for_recreation() { - // Arrange - let original_container = create_test_container("postgres-old", true); - let new_name = "postgres-new"; - let new_port = 5433; - - // Act - Simulate CreateDatabaseRequest creation - let create_request = CreateDatabaseRequest { - name: new_name.to_string(), - db_type: original_container.db_type.clone(), - version: original_container.version.clone(), - port: new_port, - username: original_container.stored_username.clone(), - password: original_container - .stored_password - .clone() - .unwrap_or_else(|| "password".to_string()), - database_name: original_container.stored_database_name.clone(), - persist_data: original_container.stored_persist_data, - enable_auth: original_container.stored_enable_auth, - max_connections: Some(original_container.max_connections), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, - }; - - // Assert - assert_eq!(create_request.name, new_name); - assert_eq!(create_request.db_type, "PostgreSQL"); - assert_eq!(create_request.version, "15"); - assert_eq!(create_request.port, new_port); - assert_eq!(create_request.username, Some("testuser".to_string())); - assert_eq!(create_request.password, "testpass"); - assert_eq!(create_request.database_name, Some("testdb".to_string())); - assert_eq!(create_request.persist_data, true); - assert_eq!(create_request.enable_auth, true); - assert_eq!(create_request.max_connections, Some(100)); - } - - /// Specific tests for container and volume removal - mod container_removal_tests { - use super::*; - - #[tokio::test] - async fn should_remove_volume_when_persistent_container_is_removed() { - // Arrange - let container = create_test_container("postgres-persistent", true); - - // Act - Simulate removal logic - let should_remove_volume = container.stored_persist_data; - let expected_volume_name = format!("{}-data", container.name); - - // Assert - assert!( - should_remove_volume, - "Should remove volume when removing a container with persistent data" - ); - assert_eq!(expected_volume_name, "postgres-persistent-data"); - } - - #[tokio::test] - async fn should_not_remove_volume_when_non_persistent_container_is_removed() { - // Arrange - let container = create_test_container("postgres-temp", false); - - // Act - Simulate removal logic - let should_remove_volume = container.stored_persist_data; - - // Assert - assert!( - !should_remove_volume, - "Should not remove volume when removing a container without persistent data" - ); - } - - #[tokio::test] - async fn should_generate_correct_volume_name_for_removal() { - // Arrange - let container_names = vec![ - ("postgres-db", "postgres-db-data"), - ("mi_contenedor", "mi_contenedor-data"), - ("test-123", "test-123-data"), - ("redis-cache", "redis-cache-data"), - ]; - - for (container_name, expected_volume) in container_names { - // Act - let volume_name = format!("{}-data", container_name); - - // Assert - assert_eq!( - volume_name, expected_volume, - "Volume name should follow the correct pattern for {}", - container_name - ); - } - } - - #[tokio::test] - async fn should_validate_complete_container_removal_logic() { - // Arrange - let persistent_container = create_test_container("postgres-persistent", true); - let temp_container = create_test_container("postgres-temp", false); - - // Act & Assert - Persistent container - assert!(persistent_container.stored_persist_data); - assert_eq!(persistent_container.name, "postgres-persistent"); - - let persistent_volume = 
format!("{}-data", persistent_container.name); - assert_eq!(persistent_volume, "postgres-persistent-data"); - - // Act & Assert - Temporary container - assert!(!temp_container.stored_persist_data); - assert_eq!(temp_container.name, "postgres-temp"); - } - } -} diff --git a/src-tauri/tests/integration/mongodb_integration_test.rs b/src-tauri/tests/integration/mongodb_integration_test.rs new file mode 100644 index 0000000..1cc5ff6 --- /dev/null +++ b/src-tauri/tests/integration/mongodb_integration_test.rs @@ -0,0 +1,305 @@ +use docker_db_manager_lib::services::DockerService; +use docker_db_manager_lib::types::{ + ContainerMetadata, DockerRunArgs, DockerRunRequest, PortMapping, VolumeMount, +}; +use std::collections::HashMap; + +mod utils; +use utils::*; + +/// Integration tests specific to MongoDB +/// +/// These tests verify that MongoDB functionality works correctly +/// with real Docker, including container creation, configuration, and cleanup. + +#[tokio::test] +async fn test_create_basic_mongodb_container() { + if !docker_available() { + println!("⚠️ Docker is not available, skipping MongoDB test"); + return; + } + + let container_name = "test-mongo-basic-integration"; + + // Initial cleanup + clean_container(container_name).await; + + let service = DockerService::new(); + + let mut env_vars = HashMap::new(); + env_vars.insert( + "MONGO_INITDB_ROOT_USERNAME".to_string(), + "admin".to_string(), + ); + env_vars.insert( + "MONGO_INITDB_ROOT_PASSWORD".to_string(), + "mongopass123".to_string(), + ); + env_vars.insert("MONGO_INITDB_DATABASE".to_string(), "testdb".to_string()); + + let request = DockerRunRequest { + name: container_name.to_string(), + docker_args: DockerRunArgs { + image: "mongo:7".to_string(), + env_vars, + ports: vec![PortMapping { + host: 27018, + container: 27017, + }], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "MongoDB".to_string(), + version: "7".to_string(), + port: 27018, + username: Some("admin".to_string()), + password: "mongopass123".to_string(), + database_name: Some("testdb".to_string()), + persist_data: false, + enable_auth: true, + max_connections: Some(1000), + }, + }; + + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); + println!("🐳 MongoDB command generated: {:?}", command); + + // Verify MongoDB-specific elements + assert!( + command.contains(&"mongo:7".to_string()), + "Should use correct MongoDB image" + ); + assert!( + command.contains(&"27018:27017".to_string()), + "Should map MongoDB port correctly" + ); + assert!( + command.contains(&"MONGO_INITDB_ROOT_USERNAME=admin".to_string()), + "Should include MongoDB username" + ); + assert!( + command.contains(&"MONGO_INITDB_ROOT_PASSWORD=mongopass123".to_string()), + "Should include MongoDB password" + ); + + let container_id = run_docker_command(command).await; + + if let Err(e) = container_id { + clean_container(container_name).await; + panic!("Docker failed to create MongoDB container: {}", e); + } + + println!( + "✅ MongoDB container created with ID: {}", + container_id.unwrap() + ); + + // Wait for MongoDB to be ready + assert!( + wait_for_container_ready(container_name, 15, 1).await, + "MongoDB container failed to start within timeout" + ); + + assert!( + container_exists(container_name).await, + "MongoDB container should exist" + ); + + if let Some(status) = get_container_status(container_name).await { + println!("📊 MongoDB container status: {}", status); + } + + // Cleanup + 
clean_container(container_name).await; + + println!("✅ Basic MongoDB test completed successfully"); +} + +#[tokio::test] +async fn test_create_mongodb_container_with_volume() { + if !docker_available() { + println!("⚠️ Docker is not available, skipping MongoDB volume test"); + return; + } + + let container_name = "test-mongo-volume-integration"; + let volume_name = format!("{}-data", container_name); + + // Initial cleanup + clean_container(container_name).await; + clean_volume(&volume_name).await; + + let service = DockerService::new(); + + let mut env_vars = HashMap::new(); + env_vars.insert( + "MONGO_INITDB_ROOT_USERNAME".to_string(), + "admin".to_string(), + ); + env_vars.insert( + "MONGO_INITDB_ROOT_PASSWORD".to_string(), + "mongopass".to_string(), + ); + + let request = DockerRunRequest { + name: container_name.to_string(), + docker_args: DockerRunArgs { + image: "mongo:7".to_string(), + env_vars, + ports: vec![PortMapping { + host: 27019, + container: 27017, + }], + volumes: vec![VolumeMount { + name: volume_name.clone(), + path: "/data/db".to_string(), + }], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "MongoDB".to_string(), + version: "7".to_string(), + port: 27019, + username: Some("admin".to_string()), + password: "mongopass".to_string(), + database_name: None, + persist_data: true, + enable_auth: true, + max_connections: Some(1000), + }, + }; + + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); + println!("🐳 MongoDB command with volume: {:?}", command); + + assert!( + command.contains(&"-v".to_string()), + "Should include volume flag" + ); + assert!( + command.contains(&format!("{}:/data/db", volume_name)), + "Should map MongoDB volume correctly" + ); + + if let Err(e) = create_volume(&volume_name).await { + println!("⚠️ Warning when creating volume: {}", e); + } + + let container_id = run_docker_command(command).await; + + if let Err(e) = container_id { + clean_container(container_name).await; + clean_volume(&volume_name).await; + panic!( + "Docker failed to create MongoDB container with volume: {}", + e + ); + } + + println!("✅ MongoDB container with volume created successfully"); + + // Wait for MongoDB to be ready + assert!( + wait_for_container_ready(container_name, 15, 1).await, + "MongoDB container with volume failed to start within timeout" + ); + + assert!( + container_exists(container_name).await, + "Container should exist" + ); + assert!(volume_exists(&volume_name).await, "Volume should exist"); + + // Cleanup + clean_container(container_name).await; + clean_volume(&volume_name).await; + + println!("✅ MongoDB volume test completed"); +} + +#[tokio::test] +async fn test_create_mongodb_container_without_auth() { + if !docker_available() { + println!("⚠️ Docker is not available, skipping MongoDB no-auth test"); + return; + } + + let container_name = "test-mongo-noauth-integration"; + + // Initial cleanup + clean_container(container_name).await; + + let service = DockerService::new(); + + let env_vars = HashMap::new(); // No auth env vars + + let request = DockerRunRequest { + name: container_name.to_string(), + docker_args: DockerRunArgs { + image: "mongo:7".to_string(), + env_vars, + ports: vec![PortMapping { + host: 27020, + container: 27017, + }], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "MongoDB".to_string(), + version: "7".to_string(), + port: 27020, + username: None, + password: 
String::new(), + database_name: None, + persist_data: false, + enable_auth: false, + max_connections: Some(1000), + }, + }; + + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); + println!("🐳 MongoDB command without auth: {:?}", command); + + // Verify no auth env vars + assert!( + !command.contains(&"MONGO_INITDB_ROOT_USERNAME".to_string()), + "Should not include username when auth is disabled" + ); + assert!( + !command.contains(&"MONGO_INITDB_ROOT_PASSWORD".to_string()), + "Should not include password when auth is disabled" + ); + + let container_id = run_docker_command(command).await; + + if let Err(e) = container_id { + clean_container(container_name).await; + panic!( + "Docker failed to create MongoDB container without auth: {}", + e + ); + } + + println!("✅ MongoDB container without auth created"); + + // Wait for MongoDB to be ready + assert!( + wait_for_container_ready(container_name, 15, 1).await, + "MongoDB container without auth failed to start within timeout" + ); + + assert!( + container_exists(container_name).await, + "Container should exist" + ); + + // Cleanup + clean_container(container_name).await; + + println!("✅ MongoDB no-auth test completed"); +} diff --git a/src-tauri/tests/integration/mysql_integration_test.rs b/src-tauri/tests/integration/mysql_integration_test.rs new file mode 100644 index 0000000..a15b2ef --- /dev/null +++ b/src-tauri/tests/integration/mysql_integration_test.rs @@ -0,0 +1,209 @@ +use docker_db_manager_lib::services::DockerService; +use docker_db_manager_lib::types::{ + ContainerMetadata, DockerRunArgs, DockerRunRequest, PortMapping, VolumeMount, +}; +use std::collections::HashMap; + +mod utils; +use utils::*; + +/// Integration tests specific to MySQL +/// +/// These tests verify that MySQL functionality works correctly +/// with real Docker, including container creation, configuration, and cleanup. 
+ +#[tokio::test] +async fn test_create_basic_mysql_container() { + if !docker_available() { + println!("⚠️ Docker is not available, skipping MySQL test"); + return; + } + + let container_name = "test-mysql-basic-integration"; + + // Initial cleanup + clean_container(container_name).await; + + let service = DockerService::new(); + + let mut env_vars = HashMap::new(); + env_vars.insert("MYSQL_ROOT_PASSWORD".to_string(), "rootpass123".to_string()); + env_vars.insert("MYSQL_DATABASE".to_string(), "testdb".to_string()); + env_vars.insert("MYSQL_USER".to_string(), "testuser".to_string()); + env_vars.insert("MYSQL_PASSWORD".to_string(), "testpass123".to_string()); + + let request = DockerRunRequest { + name: container_name.to_string(), + docker_args: DockerRunArgs { + image: "mysql:8.0".to_string(), + env_vars, + ports: vec![PortMapping { + host: 3307, + container: 3306, + }], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "MySQL".to_string(), + version: "8.0".to_string(), + port: 3307, + username: Some("testuser".to_string()), + password: "testpass123".to_string(), + database_name: Some("testdb".to_string()), + persist_data: false, + enable_auth: true, + max_connections: Some(150), + }, + }; + + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); + println!("🐳 MySQL command generated: {:?}", command); + + // Verify MySQL-specific elements + assert!( + command.contains(&"mysql:8.0".to_string()), + "Should use correct MySQL image" + ); + assert!( + command.contains(&"3307:3306".to_string()), + "Should map MySQL port correctly" + ); + assert!( + command.contains(&"MYSQL_ROOT_PASSWORD=rootpass123".to_string()), + "Should include MySQL root password" + ); + assert!( + command.contains(&"MYSQL_DATABASE=testdb".to_string()), + "Should include MySQL database name" + ); + + let container_id = run_docker_command(command).await; + + if let Err(e) = container_id { + clean_container(container_name).await; + panic!("Docker failed to create MySQL container: {}", e); + } + + println!( + "✅ MySQL container created with ID: {}", + container_id.unwrap() + ); + + // Wait for MySQL to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "MySQL container failed to start within timeout" + ); + + assert!( + container_exists(container_name).await, + "MySQL container should exist" + ); + + if let Some(status) = get_container_status(container_name).await { + println!("📊 MySQL container status: {}", status); + } + + // Cleanup + clean_container(container_name).await; + + println!("✅ Basic MySQL test completed successfully"); +} + +#[tokio::test] +async fn test_create_mysql_container_with_volume() { + if !docker_available() { + println!("⚠️ Docker is not available, skipping MySQL volume test"); + return; + } + + let container_name = "test-mysql-volume-integration"; + let volume_name = format!("{}-data", container_name); + + // Initial cleanup + clean_container(container_name).await; + clean_volume(&volume_name).await; + + let service = DockerService::new(); + + let mut env_vars = HashMap::new(); + env_vars.insert("MYSQL_ROOT_PASSWORD".to_string(), "rootpass".to_string()); + env_vars.insert("MYSQL_DATABASE".to_string(), "voldb".to_string()); + + let request = DockerRunRequest { + name: container_name.to_string(), + docker_args: DockerRunArgs { + image: "mysql:8.0".to_string(), + env_vars, + ports: vec![PortMapping { + host: 3308, + container: 3306, + }], + volumes: vec![VolumeMount 
{ + name: volume_name.clone(), + path: "/var/lib/mysql".to_string(), + }], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "MySQL".to_string(), + version: "8.0".to_string(), + port: 3308, + username: Some("root".to_string()), + password: "rootpass".to_string(), + database_name: Some("voldb".to_string()), + persist_data: true, + enable_auth: true, + max_connections: Some(150), + }, + }; + + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); + println!("🐳 MySQL command with volume: {:?}", command); + + assert!( + command.contains(&"-v".to_string()), + "Should include volume flag" + ); + assert!( + command.contains(&format!("{}:/var/lib/mysql", volume_name)), + "Should map MySQL volume correctly" + ); + + if let Err(e) = create_volume(&volume_name).await { + clean_container(container_name).await; + clean_volume(&volume_name).await; + panic!("Failed to create volume: {}", e); + } + + let container_id = run_docker_command(command).await; + + if let Err(e) = container_id { + clean_container(container_name).await; + clean_volume(&volume_name).await; + panic!("Docker failed to create MySQL container with volume: {}", e); + } + + println!("✅ MySQL container with volume created successfully"); + + // Wait for MySQL to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "MySQL container with volume failed to start within timeout" + ); + + assert!( + container_exists(container_name).await, + "Container should exist" + ); + assert!(volume_exists(&volume_name).await, "Volume should exist"); + + // Cleanup + clean_container(container_name).await; + clean_volume(&volume_name).await; + + println!("✅ MySQL volume test completed"); +} diff --git a/src-tauri/tests/integration/postgresql_integration_test.rs b/src-tauri/tests/integration/postgresql_integration_test.rs index 86dd97c..d37ad54 100644 --- a/src-tauri/tests/integration/postgresql_integration_test.rs +++ b/src-tauri/tests/integration/postgresql_integration_test.rs @@ -1,6 +1,8 @@ use docker_db_manager_lib::services::DockerService; -use docker_db_manager_lib::types::CreateDatabaseRequest; -use std::process::Command; +use docker_db_manager_lib::types::{ + ContainerMetadata, DockerRunArgs, DockerRunRequest, PortMapping, VolumeMount, +}; +use std::collections::HashMap; mod utils; use utils::*; @@ -23,33 +25,42 @@ async fn test_create_basic_postgresql_container() { // Initial cleanup clean_container(container_name).await; - // Arrange - Basic PostgreSQL configuration + // Arrange - Basic PostgreSQL configuration using DockerRunRequest let service = DockerService::new(); - let request = CreateDatabaseRequest { + + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_PASSWORD".to_string(), "testpass123".to_string()); + env_vars.insert("POSTGRES_USER".to_string(), "testuser".to_string()); + env_vars.insert("POSTGRES_DB".to_string(), "testdb".to_string()); + + let request = DockerRunRequest { name: container_name.to_string(), - db_type: "PostgreSQL".to_string(), - version: "13-alpine".to_string(), - port: 5435, - persist_data: false, - username: Some("testuser".to_string()), - password: "testpass123".to_string(), - database_name: Some("testdb".to_string()), - enable_auth: true, - max_connections: Some(50), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, + docker_args: DockerRunArgs { + image: "postgres:13-alpine".to_string(), + env_vars, + ports: vec![PortMapping { + host: 
5435, + container: 5432, + }], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "PostgreSQL".to_string(), + version: "13-alpine".to_string(), + port: 5435, + username: Some("testuser".to_string()), + password: "testpass123".to_string(), + database_name: Some("testdb".to_string()), + persist_data: false, + enable_auth: true, + max_connections: Some(50), + }, }; // Act - Build and execute command - let command_result = service.build_docker_command(&request, &None); - assert!( - command_result.is_ok(), - "DockerService should build valid PostgreSQL command" - ); - - let command = command_result.unwrap(); + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); println!("🐳 PostgreSQL command generated: {:?}", command); // Verify PostgreSQL-specific elements @@ -75,31 +86,33 @@ async fn test_create_basic_postgresql_container() { ); // Execute Docker command - let output = Command::new("docker") - .args(&command) - .output() - .expect("Failed to execute PostgreSQL command"); + let container_id = run_docker_command(command).await; - if !output.status.success() { - let error = String::from_utf8_lossy(&output.stderr); + if let Err(e) = container_id { clean_container(container_name).await; - panic!("Docker failed to create PostgreSQL container: {}", error); + panic!("Docker failed to create PostgreSQL container: {}", e); } - let container_id = String::from_utf8_lossy(&output.stdout).trim().to_string(); - println!("✅ PostgreSQL container created with ID: {}", container_id); + println!( + "✅ PostgreSQL container created with ID: {}", + container_id.unwrap() + ); - // Verify that the container exists and is running - wait_for_container(3).await; + // Wait for PostgreSQL to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "PostgreSQL container failed to start within timeout" + ); assert!( container_exists(container_name).await, "PostgreSQL container should exist" ); - // Verify status using utility + // Verify status if let Some(status) = get_container_status(container_name).await { println!("📊 PostgreSQL container status: {}", status); + assert!(status.contains("Up"), "Container should be running"); } // Cleanup @@ -127,31 +140,43 @@ async fn test_create_postgresql_container_with_volume() { clean_volume(&volume_name).await; let service = DockerService::new(); - let request = CreateDatabaseRequest { + + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_PASSWORD".to_string(), "volpass123".to_string()); + env_vars.insert("POSTGRES_USER".to_string(), "voluser".to_string()); + env_vars.insert("POSTGRES_DB".to_string(), "voldb".to_string()); + + let request = DockerRunRequest { name: container_name.to_string(), - db_type: "PostgreSQL".to_string(), - version: "13-alpine".to_string(), - port: 5436, - persist_data: true, // With persistence - username: Some("voluser".to_string()), - password: "volpass123".to_string(), - database_name: Some("voldb".to_string()), - enable_auth: true, - max_connections: Some(100), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, + docker_args: DockerRunArgs { + image: "postgres:13-alpine".to_string(), + env_vars, + ports: vec![PortMapping { + host: 5436, + container: 5432, + }], + volumes: vec![VolumeMount { + name: volume_name.clone(), + path: "/var/lib/postgresql/data".to_string(), + }], + command: vec![], + }, + metadata: ContainerMetadata { + id: 
uuid::Uuid::new_v4().to_string(), + db_type: "PostgreSQL".to_string(), + version: "13-alpine".to_string(), + port: 5436, + username: Some("voluser".to_string()), + password: "volpass123".to_string(), + database_name: Some("voldb".to_string()), + persist_data: true, + enable_auth: true, + max_connections: Some(100), + }, }; // Build command with volume - let command_result = service.build_docker_command(&request, &Some(volume_name.clone())); - assert!( - command_result.is_ok(), - "Should build PostgreSQL command with volume" - ); - - let command = command_result.unwrap(); + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); println!("🐳 PostgreSQL command with volume: {:?}", command); // Verify that it includes the volume @@ -164,32 +189,37 @@ async fn test_create_postgresql_container_with_volume() { "Should map PostgreSQL volume correctly" ); - // Create volume using utility + // Create volume if let Err(e) = create_volume(&volume_name).await { println!("⚠️ Warning when creating volume: {}", e); } // Execute command - let output = Command::new("docker") - .args(&command) - .output() - .expect("Failed to execute PostgreSQL command with volume"); + let container_id = run_docker_command(command).await; - if !output.status.success() { - let error = String::from_utf8_lossy(&output.stderr); + if let Err(e) = container_id { clean_container(container_name).await; - let _ = Command::new("docker") - .args(&["volume", "rm", &volume_name]) - .output(); + clean_volume(&volume_name).await; panic!( "Docker failed to create PostgreSQL container with volume: {}", - error + e ); } println!("✅ PostgreSQL container with volume created successfully"); - wait_for_container(2).await; + // Wait for PostgreSQL to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "PostgreSQL container with volume failed to start within timeout" + ); + + // Verify container and volume exist + assert!( + container_exists(container_name).await, + "Container should exist" + ); + assert!(volume_exists(&volume_name).await, "Volume should exist"); // Cleanup clean_container(container_name).await; @@ -197,3 +227,137 @@ async fn test_create_postgresql_container_with_volume() { println!("✅ PostgreSQL test with volume completed"); } + +#[tokio::test] +async fn test_update_postgresql_port() { + if !docker_available() { + println!("⚠️ Docker is not available, skipping PostgreSQL port update test"); + return; + } + + let container_name = "test-postgres-port-update"; + let old_port = 5440; + let new_port = 5441; + + // Initial cleanup + clean_container(container_name).await; + + let service = DockerService::new(); + + // Create initial container + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_PASSWORD".to_string(), "testpass".to_string()); + env_vars.insert("POSTGRES_USER".to_string(), "testuser".to_string()); + env_vars.insert("POSTGRES_DB".to_string(), "testdb".to_string()); + + let initial_request = DockerRunRequest { + name: container_name.to_string(), + docker_args: DockerRunArgs { + image: "postgres:13-alpine".to_string(), + env_vars: env_vars.clone(), + ports: vec![PortMapping { + host: old_port, + container: 5432, + }], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "PostgreSQL".to_string(), + version: "13-alpine".to_string(), + port: old_port, + username: Some("testuser".to_string()), + password: "testpass".to_string(), + database_name: Some("testdb".to_string()), + 
persist_data: false, + enable_auth: true, + max_connections: Some(100), + }, + }; + + let command = + service.build_docker_command_from_args(&initial_request.name, &initial_request.docker_args); + let result = run_docker_command(command).await; + + if let Err(e) = result { + clean_container(container_name).await; + panic!("Failed to create initial container: {}", e); + } + + // Wait for initial container to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "Initial PostgreSQL container failed to start" + ); + + // Verify initial port + if let Some(ports) = get_container_port(container_name).await { + println!("📊 Initial ports: {}", ports); + assert!( + ports.contains(&old_port.to_string()), + "Should have old port mapping" + ); + } + + // Update: Remove old container and create with new port + clean_container(container_name).await; + + // Wait longer to ensure port is released + tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + + let updated_request = DockerRunRequest { + name: container_name.to_string(), + docker_args: DockerRunArgs { + image: "postgres:13-alpine".to_string(), + env_vars, + ports: vec![PortMapping { + host: new_port, + container: 5432, + }], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "PostgreSQL".to_string(), + version: "13-alpine".to_string(), + port: new_port, + username: Some("testuser".to_string()), + password: "testpass".to_string(), + database_name: Some("testdb".to_string()), + persist_data: false, + enable_auth: true, + max_connections: Some(100), + }, + }; + + let new_command = + service.build_docker_command_from_args(&updated_request.name, &updated_request.docker_args); + let new_result = run_docker_command(new_command).await; + + if let Err(e) = new_result { + clean_container(container_name).await; + panic!("Failed to create updated container: {}", e); + } + + // Wait for updated container to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "Updated PostgreSQL container failed to start" + ); + + // Verify new port + if let Some(ports) = get_container_port(container_name).await { + println!("📊 Updated ports: {}", ports); + assert!( + ports.contains(&new_port.to_string()), + "Should have new port mapping" + ); + } + + // Cleanup + clean_container(container_name).await; + + println!("✅ PostgreSQL port update test completed successfully"); +} diff --git a/src-tauri/tests/integration/redis_integration_test.rs b/src-tauri/tests/integration/redis_integration_test.rs index 8f3c862..c8bdd0f 100644 --- a/src-tauri/tests/integration/redis_integration_test.rs +++ b/src-tauri/tests/integration/redis_integration_test.rs @@ -1,6 +1,8 @@ use docker_db_manager_lib::services::DockerService; -use docker_db_manager_lib::types::{settings::RedisSettings, CreateDatabaseRequest}; -use std::process::Command; +use docker_db_manager_lib::types::{ + ContainerMetadata, DockerRunArgs, DockerRunRequest, PortMapping, VolumeMount, +}; +use std::collections::HashMap; mod utils; use utils::*; @@ -8,49 +10,52 @@ use utils::*; /// Integration tests specific to Redis /// /// These tests verify that Redis functionality works correctly -/// with real Docker, including creation with and without authentication. +/// with real Docker, including container creation, configuration, and cleanup. 
#[tokio::test] -async fn test_create_redis_container_without_auth() { - // Skip if Docker is not available +async fn test_create_basic_redis_container() { if !docker_available() { println!("⚠️ Docker is not available, skipping Redis test"); return; } - let container_name = "test-redis-no-auth-integration"; + let container_name = "test-redis-basic-integration"; // Initial cleanup clean_container(container_name).await; - // Arrange - Redis configuration without authentication let service = DockerService::new(); - let request = CreateDatabaseRequest { + + let env_vars = HashMap::new(); // Redis doesn't need env vars for basic setup + + let request = DockerRunRequest { name: container_name.to_string(), - db_type: "Redis".to_string(), - version: "7-alpine".to_string(), - port: 6381, - persist_data: false, - username: None, - password: "".to_string(), // No password - database_name: None, - enable_auth: false, // No authentication - max_connections: Some(100), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, + docker_args: DockerRunArgs { + image: "redis:7-alpine".to_string(), + env_vars, + ports: vec![PortMapping { + host: 6380, + container: 6379, + }], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "Redis".to_string(), + version: "7-alpine".to_string(), + port: 6380, + username: None, + password: String::new(), + database_name: None, + persist_data: false, + enable_auth: false, + max_connections: Some(10000), + }, }; - // Act - Build and execute command - let command_result = service.build_docker_command(&request, &None); - assert!( - command_result.is_ok(), - "DockerService should build valid Redis command" - ); - - let command = command_result.unwrap(); - println!("🐳 Redis command without auth generated: {:?}", command); + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); + println!("🐳 Redis command generated: {:?}", command); // Verify Redis-specific elements assert!( @@ -58,69 +63,48 @@ async fn test_create_redis_container_without_auth() { "Should use correct Redis image" ); assert!( - command.contains(&"6381:6379".to_string()), + command.contains(&"6380:6379".to_string()), "Should map Redis port correctly" ); - // Without auth, should not contain requirepass - assert!( - !command.iter().any(|arg| arg.contains("requirepass")), - "Redis without auth should not have requirepass" - ); - // Execute Docker command - let output = Command::new("docker") - .args(&command) - .output() - .expect("Failed to execute Redis command"); + let container_id = run_docker_command(command).await; - if !output.status.success() { - let error = String::from_utf8_lossy(&output.stderr); + if let Err(e) = container_id { clean_container(container_name).await; - panic!("Docker failed to create Redis container: {}", error); + panic!("Docker failed to create Redis container: {}", e); } - let container_id = String::from_utf8_lossy(&output.stdout).trim().to_string(); - println!("✅ Redis container created with ID: {}", container_id); + println!( + "✅ Redis container created with ID: {}", + container_id.unwrap() + ); - // Verify that the container exists and is running - tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + // Wait for Redis to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "Redis container failed to start within timeout" + ); assert!( container_exists(container_name).await, "Redis container should 
exist" ); - // Verify status - let status_output = Command::new("docker") - .args(&[ - "ps", - "--filter", - &format!("name={}", container_name), - "--format", - "{{.Status}}", - ]) - .output() - .expect("Failed to get Redis status"); - - let status = String::from_utf8_lossy(&status_output.stdout) - .trim() - .to_string(); - println!("📊 Redis container status: {}", status); + if let Some(status) = get_container_status(container_name).await { + println!("📊 Redis container status: {}", status); + assert!(status.contains("Up"), "Container should be running"); + } // Cleanup clean_container(container_name).await; - assert!( - !container_exists(container_name).await, - "Redis container should be deleted" - ); - println!("✅ Redis test without auth completed successfully"); + println!("✅ Basic Redis test completed successfully"); } #[tokio::test] async fn test_create_redis_container_with_auth() { if !docker_available() { - println!("⚠️ Docker is not available, skipping Redis test with auth"); + println!("⚠️ Docker is not available, skipping Redis auth test"); return; } @@ -129,190 +113,178 @@ async fn test_create_redis_container_with_auth() { // Initial cleanup clean_container(container_name).await; - // Arrange - Redis configuration with authentication let service = DockerService::new(); - let request = CreateDatabaseRequest { + + let env_vars = HashMap::new(); + + let request = DockerRunRequest { name: container_name.to_string(), - db_type: "Redis".to_string(), - version: "7-alpine".to_string(), - port: 6382, - persist_data: false, - username: None, - password: "redis_secure_pass_123".to_string(), - database_name: None, - enable_auth: true, // With authentication - max_connections: Some(200), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, + docker_args: DockerRunArgs { + image: "redis:7-alpine".to_string(), + env_vars, + ports: vec![PortMapping { + host: 6381, + container: 6379, + }], + volumes: vec![], + command: vec![ + "redis-server".to_string(), + "--requirepass".to_string(), + "myredispass123".to_string(), + ], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "Redis".to_string(), + version: "7-alpine".to_string(), + port: 6381, + username: None, + password: "myredispass123".to_string(), + database_name: None, + persist_data: false, + enable_auth: true, + max_connections: Some(10000), + }, }; - // Act - Build command with authentication - let command_result = service.build_docker_command(&request, &None); - assert!( - command_result.is_ok(), - "Should build Redis command with auth" - ); - - let command = command_result.unwrap(); + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); println!("🐳 Redis command with auth: {:?}", command); - // Verify Redis-specific elements with auth - assert!( - command.contains(&"redis:7-alpine".to_string()), - "Should use correct Redis image" - ); - assert!( - command.contains(&"6382:6379".to_string()), - "Should map Redis port correctly" - ); - assert!( - command.contains(&"redis-server".to_string()), - "Should include redis-server for configuration" - ); + // Verify auth command assert!( command.contains(&"--requirepass".to_string()), - "Should include requirepass for auth" + "Should include requirepass flag" ); assert!( - command.contains(&"redis_secure_pass_123".to_string()), - "Should include the password" + command.contains(&"myredispass123".to_string()), + "Should include password" ); - // Execute command - let output = 
Command::new("docker") - .args(&command) - .output() - .expect("Failed to execute Redis command with auth"); + let container_id = run_docker_command(command).await; - if !output.status.success() { - let error = String::from_utf8_lossy(&output.stderr); + if let Err(e) = container_id { clean_container(container_name).await; - panic!( - "Docker failed to create Redis container with auth: {}", - error - ); + panic!("Docker failed to create Redis container with auth: {}", e); } - let container_id = String::from_utf8_lossy(&output.stdout).trim().to_string(); - println!( - "✅ Redis container with auth created with ID: {}", - container_id + println!("✅ Redis container with auth created"); + + // Wait for Redis to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "Redis container with auth failed to start within timeout" ); - // Verify functionality - tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; assert!( container_exists(container_name).await, - "Redis container with auth should exist" + "Container should exist" ); // Cleanup clean_container(container_name).await; - assert!( - !container_exists(container_name).await, - "Redis container should be deleted" - ); - println!("✅ Redis test with auth completed successfully"); + println!("✅ Redis auth test completed"); } #[tokio::test] -async fn test_create_redis_container_with_advanced_configuration() { +async fn test_create_redis_container_with_persistence() { if !docker_available() { - println!("⚠️ Docker is not available, skipping advanced Redis test"); + println!("⚠️ Docker is not available, skipping Redis persistence test"); return; } - let container_name = "test-redis-advanced-integration"; + let container_name = "test-redis-persist-integration"; + let volume_name = format!("{}-data", container_name); // Initial cleanup clean_container(container_name).await; + clean_volume(&volume_name).await; - // Arrange - Redis with advanced configuration using redis_settings let service = DockerService::new(); - let redis_settings = RedisSettings { - max_memory: "256mb".to_string(), - max_memory_policy: "allkeys-lru".to_string(), - append_only: true, - require_pass: true, - }; - let request = CreateDatabaseRequest { + let env_vars = HashMap::new(); + + let request = DockerRunRequest { name: container_name.to_string(), - db_type: "Redis".to_string(), - version: "7-alpine".to_string(), - port: 6383, - persist_data: false, - username: None, - password: "advanced_pass".to_string(), - database_name: None, - enable_auth: true, - max_connections: Some(500), - postgres_settings: None, - mysql_settings: None, - redis_settings: Some(redis_settings), - mongo_settings: None, + docker_args: DockerRunArgs { + image: "redis:7-alpine".to_string(), + env_vars, + ports: vec![PortMapping { + host: 6382, + container: 6379, + }], + volumes: vec![VolumeMount { + name: volume_name.clone(), + path: "/data".to_string(), + }], + command: vec![ + "redis-server".to_string(), + "--appendonly".to_string(), + "yes".to_string(), + ], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "Redis".to_string(), + version: "7-alpine".to_string(), + port: 6382, + username: None, + password: String::new(), + database_name: None, + persist_data: true, + enable_auth: false, + max_connections: Some(10000), + }, }; - // Act - Build command with advanced configuration - let command_result = service.build_docker_command(&request, &None); - assert!( - command_result.is_ok(), - "Should build advanced Redis command" - ); - - let 
command = command_result.unwrap(); - println!("🐳 Advanced Redis command: {:?}", command); + let command = service.build_docker_command_from_args(&request.name, &request.docker_args); + println!("🐳 Redis command with persistence: {:?}", command); - // Verify advanced configuration assert!( - command.contains(&"--maxmemory".to_string()), - "Should include maxmemory" + command.contains(&"-v".to_string()), + "Should include volume flag" ); assert!( - command.contains(&"256mb".to_string()), - "Should include memory limit" - ); - assert!( - command.contains(&"--maxmemory-policy".to_string()), - "Should include memory policy" - ); - assert!( - command.contains(&"allkeys-lru".to_string()), - "Should include LRU policy" + command.contains(&format!("{}:/data", volume_name)), + "Should map Redis data volume" ); assert!( command.contains(&"--appendonly".to_string()), - "Should include appendonly" - ); - assert!( - command.contains(&"yes".to_string()), - "Should enable appendonly" + "Should enable AOF persistence" ); - // Execute command - let output = Command::new("docker") - .args(&command) - .output() - .expect("Failed to execute advanced Redis command"); + if let Err(e) = create_volume(&volume_name).await { + println!("⚠️ Warning when creating volume: {}", e); + } + + let container_id = run_docker_command(command).await; - if !output.status.success() { - let error = String::from_utf8_lossy(&output.stderr); + if let Err(e) = container_id { clean_container(container_name).await; + clean_volume(&volume_name).await; panic!( - "Docker failed to create advanced Redis container: {}", - error + "Docker failed to create Redis container with persistence: {}", + e ); } - println!("✅ Advanced Redis container created successfully"); + println!("✅ Redis container with persistence created"); - tokio::time::sleep(tokio::time::Duration::from_secs(2)).await; + // Wait for Redis to be ready + assert!( + wait_for_container_ready(container_name, 10, 1).await, + "Redis container with persistence failed to start within timeout" + ); + + assert!( + container_exists(container_name).await, + "Container should exist" + ); + assert!(volume_exists(&volume_name).await, "Volume should exist"); // Cleanup clean_container(container_name).await; + clean_volume(&volume_name).await; - println!("✅ Advanced Redis test completed successfully"); + println!("✅ Redis persistence test completed"); } diff --git a/src-tauri/tests/integration/utils.rs b/src-tauri/tests/integration/utils.rs index e601592..01caeae 100644 --- a/src-tauri/tests/integration/utils.rs +++ b/src-tauri/tests/integration/utils.rs @@ -73,18 +73,68 @@ pub async fn get_container_status(name: &str) -> Option<String> { }) } -/// Waits a specified time for the container to initialize -pub async fn wait_for_container_ready(seconds: u64) { +/// Gets the port mapping of a container +pub async fn get_container_port(name: &str) -> Option<String> { + Command::new("docker") + .args(&[ + "ps", + "-a", + "--filter", + &format!("name={}", name), + "--format", + "{{.Ports}}", + ]) + .output() + .ok() + .and_then(|output| { + if output.status.success() { + let ports = String::from_utf8_lossy(&output.stdout).trim().to_string(); + if !ports.is_empty() { + Some(ports) + } else { + None + } + } else { + None + } + }) +} + +/// Waits for a container to be ready with retry logic +/// Returns true if container is ready, false if timeout is reached +pub async fn wait_for_container_ready( + container_name: &str, + max_attempts: u32, + delay_secs: u64, +) -> bool { + println!( - "⏳ Waiting {} seconds for the
container to initialize...", - seconds + "⏳ Waiting for container '{}' to be ready (max {} attempts, {}s delay)...", + container_name, max_attempts, delay_secs ); - tokio::time::sleep(tokio::time::Duration::from_secs(seconds)).await; -} -/// Shorter alias to wait for container -pub async fn wait_for_container(seconds: u64) { - wait_for_container_ready(seconds).await; + for attempt in 1..=max_attempts { + if container_exists(container_name).await { + if let Some(status) = get_container_status(container_name).await { + if status.contains("Up") { + println!( + "✅ Container '{}' is ready (attempt {}/{})", + container_name, attempt, max_attempts + ); + return true; + } + } + } + + if attempt < max_attempts { + tokio::time::sleep(tokio::time::Duration::from_secs(delay_secs)).await; + } + } + + println!( + "❌ Container '{}' did not become ready after {} attempts", + container_name, max_attempts + ); + false } /// Creates a Docker volume @@ -113,3 +163,26 @@ pub async fn clean_volume(name: &str) { println!("✅ Volume {} cleaned up", name); } + +/// Checks if a volume exists +pub async fn volume_exists(name: &str) -> bool { + Command::new("docker") + .args(&["volume", "inspect", name]) + .output() + .map(|output| output.status.success()) + .unwrap_or(false) +} + +/// Executes a Docker command and returns stdout on success +pub async fn run_docker_command(args: Vec<String>) -> Result<String, String> { + let output = Command::new("docker") + .args(&args) + .output() + .map_err(|e| format!("Failed to execute Docker command: {}", e))?; + + if output.status.success() { + Ok(String::from_utf8_lossy(&output.stdout).trim().to_string()) + } else { + Err(String::from_utf8_lossy(&output.stderr).trim().to_string()) + } +} diff --git a/src-tauri/tests/integration_tests.rs b/src-tauri/tests/integration_tests.rs index 559c8a5..c02da82 100644 --- a/src-tauri/tests/integration_tests.rs +++ b/src-tauri/tests/integration_tests.rs @@ -1,13 +1,22 @@ /// Integration tests for Docker DB Manager /// -/// These tests verify the complete functionality by interacting with real components. -/// Some require Docker to be running on the system. +/// These tests verify the complete functionality by interacting with real Docker. +/// Docker must be running on the system for these tests to pass.
+/// +/// Tests are organized by database type and functionality: +/// - PostgreSQL: Basic creation, volumes, and port updates +/// - MySQL: Basic creation and volumes +/// - Redis: Basic creation, auth, and persistence +/// - MongoDB: Basic creation, volumes, and no-auth mode #[path = "integration/postgresql_integration_test.rs"] mod postgresql_integration_test; +#[path = "integration/mysql_integration_test.rs"] +mod mysql_integration_test; + #[path = "integration/redis_integration_test.rs"] mod redis_integration_test; -#[path = "integration/container_update_test.rs"] -mod container_update_test; +#[path = "integration/mongodb_integration_test.rs"] +mod mongodb_integration_test; diff --git a/src-tauri/tests/unit/docker_service_test.rs b/src-tauri/tests/unit/docker_service_test.rs new file mode 100644 index 0000000..6ded0a6 --- /dev/null +++ b/src-tauri/tests/unit/docker_service_test.rs @@ -0,0 +1,175 @@ +use docker_db_manager_lib::services::DockerService; +use docker_db_manager_lib::types::docker::*; +use std::collections::HashMap; + +#[cfg(test)] +mod docker_service_tests { + use super::*; + + fn create_test_docker_args() -> DockerRunArgs { + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_USER".to_string(), "postgres".to_string()); + env_vars.insert("POSTGRES_PASSWORD".to_string(), "secret123".to_string()); + env_vars.insert("POSTGRES_DB".to_string(), "testdb".to_string()); + + DockerRunArgs { + image: "postgres:16".to_string(), + env_vars, + ports: vec![PortMapping { + host: 5432, + container: 5432, + }], + volumes: vec![VolumeMount { + name: "test-postgres-data".to_string(), + path: "/var/lib/postgresql/data".to_string(), + }], + command: vec![], + } + } + + #[test] + fn test_build_docker_command_from_args_basic() { + let service = DockerService::new(); + let args = create_test_docker_args(); + + let command_args = service.build_docker_command_from_args("test-postgres", &args); + + let command = command_args.join(" "); + + // Verify basic structure + assert!(command.contains("run")); + assert!(command.contains("-d")); + assert!(command.contains("--name")); + assert!(command.contains("test-postgres")); + assert!(command.contains("postgres:16")); + } + + #[test] + fn test_build_docker_command_with_ports() { + let service = DockerService::new(); + let args = create_test_docker_args(); + + let command_args = service.build_docker_command_from_args("test-db", &args); + let command = command_args.join(" "); + + // Verify port mapping + assert!(command.contains("-p")); + assert!(command.contains("5432:5432")); + } + + #[test] + fn test_build_docker_command_with_env_vars() { + let service = DockerService::new(); + let args = create_test_docker_args(); + + let command_args = service.build_docker_command_from_args("test-db", &args); + let command = command_args.join(" "); + + // Verify environment variables + assert!(command.contains("-e")); + assert!(command.contains("POSTGRES_USER=postgres")); + assert!(command.contains("POSTGRES_PASSWORD=secret123")); + assert!(command.contains("POSTGRES_DB=testdb")); + } + + #[test] + fn test_build_docker_command_with_volume() { + let service = DockerService::new(); + let args = create_test_docker_args(); + + let command_args = service.build_docker_command_from_args("test-db", &args); + let command = command_args.join(" "); + + // Verify volume mount + assert!(command.contains("-v")); + assert!(command.contains("test-postgres-data:/var/lib/postgresql/data")); + } + + #[test] + fn test_build_docker_command_without_volume() { + let service = 
DockerService::new(); + let mut args = create_test_docker_args(); + args.volumes = vec![]; // No volumes + + let command_args = service.build_docker_command_from_args("test-db", &args); + let command = command_args.join(" "); + + // Should not contain volume flags + assert!(!command.contains("-v")); + } + + #[test] + fn test_build_docker_command_with_command_args() { + let service = DockerService::new(); + let mut args = create_test_docker_args(); + args.image = "redis:7".to_string(); + args.command = vec![ + "redis-server".to_string(), + "--requirepass".to_string(), + "secret".to_string(), + ]; + + let command_args = service.build_docker_command_from_args("test-redis", &args); + let command = command_args.join(" "); + + // Verify command arguments + assert!(command.contains("redis-server")); + assert!(command.contains("--requirepass")); + assert!(command.contains("secret")); + } + + #[test] + fn test_build_docker_command_with_multiple_ports() { + let service = DockerService::new(); + let mut args = create_test_docker_args(); + args.ports = vec![ + PortMapping { + host: 8080, + container: 80, + }, + PortMapping { + host: 8443, + container: 443, + }, + ]; + + let command_args = service.build_docker_command_from_args("test-web", &args); + let command = command_args.join(" "); + + // Verify multiple port mappings + assert!(command.contains("8080:80")); + assert!(command.contains("8443:443")); + } + + #[test] + fn test_build_docker_command_with_no_env_vars() { + let service = DockerService::new(); + let mut args = create_test_docker_args(); + args.env_vars = HashMap::new(); + + let command_args = service.build_docker_command_from_args("test-db", &args); + let command = command_args.join(" "); + + // Should still be valid without env vars + assert!(command.contains("run")); + assert!(command.contains("-d")); + } + + #[test] + fn test_docker_run_args_serialization() { + let args = create_test_docker_args(); + + // Verify the structure can be serialized + let json = serde_json::to_string(&args); + assert!(json.is_ok()); + + // Verify it can be deserialized back + let json_str = json.unwrap(); + let deserialized: Result<DockerRunArgs, _> = serde_json::from_str(&json_str); + assert!(deserialized.is_ok()); + + let recovered = deserialized.unwrap(); + assert_eq!(recovered.image, "postgres:16"); + assert_eq!(recovered.ports.len(), 1); + } +} diff --git a/src-tauri/tests/unit/generic_commands_test.rs b/src-tauri/tests/unit/generic_commands_test.rs new file mode 100644 index 0000000..3f1eef0 --- /dev/null +++ b/src-tauri/tests/unit/generic_commands_test.rs @@ -0,0 +1,218 @@ +use docker_db_manager_lib::types::docker::*; +use std::collections::HashMap; + +#[cfg(test)] +mod generic_commands_tests { + use super::*; + + /// Helper to create a test DockerRunRequest + fn create_test_docker_request(name: &str, image: &str, port: i32) -> DockerRunRequest { + let mut env_vars = HashMap::new(); + env_vars.insert("TEST_VAR".to_string(), "test_value".to_string()); + + DockerRunRequest { + name: name.to_string(), + docker_args: DockerRunArgs { + image: image.to_string(), + env_vars, + ports: vec![PortMapping { + host: port, + container: 5432, + }], + volumes: vec![VolumeMount { + name: format!("{}-data", name), + path: "/var/lib/postgresql/data".to_string(), + }], + command: vec![], + }, + metadata: ContainerMetadata { + id: uuid::Uuid::new_v4().to_string(), + db_type: "PostgreSQL".to_string(), + version: "16".to_string(), + port, + username: Some("postgres".to_string()), + password: "test123".to_string(), + database_name:
Some("testdb".to_string()), + persist_data: true, + enable_auth: true, + max_connections: Some(100), + }, + } + } + + #[test] + fn test_docker_run_request_structure() { + let request = create_test_docker_request("test-postgres", "postgres:16", 5432); + + assert_eq!(request.name, "test-postgres"); + assert_eq!(request.docker_args.image, "postgres:16"); + assert_eq!(request.docker_args.ports.len(), 1); + assert_eq!(request.docker_args.ports[0].host, 5432); + assert_eq!(request.docker_args.volumes.len(), 1); + assert_eq!(request.metadata.db_type, "PostgreSQL"); + assert_eq!(request.metadata.version, "16"); + } + + #[test] + fn test_port_mapping() { + let port_mapping = PortMapping { + host: 5432, + container: 5432, + }; + + assert_eq!(port_mapping.host, 5432); + assert_eq!(port_mapping.container, 5432); + } + + #[test] + fn test_volume_mount() { + let volume = VolumeMount { + name: "test-data".to_string(), + path: "/data".to_string(), + }; + + assert_eq!(volume.name, "test-data"); + assert_eq!(volume.path, "/data"); + } + + #[test] + fn test_container_metadata() { + let metadata = ContainerMetadata { + id: "test-id".to_string(), + db_type: "PostgreSQL".to_string(), + version: "16".to_string(), + port: 5432, + username: Some("postgres".to_string()), + password: "secret".to_string(), + database_name: Some("mydb".to_string()), + persist_data: true, + enable_auth: true, + max_connections: Some(100), + }; + + assert_eq!(metadata.db_type, "PostgreSQL"); + assert_eq!(metadata.version, "16"); + assert_eq!(metadata.port, 5432); + assert!(metadata.persist_data); + assert!(metadata.enable_auth); + assert_eq!(metadata.max_connections, Some(100)); + } + + #[test] + fn test_docker_run_args_with_empty_command() { + let args = DockerRunArgs { + image: "postgres:16".to_string(), + env_vars: HashMap::new(), + ports: vec![], + volumes: vec![], + command: vec![], + }; + + assert_eq!(args.image, "postgres:16"); + assert!(args.command.is_empty()); + assert!(args.ports.is_empty()); + } + + #[test] + fn test_docker_run_args_with_command() { + let args = DockerRunArgs { + image: "redis:7".to_string(), + env_vars: HashMap::new(), + ports: vec![], + volumes: vec![], + command: vec![ + "redis-server".to_string(), + "--requirepass".to_string(), + "secret".to_string(), + ], + }; + + assert_eq!(args.image, "redis:7"); + assert_eq!(args.command.len(), 3); + assert_eq!(args.command[0], "redis-server"); + } + + #[test] + fn test_multiple_port_mappings() { + let request = DockerRunRequest { + name: "multi-port-test".to_string(), + docker_args: DockerRunArgs { + image: "test:1.0".to_string(), + env_vars: HashMap::new(), + ports: vec![ + PortMapping { + host: 8080, + container: 80, + }, + PortMapping { + host: 8443, + container: 443, + }, + ], + volumes: vec![], + command: vec![], + }, + metadata: ContainerMetadata { + id: "test-id".to_string(), + db_type: "Custom".to_string(), + version: "1.0".to_string(), + port: 8080, + username: None, + password: "".to_string(), + database_name: None, + persist_data: false, + enable_auth: false, + max_connections: None, + }, + }; + + assert_eq!(request.docker_args.ports.len(), 2); + assert_eq!(request.docker_args.ports[0].host, 8080); + assert_eq!(request.docker_args.ports[1].host, 8443); + } + + #[test] + fn test_multiple_volumes() { + let volumes = vec![ + VolumeMount { + name: "data-vol".to_string(), + path: "/data".to_string(), + }, + VolumeMount { + name: "config-vol".to_string(), + path: "/config".to_string(), + }, + ]; + + assert_eq!(volumes.len(), 2); + assert_eq!(volumes[0].name, 
"data-vol"); + assert_eq!(volumes[1].path, "/config"); + } + + #[test] + fn test_env_vars_multiple() { + let mut env_vars = HashMap::new(); + env_vars.insert("POSTGRES_USER".to_string(), "admin".to_string()); + env_vars.insert("POSTGRES_PASSWORD".to_string(), "secret".to_string()); + env_vars.insert("POSTGRES_DB".to_string(), "mydb".to_string()); + + let args = DockerRunArgs { + image: "postgres:16".to_string(), + env_vars: env_vars.clone(), + ports: vec![], + volumes: vec![], + command: vec![], + }; + + assert_eq!(args.env_vars.len(), 3); + assert_eq!( + args.env_vars.get("POSTGRES_USER"), + Some(&"admin".to_string()) + ); + assert_eq!( + args.env_vars.get("POSTGRES_PASSWORD"), + Some(&"secret".to_string()) + ); + assert_eq!(args.env_vars.get("POSTGRES_DB"), Some(&"mydb".to_string())); + } +} diff --git a/src-tauri/tests/unit/services/docker_command_builder_test.rs b/src-tauri/tests/unit/services/docker_command_builder_test.rs deleted file mode 100644 index b07bade..0000000 --- a/src-tauri/tests/unit/services/docker_command_builder_test.rs +++ /dev/null @@ -1,207 +0,0 @@ -use docker_db_manager_lib::services::DockerService; -use docker_db_manager_lib::types::CreateDatabaseRequest; - -/// Unit tests for Docker command building -/// -/// These tests verify that Docker commands are built correctly -/// according to input parameters, without executing real Docker. -mod docker_command_builder_tests { - use super::*; - - /// Helper function to create a basic PostgreSQL request - fn create_basic_postgresql_request() -> CreateDatabaseRequest { - CreateDatabaseRequest { - name: "test-postgres".to_string(), - db_type: "PostgreSQL".to_string(), - version: "15".to_string(), - port: 5432, - persist_data: false, - username: Some("testuser".to_string()), - password: "testpass".to_string(), - database_name: Some("testdb".to_string()), - enable_auth: true, - max_connections: Some(100), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, - } - } - - #[test] - fn should_build_basic_postgresql_command_without_volume() { - // Arrange - let service = DockerService::new(); - let request = create_basic_postgresql_request(); - let volume_name = None; - - // Act - let resultado = service.build_docker_command(&request, &volume_name); - - // Assert - assert!(resultado.is_ok(), "Command building should be successful"); - - let comando = resultado.unwrap(); - - // Verify basic command elements - assert_eq!(comando[0], "run", "First argument should be 'run'"); - assert_eq!(comando[1], "-d", "Should run in daemon mode"); - assert!( - comando.contains(&"--name".to_string()), - "Should include --name" - ); - assert!( - comando.contains(&"test-postgres".to_string()), - "Should include container name" - ); - assert!( - comando.contains(&"-p".to_string()), - "Should include port mapping" - ); - assert!( - comando.contains(&"5432:5432".to_string()), - "Should map port correctly" - ); - } - - #[test] - fn should_include_volume_when_persist_data_is_true() { - // Arrange - let service = DockerService::new(); - let mut request = create_basic_postgresql_request(); - request.persist_data = true; - let volume_name = Some("test-postgres-data".to_string()); - - // Act - let resultado = service.build_docker_command(&request, &volume_name); - - // Assert - assert!(resultado.is_ok(), "Command building should be successful"); - - let comando = resultado.unwrap(); - - // Verify that it includes the volume - assert!( - comando.contains(&"-v".to_string()), - "Should include volume flag" - ); - 
assert!( - comando.contains(&"test-postgres-data:/var/lib/postgresql/data".to_string()), - "Should map volume correctly" - ); - } - - #[test] - fn should_use_custom_port_when_specified() { - // Arrange - let service = DockerService::new(); - let mut request = create_basic_postgresql_request(); - request.port = 5433; // Custom port - let volume_name = None; - - // Act - let resultado = service.build_docker_command(&request, &volume_name); - - // Assert - assert!(resultado.is_ok(), "Command building should be successful"); - - let comando = resultado.unwrap(); - - // Verify that it uses the custom port - assert!( - comando.contains(&"5433:5432".to_string()), - "Should map custom port to correct internal port" - ); - } - - #[test] - fn should_include_postgresql_environment_variables() { - // Arrange - let service = DockerService::new(); - let request = create_basic_postgresql_request(); - let volume_name = None; - - // Act - let resultado = service.build_docker_command(&request, &volume_name); - - // Assert - assert!(resultado.is_ok(), "Command building should be successful"); - - let comando = resultado.unwrap(); - - // Verify PostgreSQL environment variables - assert!( - comando.contains(&"-e".to_string()), - "Should include environment variables" - ); - assert!( - comando.contains(&"POSTGRES_USER=testuser".to_string()), - "Should include username" - ); - assert!( - comando.contains(&"POSTGRES_PASSWORD=testpass".to_string()), - "Should include password" - ); - assert!( - comando.contains(&"POSTGRES_DB=testdb".to_string()), - "Should include database name" - ); - } - - /// Helper to create MySQL request - fn create_basic_mysql_request() -> CreateDatabaseRequest { - CreateDatabaseRequest { - name: "test-mysql".to_string(), - db_type: "MySQL".to_string(), - version: "8.0".to_string(), - port: 3306, - persist_data: false, - username: Some("root".to_string()), - password: "rootpass".to_string(), - database_name: Some("testdb".to_string()), - enable_auth: true, - max_connections: Some(151), - postgres_settings: None, - mysql_settings: None, - redis_settings: None, - mongo_settings: None, - } - } - - #[test] - fn should_build_mysql_command_correctly() { - // Arrange - let service = DockerService::new(); - let request = create_basic_mysql_request(); - let volume_name = None; - - // Act - let resultado = service.build_docker_command(&request, &volume_name); - - // Assert - assert!( - resultado.is_ok(), - "MySQL command building should be successful" - ); - - let comando = resultado.unwrap(); - - // Verify MySQL-specific elements - assert!( - comando.contains(&"3306:3306".to_string()), - "Should map MySQL port" - ); - assert!( - comando.contains(&"MYSQL_ROOT_PASSWORD=rootpass".to_string()), - "Should include root password" - ); - assert!( - comando.contains(&"MYSQL_DATABASE=testdb".to_string()), - "Should include database name" - ); - assert!( - comando.contains(&"mysql:8.0".to_string()), - "Should use correct MySQL image" - ); - } -} diff --git a/src-tauri/tests/unit/services/docker_service_test.rs b/src-tauri/tests/unit/services/docker_service_test.rs deleted file mode 100644 index 0a22a0b..0000000 --- a/src-tauri/tests/unit/services/docker_service_test.rs +++ /dev/null @@ -1,115 +0,0 @@ -use docker_db_manager_lib::services::DockerService; - -/// Unit tests for DockerService -/// -/// These tests verify the basic functionality of the Docker service -/// without interacting with real Docker, only testing pure logic. 
-mod docker_service_unit_tests { - use super::*; - - /// Test to verify that the correct default ports are returned - /// for each supported database type - mod default_ports { - use super::*; - - #[test] - fn should_return_port_5432_for_postgresql() { - // Arrange - Prepare - let service = DockerService::new(); - - // Act - Execute - let puerto = service.get_default_port("PostgreSQL"); - - // Assert - Verify - assert_eq!(puerto, 5432, "PostgreSQL should use port 5432"); - } - - #[test] - fn should_return_port_3306_for_mysql() { - let service = DockerService::new(); - let puerto = service.get_default_port("MySQL"); - assert_eq!(puerto, 3306, "MySQL should use port 3306"); - } - - #[test] - fn should_return_port_6379_for_redis() { - let service = DockerService::new(); - let puerto = service.get_default_port("Redis"); - assert_eq!(puerto, 6379, "Redis should use port 6379"); - } - - #[test] - fn should_return_port_27017_for_mongodb() { - let service = DockerService::new(); - let puerto = service.get_default_port("MongoDB"); - assert_eq!(puerto, 27017, "MongoDB should use port 27017"); - } - - #[test] - fn should_return_default_port_for_unknown_database() { - let service = DockerService::new(); - let puerto = service.get_default_port("BaseDesconocida"); - assert_eq!( - puerto, 5432, - "An unknown database should use the default port (PostgreSQL)" - ); - } - - #[test] - fn should_handle_empty_string() { - let service = DockerService::new(); - let puerto = service.get_default_port(""); - assert_eq!(puerto, 5432, "An empty string should use the default port"); - } - } - - /// Tests to verify the default data paths - /// for each database type - mod data_paths { - use super::*; - - #[test] - fn should_return_correct_path_for_postgresql() { - let service = DockerService::new(); - let ruta = service.get_data_path("PostgreSQL"); - assert_eq!( - ruta, "/var/lib/postgresql/data", - "PostgreSQL should use its standard data path" - ); - } - - #[test] - fn should_return_correct_path_for_mysql() { - let service = DockerService::new(); - let ruta = service.get_data_path("MySQL"); - assert_eq!( - ruta, "/var/lib/mysql", - "MySQL should use its standard data path" - ); - } - - #[test] - fn should_return_correct_path_for_redis() { - let service = DockerService::new(); - let ruta = service.get_data_path("Redis"); - assert_eq!(ruta, "/data", "Redis should use /data as path"); - } - - #[test] - fn should_return_correct_path_for_mongodb() { - let service = DockerService::new(); - let ruta = service.get_data_path("MongoDB"); - assert_eq!(ruta, "/data/db", "MongoDB should use /data/db as path"); - } - - #[test] - fn should_return_default_path_for_unknown_database() { - let service = DockerService::new(); - let ruta = service.get_data_path("BaseDesconocida"); - assert_eq!( - ruta, "/data", - "An unknown database should use /data by default" - ); - } - } -} diff --git a/src-tauri/tests/unit/services/volume_migration_test.rs b/src-tauri/tests/unit/services/volume_migration_test.rs deleted file mode 100644 index 006a640..0000000 --- a/src-tauri/tests/unit/services/volume_migration_test.rs +++ /dev/null @@ -1,329 +0,0 @@ -/// Unit tests for volume migration -/// -/// These tests verify the volume migration logic when -/// a container name is changed. 
-mod volume_migration_tests { - - /// Test to verify that the volume naming logic is correct - mod volume_naming { - - #[test] - fn should_generate_correct_volume_name_for_container() { - // Arrange - let container_name = "mi-postgres"; - let expected_volume_name = "mi-postgres-data"; - - // Act - let volume_name = format!("{}-data", container_name); - - // Assert - assert_eq!( - volume_name, expected_volume_name, - "The volume name should follow the pattern {{container_name}}-data" - ); - } - - #[test] - fn should_handle_names_with_special_characters() { - let container_name = "mi_contenedor-test_123"; - let expected_volume_name = "mi_contenedor-test_123-data"; - let volume_name = format!("{}-data", container_name); - - assert_eq!( - volume_name, expected_volume_name, - "Should preserve special characters in the volume name" - ); - } - - #[test] - fn should_detect_name_change_correctly() { - let old_name = "contenedor-viejo"; - let new_name = "contenedor-nuevo"; - - let old_volume = format!("{}-data", old_name); - let new_volume = format!("{}-data", new_name); - - assert_ne!( - old_volume, new_volume, - "Volumes should have different names when the container name changes" - ); - assert_eq!(old_volume, "contenedor-viejo-data"); - assert_eq!(new_volume, "contenedor-nuevo-data"); - } - } - - /// Tests for the migration decision logic - mod migration_logic { - - #[test] - fn should_require_migration_when_name_changes_and_has_persistence() { - // Arrange - let old_name = "postgres-old"; - let new_name = "postgres-new"; - let has_persistent_data = true; - - // Act - let should_migrate = old_name != new_name && has_persistent_data; - - // Assert - assert!( - should_migrate, - "Should require migration when the name changes and has persistent data" - ); - } - - #[test] - fn should_not_require_migration_when_name_does_not_change() { - let old_name = "postgres-test"; - let new_name = "postgres-test"; - let has_persistent_data = true; - - let should_migrate = old_name != new_name && has_persistent_data; - - assert!( - !should_migrate, - "Should not require migration when the name does not change" - ); - } - - #[test] - fn should_not_require_migration_when_no_persistence() { - let old_name = "postgres-old"; - let new_name = "postgres-new"; - let has_persistent_data = false; - - let should_migrate = old_name != new_name && has_persistent_data; - - assert!( - !should_migrate, - "Should not require migration when there is no persistent data" - ); - } - - #[test] - fn should_clean_old_volume_when_changing_from_persistent_to_non_persistent() { - let old_persistent = true; - let new_persistent = false; - let name_changed = true; - - let should_cleanup_old = old_persistent && !new_persistent && name_changed; - - assert!( - should_cleanup_old, - "Should clean the old volume when changing from persistent to non-persistent" - ); - } - } - - /// Tests to validate the structure of update requests - mod update_request_validation { - use docker_db_manager_lib::types::UpdateContainerRequest; - - #[test] - fn should_create_update_request_correctly() { - // Arrange - let container_id = "test-container-id"; - let new_name = "nuevo-nombre"; - let new_port = 5433; - - // Act - let request = UpdateContainerRequest { - container_id: container_id.to_string(), - name: Some(new_name.to_string()), - port: Some(new_port), - username: None, - password: None, - database_name: None, - max_connections: None, - enable_auth: None, - persist_data: None, - restart_policy: None, - auto_start: None, - }; - - // Assert - 
assert_eq!(request.container_id, container_id); - assert_eq!(request.name, Some(new_name.to_string())); - assert_eq!(request.port, Some(new_port)); - } - - #[test] - fn should_handle_request_with_only_name() { - let request = UpdateContainerRequest { - container_id: "test-id".to_string(), - name: Some("nuevo-nombre".to_string()), - port: None, - username: None, - password: None, - database_name: None, - max_connections: None, - enable_auth: None, - persist_data: None, - restart_policy: None, - auto_start: None, - }; - - assert!(request.name.is_some()); - assert!(request.port.is_none()); - assert!(request.password.is_none()); - } - } - - /// Tests to validate the creation of migration containers - mod migration_container_logic { - - #[test] - fn should_generate_unique_temporary_container_name() { - // Simulate the generation of unique names for temporary containers - let prefix = "temp-migrate-"; - let uuid1 = uuid::Uuid::new_v4().to_string(); - let uuid2 = uuid::Uuid::new_v4().to_string(); - - let name1 = format!("{}{}", prefix, uuid1); - let name2 = format!("{}{}", prefix, uuid2); - - assert_ne!(name1, name2, "Temporary container names should be unique"); - assert!(name1.starts_with(prefix), "Should use the correct prefix"); - assert!(name2.starts_with(prefix), "Should use the correct prefix"); - } - - #[test] - fn should_generate_correct_docker_arguments_for_migration() { - let old_volume = "old-postgres-data"; - let new_volume = "new-postgres-data"; - let temp_name = "temp-migrate-123"; - - // Create format strings with longer-lived variables - let old_volume_mount = format!("{}:/old_data", old_volume); - let new_volume_mount = format!("{}:/new_data", new_volume); - - let expected_args = vec![ - "create", - "--name", - temp_name, - "-v", - &old_volume_mount, - "-v", - &new_volume_mount, - "alpine:latest", - "sh", - "-c", - "cp -a /old_data/. 
/new_data/ 2>/dev/null || true", - ]; - - // Verify that the arguments contain the necessary elements - assert!(expected_args.contains(&"create")); - assert!(expected_args.contains(&"--name")); - assert!(expected_args.contains(&temp_name)); - assert!(expected_args.contains(&"-v")); - assert!(expected_args.contains(&"alpine:latest")); - assert!(expected_args.contains(&&old_volume_mount.as_str())); - assert!(expected_args.contains(&&new_volume_mount.as_str())); - } - } - - /// Tests for volume removal - mod volume_removal_logic { - #[test] - fn should_correctly_determine_when_to_remove_volume() { - // Arrange - let scenarios = vec![ - // (stored_persist_data, should_remove_volume, description) - (true, true, "Should remove volume from persistent container"), - ( - false, - false, - "Should not remove volume from non-persistent container", - ), - ]; - - for (stored_persist_data, expected_removal, description) in scenarios { - // Act - let should_remove = stored_persist_data; - - // Assert - assert_eq!(should_remove, expected_removal, "{}", description); - } - } - - #[test] - fn should_generate_correct_volume_names_for_removal() { - // Arrange - let test_cases = vec![ - ("mysql-production", "mysql-production-data"), - ("redis-cache", "redis-cache-data"), - ("postgres_dev", "postgres_dev-data"), - ("mongo-db-01", "mongo-db-01-data"), - ("test123", "test123-data"), - ]; - - for (container_name, expected_volume_name) in test_cases { - // Act - let volume_name = format!("{}-data", container_name); - - // Assert - assert_eq!( - volume_name, expected_volume_name, - "Volumen para contenedor '{}' debe ser '{}'", - container_name, expected_volume_name - ); - } - } - - #[test] - fn should_validate_complete_removal_flow() { - // Arrange - Simulate container information - struct MockContainer { - name: String, - stored_persist_data: bool, - } - - let containers = vec![ - MockContainer { - name: "postgres-app".to_string(), - stored_persist_data: true, - }, - MockContainer { - name: "redis-temp".to_string(), - stored_persist_data: false, - }, - ]; - - for container in containers { - // Act - Simulate removal logic - let should_remove_volume = container.stored_persist_data; - let volume_name = if should_remove_volume { - Some(format!("{}-data", container.name)) - } else { - None - }; - - // Assert - if container.stored_persist_data { - assert!( - should_remove_volume, - "Container {} should have its volume removed", - container.name - ); - assert!( - volume_name.is_some(), - "Should generate volume name for {}", - container.name - ); - assert_eq!(volume_name.unwrap(), format!("{}-data", container.name)); - } else { - assert!( - !should_remove_volume, - "Container {} should not have volume removed", - container.name - ); - assert!( - volume_name.is_none(), - "Should not generate volume name for {}", - container.name - ); - } - } - } - } -} diff --git a/src-tauri/tests/unit_services.rs b/src-tauri/tests/unit_services.rs deleted file mode 100644 index c47eb49..0000000 --- a/src-tauri/tests/unit_services.rs +++ /dev/null @@ -1,13 +0,0 @@ -/// Unit tests for services -/// -/// This file includes all unit tests related to the services -/// of the Docker DB Manager project. 
- -#[path = "unit/services/docker_service_test.rs"] -mod docker_service_test; - -#[path = "unit/services/docker_command_builder_test.rs"] -mod docker_command_builder_test; - -#[path = "unit/services/volume_migration_test.rs"] -mod volume_migration_test; diff --git a/src-tauri/tests/unit_tests.rs b/src-tauri/tests/unit_tests.rs new file mode 100644 index 0000000..d3b15ba --- /dev/null +++ b/src-tauri/tests/unit_tests.rs @@ -0,0 +1,14 @@ +/// Unit tests for Docker DB Manager +/// +/// These tests verify individual components and functions in isolation, +/// without requiring Docker to be running. +/// +/// Tests are organized by component: +/// - docker_service_test: Tests for DockerService methods +/// - generic_commands_test: Tests for generic command structures (DockerRunRequest, DockerRunArgs, etc.) + +#[path = "unit/docker_service_test.rs"] +mod docker_service_test; + +#[path = "unit/generic_commands_test.rs"] +mod generic_commands_test; diff --git a/src/features/app/api/__tests__/app.api.test.ts b/src/features/app/api/__tests__/app.api.test.ts deleted file mode 100644 index 8553a0c..0000000 --- a/src/features/app/api/__tests__/app.api.test.ts +++ /dev/null @@ -1,35 +0,0 @@ -import { beforeEach, describe, expect, it, vi } from 'vitest'; -import { invoke } from '../../../../core/tauri/invoke'; -import { appApi } from '../app.api'; - -// Mock invoke -vi.mock('../../../../core/tauri/invoke', () => ({ - invoke: vi.fn(), -})); - -describe('appApi', () => { - beforeEach(() => { - vi.clearAllMocks(); - }); - - describe('getVersion', () => { - it('should fetch app version', async () => { - const mockVersion = '1.2.3'; - vi.mocked(invoke).mockResolvedValue(mockVersion); - - const result = await appApi.getVersion(); - - expect(invoke).toHaveBeenCalledWith('get_app_version'); - expect(result).toBe(mockVersion); - }); - - it('should handle errors from invoke', async () => { - const error = new Error('Failed to get version'); - vi.mocked(invoke).mockRejectedValue(error); - - await expect(appApi.getVersion()).rejects.toThrow( - 'Failed to get version', - ); - }); - }); -}); diff --git a/src/features/app/use-app.ts b/src/features/app/use-app.ts index a5080bb..27c3e97 100644 --- a/src/features/app/use-app.ts +++ b/src/features/app/use-app.ts @@ -1,10 +1,6 @@ import { useCallback, useEffect } from 'react'; -import type { - CreateContainerRequest, - UpdateContainerRequest, -} from '../../shared/types/container'; -import { useContainerActions } from '../containers/hooks/use-container-actions'; -import { useContainerList } from '../containers/hooks/use-container-list'; +import { useDatabaseActions } from '../databases/hooks/use-database-actions'; +import { useDatabaseList } from '../databases/hooks/use-database-list'; import { useDockerStatus } from '../docker/hooks/use-docker-status'; /** @@ -14,9 +10,9 @@ import { useDockerStatus } from '../docker/hooks/use-docker-status'; * This hook replaces the old useApp, but with clearer responsibilities */ export function useApp() { - // Container state and actions - const containerList = useContainerList(); - const containerActions = useContainerActions(); + // Database container state and actions + const containerList = useDatabaseList(); + const containerActions = useDatabaseActions(); // Docker status const docker = useDockerStatus(); @@ -52,30 +48,6 @@ export function useApp() { [containerList, containerActions], ); - /** - * Create container and update list - */ - const createContainer = useCallback( - async (request: CreateContainerRequest) => { - const 
newContainer = await containerActions.create(request); - containerList.addLocal(newContainer); - return newContainer; - }, - [containerActions, containerList], - ); - - /** - * Update container and synchronize list - */ - const updateContainer = useCallback( - async (request: UpdateContainerRequest) => { - const updatedContainer = await containerActions.update(request); - containerList.updateLocal(updatedContainer); - return updatedContainer; - }, - [containerActions, containerList], - ); - /** * Remove container and update list */ @@ -88,13 +60,11 @@ export function useApp() { ); return { - // Container state + // Database container state containers: containerList.containers, containersLoading: containerList.loading, - // Container actions - createContainer, - updateContainer, + // Database container actions removeContainer, startContainer: containerActions.start, stopContainer: containerActions.stop, diff --git a/src/features/containers/api/__tests__/containers.api.test.ts b/src/features/containers/api/__tests__/containers.api.test.ts deleted file mode 100644 index 0d37ee1..0000000 --- a/src/features/containers/api/__tests__/containers.api.test.ts +++ /dev/null @@ -1,150 +0,0 @@ -import { beforeEach, describe, expect, it, vi } from 'vitest'; -import { invoke } from '../../../../core/tauri/invoke'; -import { containersApi } from '../containers.api'; - -// Mock del invoke -vi.mock('../../../../core/tauri/invoke', () => ({ - invoke: vi.fn(), -})); - -describe('containersApi', () => { - beforeEach(() => { - vi.clearAllMocks(); - }); - - describe('getAll', () => { - it('should fetch and transform all containers', async () => { - const mockResponse = [ - { - id: '1', - name: 'postgres-1', - db_type: 'PostgreSQL', - version: '15', - status: 'running', - port: 5432, - created_at: '2024-01-01T00:00:00Z', - max_connections: 100, - persist_data: true, - enable_auth: true, - }, - ]; - - vi.mocked(invoke).mockResolvedValue(mockResponse); - - const result = await containersApi.getAll(); - - expect(invoke).toHaveBeenCalledWith('get_all_databases'); - expect(result).toHaveLength(1); - expect(result[0].dbType).toBe('PostgreSQL'); // Transformed to camelCase - }); - }); - - describe('create', () => { - it('should create a container and transform response', async () => { - const request = { - name: 'test-db', - dbType: 'PostgreSQL' as const, - version: '15', - port: 5432, - password: 'password', - username: 'postgres', - persistData: true, - enableAuth: true, - }; - - const mockResponse = { - id: '1', - name: 'test-db', - db_type: 'PostgreSQL', - version: '15', - status: 'creating', - port: 5432, - created_at: '2024-01-01T00:00:00Z', - max_connections: 100, - persist_data: true, - enable_auth: true, - }; - - vi.mocked(invoke).mockResolvedValue(mockResponse); - - const result = await containersApi.create(request); - - expect(invoke).toHaveBeenCalledWith('create_database_container', { - request: expect.objectContaining({ - name: 'test-db', - db_type: 'PostgreSQL', // Transformed to snake_case - version: '15', - port: 5432, - password: 'password', - username: 'postgres', - persist_data: true, - enable_auth: true, - }), - }); - expect(result.dbType).toBe('PostgreSQL'); - }); - }); - - describe('start', () => { - it('should start a container', async () => { - vi.mocked(invoke).mockResolvedValue(undefined); - - await containersApi.start('container-id'); - - expect(invoke).toHaveBeenCalledWith('start_container', { - containerId: 'container-id', - }); - }); - }); - - describe('stop', () => { - it('should stop a 
container', async () => { - vi.mocked(invoke).mockResolvedValue(undefined); - - await containersApi.stop('container-id'); - - expect(invoke).toHaveBeenCalledWith('stop_container', { - containerId: 'container-id', - }); - }); - }); - - describe('remove', () => { - it('should remove a container', async () => { - vi.mocked(invoke).mockResolvedValue(undefined); - - await containersApi.remove('container-id'); - - expect(invoke).toHaveBeenCalledWith('remove_container', { - containerId: 'container-id', - }); - }); - }); - - describe('sync', () => { - it('should sync containers with Docker', async () => { - const mockResponse = [ - { - id: '1', - name: 'synced-container', - db_type: 'MySQL', - version: '8.0', - status: 'running', - port: 3306, - created_at: '2024-01-01T00:00:00Z', - max_connections: 100, - persist_data: true, - enable_auth: true, - }, - ]; - - vi.mocked(invoke).mockResolvedValue(mockResponse); - - const result = await containersApi.sync(); - - expect(invoke).toHaveBeenCalledWith('sync_containers_with_docker'); - expect(result).toHaveLength(1); - expect(result[0].dbType).toBe('MySQL'); - }); - }); -}); diff --git a/src/features/containers/api/containers.api.ts b/src/features/containers/api/containers.api.ts deleted file mode 100644 index d6ff90e..0000000 --- a/src/features/containers/api/containers.api.ts +++ /dev/null @@ -1,89 +0,0 @@ -import { invoke } from '../../../core/tauri/invoke'; -import type { - Container, - CreateContainerRequest, - UpdateContainerRequest, -} from '../../../shared/types/container'; -import { - containerFromJSON, - createRequestToTauri, - updateRequestToTauri, -} from '../../../shared/utils/container'; - -/** - * API Layer - All Tauri calls for containers - * Pure layer without business logic, only data transformations - */ -export const containersApi = { - /** - * Get all containers - */ - async getAll(): Promise { - const result = await invoke('get_all_databases'); - return result.map(containerFromJSON); - }, - - /** - * Get a container by ID - */ - async getById(id: string): Promise { - // For now, get all and filter (no individual get_database command) - const all = await this.getAll(); - const container = all.find((c) => c.id === id); - if (!container) { - throw new Error(`Container with id ${id} not found`); - } - return container; - }, - - /** - * Create a new container - */ - async create(request: CreateContainerRequest): Promise { - const tauriRequest = createRequestToTauri(request); - const result = await invoke('create_database_container', { - request: tauriRequest, - }); - return containerFromJSON(result); - }, - - /** - * Update an existing container - */ - async update(request: UpdateContainerRequest): Promise { - const tauriRequest = updateRequestToTauri(request); - const result = await invoke('update_container_config', { - request: tauriRequest, - }); - return containerFromJSON(result); - }, - - /** - * Start a container - */ - async start(id: string): Promise { - await invoke('start_container', { containerId: id }); - }, - - /** - * Stop a container - */ - async stop(id: string): Promise { - await invoke('stop_container', { containerId: id }); - }, - - /** - * Remove a container - */ - async remove(id: string): Promise { - await invoke('remove_container', { containerId: id }); - }, - - /** - * Synchronize containers with Docker - */ - async sync(): Promise { - const result = await invoke('sync_containers_with_docker'); - return result.map(containerFromJSON); - }, -}; diff --git a/src/features/containers/hooks/use-container-actions.ts 
b/src/features/containers/hooks/use-container-actions.ts deleted file mode 100644 index cf24402..0000000 --- a/src/features/containers/hooks/use-container-actions.ts +++ /dev/null @@ -1,110 +0,0 @@ -import { useCallback } from 'react'; -import { toast } from 'sonner'; -import { handleContainerError } from '../../../core/errors/error-handler'; -import type { - Container, - CreateContainerRequest, - UpdateContainerRequest, -} from '../../../shared/types/container'; -import { containersApi } from '../api/containers.api'; - -/** - * Hook for container actions (CRUD) - * Responsibility: Individual operations without global state management - */ -export function useContainerActions() { - /** - * Create a new container - */ - const create = useCallback( - async (request: CreateContainerRequest): Promise => { - try { - const container = await containersApi.create(request); - toast.success('Database created', { - description: `${container.name} has been created successfully`, - }); - return container; - } catch (error) { - handleContainerError(error); - } - }, - [], - ); - - /** - * Update an existing container - */ - const update = useCallback( - async (request: UpdateContainerRequest): Promise => { - try { - const container = await containersApi.update(request); - toast.success('Database updated', { - description: `${container.name} has been updated`, - }); - return container; - } catch (error) { - handleContainerError(error); - } - }, - [], - ); - - /** - * Start a container - */ - const start = useCallback(async (containerId: string): Promise => { - try { - await containersApi.start(containerId); - toast.success('Database started'); - } catch (error) { - handleContainerError(error); - } - }, []); - - /** - * Stop a container - */ - const stop = useCallback(async (containerId: string): Promise => { - try { - await containersApi.stop(containerId); - toast.success('Database stopped'); - } catch (error) { - handleContainerError(error); - } - }, []); - - /** - * Remove a container - */ - const remove = useCallback(async (containerId: string): Promise => { - try { - await containersApi.remove(containerId); - toast.success('Database removed'); - } catch (error) { - handleContainerError(error); - } - }, []); - - /** - * Get a container by ID - */ - const getById = useCallback( - async (containerId: string): Promise => { - try { - return await containersApi.getById(containerId); - } catch (error) { - handleContainerError(error); - } - }, - [], - ); - - return { - create, - update, - start, - stop, - remove, - getById, - }; -} diff --git a/src/features/containers/services/__tests__/container.service.test.ts b/src/features/containers/services/__tests__/container.service.test.ts deleted file mode 100644 index 8aeadac..0000000 --- a/src/features/containers/services/__tests__/container.service.test.ts +++ /dev/null @@ -1,344 +0,0 @@ -import { describe, expect, it } from 'vitest'; -import type { Container } from '../../../../shared/types/container'; -import { ContainerService } from '../container.service'; - -describe('ContainerService', () => { - const mockContainers: Container[] = [ - { - id: '1', - name: 'postgres-1', - dbType: 'PostgreSQL', - version: '15', - status: 'running', - port: 5432, - createdAt: new Date(), - maxConnections: 100, - persistData: true, - enableAuth: true, - }, - { - id: '2', - name: 'mysql-1', - dbType: 'MySQL', - version: '8.0', - status: 'stopped', - port: 3306, - createdAt: new Date(), - maxConnections: 100, - persistData: true, - enableAuth: true, - }, - ]; - - 
describe('isPortAvailable', () => { - it('should return false when port is in use', () => { - const result = ContainerService.isPortAvailable(5432, mockContainers); - expect(result).toBe(false); - }); - - it('should return true when port is available', () => { - const result = ContainerService.isPortAvailable(9999, mockContainers); - expect(result).toBe(true); - }); - - it('should exclude container when checking availability', () => { - const result = ContainerService.isPortAvailable( - 5432, - mockContainers, - '1', - ); - expect(result).toBe(true); - }); - }); - - describe('isNameAvailable', () => { - it('should return false when name is in use', () => { - const result = ContainerService.isNameAvailable( - 'postgres-1', - mockContainers, - ); - expect(result).toBe(false); - }); - - it('should return true when name is available', () => { - const result = ContainerService.isNameAvailable( - 'redis-1', - mockContainers, - ); - expect(result).toBe(true); - }); - }); - - describe('generateUniqueName', () => { - it('should generate unique name with counter when name exists', () => { - // Add a container that occupies the base name - const containersWithPostgres = [ - ...mockContainers, - { - id: '3', - name: 'postgresql', - dbType: 'PostgreSQL' as const, - version: '15', - status: 'running' as const, - port: 5433, - createdAt: new Date(), - maxConnections: 100, - persistData: true, - enableAuth: true, - }, - ]; - const result = ContainerService.generateUniqueName( - 'postgresql', - containersWithPostgres, - ); - expect(result).toBe('postgresql-1'); - }); - - it('should return base name if available', () => { - const result = ContainerService.generateUniqueName( - 'Redis', - mockContainers, - ); - expect(result).toBe('redis'); - }); - }); - - describe('getDefaultPort', () => { - it('should return correct default port for PostgreSQL', () => { - expect(ContainerService.getDefaultPort('PostgreSQL')).toBe(5432); - }); - - it('should return correct default port for MySQL', () => { - expect(ContainerService.getDefaultPort('MySQL')).toBe(3306); - }); - - it('should return correct default port for Redis', () => { - expect(ContainerService.getDefaultPort('Redis')).toBe(6379); - }); - - it('should return correct default port for MongoDB', () => { - expect(ContainerService.getDefaultPort('MongoDB')).toBe(27017); - }); - - it('should return fallback port for unknown database', () => { - expect(ContainerService.getDefaultPort('Unknown')).toBe(5432); - }); - }); - - describe('findAvailablePort', () => { - it('should find next available port when default is taken', () => { - const result = ContainerService.findAvailablePort( - 'PostgreSQL', - mockContainers, - ); - expect(result).toBe(5433); // 5432 is taken - }); - - it('should return default port when available', () => { - const result = ContainerService.findAvailablePort( - 'Redis', - mockContainers, - ); - expect(result).toBe(6379); // Redis default, not taken - }); - }); - - describe('generateSecurePassword', () => { - it('should generate password of specified length', () => { - const password = ContainerService.generateSecurePassword(16); - expect(password).toHaveLength(16); - }); - - it('should generate different passwords', () => { - const pass1 = ContainerService.generateSecurePassword(16); - const pass2 = ContainerService.generateSecurePassword(16); - expect(pass1).not.toBe(pass2); - }); - - it('should contain valid characters', () => { - const password = ContainerService.generateSecurePassword(20); - const validChars = /^[a-zA-Z0-9!@#$%^&*]+$/; - 
expect(password).toMatch(validChars); - }); - }); - - describe('getConnectionString', () => { - it('should generate correct PostgreSQL connection string', () => { - const container: Container = { - ...mockContainers[0], - username: 'postgres', - password: 'pass123', - databaseName: 'mydb', - }; - - const result = ContainerService.getConnectionString(container); - expect(result).toBe('postgresql://postgres:pass123@localhost:5432/mydb'); - }); - - it('should generate correct MySQL connection string', () => { - const container: Container = { - ...mockContainers[1], - username: 'root', - password: 'pass123', - databaseName: 'mydb', - }; - - const result = ContainerService.getConnectionString(container); - expect(result).toBe('mysql://root:pass123@localhost:3306/mydb'); - }); - }); - - describe('validateCreateRequest', () => { - it('should validate correct request', () => { - const request = { - name: 'new-container', - dbType: 'PostgreSQL' as const, - version: '15', - port: 9999, - password: 'password', - username: 'user', - persistData: true, - enableAuth: true, - }; - - const result = ContainerService.validateCreateRequest( - request, - mockContainers, - ); - expect(result.valid).toBe(true); - expect(result.errors).toHaveLength(0); - }); - - it('should fail when name is empty', () => { - const request = { - name: '', - dbType: 'PostgreSQL' as const, - version: '15', - port: 9999, - password: 'password', - username: 'user', - persistData: true, - enableAuth: true, - }; - - const result = ContainerService.validateCreateRequest( - request, - mockContainers, - ); - expect(result.valid).toBe(false); - expect(result.errors).toContain('Name is required'); - }); - - it('should fail when name is already in use', () => { - const request = { - name: 'postgres-1', - dbType: 'PostgreSQL' as const, - version: '15', - port: 9999, - password: 'password', - username: 'user', - persistData: true, - enableAuth: true, - }; - - const result = ContainerService.validateCreateRequest( - request, - mockContainers, - ); - expect(result.valid).toBe(false); - expect(result.errors).toContain( - 'A database with that name already exists', - ); - }); - - it('should fail when port is already in use', () => { - const request = { - name: 'new-container', - dbType: 'PostgreSQL' as const, - version: '15', - port: 5432, - password: 'password', - username: 'user', - persistData: true, - enableAuth: true, - }; - - const result = ContainerService.validateCreateRequest( - request, - mockContainers, - ); - expect(result.valid).toBe(false); - expect(result.errors).toContain('Port is already in use'); - }); - }); - - describe('filterContainers', () => { - it('should filter by name', () => { - const result = ContainerService.filterContainers( - mockContainers, - 'postgres', - ); - expect(result).toHaveLength(1); - expect(result[0].name).toBe('postgres-1'); - }); - - it('should filter by db type', () => { - const result = ContainerService.filterContainers(mockContainers, 'mysql'); - expect(result).toHaveLength(1); - expect(result[0].dbType).toBe('MySQL'); - }); - - it('should return all when query is empty', () => { - const result = ContainerService.filterContainers(mockContainers, ''); - expect(result).toHaveLength(2); - }); - - it('should be case insensitive', () => { - const result = ContainerService.filterContainers( - mockContainers, - 'POSTGRES', - ); - expect(result).toHaveLength(1); - }); - }); - - describe('sortContainers', () => { - it('should sort by name ascending', () => { - const result = ContainerService.sortContainers( - 
mockContainers, - 'name', - 'asc', - ); - expect(result[0].name).toBe('mysql-1'); - expect(result[1].name).toBe('postgres-1'); - }); - - it('should sort by name descending', () => { - const result = ContainerService.sortContainers( - mockContainers, - 'name', - 'desc', - ); - expect(result[0].name).toBe('postgres-1'); - expect(result[1].name).toBe('mysql-1'); - }); - - it('should sort by type', () => { - const result = ContainerService.sortContainers( - mockContainers, - 'type', - 'asc', - ); - expect(result[0].dbType).toBe('MySQL'); - expect(result[1].dbType).toBe('PostgreSQL'); - }); - }); - - describe('countByStatus', () => { - it('should count containers by status', () => { - const result = ContainerService.countByStatus(mockContainers); - expect(result.running).toBe(1); - expect(result.stopped).toBe(1); - }); - }); -}); diff --git a/src/features/containers/services/container.service.ts b/src/features/containers/services/container.service.ts deleted file mode 100644 index 8de3398..0000000 --- a/src/features/containers/services/container.service.ts +++ /dev/null @@ -1,227 +0,0 @@ -import type { - Container, - CreateContainerRequest, -} from '../../../shared/types/container'; - -/** - * Container Service - Pure business logic (without React, 100% testable) - */ -export class ContainerService { - /** - * Validate if a port is available - */ - static isPortAvailable( - port: number, - existingContainers: Container[], - excludeId?: string, - ): boolean { - return !existingContainers.some( - (container) => container.port === port && container.id !== excludeId, - ); - } - - /** - * Validate if a name is available - */ - static isNameAvailable( - name: string, - existingContainers: Container[], - excludeId?: string, - ): boolean { - return !existingContainers.some( - (container) => container.name === name && container.id !== excludeId, - ); - } - - /** - * Generate a unique name for a container - */ - static generateUniqueName( - dbType: string, - existingContainers: Container[], - ): string { - const baseName = dbType.toLowerCase(); - let counter = 1; - let name = baseName; - - while (!ContainerService.isNameAvailable(name, existingContainers)) { - name = `${baseName}-${counter}`; - counter++; - } - - return name; - } - - /** - * Get the default port according to database type - */ - static getDefaultPort(dbType: string): number { - const defaultPorts: Record = { - PostgreSQL: 5432, - MySQL: 3306, - Redis: 6379, - MongoDB: 27017, - }; - - return defaultPorts[dbType] || 5432; - } - - /** - * Find an available port based on the default port - */ - static findAvailablePort( - dbType: string, - existingContainers: Container[], - ): number { - let port = ContainerService.getDefaultPort(dbType); - - while (!ContainerService.isPortAvailable(port, existingContainers)) { - port++; - } - - return port; - } - - /** - * Generate a secure random password - */ - static generateSecurePassword(length = 16): string { - const charset = - 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@#$%^&*'; - let password = ''; - const array = new Uint32Array(length); - crypto.getRandomValues(array); - - for (let i = 0; i < length; i++) { - password += charset[array[i] % charset.length]; - } - - return password; - } - - /** - * Generate connection string for a container - */ - static getConnectionString(container: Container): string { - const { dbType, username, password, port, databaseName } = container; - - switch (dbType) { - case 'PostgreSQL': - return 
`postgresql://${username}:${password}@localhost:${port}/${databaseName}`; - case 'MySQL': - return `mysql://${username}:${password}@localhost:${port}/${databaseName}`; - case 'MongoDB': - return `mongodb://${username}:${password}@localhost:${port}/${databaseName}`; - case 'Redis': - return `redis://${password ? `:${password}@` : ''}localhost:${port}`; - default: - return ''; - } - } - - /** - * Validate a container creation request - */ - static validateCreateRequest( - request: CreateContainerRequest, - existingContainers: Container[], - ): { valid: boolean; errors: string[] } { - const errors: string[] = []; - - // Validate name - if (!request.name || request.name.trim() === '') { - errors.push('Name is required'); - } else if ( - !ContainerService.isNameAvailable(request.name, existingContainers) - ) { - errors.push('A database with that name already exists'); - } - - // Validate port - if (!request.port || request.port < 1 || request.port > 65535) { - errors.push('Port must be between 1 and 65535'); - } else if ( - !ContainerService.isPortAvailable(request.port, existingContainers) - ) { - errors.push('Port is already in use'); - } - - // Validate credentials (except for Redis which is optional) - if (request.dbType !== 'Redis') { - if (!request.username) { - errors.push('Username is required'); - } - if (!request.password) { - errors.push('Password is required'); - } - } - - return { - valid: errors.length === 0, - errors, - }; - } - - /** - * Filter containers by search query - */ - static filterContainers(containers: Container[], query: string): Container[] { - if (!query.trim()) { - return containers; - } - - const lowerQuery = query.toLowerCase(); - return containers.filter( - (container) => - container.name.toLowerCase().includes(lowerQuery) || - container.dbType.toLowerCase().includes(lowerQuery) || - container.status.toLowerCase().includes(lowerQuery), - ); - } - - /** - * Sort containers by criteria - */ - static sortContainers( - containers: Container[], - sortBy: 'name' | 'type' | 'status' | 'createdAt', - order: 'asc' | 'desc' = 'asc', - ): Container[] { - const sorted = [...containers].sort((a, b) => { - let comparison = 0; - - switch (sortBy) { - case 'name': - comparison = a.name.localeCompare(b.name); - break; - case 'type': - comparison = a.dbType.localeCompare(b.dbType); - break; - case 'status': - comparison = a.status.localeCompare(b.status); - break; - case 'createdAt': - comparison = - new Date(a.createdAt).getTime() - new Date(b.createdAt).getTime(); - break; - } - - return order === 'asc' ? 
comparison : -comparison; - }); - - return sorted; - } - - /** - * Count containers by status - */ - static countByStatus(containers: Container[]): Record { - return containers.reduce( - (acc, container) => { - acc[container.status] = (acc[container.status] || 0) + 1; - return acc; - }, - {} as Record, - ); - } -} diff --git a/src/features/databases/api/databases.api.ts b/src/features/databases/api/databases.api.ts new file mode 100644 index 0000000..fdc368e --- /dev/null +++ b/src/features/databases/api/databases.api.ts @@ -0,0 +1,104 @@ +import { invoke } from '@/core/tauri/invoke'; +import type { Container } from '@/shared/types/container'; +import type { DockerRunRequest } from '../types/docker.types'; + +const containerFromJSON = (data: any): Container => ({ + id: data.id, + name: data.name, + dbType: data.db_type, + version: data.version, + status: data.status, + port: data.port, + createdAt: new Date(data.created_at), + maxConnections: data.max_connections, + containerId: data.container_id, + username: data.stored_username, + password: data.stored_password, + databaseName: data.stored_database_name, + persistData: data.stored_persist_data, + enableAuth: data.stored_enable_auth, +}); + +/** + * Unified Databases API + * Contains all database/container operations: + * - CRUD operations using provider-based system + * - Container lifecycle (start, stop, remove) + * - Query operations (getAll, getById, sync) + */ +export const databasesApi = { + /** + * Create a new database container from generic Docker run request + * Uses provider-generated Docker args + */ + async create(request: DockerRunRequest): Promise { + const result = await invoke('create_container_from_docker_args', { + request, + }); + return containerFromJSON(result); + }, + + /** + * Update an existing database container from generic Docker run request + * Uses provider-generated Docker args + */ + async update( + containerId: string, + request: DockerRunRequest, + ): Promise { + const result = await invoke('update_container_from_docker_args', { + containerId, + request, + }); + return containerFromJSON(result); + }, + + /** + * Get all database containers + */ + async getAll(): Promise { + const result = await invoke('get_all_databases'); + return result.map(containerFromJSON); + }, + + /** + * Get a database container by ID + */ + async getById(id: string): Promise { + const all = await this.getAll(); + const container = all.find((c) => c.id === id); + if (!container) { + throw new Error(`Database container with id ${id} not found`); + } + return container; + }, + + /** + * Start a database container + */ + async start(id: string): Promise { + await invoke('start_container', { containerId: id }); + }, + + /** + * Stop a database container + */ + async stop(id: string): Promise { + await invoke('stop_container', { containerId: id }); + }, + + /** + * Remove a database container + */ + async remove(id: string): Promise { + await invoke('remove_container', { containerId: id }); + }, + + /** + * Synchronize database containers with Docker + */ + async sync(): Promise { + const result = await invoke('sync_containers_with_docker'); + return result.map(containerFromJSON); + }, +}; diff --git a/src/features/databases/components/dynamic-form-field.tsx b/src/features/databases/components/dynamic-form-field.tsx new file mode 100644 index 0000000..3a8ae4b --- /dev/null +++ b/src/features/databases/components/dynamic-form-field.tsx @@ -0,0 +1,186 @@ +import { Controller, UseFormReturn } from 'react-hook-form'; +import { Checkbox } 
from '@/shared/components/ui/checkbox'; +import { Input } from '@/shared/components/ui/input'; +import { Label } from '@/shared/components/ui/label'; +import { + Select, + SelectContent, + SelectItem, + SelectTrigger, + SelectValue, +} from '@/shared/components/ui/select'; +import type { FormField } from '../types/form.types'; + +interface Props { + form: UseFormReturn; + field: FormField; + fieldPrefix?: string; +} + +/** + * Dynamic Form Field Component + * Renders different input types based on field configuration + */ +export function DynamicFormField({ form, field, fieldPrefix = '' }: Props) { + const { + control, + formState: { errors }, + } = form; + + const fullFieldName = fieldPrefix + field.name; + + // Access nested error correctly + // For "containerConfiguration.password", split and access nested + const getNestedError = (errors: any, path: string) => { + const parts = path.split('.'); + let current = errors; + for (const part of parts) { + if (!current) return undefined; + current = current[part]; + } + return current; + }; + + const error = getNestedError(errors, fullFieldName); + + return ( +
+ {field.type !== 'checkbox' && ( + + )} + + { + if (field.type === 'text' || field.type === 'password') { + return ( + + ); + } + + if (field.type === 'number') { + return ( + + controllerField.onChange(Number(e.target.value)) + } + /> + ); + } + + if (field.type === 'select') { + return ( + + ); + } + + if (field.type === 'checkbox') { + return ( +
+ + +
+ ); + } + + return
<p>Unsupported field type</p>
; + }} + /> + + {field.helpText && !error && ( +

<p>{field.helpText}</p>

+ )} + + {error && ( +

<p>{error.message as string}</p>

+ )} +
+ ); +} diff --git a/src/features/databases/components/dynamic-form-section.tsx b/src/features/databases/components/dynamic-form-section.tsx new file mode 100644 index 0000000..e1dfe15 --- /dev/null +++ b/src/features/databases/components/dynamic-form-section.tsx @@ -0,0 +1,65 @@ +import { UseFormReturn } from 'react-hook-form'; +import type { FieldGroup, FormField } from '../types/form.types'; +import { DynamicFormField } from './dynamic-form-field'; + +interface SectionProps { + form: UseFormReturn; + fields: FormField[]; + fieldPrefix?: string; +} + +/** + * Dynamic Form Section + * Renders a list of fields + */ +export function DynamicFormSection({ + form, + fields, + fieldPrefix = '', +}: SectionProps) { + return ( +
+ {fields.map((field) => ( + <DynamicFormField key={field.name} form={form} field={field} fieldPrefix={fieldPrefix} /> + ))} +
+ ); +} + +interface GroupsProps { + form: UseFormReturn; + groups: FieldGroup[]; +} + +/** + * Dynamic Field Groups + * Renders grouped fields with labels and descriptions + */ +export function DynamicFieldGroups({ form, groups }: GroupsProps) { + return ( +
+ {groups.map((group, index) => ( +
+
+

<h3>{group.label}</h3>

+ {group.description && ( +

+ <p>{group.description}</p> +

+ )} +
+ +
+ ))} +
+ ); +} diff --git a/src/features/databases/hooks/use-database-actions.ts b/src/features/databases/hooks/use-database-actions.ts new file mode 100644 index 0000000..b109723 --- /dev/null +++ b/src/features/databases/hooks/use-database-actions.ts @@ -0,0 +1,68 @@ +import { useCallback } from 'react'; +import { toast } from 'sonner'; +import { handleContainerError } from '@/core/errors/error-handler'; +import type { Container } from '@/shared/types/container'; +import { databasesApi } from '../api/databases.api'; + +/** + * Hook for database actions (start, stop, remove, getById) + * Responsibility: Individual operations without global state management + */ +export function useDatabaseActions() { + /** + * Start a database container + */ + const start = useCallback(async (containerId: string): Promise => { + try { + await databasesApi.start(containerId); + toast.success('Database started'); + } catch (error) { + handleContainerError(error); + } + }, []); + + /** + * Stop a database container + */ + const stop = useCallback(async (containerId: string): Promise => { + try { + await databasesApi.stop(containerId); + toast.success('Database stopped'); + } catch (error) { + handleContainerError(error); + } + }, []); + + /** + * Remove a database container + */ + const remove = useCallback(async (containerId: string): Promise => { + try { + await databasesApi.remove(containerId); + toast.success('Database removed'); + } catch (error) { + handleContainerError(error); + } + }, []); + + /** + * Get a database container by ID + */ + const getById = useCallback( + async (containerId: string): Promise => { + try { + return await databasesApi.getById(containerId); + } catch (error) { + handleContainerError(error); + } + }, + [], + ); + + return { + start, + stop, + remove, + getById, + }; +} diff --git a/src/features/containers/hooks/use-container-list.ts b/src/features/databases/hooks/use-database-list.ts similarity index 81% rename from src/features/containers/hooks/use-container-list.ts rename to src/features/databases/hooks/use-database-list.ts index daa7ce9..43d0da2 100644 --- a/src/features/containers/hooks/use-container-list.ts +++ b/src/features/databases/hooks/use-database-list.ts @@ -1,24 +1,24 @@ import { useCallback, useEffect, useRef, useState } from 'react'; -import { handleContainerError } from '../../../core/errors/error-handler'; -import type { Container } from '../../../shared/types/container'; -import { containersApi } from '../api/containers.api'; +import { handleContainerError } from '@/core/errors/error-handler'; +import type { Container } from '@/shared/types/container'; +import { databasesApi } from '../api/databases.api'; /** - * Hook to manage the list of containers + * Hook to manage the list of database containers * Responsibility: State and periodic synchronization */ -export function useContainerList() { +export function useDatabaseList() { const [containers, setContainers] = useState([]); const [loading, setLoading] = useState(false); const intervalRef = useRef(null); /** - * Load the complete list of containers + * Load the complete list of database containers */ const load = useCallback(async () => { setLoading(true); try { - const data = await containersApi.getAll(); + const data = await databasesApi.getAll(); setContainers(data); } catch (error) { handleContainerError(error); @@ -28,11 +28,11 @@ export function useContainerList() { }, []); /** - * Synchronize containers with Docker + * Synchronize database containers with Docker */ const sync = useCallback(async () => { 
try { - const data = await containersApi.sync(); + const data = await databasesApi.sync(); setContainers(data); } catch (error) { console.error('Error syncing containers:', error); diff --git a/src/features/databases/providers/mongodb.provider.tsx b/src/features/databases/providers/mongodb.provider.tsx new file mode 100644 index 0000000..dafd6b9 --- /dev/null +++ b/src/features/databases/providers/mongodb.provider.tsx @@ -0,0 +1,291 @@ +import { SiMongodb } from 'react-icons/si'; +import type { Container } from '@/shared/types/container'; +import type { + DatabaseProvider, + FieldsOptions, +} from '../registry/database-provider.interface'; +import type { DockerRunArgs, ValidationResult } from '../types/docker.types'; +import type { FieldGroup, FormField } from '../types/form.types'; + +/** + * MongoDB Database Provider + * Implements all configuration for MongoDB databases + */ +export class MongoDBDatabaseProvider implements DatabaseProvider { + // ==================== Identification ==================== + readonly id = 'MongoDB'; + readonly name = 'MongoDB'; + readonly description = 'Document-oriented NoSQL database'; + readonly icon = ; + readonly color = '#47A248'; + + // ==================== Docker Configuration ==================== + readonly defaultPort = 27017; + readonly containerPort = 27017; + readonly dataPath = '/data/db'; + readonly versions = [ + // MongoDB 8.0 + '8.0.15', + '8.0', + '8', + '8.0-noble', + '8-noble', + // MongoDB 7.0 + '7.0.25', + '7.0', + '7', + '7.0-jammy', + '7-jammy', + // MongoDB 6.0 + '6.0.26', + '6.0', + '6', + '6.0-jammy', + '6-jammy', + ]; + + // ==================== Form Fields ==================== + getBasicFields({ isEditMode = false }: FieldsOptions): FormField[] { + return [ + { + name: 'name', + label: 'Container Name', + type: 'text', + required: true, + placeholder: `my-${this.id.toLowerCase()}-db`, + validation: { + min: 3, + message: 'Container name must be at least 3 characters', + }, + helpText: 'Unique name for this container', + }, + { + name: 'port', + label: 'Port', + type: 'number', + defaultValue: this.defaultPort, + required: true, + placeholder: this.defaultPort.toString(), + validation: { + min: 1024, + max: 65535, + message: 'Port must be between 1024 and 65535', + }, + helpText: `Host port to map to container port ${this.containerPort}`, + }, + { + name: 'version', + label: 'MongoDB Version', + type: 'select', + options: this.versions, + defaultValue: this.versions[0], + required: true, + readonly: isEditMode, + helpText: isEditMode + ? 
'Version cannot be changed after creation' + : 'Select the MongoDB version to install', + }, + ]; + } + + getAuthenticationFields(): FormField[] { + return [ + { + name: 'username', + label: 'Root Username', + type: 'text', + defaultValue: 'admin', + required: true, + placeholder: 'Database admin user', + helpText: 'Username for the MongoDB admin user', + }, + { + name: 'password', + label: 'Root Password', + type: 'password', + required: true, + placeholder: 'Strong password', + validation: { + min: 4, + message: 'Password must be at least 4 characters', + }, + helpText: 'Password for the admin account', + }, + { + name: 'databaseName', + label: 'Initial Database', + type: 'text', + placeholder: 'my_database', + helpText: 'Optional: Create an initial database (defaults to "admin")', + }, + ]; + } + + getAdvancedFields(): FieldGroup[] { + return [ + { + label: 'Authentication', + description: 'Configure MongoDB authentication settings', + fields: [ + { + name: 'mongoSettings.authSource', + label: 'Authentication Database', + type: 'text', + defaultValue: 'admin', + helpText: + 'Database where user credentials are stored (usually "admin")', + }, + ], + }, + { + label: 'Replication & Sharding', + description: 'Configure replication and sharding features', + fields: [ + { + name: 'mongoSettings.enableSharding', + label: 'Enable Sharding', + type: 'checkbox', + defaultValue: false, + helpText: + 'Enable sharding for horizontal scaling (requires replica set)', + }, + { + name: 'mongoSettings.replicaSet', + label: 'Replica Set Name', + type: 'text', + placeholder: 'rs0', + helpText: 'Optional: Name of the replica set (enables replication)', + }, + ], + }, + { + label: 'Storage', + description: 'Configure storage engine and oplog settings', + fields: [ + { + name: 'mongoSettings.storageEngine', + label: 'Storage Engine', + type: 'select', + options: ['wiredTiger', 'inMemory'], + defaultValue: 'wiredTiger', + helpText: + 'Storage engine to use. WiredTiger is the default. 
Note: inMemory requires MongoDB Enterprise Edition.', + }, + { + name: 'mongoSettings.oplogSize', + label: 'Oplog Size (MB)', + type: 'number', + defaultValue: 512, + helpText: + 'Size of the operation log for replication (only for replica sets)', + }, + ], + }, + { + label: 'Security', + description: 'Additional security settings', + fields: [ + { + name: 'mongoSettings.directoryPerDB', + label: 'Directory Per Database', + type: 'checkbox', + defaultValue: false, + helpText: + 'Store each database in its own directory for better organization', + }, + ], + }, + ]; + } + + // ==================== Docker Command Building ==================== + buildDockerArgs(config: any): DockerRunArgs { + const envVars: Record = { + MONGO_INITDB_ROOT_USERNAME: config.username || 'admin', + MONGO_INITDB_ROOT_PASSWORD: config.password, + }; + + // Initial database + if (config.databaseName) { + envVars.MONGO_INITDB_DATABASE = config.databaseName; + } + + // Command arguments for advanced settings + const command: string[] = []; + + // Replica set + if (config.mongoSettings?.replicaSet) { + command.push('--replSet', config.mongoSettings.replicaSet); + } + + // Storage engine (inMemory is Enterprise-only and not available in Community Docker images) + // Only set non-default storage engines if explicitly needed and image supports it + if ( + config.mongoSettings?.storageEngine && + config.mongoSettings.storageEngine !== 'wiredTiger' + ) { + console.warn( + `Note: ${config.mongoSettings.storageEngine} storage engine may require MongoDB Enterprise Edition`, + ); + command.push('--storageEngine', config.mongoSettings.storageEngine); + } + + // Directory per DB + if (config.mongoSettings?.directoryPerDB) { + command.push('--directoryperdb'); + } + + // Oplog size (only for replica sets) + if (config.mongoSettings?.replicaSet && config.mongoSettings?.oplogSize) { + command.push('--oplogSizeMB', config.mongoSettings.oplogSize.toString()); + } + + return { + image: `mongo:${config.version}`, + envVars, + ports: [{ host: config.port, container: this.containerPort }], + volumes: config.persistData + ? 
[{ name: `${config.name}-data`, path: this.dataPath }] + : [], + command, + }; + } + + // ==================== Utilities ==================== + getConnectionString(container: Container): string { + const username = container.username || 'admin'; + const database = container.databaseName || 'admin'; + const authSource = 'admin'; // MongoDB always authenticates against admin + + return `mongodb://${username}:${container.password}@localhost:${container.port}/${database}?authSource=${authSource}`; + } + + validateConfig(config: any): ValidationResult { + const errors: string[] = []; + + if (!config.password || config.password.length < 4) { + errors.push('Password must be at least 4 characters'); + } + + if (!config.version) { + errors.push('MongoDB version is required'); + } + + if (!config.username) { + errors.push('Username is required'); + } + + return { + valid: errors.length === 0, + errors, + }; + } + + getDefaultUsername(): string { + return 'admin'; + } + + requiresAuth(): boolean { + return true; + } +} diff --git a/src/features/databases/providers/mysql.provider.tsx b/src/features/databases/providers/mysql.provider.tsx new file mode 100644 index 0000000..89f41c0 --- /dev/null +++ b/src/features/databases/providers/mysql.provider.tsx @@ -0,0 +1,244 @@ +import { SiMysql } from 'react-icons/si'; +import type { Container } from '@/shared/types/container'; +import type { + DatabaseProvider, + FieldsOptions, +} from '../registry/database-provider.interface'; +import type { DockerRunArgs, ValidationResult } from '../types/docker.types'; +import type { FieldGroup, FormField } from '../types/form.types'; + +/** + * MySQL Database Provider + * Implements all configuration for MySQL databases + */ +export class MySQLDatabaseProvider implements DatabaseProvider { + // ==================== Identification ==================== + readonly id = 'MySQL'; + readonly name = 'MySQL'; + readonly description = 'Popular open-source relational database'; + readonly icon = ; + readonly color = '#4479A1'; + + // ==================== Docker Configuration ==================== + readonly defaultPort = 3306; + readonly containerPort = 3306; + readonly dataPath = '/var/lib/mysql'; + readonly versions = [ + // MySQL 9 (Innovation) + '9.4.0', + '9.4', + '9', + 'innovation', + '9-oraclelinux9', + '9-oracle', + // MySQL 8.4 (LTS) + '8.4.6', + '8.4', + '8', + 'lts', + '8.4-oraclelinux9', + '8.4-oracle', + // MySQL 8.0 + '8.0.43', + '8.0', + '8.0-oraclelinux9', + '8.0-oracle', + '8.0-bookworm', + '8.0-debian', + ]; + + // ==================== Form Fields ==================== + getBasicFields({ isEditMode = false }: FieldsOptions): FormField[] { + return [ + { + name: 'name', + label: 'Container Name', + type: 'text', + required: true, + placeholder: `my-${this.id.toLowerCase()}-db`, + validation: { + min: 3, + message: 'Container name must be at least 3 characters', + }, + helpText: 'Unique name for this container', + }, + { + name: 'port', + label: 'Port', + type: 'number', + defaultValue: this.defaultPort, + required: true, + placeholder: this.defaultPort.toString(), + validation: { + min: 1024, + max: 65535, + message: 'Port must be between 1024 and 65535', + }, + helpText: `Host port to map to container port ${this.containerPort}`, + }, + { + name: 'version', + label: 'MySQL Version', + type: 'select', + options: this.versions, + defaultValue: this.versions[0], + required: true, + readonly: isEditMode, + helpText: isEditMode + ? 
'Version cannot be changed after creation' + : 'Select the MySQL version to install', + }, + ]; + } + + getAuthenticationFields(): FormField[] { + return [ + { + name: 'username', + label: 'Root Username', + type: 'text', + defaultValue: 'root', + required: true, + readonly: true, + helpText: 'MySQL always uses "root" as the superuser', + }, + { + name: 'password', + label: 'Root Password', + type: 'password', + required: true, + placeholder: 'Strong password for root user', + validation: { + min: 4, + message: 'Password must be at least 4 characters', + }, + helpText: 'Password for the root account', + }, + { + name: 'databaseName', + label: 'Initial Database', + type: 'text', + placeholder: 'my_database', + helpText: 'Optional: Create an initial database', + }, + ]; + } + + getAdvancedFields(): FieldGroup[] { + return [ + { + label: 'Character Set & Collation', + description: 'Configure default character encoding and collation', + fields: [ + { + name: 'mysqlSettings.characterSet', + label: 'Character Set', + type: 'select', + options: ['utf8mb4', 'utf8', 'latin1'], + defaultValue: 'utf8mb4', + helpText: 'Default character set for databases', + }, + { + name: 'mysqlSettings.collation', + label: 'Collation', + type: 'text', + defaultValue: 'utf8mb4_unicode_ci', + helpText: 'Default collation for string comparisons', + }, + ], + }, + { + label: 'SQL Mode', + description: 'Configure SQL behavior and strictness', + fields: [ + { + name: 'mysqlSettings.sqlMode', + label: 'SQL Mode', + type: 'select', + options: [ + 'TRADITIONAL', + 'STRICT_TRANS_TABLES', + 'NO_ZERO_IN_DATE', + 'NO_ZERO_DATE', + 'ERROR_FOR_DIVISION_BY_ZERO', + 'NO_ENGINE_SUBSTITUTION', + ], + defaultValue: 'TRADITIONAL', + helpText: 'SQL mode affects MySQL behavior', + }, + ], + }, + ]; + } + + // ==================== Docker Command Building ==================== + buildDockerArgs(config: any): DockerRunArgs { + const envVars: Record = { + MYSQL_ROOT_PASSWORD: config.password, + }; + + // Database name + if (config.databaseName) { + envVars.MYSQL_DATABASE = config.databaseName; + } + + // Advanced settings via mysqld command flags (official mysql image doesn't support these as env vars) + const command: string[] = []; + + if (config.mysqlSettings?.characterSet) { + command.push( + `--character-set-server=${config.mysqlSettings.characterSet}`, + ); + } + + if (config.mysqlSettings?.collation) { + command.push(`--collation-server=${config.mysqlSettings.collation}`); + } + + if (config.mysqlSettings?.sqlMode) { + command.push(`--sql-mode=${config.mysqlSettings.sqlMode}`); + } + + return { + image: `mysql:${config.version}`, + envVars, + ports: [{ host: config.port, container: this.containerPort }], + volumes: config.persistData + ? 
[{ name: `${config.name}-data`, path: this.dataPath }] + : [], + command, + }; + } + + // ==================== Utilities ==================== + getConnectionString(container: Container): string { + const username = container.username || 'root'; + const database = container.databaseName || ''; + return `mysql://${username}:${container.password}@localhost:${container.port}/${database}`; + } + + validateConfig(config: any): ValidationResult { + const errors: string[] = []; + + if (!config.password || config.password.length < 4) { + errors.push('Password must be at least 4 characters'); + } + + if (!config.version) { + errors.push('MySQL version is required'); + } + + return { + valid: errors.length === 0, + errors, + }; + } + + getDefaultUsername(): string { + return 'root'; + } + + requiresAuth(): boolean { + return true; + } +} diff --git a/src/features/databases/providers/postgres.provider.tsx b/src/features/databases/providers/postgres.provider.tsx new file mode 100644 index 0000000..598313c --- /dev/null +++ b/src/features/databases/providers/postgres.provider.tsx @@ -0,0 +1,264 @@ +import { SiPostgresql } from 'react-icons/si'; +import type { Container } from '@/shared/types/container'; +import type { + DatabaseProvider, + FieldsOptions, +} from '../registry/database-provider.interface'; +import type { DockerRunArgs, ValidationResult } from '../types/docker.types'; +import type { FieldGroup, FormField } from '../types/form.types'; + +/** + * PostgreSQL Database Provider + * Implements all configuration for PostgreSQL databases + */ +export class PostgresDatabaseProvider implements DatabaseProvider { + // ==================== Identification ==================== + readonly id = 'PostgreSQL'; + readonly name = 'PostgreSQL'; + readonly description = 'Advanced open-source relational database'; + readonly icon = ; + readonly color = '#336791'; + + // ==================== Docker Configuration ==================== + readonly defaultPort = 5432; + readonly containerPort = 5432; + readonly dataPath = '/var/lib/postgresql/data'; + readonly versions = [ + // PostgreSQL 18 + '18.0', + '18', + '18-bookworm', + '18-alpine3.22', + '18-alpine3.21', + '18-alpine', + // PostgreSQL 17 + '17.6', + '17', + '17-bookworm', + '17-alpine3.22', + '17-alpine3.21', + '17-alpine', + // PostgreSQL 16 + '16.10', + '16', + '16-bookworm', + '16-alpine3.22', + '16-alpine3.21', + '16-alpine', + // PostgreSQL 15 + '15.14', + '15', + '15-bookworm', + '15-alpine3.22', + '15-alpine3.21', + '15-alpine', + // PostgreSQL 14 + '14.19', + '14', + '14-bookworm', + '14-alpine3.22', + '14-alpine3.21', + '14-alpine', + // PostgreSQL 13 + '13.22', + '13', + '13-bookworm', + '13-alpine3.22', + '13-alpine3.21', + '13-alpine', + ]; + + // ==================== Form Fields ==================== + getBasicFields({ isEditMode = false }: FieldsOptions): FormField[] { + return [ + { + name: 'name', + label: 'Container Name', + type: 'text', + required: true, + placeholder: `my-${this.id.toLowerCase()}-db`, + validation: { + min: 3, + message: 'Container name must be at least 3 characters', + }, + helpText: 'Unique name for this container', + }, + { + name: 'port', + label: 'Port', + type: 'number', + defaultValue: this.defaultPort, + required: true, + placeholder: this.defaultPort.toString(), + validation: { + min: 1024, + max: 65535, + message: 'Port must be between 1024 and 65535', + }, + helpText: `Host port to map to container port ${this.containerPort}`, + }, + { + name: 'version', + label: 'PostgreSQL Version', + type: 'select', + options: 
this.versions, + defaultValue: this.versions[0], + required: true, + readonly: isEditMode, // Version cannot be changed after creation + helpText: isEditMode + ? 'Version cannot be changed after creation' + : 'Select the PostgreSQL version to install', + }, + ]; + } + + getAuthenticationFields(): FormField[] { + return [ + { + name: 'username', + label: 'Username', + type: 'text', + defaultValue: 'postgres', + required: true, + placeholder: 'Database superuser name', + helpText: 'Default superuser for PostgreSQL', + }, + { + name: 'password', + label: 'Password', + type: 'password', + required: true, + placeholder: 'Strong password', + validation: { + min: 4, + message: 'Password must be at least 4 characters', + }, + helpText: 'Password for the superuser account', + }, + { + name: 'databaseName', + label: 'Initial Database', + type: 'text', + placeholder: 'my_database', + helpText: + 'Optional: Create an initial database (defaults to "postgres")', + }, + ]; + } + + getAdvancedFields(): FieldGroup[] { + return [ + { + label: 'Authentication & Security', + description: 'Configure how PostgreSQL handles authentication', + fields: [ + { + name: 'postgresSettings.hostAuthMethod', + label: 'Host Authentication Method', + type: 'select', + options: ['md5', 'trust', 'scram-sha-256', 'password'], + defaultValue: 'md5', + helpText: + 'Authentication method for TCP/IP connections. md5 is recommended.', + }, + ], + }, + { + label: 'Database Initialization', + description: 'Advanced settings for database initialization', + fields: [ + { + name: 'postgresSettings.initdbArgs', + label: 'INITDB Arguments', + type: 'text', + placeholder: '--encoding=UTF8 --locale=en_US.utf8', + helpText: + 'Additional arguments passed to initdb during initialization', + }, + { + name: 'postgresSettings.sharedPreloadLibraries', + label: 'Shared Preload Libraries', + type: 'text', + placeholder: 'pg_stat_statements', + helpText: + 'Comma-separated list of extensions to preload on startup', + }, + ], + }, + ]; + } + + // ==================== Docker Command Building ==================== + buildDockerArgs(config: any): DockerRunArgs { + const envVars: Record = { + POSTGRES_PASSWORD: config.password, + }; + + // Username (only if not default) + if (config.username && config.username !== 'postgres') { + envVars.POSTGRES_USER = config.username; + } + + // Database name + if (config.databaseName) { + envVars.POSTGRES_DB = config.databaseName; + } + + // Advanced settings + if (config.postgresSettings?.hostAuthMethod) { + envVars.POSTGRES_HOST_AUTH_METHOD = + config.postgresSettings.hostAuthMethod; + } + + if (config.postgresSettings?.initdbArgs) { + envVars.POSTGRES_INITDB_ARGS = config.postgresSettings.initdbArgs; + } + + if (config.postgresSettings?.sharedPreloadLibraries) { + envVars.POSTGRES_SHARED_PRELOAD_LIBRARIES = + config.postgresSettings.sharedPreloadLibraries; + } + + return { + image: `postgres:${config.version}`, + envVars, + ports: [{ host: config.port, container: this.containerPort }], + volumes: config.persistData + ? 
[{ name: `${config.name}-data`, path: this.dataPath }] + : [], + command: [], + }; + } + + // ==================== Utilities ==================== + getConnectionString(container: Container): string { + const username = container.username || 'postgres'; + const database = container.databaseName || 'postgres'; + return `postgresql://${username}:${container.password}@localhost:${container.port}/${database}`; + } + + validateConfig(config: any): ValidationResult { + const errors: string[] = []; + + if (!config.password || config.password.length < 4) { + errors.push('Password must be at least 4 characters'); + } + + if (!config.version) { + errors.push('PostgreSQL version is required'); + } + + return { + valid: errors.length === 0, + errors, + }; + } + + getDefaultUsername(): string { + return 'postgres'; + } + + requiresAuth(): boolean { + return true; + } +} diff --git a/src/features/databases/providers/redis.provider.tsx b/src/features/databases/providers/redis.provider.tsx new file mode 100644 index 0000000..c8fc997 --- /dev/null +++ b/src/features/databases/providers/redis.provider.tsx @@ -0,0 +1,287 @@ +import { SiRedis } from 'react-icons/si'; +import type { Container } from '@/shared/types/container'; +import type { + DatabaseProvider, + FieldsOptions, +} from '../registry/database-provider.interface'; +import type { DockerRunArgs, ValidationResult } from '../types/docker.types'; +import type { FieldGroup, FormField } from '../types/form.types'; + +/** + * Redis Database Provider + * Implements all configuration for Redis databases + */ +export class RedisDatabaseProvider implements DatabaseProvider { + // ==================== Identification ==================== + readonly id = 'Redis'; + readonly name = 'Redis'; + readonly description = 'In-memory data structure store'; + readonly icon = ; + readonly color = '#DC382D'; + + // ==================== Docker Configuration ==================== + readonly defaultPort = 6379; + readonly containerPort = 6379; + readonly dataPath = '/data'; + readonly versions = [ + // Redis 8.2 + '8.2.2', + '8.2', + '8', + '8-bookworm', + '8.2-alpine', + '8-alpine3.22', + '8-alpine', + // Redis 8.0 + '8.0.4', + '8.0', + '8.0-bookworm', + '8.0-alpine', + '8.0-alpine3.21', + // Redis 7.4 + '7.4.6', + '7.4', + '7', + '7-bookworm', + '7.4-alpine', + '7-alpine3.21', + '7-alpine', + // Redis 7.2 + '7.2.11', + '7.2', + '7.2-bookworm', + '7.2-alpine', + '7.2-alpine3.21', + // Redis 6.2 + '6.2.20', + '6.2', + '6', + '6-bookworm', + '6.2-alpine', + '6-alpine3.21', + '6-alpine', + ]; + + // ==================== Form Fields ==================== + getBasicFields({ isEditMode = false }: FieldsOptions): FormField[] { + return [ + { + name: 'name', + label: 'Container Name', + type: 'text', + required: true, + placeholder: `my-${this.id.toLowerCase()}-db`, + validation: { + min: 3, + message: 'Container name must be at least 3 characters', + }, + helpText: 'Unique name for this container', + }, + { + name: 'port', + label: 'Port', + type: 'number', + defaultValue: this.defaultPort, + required: true, + placeholder: this.defaultPort.toString(), + validation: { + min: 1024, + max: 65535, + message: 'Port must be between 1024 and 65535', + }, + helpText: `Host port to map to container port ${this.containerPort}`, + }, + { + name: 'version', + label: 'Redis Version', + type: 'select', + options: this.versions, + defaultValue: this.versions[0], + required: true, + readonly: isEditMode, + helpText: isEditMode + ? 
'Version cannot be changed after creation' + : 'Select the Redis version to install', + }, + ]; + } + + getAuthenticationFields(): FormField[] { + return [ + { + name: 'password', + label: 'Password', + type: 'password', + required: false, + placeholder: 'Optional password', + validation: { + min: 4, + message: 'Password must be at least 4 characters', + }, + helpText: + 'Optional: Set a password for Redis. Leave empty for no authentication.', + }, + ]; + } + + getAdvancedFields(): FieldGroup[] { + return [ + { + label: 'Memory Management', + description: 'Configure Redis memory usage and eviction policies', + fields: [ + { + name: 'redisSettings.maxMemory', + label: 'Max Memory', + type: 'text', + defaultValue: '256mb', + placeholder: '256mb, 1gb, 2gb', + helpText: + 'Maximum memory Redis can use (e.g., 256mb, 1gb). Leave empty for unlimited.', + }, + { + name: 'redisSettings.maxMemoryPolicy', + label: 'Eviction Policy', + type: 'select', + options: [ + 'allkeys-lru', + 'volatile-lru', + 'allkeys-lfu', + 'volatile-lfu', + 'allkeys-random', + 'volatile-random', + 'volatile-ttl', + 'noeviction', + ], + defaultValue: 'allkeys-lru', + helpText: + 'Policy for evicting keys when max memory is reached. LRU = Least Recently Used.', + }, + ], + }, + { + label: 'Persistence', + description: 'Configure data persistence options', + fields: [ + { + name: 'redisSettings.appendOnly', + label: 'Enable AOF (Append Only File)', + type: 'checkbox', + defaultValue: false, + helpText: + 'Enable append-only file for better durability. Logs every write operation.', + }, + { + name: 'redisSettings.save', + label: 'RDB Snapshots', + type: 'text', + placeholder: '900 1 300 10 60 10000', + helpText: + 'Save snapshots in pairs: seconds changes (e.g., "900 1 300 10" = save after 900s if 1+ keys changed, or after 300s if 10+ keys changed)', + }, + ], + }, + { + label: 'Performance', + description: 'Configure Redis performance settings', + fields: [ + { + name: 'redisSettings.maxClients', + label: 'Max Clients', + type: 'number', + defaultValue: 10000, + helpText: 'Maximum number of connected clients', + }, + ], + }, + ]; + } + + // ==================== Docker Command Building ==================== + buildDockerArgs(config: any): DockerRunArgs { + const envVars: Record = {}; + const command: string[] = ['redis-server']; + + // Password authentication + if (config.password) { + command.push('--requirepass', config.password); + } + + // Memory settings + if (config.redisSettings?.maxMemory) { + command.push('--maxmemory', config.redisSettings.maxMemory); + } + + if (config.redisSettings?.maxMemoryPolicy) { + command.push('--maxmemory-policy', config.redisSettings.maxMemoryPolicy); + } + + // Persistence + if (config.redisSettings?.appendOnly) { + command.push('--appendonly', 'yes'); + } + + if (config.redisSettings?.save) { + // Split save pairs (e.g., "900 1 300 10" -> --save 900 1 --save 300 10) + const tokens = String(config.redisSettings.save) + .trim() + .split(/\s+/) + .filter(Boolean); + + // Push pairs: --save + for (let i = 0; i < tokens.length; i += 2) { + const seconds = tokens[i]; + const changes = tokens[i + 1]; + if (seconds && changes) { + command.push('--save', seconds, changes); + } + } + } + + // Performance + if (config.redisSettings?.maxClients) { + command.push('--maxclients', config.redisSettings.maxClients.toString()); + } + + return { + image: `redis:${config.version}`, + envVars, + ports: [{ host: config.port, container: this.containerPort }], + volumes: config.persistData + ? 
[{ name: `${config.name}-data`, path: this.dataPath }] + : [], + command, + }; + } + + // ==================== Utilities ==================== + getConnectionString(container: Container): string { + const auth = container.password ? `:${container.password}@` : ''; + return `redis://${auth}localhost:${container.port}`; + } + + validateConfig(config: any): ValidationResult { + const errors: string[] = []; + + if (config.password && config.password.length < 4) { + errors.push('Password must be at least 4 characters if provided'); + } + + if (!config.version) { + errors.push('Redis version is required'); + } + + return { + valid: errors.length === 0, + errors, + }; + } + + getDefaultUsername(): string | undefined { + return undefined; // Redis doesn't use username + } + + requiresAuth(): boolean { + return false; // Redis auth is optional + } +} diff --git a/src/features/databases/registry/database-provider.interface.ts b/src/features/databases/registry/database-provider.interface.ts new file mode 100644 index 0000000..14fbeaa --- /dev/null +++ b/src/features/databases/registry/database-provider.interface.ts @@ -0,0 +1,73 @@ +import type { ReactNode } from 'react'; +import type { Container } from '@/shared/types/container'; +import type { DockerRunArgs, ValidationResult } from '../types/docker.types'; +import type { FieldGroup, FormField } from '../types/form.types'; + +export interface FieldsOptions { + isEditMode?: boolean; +} + +/** + * Database Provider Interface + * Each database type implements this interface to provide all necessary configuration + */ +export interface DatabaseProvider { + // ==================== Identification ==================== + readonly id: string; + readonly name: string; + readonly description: string; + readonly icon: ReactNode; + readonly color: string; + + // ==================== Docker Configuration ==================== + readonly defaultPort: number; + readonly containerPort: number; + readonly dataPath: string; + readonly versions: string[]; + + // ==================== Form Fields (Dynamic) ==================== + /** + * Get basic fields specific to this database (name, port, version, etc.) 
+ * @param options - Options for customizing field behavior + */ + getBasicFields(options?: FieldsOptions): FormField[]; + + /** + * Get authentication fields (username, password, database name) + * @param options - Options for customizing field behavior + */ + getAuthenticationFields(options?: FieldsOptions): FormField[]; + + /** + * Get advanced configuration fields grouped by category + * @param options - Options for customizing field behavior + */ + getAdvancedFields(options?: FieldsOptions): FieldGroup[]; + + // ==================== Docker Command Building ==================== + /** + * Build Docker run arguments from form configuration + */ + buildDockerArgs(config: any): DockerRunArgs; + + // ==================== Utilities ==================== + /** + * Generate connection string for this database type + */ + getConnectionString(container: Container): string; + + /** + * Validate configuration before creating container + */ + validateConfig(config: any): ValidationResult; + + /** + * Get default username for this database (if any) + */ + getDefaultUsername?(): string | undefined; + + /** + * Check if this database requires authentication + */ + requiresAuth(): boolean; +} diff --git a/src/features/databases/registry/database-registry.ts b/src/features/databases/registry/database-registry.ts new file mode 100644 index 0000000..56ca183 --- /dev/null +++ b/src/features/databases/registry/database-registry.ts @@ -0,0 +1,65 @@ +import { MongoDBDatabaseProvider } from '../providers/mongodb.provider'; +import { MySQLDatabaseProvider } from '../providers/mysql.provider'; +import { PostgresDatabaseProvider } from '../providers/postgres.provider'; +import { RedisDatabaseProvider } from '../providers/redis.provider'; +import type { DatabaseProvider } from './database-provider.interface'; + +class DatabaseRegistry { + private providers = new Map(); + + constructor(providers: DatabaseProvider[]) { + providers.forEach((provider) => { + this.providers.set(provider.id, provider); + }); + } + + /** + * Get a specific provider by ID + */ + get(id: string): DatabaseProvider | undefined { + return this.providers.get(id); + } + + /** + * Get all registered providers + */ + getAll(): DatabaseProvider[] { + return Array.from(this.providers.values()); + } + + /** + * Get all provider IDs + */ + getAllIds(): string[] { + return Array.from(this.providers.keys()); + } + + /** + * Check if a provider exists + */ + has(id: string): boolean { + return this.providers.has(id); + } + + /** + * Get count of registered providers + */ + count(): number { + return this.providers.size; + } +} + +/** + * Factory function to create a DatabaseRegistry instance + * Add new providers here when extending the application + */ +export function createDatabaseRegistry(): DatabaseRegistry { + return new DatabaseRegistry([ + new PostgresDatabaseProvider(), + new MySQLDatabaseProvider(), + new RedisDatabaseProvider(), + new MongoDBDatabaseProvider(), + ]); +} + +export const databaseRegistry = createDatabaseRegistry(); diff --git a/src/features/databases/types/docker.types.ts b/src/features/databases/types/docker.types.ts new file mode 100644 index 0000000..5da0632 --- /dev/null +++ b/src/features/databases/types/docker.types.ts @@ -0,0 +1,46 @@ +/** + * Docker configuration types + * These types represent the structure sent to the backend + */ + +export interface PortMapping { + host: number; + container: number; +} + +export interface VolumeMount { + name: string; + path: string; +} + +export interface DockerRunArgs { + image: 
string; + envVars: Record; + ports: PortMapping[]; + volumes: VolumeMount[]; + command: string[]; +} + +export interface DockerRunRequest { + name: string; + dockerArgs: DockerRunArgs; + metadata: ContainerMetadata; +} + +export interface ContainerMetadata { + id: string; + dbType: string; + version: string; + port: number; + username?: string; + password: string; + databaseName?: string; + persistData: boolean; + enableAuth: boolean; + maxConnections?: number; +} + +export interface ValidationResult { + valid: boolean; + errors: string[]; +} diff --git a/src/features/databases/types/form.types.ts b/src/features/databases/types/form.types.ts new file mode 100644 index 0000000..9866996 --- /dev/null +++ b/src/features/databases/types/form.types.ts @@ -0,0 +1,41 @@ +/** + * Form field types for dynamic form generation + */ + +export interface FormFieldBase { + name: string; + label: string; + placeholder?: string; + required?: boolean; + readonly?: boolean; + helpText?: string; + defaultValue?: string | number | boolean; +} + +export interface TextFormField extends FormFieldBase { + type: 'text' | 'password' | 'number'; + validation?: { + min?: number; + max?: number; + pattern?: RegExp; + message?: string; + }; +} + +export interface SelectFormField extends FormFieldBase { + type: 'select'; + options: string[]; +} + +export interface CheckboxFormField extends FormFieldBase { + type: 'checkbox'; + defaultValue?: boolean; +} + +export type FormField = TextFormField | SelectFormField | CheckboxFormField; + +export interface FieldGroup { + label: string; + description?: string; + fields: FormField[]; +} diff --git a/src/features/docker/hooks/use-docker-status.ts b/src/features/docker/hooks/use-docker-status.ts index 01d308a..65f05c3 100644 --- a/src/features/docker/hooks/use-docker-status.ts +++ b/src/features/docker/hooks/use-docker-status.ts @@ -1,7 +1,6 @@ import { useCallback, useEffect, useRef, useState } from 'react'; import { toast } from 'sonner'; import type { DockerStatus } from '../../../shared/types/docker'; -import { canInteractWithDocker } from '../../../shared/utils/docker'; import { dockerApi } from '../api/docker.api'; /** @@ -26,8 +25,7 @@ export function useDockerStatus() { setDockerStatus(status); - // Show overlay if Docker is not available - if (!canInteractWithDocker(status)) { + if (status.status !== 'running') { setShouldShowOverlay(true); } else { setShouldShowOverlay(false); @@ -118,8 +116,6 @@ export function useDockerStatus() { isRefreshing, shouldShowOverlay, refreshStatus, - isDockerAvailable: dockerStatus - ? canInteractWithDocker(dockerStatus) - : false, + isDockerAvailable: dockerStatus ? 
dockerStatus.status === 'running' : false, }; } diff --git a/src/pages/create-container/components/DatabaseSelectionForm.tsx b/src/pages/create-container/components/DatabaseSelectionForm.tsx index aea958f..5be7594 100644 --- a/src/pages/create-container/components/DatabaseSelectionForm.tsx +++ b/src/pages/create-container/components/DatabaseSelectionForm.tsx @@ -60,9 +60,7 @@ export function DatabaseSelectionForm() { ); case 2: - return ( - - ); + return ; case 3: return ; default: diff --git a/src/pages/create-container/hooks/use-container-creation-wizard.ts b/src/pages/create-container/hooks/use-container-creation-wizard.ts index dd5a7f3..bd90169 100644 --- a/src/pages/create-container/hooks/use-container-creation-wizard.ts +++ b/src/pages/create-container/hooks/use-container-creation-wizard.ts @@ -1,17 +1,23 @@ -import { zodResolver } from '@hookform/resolvers/zod'; import { emit } from '@tauri-apps/api/event'; import { getCurrentWindow } from '@tauri-apps/api/window'; -import { useCallback, useState } from 'react'; +import { useCallback, useEffect, useState } from 'react'; import { useForm } from 'react-hook-form'; -import { useContainerActions } from '../../../features/containers/hooks/use-container-actions'; -import { ContainerService } from '../../../features/containers/services/container.service'; -import type { CreateContainerRequest } from '../../../shared/types/container'; -import { - type CreateDatabaseFormValidation, - createDatabaseFormSchema, -} from '../schemas/database-form.schema'; +import { databasesApi } from '@/features/databases/api/databases.api'; +import { databaseRegistry } from '@/features/databases/registry/database-registry'; +import type { DockerRunRequest } from '@/features/databases/types/docker.types'; import { FORM_STEPS } from '../types/form-steps'; +/** + * Form data structure (no Zod schema needed) + * Validation is handled by providers and field-level rules + */ +export interface CreateDatabaseFormData { + databaseSelection: { + dbType?: string; + }; + containerConfiguration: Record; +} + /** * Hook to manage the container creation wizard * Responsibility: Wizard logic (steps, validation, submit) @@ -19,49 +25,115 @@ import { FORM_STEPS } from '../types/form-steps'; export function useContainerCreationWizard() { const [currentStep, setCurrentStep] = useState(1); const [completedSteps, setCompletedSteps] = useState([]); - const { create } = useContainerActions(); - // Form setup - const form = useForm({ - resolver: zodResolver(createDatabaseFormSchema), + // Form setup - NO ZOD RESOLVER + // Validation is handled by individual field rules from providers + const form = useForm({ defaultValues: { databaseSelection: { dbType: undefined, }, containerConfiguration: { name: '', - port: 5432, - version: '', - username: '', - password: '', - databaseName: '', + port: undefined, // Will be set by provider + version: '', // Will be set by provider persistData: true, enableAuth: true, - maxConnections: undefined, - postgresSettings: {}, - mysqlSettings: {}, - redisSettings: {}, - mongoSettings: {}, }, }, mode: 'onChange', }); - const { handleSubmit, trigger, watch } = form; + const { handleSubmit, watch, setValue } = form; + + // Watch for database type changes to apply provider defaults + const selectedDbType = watch('databaseSelection.dbType'); + + /** + * Apply provider defaults when database type changes + */ + useEffect(() => { + if (!selectedDbType) return; + + const provider = databaseRegistry.get(selectedDbType); + if (!provider) return; + + // Apply 
default port + setValue('containerConfiguration.port', provider.defaultPort); + + // Apply default version (first in the list) + if (provider.versions.length > 0) { + setValue('containerConfiguration.version', provider.versions[0]); + } + + // Apply default username if provider has one + const defaultUsername = provider.getDefaultUsername?.(); + if (defaultUsername) { + setValue('containerConfiguration.username', defaultUsername); + } + + // Get all fields from provider and apply their default values + const allFields = [ + ...provider.getBasicFields({ isEditMode: false }), + ...provider.getAuthenticationFields(), + ...provider.getAdvancedFields().flatMap((group) => group.fields), + ]; + + // Apply default values from fields + for (const field of allFields) { + if (field.defaultValue !== undefined) { + setValue( + `containerConfiguration.${field.name}` as any, + field.defaultValue, + ); + } + } + + console.log( + `✅ Applied defaults for ${provider.name}: port=${provider.defaultPort}, version=${provider.versions[0]}`, + ); + }, [selectedDbType, setValue]); /** - * Advance to next step if validation is correct + * Advance to next step - Uses react-hook-form validation and provider validation */ const nextStep = useCallback(async () => { - let isValid = false; + let canProceed = false; if (currentStep === 1) { - isValid = await trigger('databaseSelection'); + // Step 1: Must have selected a database + canProceed = Boolean(watch('databaseSelection.dbType')); } else if (currentStep === 2) { - isValid = await trigger('containerConfiguration'); + // Step 2: Validate with react-hook-form (field-level validation) + // This triggers validation for all fields in the form + const isFormValid = await form.trigger('containerConfiguration'); + + if (!isFormValid) { + console.log('❌ Form validation failed'); + return; + } + + const config = watch('containerConfiguration'); + const selectedDbType = watch('databaseSelection.dbType'); + + // Also validate with provider + if (selectedDbType) { + const provider = databaseRegistry.get(selectedDbType); + if (provider) { + const validation = provider.validateConfig(config); + if (!validation.valid) { + console.log('❌ Provider validation failed:', validation.errors); + return; + } + } + } + + canProceed = true; + } else { + canProceed = true; } - if (isValid) { + if (canProceed) { if (!completedSteps.includes(currentStep)) { setCompletedSteps((prev) => [...prev, currentStep]); } @@ -69,7 +141,7 @@ export function useContainerCreationWizard() { setCurrentStep((prev) => prev + 1); } } - }, [currentStep, completedSteps, trigger]); + }, [currentStep, completedSteps, watch, form]); /** * Go back to previous step @@ -94,58 +166,62 @@ export function useContainerCreationWizard() { /** * Validate if current step is complete + * This is used to enable/disable the "Next" button */ const isCurrentStepValid = useCallback(() => { switch (currentStep) { case 1: return Boolean(watch('databaseSelection.dbType')); - case 2: - const config = watch('containerConfiguration'); - return !!(config.name && config.port && config.version); + case 2: { + return true; + } case 3: return true; default: return true; } - }, [currentStep, watch]); + }, [currentStep, watch, form.formState.errors]); /** - * Transform form data to API format + * Transform form data to Docker Run Request using provider */ - const transformFormToRequest = useCallback( - (data: CreateDatabaseFormValidation): CreateContainerRequest => { + const transformFormToDockerRequest = useCallback( + (data: 
CreateDatabaseFormData): DockerRunRequest => { const { databaseSelection, containerConfiguration } = data; + if (!databaseSelection.dbType) { + throw new Error('Database type not selected'); + } + + // Get the provider for this database type + const provider = databaseRegistry.get(databaseSelection.dbType); + if (!provider) { + throw new Error( + `No provider found for database type: ${databaseSelection.dbType}`, + ); + } + + // Let the provider build the Docker arguments + const dockerArgs = provider.buildDockerArgs(containerConfiguration); + + // Generate unique ID for this container + const containerId = crypto.randomUUID(); + return { name: containerConfiguration.name, - dbType: databaseSelection.dbType, - version: containerConfiguration.version, - port: containerConfiguration.port!, - username: containerConfiguration.username, - password: containerConfiguration.password || '', - databaseName: containerConfiguration.databaseName, - persistData: containerConfiguration.persistData ?? true, - enableAuth: containerConfiguration.enableAuth ?? true, - maxConnections: - containerConfiguration.maxConnections || - ContainerService.getDefaultPort(databaseSelection.dbType), - // Include DB-specific settings if they exist - ...(databaseSelection.dbType === 'PostgreSQL' && - containerConfiguration.postgresSettings && { - postgresSettings: containerConfiguration.postgresSettings, - }), - ...(databaseSelection.dbType === 'MySQL' && - containerConfiguration.mysqlSettings && { - mysqlSettings: containerConfiguration.mysqlSettings, - }), - ...(databaseSelection.dbType === 'Redis' && - containerConfiguration.redisSettings && { - redisSettings: containerConfiguration.redisSettings, - }), - ...(databaseSelection.dbType === 'MongoDB' && - containerConfiguration.mongoSettings && { - mongoSettings: containerConfiguration.mongoSettings, - }), + dockerArgs, + metadata: { + id: containerId, + dbType: databaseSelection.dbType, + version: containerConfiguration.version, + port: containerConfiguration.port!, + username: containerConfiguration.username, + password: containerConfiguration.password || '', + databaseName: containerConfiguration.databaseName, + persistData: containerConfiguration.persistData ?? true, + enableAuth: containerConfiguration.enableAuth ?? 
true, + maxConnections: containerConfiguration.maxConnections, + }, }; }, [], @@ -155,10 +231,37 @@ export function useContainerCreationWizard() { * Final submit - create container and close window */ const submit = useCallback( - async (data: CreateDatabaseFormValidation) => { + async (data: CreateDatabaseFormData) => { try { - const request = transformFormToRequest(data); - const newContainer = await create(request); + const { databaseSelection, containerConfiguration } = data; + + if (!databaseSelection.dbType) { + throw new Error('Database type not selected'); + } + + // Get the provider for validation + const provider = databaseRegistry.get(databaseSelection.dbType); + if (!provider) { + throw new Error( + `No provider found for database type: ${databaseSelection.dbType}`, + ); + } + + // Validate with provider before creating + const validation = provider.validateConfig(containerConfiguration); + if (!validation.valid) { + // Show validation errors + const errorMessage = validation.errors.join('\n'); + console.error('❌ Validation errors:', validation.errors); + alert(`Validation failed:\n${errorMessage}`); + throw new Error(`Validation failed: ${errorMessage}`); + } + + const dockerRequest = transformFormToDockerRequest(data); + + // Use the unified databases API + console.log('🚀 Creating container with Docker args:', dockerRequest); + const newContainer = await databasesApi.create(dockerRequest); // Mark all steps as completed setCompletedSteps([1, 2, 3]); @@ -174,13 +277,13 @@ export function useContainerCreationWizard() { const currentWindow = getCurrentWindow(); await currentWindow.close(); } catch (error) { - // Error is already handled in useContainerActions + // Error is already handled by invoke wrapper console.error('Error creating container:', error); + throw error; } }, - [create, transformFormToRequest], + [transformFormToDockerRequest], ); - return { // Form form, diff --git a/src/pages/create-container/schemas/database-form.schema.ts b/src/pages/create-container/schemas/database-form.schema.ts deleted file mode 100644 index 113c664..0000000 --- a/src/pages/create-container/schemas/database-form.schema.ts +++ /dev/null @@ -1,75 +0,0 @@ -import { z } from 'zod'; - -export const createDatabaseFormSchema = z.object({ - databaseSelection: z.object({ - dbType: z.enum(['PostgreSQL', 'MySQL', 'Redis', 'MongoDB'] as const), - }), - containerConfiguration: z.object({ - // Basic container configuration - name: z - .string() - .min(1, 'Database name is required') - .max(50, 'Name cannot exceed 50 characters') - .regex( - /^[a-zA-Z0-9_-]+$/, - 'Only letters, numbers, hyphens and underscores are allowed', - ), - port: z - .number() - .min(1024, 'Port must be greater than 1024') - .max(65535, 'Port must be less than 65535'), - version: z.string().min(1, 'Version is required'), - - // Authentication configuration (basic fields) - username: z.string().optional(), - password: z - .string() - .min(4, 'Password must be at least 4 characters') - .optional(), - databaseName: z.string().optional(), - - // Default configurations - persistData: z.boolean(), - enableAuth: z.boolean(), - maxConnections: z.number().optional(), - - // Database-specific configurations - postgresSettings: z - .object({ - initdbArgs: z.string().optional(), - hostAuthMethod: z.string().optional(), - sharedPreloadLibraries: z.string().optional(), - }) - .optional(), - - mysqlSettings: z - .object({ - rootHost: z.string().optional(), - characterSet: z.string().optional(), - collation: z.string().optional(), - sqlMode: 
z.string().optional(), - }) - .optional(), - - redisSettings: z - .object({ - maxMemory: z.string().optional(), - maxMemoryPolicy: z.string().optional(), - appendOnly: z.boolean().optional(), - requirePass: z.boolean().optional(), - }) - .optional(), - - mongoSettings: z - .object({ - authSource: z.string().optional(), - enableSharding: z.boolean().optional(), - oplogSize: z.string().optional(), - }) - .optional(), - }), -}); - -export type CreateDatabaseFormValidation = z.infer< - typeof createDatabaseFormSchema ->; diff --git a/src/pages/create-container/steps/container-configuration-step.tsx b/src/pages/create-container/steps/container-configuration-step.tsx index 3a6c63d..9f39632 100644 --- a/src/pages/create-container/steps/container-configuration-step.tsx +++ b/src/pages/create-container/steps/container-configuration-step.tsx @@ -1,266 +1,29 @@ import { motion } from 'framer-motion'; -import React from 'react'; import { UseFormReturn } from 'react-hook-form'; +import { + DynamicFieldGroups, + DynamicFormSection, +} from '@/features/databases/components/dynamic-form-section'; +import { databaseRegistry } from '@/features/databases/registry/database-registry'; import { Accordion, AccordionContent, AccordionItem, AccordionTrigger, } from '../../../shared/components/ui/accordion'; +import { Checkbox } from '../../../shared/components/ui/checkbox'; import { FormControl, FormField, FormItem, FormLabel, - FormMessage, } from '../../../shared/components/ui/form'; -import { Input } from '../../../shared/components/ui/input'; -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from '../../../shared/components/ui/select'; -import { CreateDatabaseFormValidation } from '../schemas/database-form.schema'; +import type { CreateDatabaseFormData } from '../hooks/use-container-creation-wizard'; interface Props { - form: UseFormReturn; - isSubmitting: boolean; -} - -interface BaseField { - name: string; - label: string; - type?: string; - placeholder?: string; - required?: boolean; - readonly?: boolean; - defaultValue?: string | boolean; -} - -interface SelectField extends BaseField { - type: 'select'; - options: string[]; -} - -type ConfigField = BaseField | SelectField; - -interface DatabaseConfig { - defaultPort: number; - versions: string[]; - authentication: { - supportsAuth: boolean; - fields: ConfigField[]; - }; - specificConfig: { - fields: ConfigField[]; - }; + form: UseFormReturn; } -const databaseConfigs: Record = { - PostgreSQL: { - defaultPort: 5432, - versions: ['16', '15', '14', '13', '12'], - authentication: { - supportsAuth: true, - fields: [ - { name: 'username', label: 'Username', defaultValue: 'postgres' }, - { - name: 'password', - label: 'Password', - type: 'password', - required: true, - placeholder: 'Password to access PostgreSQL', - }, - { - name: 'databaseName', - label: 'Database Name', - required: false, - placeholder: 'Name of the database to create', - }, - ], - }, - specificConfig: { - fields: [ - { - name: 'initdbArgs', - label: 'INITDB Arguments', - type: 'text', - placeholder: '--auth-host=md5', - }, - { - name: 'hostAuthMethod', - label: 'Authentication Method', - type: 'select', - options: ['md5', 'trust', 'scram-sha-256'], - defaultValue: 'md5', - } as SelectField, - { - name: 'sharedPreloadLibraries', - label: 'Shared Libraries', - type: 'text', - placeholder: 'pg_stat_statements', - }, - ], - }, - }, - MySQL: { - defaultPort: 3306, - versions: ['8.0', '5.7'], - authentication: { - supportsAuth: true, - fields: [ - { - name: 
'username', - label: 'Username', - defaultValue: 'root', - readonly: true, - }, - { - name: 'password', - label: 'Password', - type: 'password', - required: true, - placeholder: 'Password for MySQL root user', - }, - { - name: 'databaseName', - label: 'Database Name', - required: false, - placeholder: 'Name of the database to create', - }, - ], - }, - specificConfig: { - fields: [ - { - name: 'rootHost', - label: 'Root Host', - type: 'text', - defaultValue: '%', - }, - { - name: 'characterSet', - label: 'Charset', - type: 'select', - options: ['utf8mb4', 'utf8', 'latin1'], - defaultValue: 'utf8mb4', - } as SelectField, - { - name: 'collation', - label: 'Collation', - type: 'text', - defaultValue: 'utf8mb4_unicode_ci', - }, - { - name: 'sqlMode', - label: 'SQL Mode', - type: 'text', - defaultValue: 'TRADITIONAL', - }, - ], - }, - }, - Redis: { - defaultPort: 6379, - versions: ['7.2', '7.0', '6.2'], - authentication: { - supportsAuth: true, - fields: [ - { - name: 'password', - label: 'Password', - type: 'password', - required: true, - placeholder: 'Password to access Redis', - }, - ], - }, - specificConfig: { - fields: [ - { - name: 'maxMemory', - label: 'Max Memory', - type: 'text', - defaultValue: '256mb', - }, - { - name: 'maxMemoryPolicy', - label: 'Memory Policy', - type: 'select', - options: [ - 'allkeys-lru', - 'volatile-lru', - 'allkeys-random', - 'volatile-random', - 'volatile-ttl', - 'noeviction', - ], - defaultValue: 'allkeys-lru', - } as SelectField, - { - name: 'appendOnly', - label: 'Append Only', - type: 'checkbox', - defaultValue: false, - }, - { - name: 'requirePass', - label: 'Requires Password', - type: 'checkbox', - defaultValue: false, - }, - ], - }, - }, - MongoDB: { - defaultPort: 27017, - versions: ['7.0', '6.0', '5.0'], - authentication: { - supportsAuth: true, - fields: [ - { name: 'username', label: 'Username', defaultValue: 'admin' }, - { - name: 'password', - label: 'Password', - type: 'password', - required: true, - placeholder: 'Password for MongoDB admin user', - }, - { - name: 'databaseName', - label: 'Database Name', - required: false, - placeholder: 'Name of the database to create', - }, - ], - }, - specificConfig: { - fields: [ - { - name: 'authSource', - label: 'Auth Source', - type: 'text', - defaultValue: 'admin', - }, - { - name: 'enableSharding', - label: 'Enable Sharding', - type: 'checkbox', - defaultValue: false, - }, - { - name: 'oplogSize', - label: 'Oplog Size (MB)', - type: 'text', - defaultValue: '512', - }, - ], - }, - }, -}; - const containerVariants = { hidden: { opacity: 0 }, visible: { @@ -286,51 +49,17 @@ const itemVariants = { }, }; -export function ContainerConfigurationStep({ form, isSubmitting }: Props) { +export function ContainerConfigurationStep({ form }: Props) { const selectedDbType = form.watch('databaseSelection.dbType'); - const config = databaseConfigs[selectedDbType]; - - // Set default values when database type changes - React.useEffect(() => { - if (config) { - // Set default port - const currentPort = form.getValues('containerConfiguration.port'); - if ( - !currentPort || - Object.values(databaseConfigs).some( - (dbConfig) => dbConfig.defaultPort === currentPort, - ) - ) { - form.setValue('containerConfiguration.port', config.defaultPort); - } - - // Set default authentication values - if (config.authentication.supportsAuth) { - config.authentication.fields.forEach((field) => { - if (field.defaultValue !== undefined) { - form.setValue( - `containerConfiguration.${field.name}` as any, - field.defaultValue, - ); - } - }); 
- } + const provider = databaseRegistry.get(selectedDbType!); - // Set default database-specific values - if (config.specificConfig) { - const settingsKey = - `${selectedDbType.toLowerCase()}Settings` as keyof typeof form.getValues; - config.specificConfig.fields.forEach((field) => { - if (field.defaultValue !== undefined) { - form.setValue( - `containerConfiguration.${settingsKey}.${field.name}` as any, - field.defaultValue, - ); - } - }); - } - } - }, [selectedDbType, config, form]); + if (!provider) { + return ( +
+ Please select a database type first +
+ ); + } return (
@@ -342,217 +71,87 @@ export function ContainerConfigurationStep({ form, isSubmitting }: Props) { >
- {selectedDbType} Database Configuration + {provider.name} Database Configuration
- {/* Section 1: Basic container configuration */} + {/* Section 1: Basic Container Configuration */} - Database Configuration + Basic Configuration -
- {/* Container name */} -
- ( - - Database Name * - - - - - - )} - /> -
- - {/* Port */} -
- ( - - Port * - - - field.onChange(Number(e.target.value)) - } - /> - - - - )} - /> -
- - {/* Version */} -
- ( - - Version * - - - - )} - /> -
+ {/* Dynamic Basic Fields from Provider (includes name, port, version, etc.) */} + + + {/* Persistence Option */} +
+ ( + + + + +
+ + Persist data with a Docker volume +

+ Data will be preserved when the container is removed

+
+
+ )} + />
- {/* Section 2: Authentication */} - {config?.authentication.supportsAuth && ( + {/* Section 2: Authentication (Dynamic from Provider) */} + {provider.requiresAuth() && ( Authentication -
- {config.authentication.fields.map((field) => ( -
- ( - - - {field.label} - {field.required && ' *'} - - - - - - - )} - /> -
- ))} -
+
)} - {/* Section 3: Database-specific configurations */} - {config?.specificConfig && - config.specificConfig.fields.length > 0 && ( - - - {selectedDbType} Specific Configuration - - -
- {config.specificConfig.fields.map((field) => { - const settingsKey = `${selectedDbType.toLowerCase()}Settings`; - return ( -
- ( - - - {field.label} - {field.required && ' *'} - - - {field.type === 'select' && - 'options' in field ? ( - - ) : ( - - )} - - - - )} - /> -
- ); - })} -
-
-
- )} + {/* Section 3: Advanced Configuration (Dynamic from Provider) */} + {provider.getAdvancedFields().length > 0 && ( + + + Advanced Configuration + + + + + + )} diff --git a/src/pages/create-container/steps/database-selection-step.tsx b/src/pages/create-container/steps/database-selection-step.tsx index 3790dd1..fab0500 100644 --- a/src/pages/create-container/steps/database-selection-step.tsx +++ b/src/pages/create-container/steps/database-selection-step.tsx @@ -1,45 +1,14 @@ import { motion } from 'framer-motion'; import { Controller, UseFormReturn } from 'react-hook-form'; -import { SiMongodb, SiMysql, SiPostgresql, SiRedis } from 'react-icons/si'; +import { databaseRegistry } from '@/features/databases/registry/database-registry'; import { cn } from '../../../shared/utils/cn'; -import { CreateDatabaseFormValidation } from '../schemas/database-form.schema'; +import type { CreateDatabaseFormData } from '../hooks/use-container-creation-wizard'; interface Props { - form: UseFormReturn; + form: UseFormReturn; isSubmitting: boolean; } -const databases = [ - { - name: 'PostgreSQL', - icon: , - color: '#336791', - description: 'Advanced open-source relational database', - versions: ['16', '15', '14', '13', '12'], - }, - { - name: 'MySQL', - icon: , - color: '#4479A1', - description: 'Relational database management system', - versions: ['8.0', '5.7'], - }, - { - name: 'Redis', - icon: , - color: '#DC382D', - description: 'In-memory database for data structure storage', - versions: ['7.2', '7.0', '6.2'], - }, - { - name: 'MongoDB', - icon: , - color: '#47A248', - description: 'Document-oriented NoSQL database', - versions: ['7.0', '6.0', '5.0'], - }, -] as const; - const containerVariants = { hidden: { opacity: 0 }, visible: { @@ -83,6 +52,8 @@ export function DatabaseSelectionStep({ form, isSubmitting }: Props) { control, } = form; + const providers = databaseRegistry.getAll(); + return ( - {databases.map((database, index) => ( + {providers.map((provider, index) => ( onChange(database.name)} + onClick={() => onChange(provider.id)} disabled={isSubmitting} variants={itemVariants} > @@ -130,20 +101,19 @@ export function DatabaseSelectionStep({ form, isSubmitting }: Props) { 'group-hover:scale-110', 'group-active:scale-90', )} - style={{ backgroundColor: database.color }} + style={{ backgroundColor: provider.color }} animate={{ - rotate: - value === database.name ? [0, -5, 5, -5, 0] : 0, + rotate: value === provider.id ? [0, -5, 5, -5, 0] : 0, }} transition={{ duration: 0.5, ease: 'easeInOut', }} > - {database.icon} + {provider.icon} - {database.name} + {provider.name}
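For orientation, the pattern this diff introduces (registry → provider → `buildDockerArgs`) can be exercised on its own. The following is a minimal sketch, not part of the diff, assuming the `DatabaseProvider` and `DockerRunArgs` shapes shown above; the config values and the `'PostgreSQL'` lookup key are illustrative only:

```typescript
import { databaseRegistry } from '@/features/databases/registry/database-registry';

// Example values shaped like the wizard's `containerConfiguration` section (hypothetical).
const config = {
  name: 'my-postgres-db',
  port: 5432,
  version: '16',
  username: 'postgres',
  password: 'example-password',
  persistData: true,
};

// Look up the provider by its id (the same string stored as `dbType`).
const provider = databaseRegistry.get('PostgreSQL');
if (!provider) throw new Error('Provider not registered');

// The provider owns all Docker-specific knowledge; callers stay generic.
const args = provider.buildDockerArgs(config);

// Rough single-line preview of the resulting command
// (the review step formats the same data across multiple lines).
const preview = [
  'docker run -d',
  `--name ${config.name}`,
  ...args.ports.map((p) => `-p ${p.host}:${p.container}`),
  ...Object.entries(args.envVars).map(([key, value]) => `-e ${key}=${value}`),
  ...args.volumes.map((v) => `-v ${v.name}:${v.path}`),
  args.image,
  ...args.command,
].join(' ');

console.log(preview);
```

Because the wizard, the review step, and the edit form all go through this same provider call, adding a database type comes down to writing one provider class and registering it in `createDatabaseRegistry()`.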
diff --git a/src/pages/create-container/steps/review-step.tsx b/src/pages/create-container/steps/review-step.tsx index 6c22430..e563415 100644 --- a/src/pages/create-container/steps/review-step.tsx +++ b/src/pages/create-container/steps/review-step.tsx @@ -1,11 +1,12 @@ import { motion } from 'framer-motion'; import { UseFormReturn } from 'react-hook-form'; +import { databaseRegistry } from '@/features/databases/registry/database-registry'; import { Card, CardContent } from '../../../shared/components/ui/card'; import { CodeBlock } from '../../../shared/components/ui/code-block'; -import { CreateDatabaseFormValidation } from '../schemas/database-form.schema'; +import { CreateDatabaseFormData } from '../hooks/use-container-creation-wizard'; interface Props { - form: UseFormReturn; + form: UseFormReturn; } const containerVariants = { @@ -33,158 +34,60 @@ const itemVariants = { }, }; -// Function to generate Docker command based on configuration -function generateDockerCommand(formData: CreateDatabaseFormValidation): string { +/** + * Generate Docker command preview from form data using provider + */ +function generateDockerCommand(formData: CreateDatabaseFormData): string { const { databaseSelection, containerConfiguration } = formData; const { dbType } = databaseSelection; - const { - name, - port, - version, - username, - password, - databaseName, - persistData, - enableAuth, - postgresSettings, - mysqlSettings, - redisSettings, - mongoSettings, - } = containerConfiguration; - - // Command lines formatted for better readability + + if (!dbType) { + return '# Please select a database type first'; + } + + // Get the provider for this database type + const provider = databaseRegistry.get(dbType); + if (!provider) { + return `# Error: No provider found for ${dbType}`; + } + + // Let the provider build the Docker arguments + const dockerArgs = provider.buildDockerArgs(containerConfiguration); + const { name } = containerConfiguration; + + // Format the command nicely for display const lines: string[] = ['docker run -d \\']; - // Add container name + // Container name lines.push(` --name ${name} \\`); - // Add port mapping and other configurations based on database type - switch (dbType) { - case 'PostgreSQL': - lines.push(` -p ${port}:5432 \\`); - - // Basic environment variables - lines.push(` -e POSTGRES_USER=${username || 'postgres'} \\`); - lines.push(` -e POSTGRES_PASSWORD=${password} \\`); - if (databaseName) { - lines.push(` -e POSTGRES_DB=${databaseName} \\`); - } - - // PostgreSQL-specific configurations - if (postgresSettings?.initdbArgs) { - lines.push( - ` -e POSTGRES_INITDB_ARGS="${postgresSettings.initdbArgs}" \\`, - ); - } - if (postgresSettings?.hostAuthMethod) { - lines.push( - ` -e POSTGRES_HOST_AUTH_METHOD=${postgresSettings.hostAuthMethod} \\`, - ); - } - if (postgresSettings?.sharedPreloadLibraries) { - lines.push( - ` -e POSTGRES_SHARED_PRELOAD_LIBRARIES=${postgresSettings.sharedPreloadLibraries} \\`, - ); - } - - // Data persistence - if (persistData) { - lines.push(` -v ${name}-data:/var/lib/postgresql/data \\`); - } - - lines.push(` postgres:${version}`); - break; - - case 'MySQL': - lines.push(` -p ${port}:3306 \\`); - - // Basic environment variables - lines.push(` -e MYSQL_ROOT_PASSWORD=${password} \\`); - if (databaseName) { - lines.push(` -e MYSQL_DATABASE=${databaseName} \\`); - } - - // MySQL-specific configurations - if (mysqlSettings?.rootHost) { - lines.push(` -e MYSQL_ROOT_HOST=${mysqlSettings.rootHost} \\`); - } - if (mysqlSettings?.characterSet) { - 
lines.push(` -e MYSQL_CHARSET=${mysqlSettings.characterSet} \\`); - } - if (mysqlSettings?.collation) { - lines.push(` -e MYSQL_COLLATION=${mysqlSettings.collation} \\`); - } - if (mysqlSettings?.sqlMode) { - lines.push(` -e MYSQL_SQL_MODE=${mysqlSettings.sqlMode} \\`); - } - - // Data persistence - if (persistData) { - lines.push(` -v ${name}-data:/var/lib/mysql \\`); - } - - lines.push(` mysql:${version}`); - break; - - case 'Redis': - lines.push(` -p ${port}:6379 \\`); - - // Data persistence - if (persistData) { - lines.push(` -v ${name}-data:/data \\`); - } - - // Redis command with specific configurations - const redisCommand = 'redis-server'; - const redisArgs: string[] = []; - - if (password) { - redisArgs.push(`--requirepass ${password}`); - } - if (redisSettings?.maxMemory) { - redisArgs.push(`--maxmemory ${redisSettings.maxMemory}`); - } - if (redisSettings?.maxMemoryPolicy) { - redisArgs.push(`--maxmemory-policy ${redisSettings.maxMemoryPolicy}`); - } - if (redisSettings?.appendOnly) { - redisArgs.push('--appendonly yes'); - } - - if (redisArgs.length > 0) { - lines.push(` redis:${version} ${redisCommand} ${redisArgs.join(' ')}`); - } else { - lines.push(` redis:${version}`); - } - break; - - case 'MongoDB': - lines.push(` -p ${port}:27017 \\`); - - // Environment variables for authentication - if (enableAuth && username && password) { - lines.push(` -e MONGO_INITDB_ROOT_USERNAME=${username} \\`); - lines.push(` -e MONGO_INITDB_ROOT_PASSWORD=${password} \\`); - } - if (databaseName) { - lines.push(` -e MONGO_INITDB_DATABASE=${databaseName} \\`); - } - - // MongoDB-specific configurations - if (mongoSettings?.authSource) { - lines.push(` -e MONGO_AUTH_SOURCE=${mongoSettings.authSource} \\`); - } - if (mongoSettings?.oplogSize) { - lines.push(` -e MONGO_OPLOG_SIZE=${mongoSettings.oplogSize} \\`); - } - - // Data persistence - if (persistData) { - lines.push(` -v ${name}-data:/data/db \\`); - } - - lines.push(` mongo:${version}`); - break; + // Port mappings + if (dockerArgs.ports && dockerArgs.ports.length > 0) { + for (const port of dockerArgs.ports) { + lines.push(` -p ${port.host}:${port.container} \\`); + } + } + + // Environment variables + if (dockerArgs.envVars && Object.keys(dockerArgs.envVars).length > 0) { + for (const [key, value] of Object.entries(dockerArgs.envVars)) { + lines.push(` -e ${key}=${value} \\`); + } + } + + // Volumes + if (dockerArgs.volumes && dockerArgs.volumes.length > 0) { + for (const volume of dockerArgs.volumes) { + lines.push(` -v ${volume.name}:${volume.path} \\`); + } + } + + // Image with tag + lines.push(` ${dockerArgs.image}`); + + // Command arguments (if any) + if (dockerArgs.command && dockerArgs.command.length > 0) { + lines[lines.length - 1] += ` ${dockerArgs.command.join(' ')}`; } return lines.join('\n'); diff --git a/src/pages/edit-container/components/EditContainerForm.tsx b/src/pages/edit-container/components/EditContainerForm.tsx index 103f4d8..2aafa5f 100644 --- a/src/pages/edit-container/components/EditContainerForm.tsx +++ b/src/pages/edit-container/components/EditContainerForm.tsx @@ -1,34 +1,48 @@ import { Copy, Loader2, Save, X } from 'lucide-react'; import { toast } from 'sonner'; -import { ContainerService } from '../../../features/containers/services/container.service'; -import { Badge } from '../../../shared/components/ui/badge'; -import { Button } from '../../../shared/components/ui/button'; -import { Input } from '../../../shared/components/ui/input'; -import { Label } from '../../../shared/components/ui/label'; 
-import { useContainerEdit } from '../hooks/use-container-edit'; +import { + DynamicFieldGroups, + DynamicFormSection, +} from '@/features/databases/components/dynamic-form-section'; +import { databaseRegistry } from '@/features/databases/registry/database-registry'; +import { + Accordion, + AccordionContent, + AccordionItem, + AccordionTrigger, +} from '@/shared/components/ui/accordion'; +import { Badge } from '@/shared/components/ui/badge'; +import { Button } from '@/shared/components/ui/button'; +import { Checkbox } from '@/shared/components/ui/checkbox'; +import { + Form, + FormControl, + FormField, + FormItem, + FormLabel, +} from '@/shared/components/ui/form'; +import { Input } from '@/shared/components/ui/input'; +import { useDatabaseEditWizard } from '../hooks/use-database-edit-wizard'; interface EditContainerFormProps { containerId: string; } -/** - * Presentation component for editing container - * Responsibility: Only rendering and UI events - */ export function EditContainerForm({ containerId }: EditContainerFormProps) { const { container, loading, saving, form, save, cancel } = - useContainerEdit(containerId); + useDatabaseEditWizard(containerId); const { - register, handleSubmit, - formState: { errors, isDirty }, + formState: { isDirty }, } = form; + const provider = container ? databaseRegistry.get(container.dbType) : null; + const handleCopyConnectionString = async () => { - if (!container) return; + if (!container || !provider) return; - const connectionString = ContainerService.getConnectionString(container); + const connectionString = provider.getConnectionString(container); try { await navigator.clipboard.writeText(connectionString); @@ -47,182 +61,179 @@ export function EditContainerForm({ containerId }: EditContainerFormProps) { ); } - if (!container) { + if (!container || !provider) { return (
-

Database not found

+

+ {!container ? 'Database not found' : 'Provider not found'} +

); } return ( -
- {/* Header */} -
-
-
-

{container.name}

-
- {container.dbType} - - {container.status} - +
+
+ {/* Header */} +
+
+
+

{container.name}

+
+ {container.dbType} + + {container.status} + +
-
- {/* Form Content */} -
- - {/* Container Info */} -
-
- - - {errors.name && ( -

- {errors.name.message} -

+ {/* Form Content - Same structure as create form but without wizard */} +
+ + + {/* Section 1: Basic Container Configuration */} + + + Basic Configuration + + + {/* Dynamic Basic Fields from Provider (includes name, port, version, etc.) */} + + + {/* Persistence Option */} +
+ ( + + + + +
+ + Persist data with a Docker volume +

+ Data will be preserved when the container is removed

+
+
+ )} + /> +
+
+
+ + {/* Section 2: Authentication (Dynamic from Provider) */} + {provider.requiresAuth() && ( + + + Authentication + + + + + )} -
-
- - - {errors.port && ( -

- {errors.port.message} -

+ {/* Section 3: Advanced Configuration (Dynamic from Provider) */} + {provider.getAdvancedFields().length > 0 && ( + + + Advanced Configuration + + + + + )} -
-
- {/* Database Credentials */} - {container.dbType !== 'Redis' && ( -
-

Database Credentials

- -
- - - {errors.username && ( -

- {errors.username.message} -

- )} -
- -
- - - {errors.password && ( -

- {errors.password.message} -

- )} -

- Will only be updated if you provide a new password -

-
+ {/* Section 4: Connection String */} + + + Connection String + + +
+ + +
+
+
+ + +
-
- - - {errors.databaseName && ( -

- {errors.databaseName.message} -

- )} -
-
- )} - - {/* Connection String */} -
- -
- - -
+ {/* Footer */} +
+
+ +
- -
- - {/* Footer */} -
-
- -
-
+ ); } diff --git a/src/pages/edit-container/hooks/use-container-edit.ts b/src/pages/edit-container/hooks/use-container-edit.ts deleted file mode 100644 index af2637d..0000000 --- a/src/pages/edit-container/hooks/use-container-edit.ts +++ /dev/null @@ -1,141 +0,0 @@ -import { zodResolver } from '@hookform/resolvers/zod'; -import { emit } from '@tauri-apps/api/event'; -import { getCurrentWindow } from '@tauri-apps/api/window'; -import { useCallback, useEffect, useState } from 'react'; -import { useForm } from 'react-hook-form'; -import { z } from 'zod'; -import { useContainerActions } from '../../../features/containers/hooks/use-container-actions'; -import type { - Container, - UpdateContainerRequest, -} from '../../../shared/types/container'; - -const editContainerSchema = z.object({ - name: z.string().min(1, 'Name is required'), - port: z - .number() - .min(1, 'Port must be greater than 0') - .max(65535, 'Port must be less than 65535'), - username: z.string().optional(), - password: z.string().optional(), - databaseName: z.string().optional(), -}); - -type EditContainerFormData = z.infer; - -/** - * Hook to manage container editing - * Responsibility: Loading, updating and submission logic - */ -export function useContainerEdit(containerId: string) { - const [container, setContainer] = useState(null); - const [loading, setLoading] = useState(true); - const [saving, setSaving] = useState(false); - - const { getById, update } = useContainerActions(); - - // Form setup - const form = useForm({ - resolver: zodResolver(editContainerSchema), - defaultValues: { - name: '', - port: 5432, - username: '', - password: '', - databaseName: '', - }, - }); - - const { reset } = form; - - /** - * Load container by ID - */ - const loadContainer = useCallback(async () => { - setLoading(true); - try { - const loadedContainer = await getById(containerId); - setContainer(loadedContainer); - - // Update form with container data - reset({ - name: loadedContainer.name, - port: loadedContainer.port, - username: loadedContainer.username || '', - password: loadedContainer.password || '', - databaseName: loadedContainer.databaseName || '', - }); - } catch (error) { - console.error('Error loading container:', error); - } finally { - setLoading(false); - } - }, [containerId, getById, reset]); - - /** - * Initial load - */ - useEffect(() => { - loadContainer(); - }, [loadContainer]); - - /** - * Cancel and close window - */ - const cancel = useCallback(async () => { - try { - const currentWindow = getCurrentWindow(); - await currentWindow.close(); - } catch (error) { - console.error('Error closing window:', error); - } - }, []); - - /** - * Save changes and close window - */ - const save = useCallback( - async (data: EditContainerFormData) => { - if (!container) return; - - setSaving(true); - try { - const updateRequest: UpdateContainerRequest = { - containerId: container.id, - name: data.name, - port: data.port, - username: data.username, - password: data.password, - databaseName: data.databaseName, - }; - - const updatedContainer = await update(updateRequest); - - // Emit event to notify main window - try { - await emit('container-updated', { container: updatedContainer }); - } catch (eventError) { - console.warn('Error emitting event:', eventError); - } - - // Close window - const currentWindow = getCurrentWindow(); - await currentWindow.close(); - } catch (error) { - console.error('Error updating container:', error); - } finally { - setSaving(false); - } - }, - [container, update], - ); - - return { - container, - 
loading, - saving, - form, - save, - cancel, - }; -} diff --git a/src/pages/edit-container/hooks/use-database-edit-wizard.ts b/src/pages/edit-container/hooks/use-database-edit-wizard.ts new file mode 100644 index 0000000..7fd54a7 --- /dev/null +++ b/src/pages/edit-container/hooks/use-database-edit-wizard.ts @@ -0,0 +1,215 @@ +import { emit } from '@tauri-apps/api/event'; +import { getCurrentWindow } from '@tauri-apps/api/window'; +import { useCallback, useEffect, useState } from 'react'; +import { useForm } from 'react-hook-form'; +import { databasesApi } from '@/features/databases/api/databases.api'; +import { databaseRegistry } from '@/features/databases/registry/database-registry'; +import type { DockerRunRequest } from '@/features/databases/types/docker.types'; +import type { Container } from '@/shared/types/container'; + +/** + * Form data structure for editing + * Uses the same structure as creation but all fields are optional for updates + */ +export interface EditDatabaseFormData { + containerConfiguration: Record; +} + +/** + * Hook to manage database container editing with provider-based system + * Uses the new generic update API with providers + */ +export function useDatabaseEditWizard(containerId: string) { + const [container, setContainer] = useState(null); + const [loading, setLoading] = useState(true); + const [saving, setSaving] = useState(false); + + // Form setup - NO ZOD RESOLVER + const form = useForm({ + defaultValues: { + containerConfiguration: {}, + }, + mode: 'onChange', + }); + + const { setValue } = form; + + /** + * Load container by ID and populate form + */ + const loadContainer = useCallback(async () => { + setLoading(true); + try { + const loadedContainer = await databasesApi.getById(containerId); + setContainer(loadedContainer); + + // Get the provider for this database type + const provider = databaseRegistry.get(loadedContainer.dbType); + if (!provider) { + throw new Error(`No provider found for ${loadedContainer.dbType}`); + } + + // Populate form with current container data + setValue('containerConfiguration.name', loadedContainer.name); + setValue('containerConfiguration.port', loadedContainer.port); + setValue('containerConfiguration.version', loadedContainer.version); + setValue( + 'containerConfiguration.persistData', + loadedContainer.persistData, + ); + setValue('containerConfiguration.enableAuth', loadedContainer.enableAuth); + setValue('containerConfiguration.username', loadedContainer.username); + setValue('containerConfiguration.password', loadedContainer.password); + setValue( + 'containerConfiguration.databaseName', + loadedContainer.databaseName, + ); + setValue( + 'containerConfiguration.maxConnections', + loadedContainer.maxConnections, + ); + + console.log('✅ Loaded container for editing:', loadedContainer); + } catch (error) { + console.error('Error loading container:', error); + throw error; + } finally { + setLoading(false); + } + }, [containerId, setValue]); + + /** + * Initial load + */ + useEffect(() => { + loadContainer(); + }, [loadContainer]); + + /** + * Cancel and close window + */ + const cancel = useCallback(async () => { + try { + const currentWindow = getCurrentWindow(); + await currentWindow.close(); + } catch (error) { + console.error('Error closing window:', error); + } + }, []); + + /** + * Transform form data to Docker Run Request using provider + */ + const transformFormToDockerRequest = useCallback( + (data: EditDatabaseFormData, container: Container): DockerRunRequest => { + const { containerConfiguration } = 
data; + + if (!container) { + throw new Error('Container not loaded'); + } + + // Validate required fields + if (!containerConfiguration.port) { + throw new Error('Port is required'); + } + + // Get the provider for this database type + const provider = databaseRegistry.get(container.dbType); + if (!provider) { + throw new Error( + `No provider found for database type: ${container.dbType}`, + ); + } + + // Let the provider build the Docker arguments + const dockerArgs = provider.buildDockerArgs(containerConfiguration); + + return { + name: containerConfiguration.name, + dockerArgs, + metadata: { + id: container.id, // Keep the same ID + dbType: container.dbType, + version: containerConfiguration.version, + port: containerConfiguration.port, + username: containerConfiguration.username, + password: containerConfiguration.password || container.password || '', + databaseName: containerConfiguration.databaseName, + persistData: containerConfiguration.persistData ?? true, + enableAuth: containerConfiguration.enableAuth ?? true, + maxConnections: containerConfiguration.maxConnections, + }, + }; + }, + [], + ); + + /** + * Save changes and close window + */ + const save = useCallback( + async (data: EditDatabaseFormData) => { + if (!container) { + console.error('❌ No container loaded'); + return; + } + + setSaving(true); + try { + const { containerConfiguration } = data; + + // Get the provider for validation + const provider = databaseRegistry.get(container.dbType); + if (!provider) { + throw new Error( + `No provider found for database type: ${container.dbType}`, + ); + } + + // Validate with provider before updating + const validation = provider.validateConfig(containerConfiguration); + if (!validation.valid) { + const errorMessage = validation.errors.join('\n'); + console.error('❌ Validation errors:', validation.errors); + alert(`Validation failed:\n${errorMessage}`); + throw new Error(`Validation failed: ${errorMessage}`); + } + + const dockerRequest = transformFormToDockerRequest(data, container); + + // Use the new unified databases API + console.log('🔄 Updating container with Docker args:', dockerRequest); + const updatedContainer = await databasesApi.update( + container.id, + dockerRequest, + ); + + // Emit event to notify main window + try { + await emit('container-updated', { container: updatedContainer }); + } catch (eventError) { + console.warn('Error emitting event:', eventError); + } + + // Close window + const currentWindow = getCurrentWindow(); + await currentWindow.close(); + } catch (error) { + console.error('Error updating container:', error); + throw error; + } finally { + setSaving(false); + } + }, + [container, transformFormToDockerRequest], + ); + + return { + container, + loading, + saving, + form, + save, + cancel, + }; +} diff --git a/src/pages/main/MainPage.tsx b/src/pages/main/MainPage.tsx index fce0572..b182619 100644 --- a/src/pages/main/MainPage.tsx +++ b/src/pages/main/MainPage.tsx @@ -1,5 +1,4 @@ import { Toaster } from 'sonner'; -import { DatabaseConfigPanel } from '../../shared/components/DatabaseConfigPanel'; import { DeleteConfirmationDialog } from '../../shared/components/DeleteConfirmationDialog'; import { DockerUnavailableOverlay } from '../../shared/components/DockerUnavailableOverlay'; import { DatabaseManager } from './components/DatabaseManager'; @@ -28,15 +27,6 @@ export function MainPage() { disabled={page.containersLoading || !page.isDockerAvailable} /> - { - await page.updateContainer(command); - }} - /> - ( - null, - ); const [containerToDelete, 
setContainerToDelete] = useState( null, ); @@ -129,22 +125,6 @@ export function useMainPage() { setContainerToDelete(null); }, []); - /** - * Open configuration panel - */ - const openConfigPanel = useCallback((container: Container) => { - setSelectedContainer(container); - setConfigPanelOpen(true); - }, []); - - /** - * Close configuration panel - */ - const closeConfigPanel = useCallback(() => { - setConfigPanelOpen(false); - setSelectedContainer(null); - }, []); - return { // App state containers: app.containers, @@ -156,7 +136,6 @@ export function useMainPage() { // App actions refreshDockerStatus: app.refreshDockerStatus, - updateContainer: app.updateContainer, // Window navigation openCreateWindow, @@ -168,13 +147,6 @@ export function useMainPage() { handleConfirmDelete, handleCancelDelete, - // Config panel - configPanelOpen, - selectedContainer, - openConfigPanel, - closeConfigPanel, - setConfigPanelOpen, - // Delete dialog deleteDialogOpen, containerToDelete, diff --git a/src/shared/components/DatabaseConfigPanel.tsx b/src/shared/components/DatabaseConfigPanel.tsx deleted file mode 100644 index 46f08ca..0000000 --- a/src/shared/components/DatabaseConfigPanel.tsx +++ /dev/null @@ -1,705 +0,0 @@ -import { - AlertTriangle, - CheckCircle, - Copy, - Database, - Eye, - EyeOff, - Network, - RotateCcw, - Save, - Settings, - Shield, -} from 'lucide-react'; -import { useEffect, useState } from 'react'; -import { Container, UpdateContainerRequest } from '../types/container'; -import { Alert, AlertDescription } from './ui/alert'; -import { Badge } from './ui/badge'; -import { Button } from './ui/button'; -import { Card, CardContent, CardHeader, CardTitle } from './ui/card'; -import { Dialog, DialogContent, DialogHeader, DialogTitle } from './ui/dialog'; -import { Input } from './ui/input'; -import { Label } from './ui/label'; -import { - Select, - SelectContent, - SelectItem, - SelectTrigger, - SelectValue, -} from './ui/select'; -import { Switch } from './ui/switch'; -import { Tabs, TabsContent, TabsList, TabsTrigger } from './ui/tabs'; - -interface DatabaseConfigPanelProps { - open: boolean; - onOpenChange: (open: boolean) => void; - container: Container | null; - onContainerUpdate: (command: UpdateContainerRequest) => Promise; -} - -const databaseIcons = { - PostgreSQL: '🐘', - MySQL: '🐬', - Redis: '🔴', - MongoDB: '🍃', -}; - -export function DatabaseConfigPanel({ - open, - onOpenChange, - container, - onContainerUpdate, -}: DatabaseConfigPanelProps) { - const [config, setConfig] = useState({ - name: '', - port: '', - username: '', - password: '', - databaseName: '', - version: '', - maxConnections: '', - enableAuth: true, - persistData: true, - autoStart: false, - restartPolicy: 'unless-stopped', - }); - - const [originalConfig, setOriginalConfig] = useState(config); - const [showPassword, setShowPassword] = useState(false); - const [hasChanges, setHasChanges] = useState(false); - const [validationErrors, setValidationErrors] = useState< - Record - >({}); - const [isSaving, setIsSaving] = useState(false); - const [saveSuccess, setSaveSuccess] = useState(false); - - // Initialize config when container changes - useEffect(() => { - if (container) { - const initialConfig = { - name: container.name || '', - port: container.port?.toString() || '', - username: container.username || getDefaultUsername(container.dbType), - password: container.password || '', - databaseName: - container.databaseName || getDefaultDatabaseName(container.dbType), - version: container.version || '', - maxConnections: 
container.maxConnections?.toString() || '', - enableAuth: container.enableAuth ?? true, - persistData: container.persistData ?? true, - autoStart: false, - restartPolicy: 'unless-stopped', - }; - setConfig(initialConfig); - setOriginalConfig(initialConfig); - setHasChanges(false); - setValidationErrors({}); - setSaveSuccess(false); - } - }, [container]); - - // Track changes - useEffect(() => { - const changed = JSON.stringify(config) !== JSON.stringify(originalConfig); - setHasChanges(changed); - if (changed) { - setSaveSuccess(false); - } - }, [config, originalConfig]); - - const getDefaultUsername = (dbType: string) => { - switch (dbType) { - case 'PostgreSQL': - return 'postgres'; - case 'MySQL': - return 'root'; - case 'MongoDB': - return 'admin'; - case 'Redis': - return ''; - default: - return ''; - } - }; - - const getDefaultDatabaseName = (dbType: string) => { - switch (dbType) { - case 'PostgreSQL': - return 'postgres'; - case 'MySQL': - return 'mysql'; - case 'MongoDB': - return 'admin'; - case 'Redis': - return ''; - default: - return ''; - } - }; - - const validateConfig = () => { - const errors: Record = {}; - - // Validate container name - if (!config.name.trim()) { - errors.name = 'Container name is required'; - } else if (!/^[a-zA-Z0-9][a-zA-Z0-9_.-]*$/.test(config.name)) { - errors.name = 'Invalid container name format'; - } - - // Validate port - const port = Number.parseInt(config.port); - if (!config.port) { - errors.port = 'Port is required'; - } else if (Number.isNaN(port) || port < 1 || port > 65535) { - errors.port = 'Port must be between 1 and 65535'; - } else if (port < 1024 && port !== container?.port) { - errors.port = 'Ports below 1024 require elevated privileges'; - } - - // Validate max connections - if (config.maxConnections) { - const maxConn = Number.parseInt(config.maxConnections); - if (Number.isNaN(maxConn) || maxConn < 1) { - errors.maxConnections = 'Max connections must be a positive number'; - } - } - - setValidationErrors(errors); - return Object.keys(errors).length === 0; - }; - - const handleSave = async () => { - if (!validateConfig() || !container) { - return; - } - - setIsSaving(true); - - try { - const command: UpdateContainerRequest = { - containerId: container.id, - name: config.name !== originalConfig.name ? config.name : undefined, - port: - config.port !== originalConfig.port - ? Number.parseInt(config.port) - : undefined, - username: - config.username !== originalConfig.username - ? config.username - : undefined, - password: - config.password !== originalConfig.password - ? config.password - : undefined, - databaseName: - config.databaseName !== originalConfig.databaseName - ? config.databaseName - : undefined, - maxConnections: - config.maxConnections !== originalConfig.maxConnections - ? Number.parseInt(config.maxConnections) - : undefined, - enableAuth: - config.enableAuth !== originalConfig.enableAuth - ? config.enableAuth - : undefined, - persistData: - config.persistData !== originalConfig.persistData - ? config.persistData - : undefined, - restartPolicy: - config.restartPolicy !== originalConfig.restartPolicy - ? config.restartPolicy - : undefined, - autoStart: - config.autoStart !== originalConfig.autoStart - ? 
config.autoStart - : undefined, - }; - - await onContainerUpdate(command); - setOriginalConfig(config); - setHasChanges(false); - setSaveSuccess(true); - - // Hide success message after 3 seconds - setTimeout(() => setSaveSuccess(false), 3000); - } catch (error) { - console.error('Failed to save configuration:', error); - setValidationErrors({ general: `Error: ${error}` }); - } finally { - setIsSaving(false); - } - }; - - const handleReset = () => { - setConfig(originalConfig); - setValidationErrors({}); - setSaveSuccess(false); - }; - - const copyConnectionString = async () => { - let connectionString = ''; - const host = 'localhost'; - const port = config.port; - - switch (container?.dbType) { - case 'PostgreSQL': - connectionString = `postgresql://${config.username}:${config.password}@${host}:${port}/${config.databaseName}`; - break; - case 'MySQL': - connectionString = `mysql://${config.username}:${config.password}@${host}:${port}/${config.databaseName}`; - break; - case 'MongoDB': - connectionString = `mongodb://${config.username}:${config.password}@${host}:${port}/${config.databaseName}`; - break; - case 'Redis': - connectionString = `redis://${config.password ? `:${config.password}@` : ''}${host}:${port}`; - break; - } - - try { - await navigator.clipboard.writeText(connectionString); - } catch (error) { - console.error('Failed to copy to clipboard:', error); - } - }; - - const generatePassword = () => { - const chars = - 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789!@#$%^&*'; - let result = ''; - for (let i = 0; i < 16; i++) { - result += chars.charAt(Math.floor(Math.random() * chars.length)); - } - setConfig({ ...config, password: result }); - }; - - if (!container) return null; - - return ( - - - - -
[… several hundred lines of deleted JSX omitted: the markup was stripped during extraction and cannot be recovered verbatim. The removed DatabaseConfigPanel dialog rendered a header (database icon, "Database Configuration" title, status and type badges), save-success and validation-error alerts, and General / Connection / Security tabs containing the container-name, auto-start, port, max-connections, username and database-name fields, a masked connection-string preview, the authentication and persist-data switches, and a password input with show/hide and generate controls, followed by a footer with an "Unsaved changes" badge and Reset / Save buttons.]
- ); -} diff --git a/src/shared/components/ui/checkbox.tsx b/src/shared/components/ui/checkbox.tsx new file mode 100644 index 0000000..60a089a --- /dev/null +++ b/src/shared/components/ui/checkbox.tsx @@ -0,0 +1,29 @@ +import * as CheckboxPrimitive from '@radix-ui/react-checkbox'; +import { CheckIcon } from 'lucide-react'; +import * as React from 'react'; +import { cn } from '@/shared/utils/cn'; + +function Checkbox({ + className, + ...props +}: React.ComponentProps) { + return ( + + + + + + ); +} + +export { Checkbox }; diff --git a/src/shared/types/container.ts b/src/shared/types/container.ts index 570c6f0..969c5ca 100644 --- a/src/shared/types/container.ts +++ b/src/shared/types/container.ts @@ -1,28 +1,3 @@ -export interface ContainerSettings { - postgres?: { - initdb_args?: string; - host_auth_method: string; - shared_preload_libraries?: string; - }; - mysql?: { - root_host: string; - character_set: string; - collation: string; - sql_mode: string; - }; - redis?: { - max_memory: string; - max_memory_policy: string; - append_only: boolean; - require_pass: boolean; - }; - mongo?: { - auth_source: string; - enable_sharding: boolean; - oplog_size: string; - }; -} - export type ContainerStatus = | 'running' | 'stopped' @@ -32,6 +7,10 @@ export type ContainerStatus = export type DatabaseType = 'PostgreSQL' | 'MySQL' | 'Redis' | 'MongoDB'; +/** + * Container/Database representation + * This is what gets stored and displayed + */ export interface Container { id: string; name: string; @@ -47,62 +26,4 @@ export interface Container { databaseName?: string; persistData: boolean; enableAuth: boolean; - settings?: ContainerSettings; -} - -export interface CreateContainerRequest { - name: string; - dbType: DatabaseType; - version: string; - port: number; - username?: string; - password: string; - databaseName?: string; - persistData: boolean; - enableAuth: boolean; - maxConnections?: number; - // Database-specific settings - postgresSettings?: { - initdbArgs?: string; - hostAuthMethod?: string; - sharedPreloadLibraries?: string; - }; - mysqlSettings?: { - rootHost?: string; - characterSet?: string; - collation?: string; - sqlMode?: string; - }; - redisSettings?: { - maxMemory?: string; - maxMemoryPolicy?: string; - appendOnly?: boolean; - requirePass?: boolean; - }; - mongoSettings?: { - authSource?: string; - enableSharding?: boolean; - oplogSize?: string; - }; -} - -export interface UpdateContainerRequest { - containerId: string; - name?: string; - port?: number; - username?: string; - password?: string; - databaseName?: string; - maxConnections?: number; - enableAuth?: boolean; - persistData?: boolean; - restartPolicy?: string; - autoStart?: boolean; -} - -export interface ContainerError { - error_type: string; - message: string; - port?: number; - details?: string; } diff --git a/src/shared/types/settings.ts b/src/shared/types/settings.ts deleted file mode 100644 index bd1060d..0000000 --- a/src/shared/types/settings.ts +++ /dev/null @@ -1,7 +0,0 @@ -export interface Settings { - theme: 'light' | 'dark' | 'system'; - autoStartContainers: boolean; - showNotifications: boolean; - refreshInterval: number; - dockerPath?: string; -} diff --git a/src/shared/utils/container.ts b/src/shared/utils/container.ts deleted file mode 100644 index 8ad34d8..0000000 --- a/src/shared/utils/container.ts +++ /dev/null @@ -1,218 +0,0 @@ -import { - Container, - ContainerStatus, - CreateContainerRequest, - DatabaseType, - UpdateContainerRequest, -} from '../types/container'; - -export const isContainerRunning = 
(container: Container): boolean => - container.status === 'running'; - -export const isContainerStopped = (container: Container): boolean => - container.status === 'stopped'; - -export const canStartContainer = (container: Container): boolean => - container.status === 'stopped'; - -export const canStopContainer = (container: Container): boolean => - container.status === 'running'; - -export const canRemoveContainer = (container: Container): boolean => - container.status === 'stopped'; - -export const getContainerIcon = (dbType: DatabaseType): string => { - const icons = { - PostgreSQL: '🐘', - MySQL: '🐬', - Redis: '🔴', - MongoDB: '🍃', - }; - return icons[dbType] || '🗄️'; -}; - -export const getContainerStatusColor = (status: ContainerStatus): string => { - const colors = { - running: 'green', - stopped: 'gray', - error: 'red', - creating: 'blue', - removing: 'orange', - }; - return colors[status] || 'gray'; -}; - -export const containerFromJSON = (data: any): Container => ({ - id: data.id, - name: data.name, - dbType: data.db_type, - version: data.version, - status: data.status, - port: data.port, - createdAt: new Date(data.created_at), - maxConnections: data.max_connections, - containerId: data.container_id, - username: data.stored_username, - password: data.stored_password, - databaseName: data.stored_database_name, - persistData: data.stored_persist_data, - enableAuth: data.stored_enable_auth, -}); - -export const createRequestToTauri = (request: CreateContainerRequest): any => ({ - name: request.name, - db_type: request.dbType, - version: request.version, - port: request.port, - username: request.username, - password: request.password, - database_name: request.databaseName, - persist_data: request.persistData, - enable_auth: request.enableAuth, - max_connections: request.maxConnections, - postgres_settings: request.postgresSettings - ? { - initdb_args: request.postgresSettings.initdbArgs, - host_auth_method: request.postgresSettings.hostAuthMethod || 'md5', - shared_preload_libraries: - request.postgresSettings.sharedPreloadLibraries, - } - : undefined, - mysql_settings: request.mysqlSettings - ? { - root_host: request.mysqlSettings.rootHost || '%', - character_set: request.mysqlSettings.characterSet || 'utf8mb4', - collation: request.mysqlSettings.collation || 'utf8mb4_unicode_ci', - sql_mode: request.mysqlSettings.sqlMode || 'TRADITIONAL', - } - : undefined, - redis_settings: request.redisSettings - ? { - max_memory: request.redisSettings.maxMemory || '256mb', - max_memory_policy: - request.redisSettings.maxMemoryPolicy || 'allkeys-lru', - append_only: request.redisSettings.appendOnly || false, - require_pass: request.redisSettings.requirePass || false, - } - : undefined, - mongo_settings: request.mongoSettings - ? 
{ - auth_source: request.mongoSettings.authSource || 'admin', - enable_sharding: request.mongoSettings.enableSharding || false, - oplog_size: request.mongoSettings.oplogSize || '512', - } - : undefined, -}); - -export const updateRequestToTauri = (request: UpdateContainerRequest): any => ({ - container_id: request.containerId, - name: request.name, - port: request.port, - username: request.username, - password: request.password, - database_name: request.databaseName, - persist_data: request.persistData, - enable_auth: request.enableAuth, - max_connections: request.maxConnections, - restart_policy: request.restartPolicy, - auto_start: request.autoStart, -}); - -export const validateCreateRequest = ( - request: CreateContainerRequest, -): string[] => { - const errors: string[] = []; - - if (!request.name.trim()) { - errors.push('Name is required'); - } - - if (request.name.length > 50) { - errors.push('Name cannot exceed 50 characters'); - } - - if (!request.password.trim()) { - errors.push('Password is required'); - } - - if (request.password.length < 4) { - errors.push('Password must be at least 4 characters'); - } - - if (request.port < 1024 || request.port > 65535) { - errors.push('Port must be between 1024 and 65535'); - } - - if (request.maxConnections && request.maxConnections < 1) { - errors.push('Maximum number of connections must be greater than 0'); - } - - return errors; -}; - -export const validateUpdateRequest = ( - request: UpdateContainerRequest, -): string[] => { - const errors: string[] = []; - - if (!request.containerId.trim()) { - errors.push('Container ID is required'); - } - - if (request.name !== undefined && !request.name.trim()) { - errors.push('Name cannot be empty'); - } - - if (request.name !== undefined && request.name.length > 50) { - errors.push('Name cannot exceed 50 characters'); - } - - if ( - request.port !== undefined && - (request.port < 1024 || request.port > 65535) - ) { - errors.push('Port must be between 1024 and 65535'); - } - - if (request.password !== undefined && request.password.length < 4) { - errors.push('Password must be at least 4 characters'); - } - - if (request.maxConnections !== undefined && request.maxConnections < 1) { - errors.push('Maximum number of connections must be greater than 0'); - } - - return errors; -}; - -export const generateConnectionString = (container: Container): string => { - const host = 'localhost'; - const port = container.port; - const username = container.username || ''; - const password = container.password || ''; - const databaseName = container.databaseName || ''; - - switch (container.dbType) { - case 'PostgreSQL': - return `postgresql://${username}:${password}@${host}:${port}/${databaseName}`; - case 'MySQL': - return `mysql://${username}:${password}@${host}:${port}/${databaseName}`; - case 'MongoDB': - return `mongodb://${username}:${password}@${host}:${port}/${databaseName}`; - case 'Redis': - return `redis://${password ? 
`:${password}@` : ''}${host}:${port}`; - default: - return ''; - } -}; - -export const copyToClipboard = async (text: string): Promise => { - try { - const { writeText } = await import('@tauri-apps/plugin-clipboard-manager'); - await writeText(text); - return true; - } catch (error) { - console.error('Failed to copy to clipboard:', error); - return false; - } -}; diff --git a/src/shared/utils/docker.ts b/src/shared/utils/docker.ts deleted file mode 100644 index 56963bf..0000000 --- a/src/shared/utils/docker.ts +++ /dev/null @@ -1,48 +0,0 @@ -import { DockerStatus } from '../types/docker'; - -export const isDockerRunning = (status: DockerStatus): boolean => - status.status === 'running'; - -export const isDockerStopped = (status: DockerStatus): boolean => - status.status === 'stopped'; - -export const hasDockerError = (status: DockerStatus): boolean => - status.status === 'error'; - -export const isDockerConnecting = (status: DockerStatus): boolean => - status.status === 'connecting'; - -export const canInteractWithDocker = (status: DockerStatus): boolean => - status.status === 'running'; - -export const getDockerStatusMessage = (status: DockerStatus): string => { - switch (status.status) { - case 'running': - return 'Docker is running correctly'; - case 'stopped': - return 'Docker daemon is not running'; - case 'error': - return status.error || 'Unknown Docker error'; - case 'connecting': - return 'Connecting to Docker...'; - default: - return 'Unknown status'; - } -}; - -export const getDockerDisplayInfo = (status: DockerStatus): string => { - if (status.status === 'running' && status.containers) { - return `${status.containers.running}/${status.containers.total} databases running`; - } - return getDockerStatusMessage(status); -}; - -export const dockerStatusFromJSON = (data: any): DockerStatus => ({ - status: data.status, - version: data.version, - host: data.host, - containers: data.containers, - images: data.images, - uptime: data.uptime, - error: data.error, -}); diff --git a/src/test/unit/database-registry.test.ts b/src/test/unit/database-registry.test.ts new file mode 100644 index 0000000..09cbc5f --- /dev/null +++ b/src/test/unit/database-registry.test.ts @@ -0,0 +1,109 @@ +import { describe, expect, it } from 'vitest'; +import { + createDatabaseRegistry, + databaseRegistry, +} from '@/features/databases/registry/database-registry'; + +describe('DatabaseRegistry', () => { + describe('Factory Pattern', () => { + it('should create a registry with providers', () => { + const customRegistry = createDatabaseRegistry(); + + expect(customRegistry).toBeDefined(); + expect(customRegistry.count()).toBeGreaterThan(0); + }); + + it('should create independent registry instances', () => { + const registry1 = createDatabaseRegistry(); + const registry2 = createDatabaseRegistry(); + + // Both should have the same providers but be different instances + expect(registry1.count()).toBe(registry2.count()); + expect(registry1).not.toBe(registry2); + }); + }); + + describe('Retrieval', () => { + it('should retrieve a registered provider by ID', () => { + const retrieved = databaseRegistry.get('PostgreSQL'); + expect(retrieved).toBeDefined(); + expect(retrieved?.id).toBe('PostgreSQL'); + }); + + it('should return undefined for non-existent provider', () => { + const retrieved = databaseRegistry.get('NonExistentDB'); + expect(retrieved).toBeUndefined(); + }); + + it('should retrieve all registered providers', () => { + const allProviders = databaseRegistry.getAll(); + 
expect(Array.isArray(allProviders)).toBe(true); + expect(allProviders.length).toBeGreaterThan(0); + }); + + it('should retrieve all provider IDs', () => { + const allIds = databaseRegistry.getAllIds(); + expect(Array.isArray(allIds)).toBe(true); + expect(allIds.length).toBeGreaterThan(0); + }); + }); + + describe('Existence Check', () => { + it('should correctly check if provider exists', () => { + expect(databaseRegistry.has('PostgreSQL')).toBe(true); + expect(databaseRegistry.has('NonExistentDB')).toBe(false); + }); + }); + + describe('Count', () => { + it('should return correct count of registered providers', () => { + const count = databaseRegistry.count(); + expect(typeof count).toBe('number'); + expect(count).toBeGreaterThan(0); + }); + }); + + describe('Real Providers', () => { + it('should have PostgreSQL provider registered', () => { + const postgres = databaseRegistry.get('PostgreSQL'); + expect(postgres).toBeDefined(); + expect(postgres?.name).toBe('PostgreSQL'); + expect(postgres?.defaultPort).toBe(5432); + }); + + it('should have MySQL provider registered', () => { + const mysql = databaseRegistry.get('MySQL'); + expect(mysql).toBeDefined(); + expect(mysql?.name).toBe('MySQL'); + expect(mysql?.defaultPort).toBe(3306); + }); + + it('should have Redis provider registered', () => { + const redis = databaseRegistry.get('Redis'); + expect(redis).toBeDefined(); + expect(redis?.name).toBe('Redis'); + expect(redis?.defaultPort).toBe(6379); + }); + + it('should have MongoDB provider registered', () => { + const mongodb = databaseRegistry.get('MongoDB'); + expect(mongodb).toBeDefined(); + expect(mongodb?.name).toBe('MongoDB'); + expect(mongodb?.defaultPort).toBe(27017); + }); + + it('should have at least 4 real providers registered', () => { + // Should have at least the 4 main database providers + // (May have more if mock providers were registered in other tests) + expect(databaseRegistry.count()).toBeGreaterThanOrEqual(4); + }); + + it('should have all expected provider IDs', () => { + const ids = databaseRegistry.getAllIds(); + expect(ids).toContain('PostgreSQL'); + expect(ids).toContain('MySQL'); + expect(ids).toContain('Redis'); + expect(ids).toContain('MongoDB'); + }); + }); +}); diff --git a/src/test/unit/providers.test.ts b/src/test/unit/providers.test.ts new file mode 100644 index 0000000..f2477c9 --- /dev/null +++ b/src/test/unit/providers.test.ts @@ -0,0 +1,374 @@ +import { describe, expect, it } from 'vitest'; +import { MongoDBDatabaseProvider } from '@/features/databases/providers/mongodb.provider'; +import { MySQLDatabaseProvider } from '@/features/databases/providers/mysql.provider'; +import { PostgresDatabaseProvider } from '@/features/databases/providers/postgres.provider'; +import { RedisDatabaseProvider } from '@/features/databases/providers/redis.provider'; +import { + createMockContainer, + createMockFormConfig, + validateDockerRunArgs, +} from '../utils/test-utils'; + +describe('Database Providers', () => { + // ==================== PostgreSQL Provider ==================== + describe('PostgresDatabaseProvider', () => { + const provider = new PostgresDatabaseProvider(); + + describe('Identification', () => { + it('should have correct identification properties', () => { + expect(provider.id).toBe('PostgreSQL'); + expect(provider.name).toBe('PostgreSQL'); + expect(provider.description).toBeTruthy(); + expect(provider.color).toBe('#336791'); + }); + + it('should have correct docker configuration', () => { + expect(provider.defaultPort).toBe(5432); + 
expect(provider.containerPort).toBe(5432); + expect(provider.dataPath).toBe('/var/lib/postgresql/data'); + expect(provider.versions).toContain('16'); + }); + }); + + describe('Form Fields', () => { + it('should return basic fields with correct structure', () => { + const fields = provider.getBasicFields({ isEditMode: false }); + expect(fields).toHaveLength(3); + expect(fields.find((f) => f.name === 'name')).toBeDefined(); + expect(fields.find((f) => f.name === 'port')).toBeDefined(); + expect(fields.find((f) => f.name === 'version')).toBeDefined(); + }); + + it('should make version readonly in edit mode', () => { + const editFields = provider.getBasicFields({ isEditMode: true }); + const versionField = editFields.find((f) => f.name === 'version'); + expect(versionField?.readonly).toBe(true); + }); + + it('should return authentication fields', () => { + const fields = provider.getAuthenticationFields(); + expect(fields.length).toBeGreaterThan(0); + expect(fields.find((f) => f.name === 'username')).toBeDefined(); + expect(fields.find((f) => f.name === 'password')).toBeDefined(); + }); + + it('should return advanced fields grouped', () => { + const groups = provider.getAdvancedFields(); + expect(Array.isArray(groups)).toBe(true); + }); + }); + + describe('Docker Args Building', () => { + it('should build valid docker args with basic config', () => { + const config = createMockFormConfig('postgres', { + name: 'test-postgres', + port: 5432, + version: '16', + username: 'postgres', + password: 'testpass123', + persistData: false, + }); + + const args = provider.buildDockerArgs(config); + + expect(validateDockerRunArgs(args)).toBe(true); + expect(args.image).toBe('postgres:16'); + expect(args.envVars.POSTGRES_PASSWORD).toBe('testpass123'); + expect(args.ports).toHaveLength(1); + expect(args.ports[0].host).toBe(5432); + expect(args.ports[0].container).toBe(5432); + }); + + it('should include username env var when not default', () => { + const config = createMockFormConfig('postgres', { + username: 'customuser', + password: 'testpass123', + version: '16', + }); + + const args = provider.buildDockerArgs(config); + expect(args.envVars.POSTGRES_USER).toBe('customuser'); + }); + + it('should not include username env var when using default', () => { + const config = createMockFormConfig('postgres', { + username: 'postgres', + password: 'testpass123', + version: '16', + }); + + const args = provider.buildDockerArgs(config); + expect(args.envVars.POSTGRES_USER).toBeUndefined(); + }); + + it('should include database name when provided', () => { + const config = createMockFormConfig('postgres', { + password: 'testpass123', + databaseName: 'mydb', + version: '16', + }); + + const args = provider.buildDockerArgs(config); + expect(args.envVars.POSTGRES_DB).toBe('mydb'); + }); + + it('should create volume when persistData is true', () => { + const config = createMockFormConfig('postgres', { + name: 'test-postgres', + password: 'testpass123', + version: '16', + persistData: true, + }); + + const args = provider.buildDockerArgs(config); + expect(args.volumes).toHaveLength(1); + expect(args.volumes[0].name).toBe('test-postgres-data'); + expect(args.volumes[0].path).toBe('/var/lib/postgresql/data'); + }); + + it('should not create volume when persistData is false', () => { + const config = createMockFormConfig('postgres', { + password: 'testpass123', + version: '16', + persistData: false, + }); + + const args = provider.buildDockerArgs(config); + expect(args.volumes).toHaveLength(0); + }); + + it('should include 
advanced postgres settings when provided', () => { + const config = createMockFormConfig('postgres', { + password: 'testpass123', + version: '16', + postgresSettings: { + hostAuthMethod: 'scram-sha-256', + initdbArgs: '--encoding=UTF8', + sharedPreloadLibraries: 'pg_stat_statements', + }, + }); + + const args = provider.buildDockerArgs(config); + expect(args.envVars.POSTGRES_HOST_AUTH_METHOD).toBe('scram-sha-256'); + expect(args.envVars.POSTGRES_INITDB_ARGS).toBe('--encoding=UTF8'); + expect(args.envVars.POSTGRES_SHARED_PRELOAD_LIBRARIES).toBe( + 'pg_stat_statements', + ); + }); + }); + + describe('Validation', () => { + it('should validate valid config', () => { + const config = createMockFormConfig('postgres', { + password: 'testpass123', + version: '16', + }); + + const result = provider.validateConfig(config); + expect(result.valid).toBe(true); + expect(result.errors).toHaveLength(0); + }); + + it('should reject config with short password', () => { + const config = createMockFormConfig('postgres', { + password: 'abc', + version: '16', + }); + + const result = provider.validateConfig(config); + expect(result.valid).toBe(false); + expect(result.errors).toContain( + 'Password must be at least 4 characters', + ); + }); + + it('should reject config without version', () => { + const config = createMockFormConfig('postgres', { + password: 'testpass123', + version: '', + }); + + const result = provider.validateConfig(config); + expect(result.valid).toBe(false); + expect(result.errors.some((e) => e.includes('version'))).toBe(true); + }); + }); + + describe('Connection String', () => { + it('should generate correct connection string', () => { + const container = createMockContainer({ + username: 'postgres', + password: 'testpass', + port: 5432, + databaseName: 'testdb', + }); + + const connStr = provider.getConnectionString(container); + expect(connStr).toBe( + 'postgresql://postgres:testpass@localhost:5432/testdb', + ); + }); + + it('should use default database when not specified', () => { + const container = createMockContainer({ + username: 'postgres', + password: 'testpass', + port: 5432, + databaseName: undefined, + }); + + const connStr = provider.getConnectionString(container); + expect(connStr).toContain('/postgres'); + }); + }); + + describe('Utilities', () => { + it('should return default username', () => { + expect(provider.getDefaultUsername()).toBe('postgres'); + }); + + it('should require authentication', () => { + expect(provider.requiresAuth()).toBe(true); + }); + }); + }); + + // ==================== MySQL Provider ==================== + describe('MySQLDatabaseProvider', () => { + const provider = new MySQLDatabaseProvider(); + + it('should have correct identification', () => { + expect(provider.id).toBe('MySQL'); + expect(provider.defaultPort).toBe(3306); + expect(provider.containerPort).toBe(3306); + expect(provider.dataPath).toBe('/var/lib/mysql'); + }); + + it('should build valid docker args', () => { + const config = createMockFormConfig('mysql', { + name: 'test-mysql', + port: 3306, + version: '8.0', + username: 'root', + password: 'rootpass123', + persistData: false, + }); + + const args = provider.buildDockerArgs(config); + expect(validateDockerRunArgs(args)).toBe(true); + expect(args.image).toContain('mysql'); + expect(args.envVars.MYSQL_ROOT_PASSWORD).toBe('rootpass123'); + }); + + it('should validate config correctly', () => { + const validConfig = createMockFormConfig('mysql', { + password: 'testpass123', + version: '8.0', + }); + + const result = 
provider.validateConfig(validConfig); + expect(result.valid).toBe(true); + }); + + it('should require authentication', () => { + expect(provider.requiresAuth()).toBe(true); + }); + }); + + // ==================== Redis Provider ==================== + describe('RedisDatabaseProvider', () => { + const provider = new RedisDatabaseProvider(); + + it('should have correct identification', () => { + expect(provider.id).toBe('Redis'); + expect(provider.defaultPort).toBe(6379); + expect(provider.containerPort).toBe(6379); + expect(provider.dataPath).toBe('/data'); + }); + + it('should build valid docker args without auth', () => { + const config = createMockFormConfig('redis', { + name: 'test-redis', + port: 6379, + version: '7', + enableAuth: false, + persistData: false, + }); + + const args = provider.buildDockerArgs(config); + expect(validateDockerRunArgs(args)).toBe(true); + expect(args.image).toContain('redis'); + }); + + it('should include password in command args when auth enabled', () => { + const config = createMockFormConfig('redis', { + port: 6379, + version: '7', + enableAuth: true, + password: 'redispass123', + persistData: false, + }); + + const args = provider.buildDockerArgs(config); + expect(args.command).toContain('--requirepass'); + expect(args.command).toContain('redispass123'); + }); + + it('should not require authentication by default', () => { + expect(provider.requiresAuth()).toBe(false); + }); + }); + + // ==================== MongoDB Provider ==================== + describe('MongoDBDatabaseProvider', () => { + const provider = new MongoDBDatabaseProvider(); + + it('should have correct identification', () => { + expect(provider.id).toBe('MongoDB'); + expect(provider.defaultPort).toBe(27017); + expect(provider.containerPort).toBe(27017); + expect(provider.dataPath).toBe('/data/db'); + }); + + it('should build valid docker args with auth', () => { + const config = createMockFormConfig('mongodb', { + name: 'test-mongo', + port: 27017, + version: '7', + username: 'admin', + password: 'mongopass123', + enableAuth: true, + persistData: false, + }); + + const args = provider.buildDockerArgs(config); + expect(validateDockerRunArgs(args)).toBe(true); + expect(args.image).toContain('mongo'); + expect(args.envVars.MONGO_INITDB_ROOT_USERNAME).toBe('admin'); + expect(args.envVars.MONGO_INITDB_ROOT_PASSWORD).toBe('mongopass123'); + }); + + it('should always include auth env vars', () => { + // MongoDB provider always sets username (defaults to 'admin') and password + const config = createMockFormConfig('mongodb', { + port: 27017, + version: '7.0', + username: undefined, // Not provided + password: 'testpass123', + enableAuth: false, + persistData: false, + }); + + const args = provider.buildDockerArgs(config); + expect(validateDockerRunArgs(args)).toBe(true); + // MongoDB defaults username to 'admin' if not provided + expect(args.envVars.MONGO_INITDB_ROOT_USERNAME).toBe('admin'); + expect(args.envVars.MONGO_INITDB_ROOT_PASSWORD).toBe('testpass123'); + }); + + it('should require authentication', () => { + // MongoDB provider requires authentication + expect(provider.requiresAuth()).toBe(true); + }); + }); +}); diff --git a/src/test/utils/mock-providers.ts b/src/test/utils/mock-providers.ts new file mode 100644 index 0000000..5cf70c6 --- /dev/null +++ b/src/test/utils/mock-providers.ts @@ -0,0 +1,123 @@ +import type { DatabaseProvider } from '@/features/databases/registry/database-provider.interface'; +import type { + DockerRunArgs, + ValidationResult, +} from 
'@/features/databases/types/docker.types'; +import type { + FieldGroup, + FormField, +} from '@/features/databases/types/form.types'; +import type { Container } from '@/shared/types/container'; + +/** + * Mock Database Provider for testing + * Implements a simple provider with all required methods + */ +export class MockDatabaseProvider implements DatabaseProvider { + readonly id = 'MockDB'; + readonly name = 'Mock Database'; + readonly description = 'A mock database for testing'; + readonly icon = null; + readonly color = '#000000'; + readonly defaultPort = 5000; + readonly containerPort = 5000; + readonly dataPath = '/data'; + readonly versions = ['1.0', '2.0', '3.0']; + + getBasicFields(): FormField[] { + return [ + { + name: 'name', + label: 'Container Name', + type: 'text', + required: true, + }, + { + name: 'port', + label: 'Port', + type: 'number', + defaultValue: this.defaultPort, + required: true, + }, + { + name: 'version', + label: 'Version', + type: 'select', + options: this.versions, + defaultValue: this.versions[0], + required: true, + }, + ]; + } + + getAuthenticationFields(): FormField[] { + return [ + { + name: 'username', + label: 'Username', + type: 'text', + required: true, + }, + { + name: 'password', + label: 'Password', + type: 'password', + required: true, + }, + ]; + } + + getAdvancedFields(): FieldGroup[] { + return []; + } + + buildDockerArgs(config: any): DockerRunArgs { + return { + image: `mockdb:${config.version}`, + envVars: { + MOCK_USER: config.username, + MOCK_PASSWORD: config.password, + }, + ports: [{ host: config.port, container: this.containerPort }], + volumes: config.persistData + ? [{ name: `${config.name}-data`, path: this.dataPath }] + : [], + command: [], + }; + } + + getConnectionString(container: Container): string { + return `mockdb://localhost:${container.port}`; + } + + validateConfig(config: any): ValidationResult { + const errors: string[] = []; + + if (!config.username) { + errors.push('Username is required'); + } + + if (!config.password || config.password.length < 4) { + errors.push('Password must be at least 4 characters'); + } + + return { + valid: errors.length === 0, + errors, + }; + } + + requiresAuth(): boolean { + return true; + } +} + +/** + * Create a mock provider with custom overrides + */ +export function createMockProvider( + overrides: Partial = {}, +): DatabaseProvider { + const mock = new MockDatabaseProvider(); + return Object.assign(mock, overrides); +} diff --git a/src/test/utils/test-utils.tsx b/src/test/utils/test-utils.tsx new file mode 100644 index 0000000..735a32b --- /dev/null +++ b/src/test/utils/test-utils.tsx @@ -0,0 +1,87 @@ +import type { + Container, + ContainerStatus, + DatabaseType, +} from '@/shared/types/container'; + +/** + * Helper to create a mock Container object + */ +export function createMockContainer( + overrides: Partial = {}, +): Container { + return { + id: 'test-container-id', + name: 'test-postgres', + dbType: 'PostgreSQL' as DatabaseType, + status: 'running' as ContainerStatus, + port: 5432, + version: '16', + username: 'postgres', + password: 'test-password', + databaseName: 'testdb', + persistData: true, + enableAuth: true, + maxConnections: 100, + createdAt: new Date(), + ...overrides, + }; +} + +/** + * Helper to create mock form configuration + */ +export function createMockFormConfig(dbType: string, overrides = {}) { + const baseConfig = { + name: `test-${dbType.toLowerCase()}`, + port: 5432, + version: '16', + username: 'testuser', + password: 'testpassword', + databaseName: 
'testdb', + persistData: true, + enableAuth: true, + }; + + return { ...baseConfig, ...overrides }; +} + +/** + * Helper to validate DockerRunArgs structure + */ +export function validateDockerRunArgs(args: any): boolean { + if (!args) return false; + if (!args.image || typeof args.image !== 'string') return false; + if (!args.envVars || typeof args.envVars !== 'object') return false; + if (!Array.isArray(args.ports)) return false; + if (!Array.isArray(args.volumes)) return false; + if (!Array.isArray(args.command)) return false; + + // Validate ports structure + for (const port of args.ports) { + if ( + typeof port.host !== 'number' || + typeof port.container !== 'number' || + port.host < 0 || + port.host > 65535 || + port.container <= 0 || + port.container > 65535 + ) { + return false; + } + } + + // Validate volumes structure + for (const volume of args.volumes) { + if ( + !volume.name || + !volume.path || + typeof volume.name !== 'string' || + typeof volume.path !== 'string' + ) { + return false; + } + } + + return true; +}
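
The test utilities added above (`createMockProvider`, `createMockContainer`, `createMockFormConfig`, `validateDockerRunArgs`) are meant to be composed inside Vitest specs. Below is a minimal sketch of that composition; the file name, describe/it wording, and the specific assertions are illustrative only, derived from the mock implementations shown in this diff rather than from an existing spec in the repository.

```typescript
// src/test/unit/mock-provider.example.test.ts — hypothetical file name, for illustration
import { describe, expect, it } from 'vitest';
import { createMockProvider } from '../utils/mock-providers';
import {
  createMockContainer,
  createMockFormConfig,
  validateDockerRunArgs,
} from '../utils/test-utils';

describe('MockDatabaseProvider (usage example)', () => {
  it('builds structurally valid docker args from a form config', () => {
    const provider = createMockProvider();
    const config = createMockFormConfig('mockdb', {
      name: 'test-mock',
      port: 5000,
      version: '2.0',
      username: 'tester',
      password: 'secret123',
      persistData: true,
    });

    const args = provider.buildDockerArgs(config);

    // validateDockerRunArgs only checks the shape: image, envVars, ports, volumes, command
    expect(validateDockerRunArgs(args)).toBe(true);
    expect(args.image).toBe('mockdb:2.0');
    expect(args.volumes[0].name).toBe('test-mock-data');
  });

  it('applies overrides on top of the mock defaults', () => {
    // createMockProvider merges the overrides onto a MockDatabaseProvider instance
    const provider = createMockProvider({ defaultPort: 9999 });
    expect(provider.defaultPort).toBe(9999);
    expect(provider.requiresAuth()).toBe(true);
  });

  it('produces a connection string for a mock container', () => {
    const provider = createMockProvider();
    const container = createMockContainer({ port: 5000 });
    expect(provider.getConnectionString(container)).toBe('mockdb://localhost:5000');
  });
});
```

Keeping the structural check (`validateDockerRunArgs`) separate from value assertions mirrors how the real provider specs above first validate the overall `DockerRunArgs` shape and then assert on `args.image`, `args.envVars`, and volume names.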