From 4754b97eba2c564caa3a8c9295e47d43431d7d32 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 03:52:54 +0000 Subject: [PATCH 01/32] feat: Update Features Backup & Restore --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index f16e911..6705fdf 100644 --- a/.gitignore +++ b/.gitignore @@ -43,4 +43,5 @@ yarn.lock *.sw? landing/* .env -.pnpm-store/ \ No newline at end of file +.pnpm-store/ +.seeded \ No newline at end of file From 52c0c0781522982b138880bf44b6d28cbc0fb609 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 03:53:01 +0000 Subject: [PATCH 02/32] feat: Update Features Backup & Restore --- .../migration.sql | 41 + apps/api/prisma/schema.prisma | 44 + apps/api/src/controllers/backup.controller.ts | 898 ++++++++++++++++++ apps/api/src/routes/backup.routes.ts | 107 +++ apps/api/src/routes/index.ts | 2 + apps/web/src/components/pages/Backup.tsx | 283 ++++-- apps/web/src/services/backup.service.ts | 151 +++ docs/BACKUP_SSL_GUIDE.md | 253 +++++ 8 files changed, 1721 insertions(+), 58 deletions(-) create mode 100644 apps/api/prisma/migrations/20251006033542_add_backup_feature/migration.sql create mode 100644 apps/api/src/controllers/backup.controller.ts create mode 100644 apps/api/src/routes/backup.routes.ts create mode 100644 apps/web/src/services/backup.service.ts create mode 100644 docs/BACKUP_SSL_GUIDE.md diff --git a/apps/api/prisma/migrations/20251006033542_add_backup_feature/migration.sql b/apps/api/prisma/migrations/20251006033542_add_backup_feature/migration.sql new file mode 100644 index 0000000..12fe8b1 --- /dev/null +++ b/apps/api/prisma/migrations/20251006033542_add_backup_feature/migration.sql @@ -0,0 +1,41 @@ +-- CreateEnum +CREATE TYPE "BackupStatus" AS ENUM ('success', 'failed', 'running', 'pending'); + +-- CreateTable +CREATE TABLE "backup_schedules" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "schedule" TEXT NOT NULL, + "enabled" BOOLEAN NOT NULL DEFAULT true, + "lastRun" TIMESTAMP(3), + "nextRun" TIMESTAMP(3), + "status" "BackupStatus" NOT NULL DEFAULT 'pending', + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "backup_schedules_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "backup_files" ( + "id" TEXT NOT NULL, + "scheduleId" TEXT, + "filename" TEXT NOT NULL, + "filepath" TEXT NOT NULL, + "size" BIGINT NOT NULL, + "status" "BackupStatus" NOT NULL DEFAULT 'success', + "type" TEXT NOT NULL DEFAULT 'full', + "metadata" JSONB, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + + CONSTRAINT "backup_files_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE INDEX "backup_files_scheduleId_idx" ON "backup_files"("scheduleId"); + +-- CreateIndex +CREATE INDEX "backup_files_createdAt_idx" ON "backup_files"("createdAt"); + +-- AddForeignKey +ALTER TABLE "backup_files" ADD CONSTRAINT "backup_files_scheduleId_fkey" FOREIGN KEY ("scheduleId") REFERENCES "backup_schedules"("id") ON DELETE SET NULL ON UPDATE CASCADE; diff --git a/apps/api/prisma/schema.prisma b/apps/api/prisma/schema.prisma index 3bac0a7..fe145b5 100644 --- a/apps/api/prisma/schema.prisma +++ b/apps/api/prisma/schema.prisma @@ -462,3 +462,47 @@ model PerformanceMetric { @@index([domain, timestamp]) @@index([timestamp]) } + +enum BackupStatus { + success + failed + running + pending +} + +model BackupSchedule { + id String @id @default(cuid()) + name String + schedule String // Cron expression + enabled Boolean @default(true) + 
lastRun DateTime? + nextRun DateTime? + status BackupStatus @default(pending) + + backups BackupFile[] + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + @@map("backup_schedules") +} + +model BackupFile { + id String @id @default(cuid()) + scheduleId String? + schedule BackupSchedule? @relation(fields: [scheduleId], references: [id], onDelete: SetNull) + + filename String + filepath String + size BigInt // Size in bytes + status BackupStatus @default(success) + type String @default("full") // full, incremental, manual + + metadata Json? // Additional metadata (domains count, rules count, etc.) + + createdAt DateTime @default(now()) + + @@index([scheduleId]) + @@index([createdAt]) + @@map("backup_files") +} diff --git a/apps/api/src/controllers/backup.controller.ts b/apps/api/src/controllers/backup.controller.ts new file mode 100644 index 0000000..2dad9fb --- /dev/null +++ b/apps/api/src/controllers/backup.controller.ts @@ -0,0 +1,898 @@ +import { Response } from 'express'; +import { AuthRequest } from '../middleware/auth'; +import logger from '../utils/logger'; +import prisma from '../config/database'; +import * as fs from 'fs/promises'; +import * as path from 'path'; + +const BACKUP_DIR = process.env.BACKUP_DIR || '/var/backups/nginx-love'; +const SSL_CERTS_PATH = '/etc/nginx/ssl'; + +/** + * Ensure backup directory exists + */ +async function ensureBackupDir(): Promise { + try { + await fs.mkdir(BACKUP_DIR, { recursive: true }); + } catch (error) { + logger.error('Failed to create backup directory:', error); + throw new Error('Failed to create backup directory'); + } +} + +/** + * Format bytes to human readable size + */ +function formatBytes(bytes: number): string { + if (bytes === 0) return '0 B'; + const k = 1024; + const sizes = ['B', 'KB', 'MB', 'GB']; + const i = Math.floor(Math.log(bytes) / Math.log(k)); + return Math.round((bytes / Math.pow(k, i)) * 100) / 100 + ' ' + sizes[i]; +} + +/** + * Get all backup schedules + */ +export const getBackupSchedules = async (req: AuthRequest, res: Response): Promise => { + try { + const schedules = await prisma.backupSchedule.findMany({ + include: { + backups: { + take: 1, + orderBy: { + createdAt: 'desc' + } + } + }, + orderBy: { + createdAt: 'desc' + } + }); + + // Format the response + const formattedSchedules = schedules.map(schedule => ({ + id: schedule.id, + name: schedule.name, + schedule: schedule.schedule, + enabled: schedule.enabled, + lastRun: schedule.lastRun?.toISOString(), + nextRun: schedule.nextRun?.toISOString(), + status: schedule.status, + size: schedule.backups[0] ? 
formatBytes(Number(schedule.backups[0].size)) : undefined, + createdAt: schedule.createdAt, + updatedAt: schedule.updatedAt + })); + + res.json({ + success: true, + data: formattedSchedules + }); + } catch (error) { + logger.error('Get backup schedules error:', error); + res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Get single backup schedule + */ +export const getBackupSchedule = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + const schedule = await prisma.backupSchedule.findUnique({ + where: { id }, + include: { + backups: { + orderBy: { + createdAt: 'desc' + } + } + } + }); + + if (!schedule) { + res.status(404).json({ + success: false, + message: 'Backup schedule not found' + }); + return; + } + + res.json({ + success: true, + data: schedule + }); + } catch (error) { + logger.error('Get backup schedule error:', error); + res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Create backup schedule + */ +export const createBackupSchedule = async (req: AuthRequest, res: Response): Promise => { + try { + const { name, schedule, enabled } = req.body; + + const newSchedule = await prisma.backupSchedule.create({ + data: { + name, + schedule, + enabled: enabled ?? true + } + }); + + logger.info(`Backup schedule created: ${name}`, { + userId: req.user?.userId, + scheduleId: newSchedule.id + }); + + res.status(201).json({ + success: true, + message: 'Backup schedule created successfully', + data: newSchedule + }); + } catch (error) { + logger.error('Create backup schedule error:', error); + res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Update backup schedule + */ +export const updateBackupSchedule = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + const { name, schedule, enabled } = req.body; + + const updatedSchedule = await prisma.backupSchedule.update({ + where: { id }, + data: { + ...(name && { name }), + ...(schedule && { schedule }), + ...(enabled !== undefined && { enabled }) + } + }); + + logger.info(`Backup schedule updated: ${id}`, { + userId: req.user?.userId + }); + + res.json({ + success: true, + message: 'Backup schedule updated successfully', + data: updatedSchedule + }); + } catch (error) { + logger.error('Update backup schedule error:', error); + res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Delete backup schedule + */ +export const deleteBackupSchedule = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + await prisma.backupSchedule.delete({ + where: { id } + }); + + logger.info(`Backup schedule deleted: ${id}`, { + userId: req.user?.userId + }); + + res.json({ + success: true, + message: 'Backup schedule deleted successfully' + }); + } catch (error) { + logger.error('Delete backup schedule error:', error); + res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Toggle backup schedule enabled status + */ +export const toggleBackupSchedule = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + const schedule = await prisma.backupSchedule.findUnique({ + where: { id } + }); + + if (!schedule) { + res.status(404).json({ + success: false, + message: 'Backup schedule not found' + }); + return; + } + + const updated = await prisma.backupSchedule.update({ + 
where: { id }, + data: { + enabled: !schedule.enabled + } + }); + + logger.info(`Backup schedule toggled: ${id} (enabled: ${updated.enabled})`, { + userId: req.user?.userId + }); + + res.json({ + success: true, + message: `Backup schedule ${updated.enabled ? 'enabled' : 'disabled'}`, + data: updated + }); + } catch (error) { + logger.error('Toggle backup schedule error:', error); + res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Run backup now (manual backup) + */ +export const runBackupNow = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + await ensureBackupDir(); + + // Update schedule status to running + await prisma.backupSchedule.update({ + where: { id }, + data: { + status: 'running', + lastRun: new Date() + } + }); + + // Collect backup data + const backupData = await collectBackupData(); + + // Generate filename + const timestamp = new Date().toISOString().replace(/:/g, '-').split('.')[0]; + const filename = `backup-${timestamp}.json`; + const filepath = path.join(BACKUP_DIR, filename); + + // Write backup file + await fs.writeFile(filepath, JSON.stringify(backupData, null, 2), 'utf-8'); + + // Get file size + const stats = await fs.stat(filepath); + + // Create backup file record + const backupFile = await prisma.backupFile.create({ + data: { + scheduleId: id, + filename, + filepath, + size: BigInt(stats.size), + status: 'success', + type: 'manual', + metadata: { + domainsCount: backupData.domains.length, + sslCount: backupData.ssl.length, + modsecRulesCount: backupData.modsec.customRules.length, + aclRulesCount: backupData.acl.length + } + } + }); + + // Update schedule status + await prisma.backupSchedule.update({ + where: { id }, + data: { + status: 'success' + } + }); + + logger.info(`Manual backup completed: ${filename}`, { + userId: req.user?.userId, + size: stats.size + }); + + res.json({ + success: true, + message: 'Backup completed successfully', + data: { + filename, + size: formatBytes(stats.size) + } + }); + } catch (error) { + logger.error('Run backup error:', error); + + // Update schedule status to failed + const { id } = req.params; + if (id) { + await prisma.backupSchedule.update({ + where: { id }, + data: { status: 'failed' } + }).catch(() => {}); + } + + res.status(500).json({ + success: false, + message: 'Backup failed' + }); + } +}; + +/** + * Export configuration (download as JSON) + */ +export const exportConfig = async (req: AuthRequest, res: Response): Promise => { + try { + await ensureBackupDir(); + + // Collect backup data + const backupData = await collectBackupData(); + + // Generate filename + const timestamp = new Date().toISOString().replace(/:/g, '-').split('.')[0]; + const filename = `nginx-config-${timestamp}.json`; + + // Set headers for download + res.setHeader('Content-Type', 'application/json'); + res.setHeader('Content-Disposition', `attachment; filename="${filename}"`); + + logger.info('Configuration exported', { + userId: req.user?.userId + }); + + res.json(backupData); + } catch (error) { + logger.error('Export config error:', error); + res.status(500).json({ + success: false, + message: 'Export failed' + }); + } +}; + +/** + * Import configuration (restore from backup) + */ +export const importConfig = async (req: AuthRequest, res: Response): Promise => { + try { + const backupData = req.body; + + if (!backupData || typeof backupData !== 'object') { + res.status(400).json({ + success: false, + message: 'Invalid backup data' + }); + return; 
+ } + + const results = { + domains: 0, + ssl: 0, + sslFiles: 0, + modsec: 0, + acl: 0, + alertChannels: 0, + alertRules: 0 + }; + + // Restore domains (if present) + if (backupData.domains && Array.isArray(backupData.domains)) { + for (const domain of backupData.domains) { + try { + // Create or update domain + await prisma.domain.upsert({ + where: { name: domain.name }, + update: { + status: domain.status, + sslEnabled: domain.sslEnabled, + modsecEnabled: domain.modsecEnabled + }, + create: { + name: domain.name, + status: domain.status, + sslEnabled: domain.sslEnabled, + modsecEnabled: domain.modsecEnabled + } + }); + results.domains++; + } catch (error) { + logger.error(`Failed to restore domain ${domain.name}:`, error); + } + } + } + + // Restore SSL certificates (if present) + if (backupData.ssl && Array.isArray(backupData.ssl)) { + for (const sslCert of backupData.ssl) { + try { + // Find domain by name + const domain = await prisma.domain.findUnique({ + where: { name: sslCert.domainName } + }); + + if (!domain) { + logger.warn(`Domain not found for SSL cert: ${sslCert.domainName}`); + continue; + } + + // Restore SSL certificate files if present + if (sslCert.files && sslCert.files.certificate && sslCert.files.privateKey) { + // Create or update SSL certificate in database with actual certificate content + await prisma.sSLCertificate.upsert({ + where: { domainId: domain.id }, + update: { + commonName: sslCert.commonName, + sans: sslCert.sans || [], + issuer: sslCert.issuer, + certificate: sslCert.files.certificate, + privateKey: sslCert.files.privateKey, + chain: sslCert.files.chain || null, + autoRenew: sslCert.autoRenew || false + }, + create: { + domain: { + connect: { id: domain.id } + }, + commonName: sslCert.commonName, + sans: sslCert.sans || [], + issuer: sslCert.issuer, + certificate: sslCert.files.certificate, + privateKey: sslCert.files.privateKey, + chain: sslCert.files.chain || null, + validFrom: new Date(), + validTo: new Date(Date.now() + 90 * 24 * 60 * 60 * 1000), // 90 days from now + autoRenew: sslCert.autoRenew || false + } + }); + + // Also write files to disk + await writeSSLCertificateFiles(sslCert.domainName, { + certificate: sslCert.files.certificate, + privateKey: sslCert.files.privateKey, + chain: sslCert.files.chain + }); + + results.ssl++; + results.sslFiles++; + logger.info(`SSL certificate and files restored for domain: ${sslCert.domainName}`); + } else { + // Only create DB record if no files + await prisma.sSLCertificate.upsert({ + where: { domainId: domain.id }, + update: { + commonName: sslCert.commonName, + sans: sslCert.sans || [], + issuer: sslCert.issuer, + autoRenew: sslCert.autoRenew || false + }, + create: { + domain: { + connect: { id: domain.id } + }, + commonName: sslCert.commonName, + sans: sslCert.sans || [], + issuer: sslCert.issuer, + certificate: '', // Empty placeholder + privateKey: '', // Empty placeholder + validFrom: new Date(), + validTo: new Date(Date.now() + 90 * 24 * 60 * 60 * 1000), + autoRenew: sslCert.autoRenew || false + } + }); + results.ssl++; + logger.info(`SSL metadata restored for domain: ${sslCert.domainName} (no files)`); + } + } catch (error) { + logger.error(`Failed to restore SSL cert for ${sslCert.domainName}:`, error); + } + } + } + + // Restore ACL rules (if present) + if (backupData.acl && Array.isArray(backupData.acl)) { + for (const rule of backupData.acl) { + try { + await prisma.aclRule.create({ + data: { + name: rule.name, + type: rule.type, + conditionField: rule.condition.field, + conditionOperator: 
rule.condition.operator, + conditionValue: rule.condition.value, + action: rule.action, + enabled: rule.enabled + } + }); + results.acl++; + } catch (error) { + logger.error(`Failed to restore ACL rule ${rule.name}:`, error); + } + } + } + + // Restore notification channels (if present) + if (backupData.notificationChannels && Array.isArray(backupData.notificationChannels)) { + for (const channel of backupData.notificationChannels) { + try { + await prisma.notificationChannel.create({ + data: { + name: channel.name, + type: channel.type, + enabled: channel.enabled, + config: channel.config + } + }); + results.alertChannels++; + } catch (error) { + logger.error(`Failed to restore notification channel ${channel.name}:`, error); + } + } + } + + logger.info('Configuration imported successfully', { + userId: req.user?.userId, + results + }); + + res.json({ + success: true, + message: 'Configuration imported successfully', + data: results + }); + } catch (error) { + logger.error('Import config error:', error); + res.status(500).json({ + success: false, + message: 'Import failed' + }); + } +}; + +/** + * Get all backup files + */ +export const getBackupFiles = async (req: AuthRequest, res: Response): Promise => { + try { + const { scheduleId } = req.query; + + const backups = await prisma.backupFile.findMany({ + where: scheduleId ? { scheduleId: scheduleId as string } : {}, + include: { + schedule: true + }, + orderBy: { + createdAt: 'desc' + } + }); + + const formattedBackups = backups.map(backup => ({ + ...backup, + size: formatBytes(Number(backup.size)) + })); + + res.json({ + success: true, + data: formattedBackups + }); + } catch (error) { + logger.error('Get backup files error:', error); + res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Download backup file + */ +export const downloadBackup = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + const backup = await prisma.backupFile.findUnique({ + where: { id } + }); + + if (!backup) { + res.status(404).json({ + success: false, + message: 'Backup file not found' + }); + return; + } + + // Check if file exists + try { + await fs.access(backup.filepath); + } catch { + res.status(404).json({ + success: false, + message: 'Backup file not found on disk' + }); + return; + } + + // Send file + res.download(backup.filepath, backup.filename); + + logger.info(`Backup downloaded: ${backup.filename}`, { + userId: req.user?.userId + }); + } catch (error) { + logger.error('Download backup error:', error); + res.status(500).json({ + success: false, + message: 'Download failed' + }); + } +}; + +/** + * Delete backup file + */ +export const deleteBackupFile = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + const backup = await prisma.backupFile.findUnique({ + where: { id } + }); + + if (!backup) { + res.status(404).json({ + success: false, + message: 'Backup file not found' + }); + return; + } + + // Delete file from disk + try { + await fs.unlink(backup.filepath); + } catch (error) { + logger.warn(`Failed to delete backup file from disk: ${backup.filepath}`, error); + } + + // Delete from database + await prisma.backupFile.delete({ + where: { id } + }); + + logger.info(`Backup deleted: ${backup.filename}`, { + userId: req.user?.userId + }); + + res.json({ + success: true, + message: 'Backup file deleted successfully' + }); + } catch (error) { + logger.error('Delete backup file error:', error); + 
res.status(500).json({ + success: false, + message: 'Internal server error' + }); + } +}; + +/** + * Helper function to read SSL certificate files for a domain + */ +async function readSSLCertificateFiles(domainName: string) { + try { + const certPath = path.join(SSL_CERTS_PATH, `${domainName}.crt`); + const keyPath = path.join(SSL_CERTS_PATH, `${domainName}.key`); + const chainPath = path.join(SSL_CERTS_PATH, `${domainName}.chain.crt`); + + const sslFiles: { + certificate?: string; + privateKey?: string; + chain?: string; + } = {}; + + // Try to read certificate file + try { + sslFiles.certificate = await fs.readFile(certPath, 'utf-8'); + } catch (error) { + logger.warn(`SSL certificate not found for ${domainName}: ${certPath}`); + } + + // Try to read private key file + try { + sslFiles.privateKey = await fs.readFile(keyPath, 'utf-8'); + } catch (error) { + logger.warn(`SSL private key not found for ${domainName}: ${keyPath}`); + } + + // Try to read chain file (optional) + try { + sslFiles.chain = await fs.readFile(chainPath, 'utf-8'); + } catch (error) { + // Chain is optional, don't log warning + } + + return sslFiles; + } catch (error) { + logger.error(`Error reading SSL files for ${domainName}:`, error); + return {}; + } +} + +/** + * Helper function to write SSL certificate files for a domain + */ +async function writeSSLCertificateFiles(domainName: string, sslFiles: { + certificate?: string; + privateKey?: string; + chain?: string; +}) { + try { + await fs.mkdir(SSL_CERTS_PATH, { recursive: true }); + + if (sslFiles.certificate) { + const certPath = path.join(SSL_CERTS_PATH, `${domainName}.crt`); + await fs.writeFile(certPath, sslFiles.certificate, 'utf-8'); + logger.info(`SSL certificate written for ${domainName}`); + } + + if (sslFiles.privateKey) { + const keyPath = path.join(SSL_CERTS_PATH, `${domainName}.key`); + await fs.writeFile(keyPath, sslFiles.privateKey, 'utf-8'); + // Set proper permissions for private key + await fs.chmod(keyPath, 0o600); + logger.info(`SSL private key written for ${domainName}`); + } + + if (sslFiles.chain) { + const chainPath = path.join(SSL_CERTS_PATH, `${domainName}.chain.crt`); + await fs.writeFile(chainPath, sslFiles.chain, 'utf-8'); + logger.info(`SSL chain written for ${domainName}`); + } + } catch (error) { + logger.error(`Error writing SSL files for ${domainName}:`, error); + throw error; + } +} + +/** + * Helper function to collect all backup data + */ +async function collectBackupData() { + // Get all domains + const domains = await prisma.domain.findMany({ + include: { + upstreams: true, + loadBalancer: true, + sslCertificate: true + } + }); + + // Get all SSL certificates with actual certificate files + const ssl = await prisma.sSLCertificate.findMany({ + include: { + domain: true + } + }); + + // Read SSL certificate files for each certificate + const sslWithFiles = await Promise.all( + ssl.map(async (s) => { + if (!s.domain?.name) { + return { + domainName: s.domain?.name, + commonName: s.commonName, + sans: s.sans, + issuer: s.issuer, + autoRenew: s.autoRenew + }; + } + + const sslFiles = await readSSLCertificateFiles(s.domain.name); + + return { + domainName: s.domain.name, + commonName: s.commonName, + sans: s.sans, + issuer: s.issuer, + autoRenew: s.autoRenew, + // Include actual certificate files + files: sslFiles + }; + }) + ); + + // Get ModSecurity CRS rules + const modsecCRSRules = await prisma.modSecCRSRule.findMany(); + + // Get ModSecurity custom rules + const modsecCustomRules = await prisma.modSecRule.findMany(); + + // 
Get ACL rules + const aclRules = await prisma.aclRule.findMany(); + + // Get notification channels + const notificationChannels = await prisma.notificationChannel.findMany(); + + // Get alert rules + const alertRules = await prisma.alertRule.findMany({ + include: { + channels: { + include: { + channel: true + } + } + } + }); + + // Get nginx configs + const nginxConfigs = await prisma.nginxConfig.findMany(); + + return { + version: '1.0', + timestamp: new Date().toISOString(), + domains: domains.map(d => ({ + name: d.name, + status: d.status, + sslEnabled: d.sslEnabled, + modsecEnabled: d.modsecEnabled, + upstreams: d.upstreams, + loadBalancer: d.loadBalancer + })), + ssl: sslWithFiles, + modsec: { + crsRules: modsecCRSRules, + customRules: modsecCustomRules + }, + acl: aclRules.map(r => ({ + name: r.name, + type: r.type, + condition: { + field: r.conditionField, + operator: r.conditionOperator, + value: r.conditionValue + }, + action: r.action, + enabled: r.enabled + })), + notificationChannels, + alertRules: alertRules.map(r => ({ + name: r.name, + condition: r.condition, + threshold: r.threshold, + severity: r.severity, + enabled: r.enabled, + channels: r.channels.map(c => c.channel.name) + })), + nginxConfigs + }; +} diff --git a/apps/api/src/routes/backup.routes.ts b/apps/api/src/routes/backup.routes.ts new file mode 100644 index 0000000..26cde69 --- /dev/null +++ b/apps/api/src/routes/backup.routes.ts @@ -0,0 +1,107 @@ +import { Router } from 'express'; +import { authenticate, authorize } from '../middleware/auth'; +import { + getBackupSchedules, + getBackupSchedule, + createBackupSchedule, + updateBackupSchedule, + deleteBackupSchedule, + toggleBackupSchedule, + runBackupNow, + exportConfig, + importConfig, + getBackupFiles, + downloadBackup, + deleteBackupFile +} from '../controllers/backup.controller'; + +const router = Router(); + +// All routes require authentication +router.use(authenticate); + +/** + * @route GET /api/backup/schedules + * @desc Get all backup schedules + * @access Private (all roles) + */ +router.get('/schedules', getBackupSchedules); + +/** + * @route GET /api/backup/schedules/:id + * @desc Get single backup schedule + * @access Private (all roles) + */ +router.get('/schedules/:id', getBackupSchedule); + +/** + * @route POST /api/backup/schedules + * @desc Create backup schedule + * @access Private (admin, moderator) + */ +router.post('/schedules', authorize('admin', 'moderator'), createBackupSchedule); + +/** + * @route PUT /api/backup/schedules/:id + * @desc Update backup schedule + * @access Private (admin, moderator) + */ +router.put('/schedules/:id', authorize('admin', 'moderator'), updateBackupSchedule); + +/** + * @route DELETE /api/backup/schedules/:id + * @desc Delete backup schedule + * @access Private (admin, moderator) + */ +router.delete('/schedules/:id', authorize('admin', 'moderator'), deleteBackupSchedule); + +/** + * @route PATCH /api/backup/schedules/:id/toggle + * @desc Toggle backup schedule enabled status + * @access Private (admin, moderator) + */ +router.patch('/schedules/:id/toggle', authorize('admin', 'moderator'), toggleBackupSchedule); + +/** + * @route POST /api/backup/schedules/:id/run + * @desc Run backup now (manual) + * @access Private (admin, moderator) + */ +router.post('/schedules/:id/run', authorize('admin', 'moderator'), runBackupNow); + +/** + * @route GET /api/backup/export + * @desc Export configuration + * @access Private (admin, moderator) + */ +router.get('/export', authorize('admin', 'moderator'), exportConfig); + 
+/** + * @route POST /api/backup/import + * @desc Import configuration + * @access Private (admin) + */ +router.post('/import', authorize('admin'), importConfig); + +/** + * @route GET /api/backup/files + * @desc Get all backup files + * @access Private (all roles) + */ +router.get('/files', getBackupFiles); + +/** + * @route GET /api/backup/files/:id/download + * @desc Download backup file + * @access Private (admin, moderator) + */ +router.get('/files/:id/download', authorize('admin', 'moderator'), downloadBackup); + +/** + * @route DELETE /api/backup/files/:id + * @desc Delete backup file + * @access Private (admin) + */ +router.delete('/files/:id', authorize('admin'), deleteBackupFile); + +export default router; diff --git a/apps/api/src/routes/index.ts b/apps/api/src/routes/index.ts index 5c8e613..151a9b7 100644 --- a/apps/api/src/routes/index.ts +++ b/apps/api/src/routes/index.ts @@ -11,6 +11,7 @@ import aclRoutes from './acl.routes'; import performanceRoutes from './performance.routes'; import userRoutes from './user.routes'; import dashboardRoutes from './dashboard.routes'; +import backupRoutes from './backup.routes'; const router = Router(); @@ -36,5 +37,6 @@ router.use('/acl', aclRoutes); router.use('/performance', performanceRoutes); router.use('/users', userRoutes); router.use('/dashboard', dashboardRoutes); +router.use('/backup', backupRoutes); export default router; diff --git a/apps/web/src/components/pages/Backup.tsx b/apps/web/src/components/pages/Backup.tsx index 748ce25..7df1a96 100644 --- a/apps/web/src/components/pages/Backup.tsx +++ b/apps/web/src/components/pages/Backup.tsx @@ -1,5 +1,4 @@ -import { useState } from "react"; -import { useTranslation } from "react-i18next"; +import { useState, useEffect } from "react"; import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"; import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; @@ -8,17 +7,19 @@ import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@ import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog"; import { Label } from "@/components/ui/label"; import { Switch } from "@/components/ui/switch"; -import { Download, Upload, Play, Trash2, Calendar, FileArchive, Database } from "lucide-react"; -import { mockBackups } from "@/mocks/data"; -import { BackupConfig } from "@/types"; +import { Download, Upload, Play, Trash2, Calendar, FileArchive, Database, Loader2 } from "lucide-react"; import { useToast } from "@/hooks/use-toast"; -import { UnderConstructionBanner } from "@/components/ui/under-construction-banner"; +import { backupService, BackupSchedule } from "@/services/backup.service"; +import { AlertDialog, AlertDialogAction, AlertDialogCancel, AlertDialogContent, AlertDialogDescription, AlertDialogFooter, AlertDialogHeader, AlertDialogTitle } from "@/components/ui/alert-dialog"; const Backup = () => { - const { t } = useTranslation(); const { toast } = useToast(); - const [backups, setBackups] = useState(mockBackups); + const [backups, setBackups] = useState([]); const [isDialogOpen, setIsDialogOpen] = useState(false); + const [deleteDialogOpen, setDeleteDialogOpen] = useState(false); + const [scheduleToDelete, setScheduleToDelete] = useState(null); + const [exportLoading, setExportLoading] = useState(false); + const [importLoading, setImportLoading] = useState(false); const [formData, setFormData] = useState({ name: "", @@ 
-26,18 +27,56 @@ const Backup = () => { enabled: true }); - const handleAddBackup = () => { - const newBackup: BackupConfig = { - id: `bk${backups.length + 1}`, - name: formData.name, - schedule: formData.schedule, - enabled: formData.enabled, - status: 'pending' - }; - setBackups([...backups, newBackup]); - setIsDialogOpen(false); - resetForm(); - toast({ title: "Backup schedule created successfully" }); + // Load backup schedules + useEffect(() => { + loadBackupSchedules(); + }, []); + + const loadBackupSchedules = async () => { + try { + const data = await backupService.getSchedules(); + setBackups(data); + } catch (error: any) { + toast({ + title: "Error loading backups", + description: error.response?.data?.message || "Failed to load backup schedules", + variant: "destructive" + }); + } + }; + + const handleAddBackup = async () => { + if (!formData.name.trim()) { + toast({ + title: "Validation error", + description: "Please enter a backup name", + variant: "destructive" + }); + return; + } + + try { + await backupService.createSchedule({ + name: formData.name, + schedule: formData.schedule, + enabled: formData.enabled + }); + + setIsDialogOpen(false); + resetForm(); + loadBackupSchedules(); + + toast({ + title: "Success", + description: "Backup schedule created successfully" + }); + } catch (error: any) { + toast({ + title: "Error", + description: error.response?.data?.message || "Failed to create backup schedule", + variant: "destructive" + }); + } }; const resetForm = () => { @@ -48,44 +87,137 @@ const Backup = () => { }); }; - const handleToggle = (id: string) => { - setBackups(backups.map(b => b.id === id ? { ...b, enabled: !b.enabled } : b)); + const handleToggle = async (id: string) => { + try { + await backupService.toggleSchedule(id); + loadBackupSchedules(); + toast({ + title: "Success", + description: "Backup schedule updated" + }); + } catch (error: any) { + toast({ + title: "Error", + description: error.response?.data?.message || "Failed to toggle backup schedule", + variant: "destructive" + }); + } }; - const handleDelete = (id: string) => { - setBackups(backups.filter(b => b.id !== id)); - toast({ title: "Backup schedule deleted" }); + const confirmDelete = (id: string) => { + setScheduleToDelete(id); + setDeleteDialogOpen(true); }; - const handleRunNow = (id: string) => { - toast({ - title: "Backup started", - description: "Manual backup is running (mock mode)" - }); + const handleDelete = async () => { + if (!scheduleToDelete) return; + + try { + await backupService.deleteSchedule(scheduleToDelete); + setDeleteDialogOpen(false); + setScheduleToDelete(null); + loadBackupSchedules(); + toast({ + title: "Success", + description: "Backup schedule deleted" + }); + } catch (error: any) { + toast({ + title: "Error", + description: error.response?.data?.message || "Failed to delete backup schedule", + variant: "destructive" + }); + } }; - const handleExportConfig = () => { - const config = { - domains: "Mock domain configurations", - ssl: "Mock SSL certificates", - modsec: "Mock ModSecurity rules", - settings: "Mock system settings" - }; - const dataStr = JSON.stringify(config, null, 2); - const dataUri = 'data:application/json;charset=utf-8,'+ encodeURIComponent(dataStr); - const exportFileDefaultName = `nginx-config-${new Date().toISOString()}.json`; - const linkElement = document.createElement('a'); - linkElement.setAttribute('href', dataUri); - linkElement.setAttribute('download', exportFileDefaultName); - linkElement.click(); - toast({ title: "Configuration exported 
successfully" }); + const handleRunNow = async (id: string) => { + try { + toast({ + title: "Backup started", + description: "Manual backup is running..." + }); + + const result = await backupService.runNow(id); + loadBackupSchedules(); + + toast({ + title: "Backup completed", + description: `Backup file created: ${result.filename} (${result.size})` + }); + } catch (error: any) { + toast({ + title: "Backup failed", + description: error.response?.data?.message || "Failed to run backup", + variant: "destructive" + }); + } }; - const handleImportConfig = () => { - toast({ - title: "Import configuration", - description: "Select a backup file to restore (mock mode)" - }); + const handleExportConfig = async () => { + try { + setExportLoading(true); + const blob = await backupService.exportConfig(); + + const timestamp = new Date().toISOString().replace(/:/g, '-').split('.')[0]; + const filename = `nginx-config-${timestamp}.json`; + + const url = window.URL.createObjectURL(blob); + const link = document.createElement('a'); + link.href = url; + link.download = filename; + link.click(); + window.URL.revokeObjectURL(url); + + toast({ + title: "Success", + description: "Configuration exported successfully" + }); + } catch (error: any) { + toast({ + title: "Export failed", + description: error.response?.data?.message || "Failed to export configuration", + variant: "destructive" + }); + } finally { + setExportLoading(false); + } + }; + + const handleImportConfig = async () => { + const input = document.createElement('input'); + input.type = 'file'; + input.accept = 'application/json'; + + input.onchange = async (e: Event) => { + const file = (e.target as HTMLInputElement).files?.[0]; + if (!file) return; + + try { + setImportLoading(true); + const text = await file.text(); + const data = JSON.parse(text); + + const result = await backupService.importConfig(data); + + toast({ + title: "Import successful", + description: `Restored: ${result.domains} domains, ${result.ssl} SSL certs${result.sslFiles ? ` (${result.sslFiles} with files)` : ''}, ${result.acl} ACL rules, ${result.modsec} ModSec rules` + }); + + // Reload data + loadBackupSchedules(); + } catch (error: any) { + toast({ + title: "Import failed", + description: error.response?.data?.message || "Failed to import configuration. Please check the file format.", + variant: "destructive" + }); + } finally { + setImportLoading(false); + } + }; + + input.click(); }; const getStatusColor = (status: string) => { @@ -100,7 +232,6 @@ const Backup = () => { return (
     <div className="space-y-6">
-      <UnderConstructionBanner />
       <div className="flex items-center justify-between">
@@ -123,9 +254,18 @@ const Backup = () => {
             <CardDescription>
               Export all domains, SSL certificates, ModSecurity rules, and system settings to a JSON file.
             </CardDescription>
-            <Button onClick={handleExportConfig}>
-              <Download className="mr-2 h-4 w-4" />
-              Export Configuration
-            </Button>
+            <Button onClick={handleExportConfig} disabled={exportLoading}>
+              {exportLoading ? (
+                <Loader2 className="mr-2 h-4 w-4 animate-spin" />
+              ) : (
+                <Download className="mr-2 h-4 w-4" />
+              )}
+              {exportLoading ? "Exporting..." : "Export Configuration"}
+            </Button>
@@ -139,9 +279,18 @@ const Backup = () => {
             <CardDescription>
               Import and restore configuration from a previously exported backup file.
             </CardDescription>
-            <Button onClick={handleImportConfig}>
-              <Upload className="mr-2 h-4 w-4" />
-              Import Configuration
-            </Button>
+            <Button onClick={handleImportConfig} disabled={importLoading}>
+              {importLoading ? (
+                <Loader2 className="mr-2 h-4 w-4 animate-spin" />
+              ) : (
+                <Upload className="mr-2 h-4 w-4" />
+              )}
+              {importLoading ? "Importing..." : "Import Configuration"}
+            </Button>
@@ -247,7 +396,7 @@ const Backup = () => {
-                      <Button variant="ghost" size="sm" onClick={() => handleDelete(backup.id)}>
+                      <Button variant="ghost" size="sm" onClick={() => confirmDelete(backup.id)}>
                         <Trash2 className="h-4 w-4" />
                       </Button>
@@ -294,6 +443,24 @@ const Backup = () => {
         </Dialog>
       </div>
+
+      {/* Delete Confirmation Dialog */}
+      <AlertDialog open={deleteDialogOpen} onOpenChange={setDeleteDialogOpen}>
+        <AlertDialogContent>
+          <AlertDialogHeader>
+            <AlertDialogTitle>Delete Backup Schedule</AlertDialogTitle>
+            <AlertDialogDescription>
+              Are you sure you want to delete this backup schedule? This action cannot be undone.
+            </AlertDialogDescription>
+          </AlertDialogHeader>
+          <AlertDialogFooter>
+            <AlertDialogCancel>Cancel</AlertDialogCancel>
+            <AlertDialogAction onClick={handleDelete}>Delete</AlertDialogAction>
+          </AlertDialogFooter>
+        </AlertDialogContent>
+      </AlertDialog>
     </div>
); }; diff --git a/apps/web/src/services/backup.service.ts b/apps/web/src/services/backup.service.ts new file mode 100644 index 0000000..cb435e6 --- /dev/null +++ b/apps/web/src/services/backup.service.ts @@ -0,0 +1,151 @@ +import api from './api'; + +export interface BackupSchedule { + id: string; + name: string; + schedule: string; + enabled: boolean; + lastRun?: string; + nextRun?: string; + status: 'success' | 'failed' | 'running' | 'pending'; + size?: string; + createdAt?: string; + updatedAt?: string; +} + +export interface BackupFile { + id: string; + scheduleId?: string; + filename: string; + filepath: string; + size: string; + status: 'success' | 'failed' | 'running' | 'pending'; + type: string; + metadata?: any; + createdAt: string; + schedule?: BackupSchedule; +} + +export interface CreateBackupScheduleRequest { + name: string; + schedule: string; + enabled?: boolean; +} + +export interface UpdateBackupScheduleRequest { + name?: string; + schedule?: string; + enabled?: boolean; +} + +export interface ImportResult { + domains: number; + ssl: number; + sslFiles?: number; + modsec: number; + acl: number; + alertChannels: number; + alertRules: number; +} + +export const backupService = { + /** + * Get all backup schedules + */ + async getSchedules(): Promise { + const response = await api.get('/backup/schedules'); + return response.data.data; + }, + + /** + * Get single backup schedule + */ + async getSchedule(id: string): Promise { + const response = await api.get(`/backup/schedules/${id}`); + return response.data.data; + }, + + /** + * Create backup schedule + */ + async createSchedule(data: CreateBackupScheduleRequest): Promise { + const response = await api.post('/backup/schedules', data); + return response.data.data; + }, + + /** + * Update backup schedule + */ + async updateSchedule(id: string, data: UpdateBackupScheduleRequest): Promise { + const response = await api.put(`/backup/schedules/${id}`, data); + return response.data.data; + }, + + /** + * Delete backup schedule + */ + async deleteSchedule(id: string): Promise { + await api.delete(`/backup/schedules/${id}`); + }, + + /** + * Toggle backup schedule enabled status + */ + async toggleSchedule(id: string): Promise { + const response = await api.patch(`/backup/schedules/${id}/toggle`); + return response.data.data; + }, + + /** + * Run backup now (manual backup) + */ + async runNow(id: string): Promise<{ filename: string; size: string }> { + const response = await api.post(`/backup/schedules/${id}/run`); + return response.data.data; + }, + + /** + * Export configuration + */ + async exportConfig(): Promise { + const response = await api.get('/backup/export', { + responseType: 'blob' + }); + return response.data; + }, + + /** + * Import configuration + */ + async importConfig(data: any): Promise { + const response = await api.post('/backup/import', data); + return response.data.data; + }, + + /** + * Get all backup files + */ + async getFiles(scheduleId?: string): Promise { + const response = await api.get('/backup/files', { + params: { scheduleId } + }); + return response.data.data; + }, + + /** + * Download backup file + */ + async downloadFile(id: string): Promise { + const response = await api.get(`/backup/files/${id}/download`, { + responseType: 'blob' + }); + return response.data; + }, + + /** + * Delete backup file + */ + async deleteFile(id: string): Promise { + await api.delete(`/backup/files/${id}`); + } +}; diff --git a/docs/BACKUP_SSL_GUIDE.md b/docs/BACKUP_SSL_GUIDE.md new file mode 100644 index 
0000000..218e396
--- /dev/null
+++ b/docs/BACKUP_SSL_GUIDE.md
@@ -0,0 +1,253 @@
+# Backup & Restore with SSL Certificates
+
+## Overview
+
+The backup system has been upgraded to support **full backup and restore of SSL certificates**, so a complete configuration can be moved between servers.
+
+## What gets backed up
+
+### 1. **Database Records**
+- Domain configurations
+- SSL certificate metadata (issuer, validity, SANs)
+- ModSecurity rules (CRS + Custom)
+- ACL rules
+- Alert rules & notification channels
+- Nginx configurations
+
+### 2. **SSL Certificate Files** ✨ (NEW)
+For every domain with SSL enabled, the system backs up:
+- **Certificate file** (.crt) - public certificate
+- **Private key file** (.key) - private key (stored securely)
+- **Certificate chain** (.chain.crt) - intermediate certificates (if present)
+
+## How it works
+
+### Export/Backup
+
+When you export the configuration or run a backup:
+
+```typescript
+// The backend automatically:
+1. Reads SSL metadata from the database
+2. Reads the SSL certificate files from /etc/nginx/ssl/
+3. Includes the file contents in the backup JSON
+4. Produces the complete backup file
+```
+
+**Backup JSON structure:**
+```json
+{
+  "version": "1.0",
+  "timestamp": "2025-10-06T10:30:00Z",
+  "ssl": [
+    {
+      "domainName": "example.com",
+      "commonName": "example.com",
+      "sans": ["example.com", "www.example.com"],
+      "issuer": "Let's Encrypt",
+      "autoRenew": true,
+      "files": {
+        "certificate": "-----BEGIN CERTIFICATE-----\n...",
+        "privateKey": "-----BEGIN PRIVATE KEY-----\n...",
+        "chain": "-----BEGIN CERTIFICATE-----\n..."
+      }
+    }
+  ]
+}
+```
+
+### Import/Restore
+
+When you import a backup on a new server:
+
+```typescript
+// The backend automatically:
+1. Parses the backup JSON
+2. Restores domains into the database
+3. Restores SSL metadata into the database
+4. Writes the SSL certificate files to /etc/nginx/ssl/
+5. Sets permissions (private key = 600)
+6. Restores the remaining configuration
+```
+
+## Usage
+
+### 1. Export Configuration
+
+**From the UI:**
+```
+Backup & Restore → Export Configuration → Download
+```
+
+**Result:** a JSON file containing the entire configuration plus SSL certificates
+
+### 2. Import Configuration
+
+**On the new server:**
+```
+Backup & Restore → Import Configuration → Select file
+```
+
+**The system will:**
+- ✅ Restore domains
+- ✅ Restore SSL certificates (both metadata and files)
+- ✅ Restore ACL rules
+- ✅ Restore ModSecurity rules
+- ✅ Restore alert configurations
+
+**The toast notification shows:**
+```
+Restored: 5 domains, 3 SSL certs (3 with files), 10 ACL rules, 25 ModSec rules
+```
+
+## Security
+
+### Private Keys
+- Private keys are stored inside the backup JSON
+- **IMPORTANT:** protect the backup file the same way you protect your private keys
+- Recommended: encrypt the backup file before storing it
+- Permission 600 is set on private keys during restore
+
+### Best Practices
+
+1. **Store backups securely:**
+   ```bash
+   # Encrypt backup file
+   gpg -c nginx-config-2025-10-06.json
+
+   # Decrypt when needed
+   gpg -d nginx-config-2025-10-06.json.gpg > nginx-config-2025-10-06.json
+   ```
+
+2. **Back up regularly:**
+   - Use scheduled backups
+   - Keep copies off-site
+   - Verify backups regularly
+
+3. **Test restores:**
+   - Test the restore on a staging environment
+   - Verify that the SSL certificates work
+   - Check all domains
+
+## API Endpoints
+
+### Export
+```bash
+GET /api/backup/export
+Authorization: Bearer <token>
+```
+
+### Import
+```bash
+POST /api/backup/import
+Authorization: Bearer <token>
+Content-Type: application/json
+
+{
+  "version": "1.0",
+  "ssl": [...],
+  "domains": [...],
+  ...
+}
+```
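+
+### Example: scripted export/import
+
+The two endpoints above can be scripted for ad-hoc migrations. A minimal sketch, assuming the API is reachable at `http://localhost:3000/api` and `$TOKEN` holds a valid JWT for an admin account (adjust host, port, and token for your deployment):
+
+```bash
+TOKEN="<your-jwt-token>"
+API="http://localhost:3000/api"
+FILE="nginx-config-$(date +%F).json"
+
+# Export the current configuration to a timestamped file
+curl -sf -H "Authorization: Bearer $TOKEN" "$API/backup/export" -o "$FILE"
+
+# Import the same file on another server (admin role required)
+curl -sf -X POST \
+  -H "Authorization: Bearer $TOKEN" \
+  -H "Content-Type: application/json" \
+  --data @"$FILE" \
+  "$API/backup/import"
+```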
+
+## File Locations
+
+### SSL Certificates
+```
+/etc/nginx/ssl/
+├── example.com.crt
+├── example.com.key (chmod 600)
+├── example.com.chain.crt
+├── another-domain.com.crt
+├── another-domain.com.key (chmod 600)
+└── another-domain.com.chain.crt
+```
+
+### Backups
+```
+/var/backups/nginx-love/
+├── backup-2025-10-06T10-30-00.json
+├── backup-2025-10-05T02-00-00.json
+└── ...
+```
+
+## Troubleshooting
+
+### SSL files are not restored
+
+**Cause:** the domain does not exist in the database yet
+
+**Fix:**
+1. The import automatically creates the domains first
+2. SSL certificates are restored afterwards
+3. Check the logs: `/home/nginx-love-dev/apps/api/logs/combined.log`
+
+### Permission denied during restore
+
+**Cause:** no write permission on /etc/nginx/ssl/
+
+**Fix:**
+```bash
+sudo mkdir -p /etc/nginx/ssl
+sudo chown -R <user>:nginx /etc/nginx/ssl
+sudo chmod 755 /etc/nginx/ssl
+```
+
+### Certificate is not valid after restore
+
+**Causes:**
+- The file was corrupted during backup/restore
+- The certificate has expired
+
+**Fix:**
+```bash
+# Verify certificate
+openssl x509 -in /etc/nginx/ssl/example.com.crt -text -noout
+
+# Check private key
+openssl rsa -in /etc/nginx/ssl/example.com.key -check
+
+# Verify cert matches key
+openssl x509 -noout -modulus -in /etc/nginx/ssl/example.com.crt | openssl md5
+openssl rsa -noout -modulus -in /etc/nginx/ssl/example.com.key | openssl md5
+```
+
+## Migration Example
+
+### Old server (production):
+```bash
+1. Log in to the UI
+2. Navigate to Backup & Restore
+3. Click "Export Configuration"
+4. Download: nginx-config-2025-10-06.json
+5. Encrypt (optional): gpg -c nginx-config-2025-10-06.json
+```
+
+### New server (staging / new production):
+```bash
+1. Set up the nginx-love application
+2. Run migrations: npx prisma migrate deploy
+3. Start services
+4. Log in to the UI
+5. Navigate to Backup & Restore
+6. Click "Import Configuration"
+7. Select: nginx-config-2025-10-06.json
+8. Wait for the import to complete
+9. Verify: check domains, SSL certs, and configurations
+10. Test: access the domains over HTTPS
+```
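+
+### Pre-restore sanity check
+
+Before importing on the new server, it can help to inspect the backup file against the JSON structure shown earlier. A small sketch using `jq`; the field names follow the layout documented in this guide:
+
+```bash
+BACKUP=nginx-config-2025-10-06.json
+
+# Summarize what the backup contains
+jq -r '"version: \(.version)",
+       "created: \(.timestamp)",
+       "domains: \(.domains | length)",
+       "ssl:     \(.ssl | length)",
+       "acl:     \(.acl | length)"' "$BACKUP"
+
+# List SSL entries without embedded certificate files
+# (these will be restored as metadata only)
+jq -r '.ssl[] | select(.files.certificate == null) | .domainName' "$BACKUP"
+```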
+
+## Notes
+
+- ✅ SSL certificates are backed up **IN FULL** (not just metadata)
+- ✅ Private keys are kept secure inside the backup file
+- ✅ Certificate chains (intermediate certs) are supported
+- ✅ Permissions are set automatically for private keys
+- ✅ Compatible with Let's Encrypt, self-signed, and commercial certificates
+- ⚠️ **Protect backup files the same way you protect private keys**
+- ⚠️ Backup files can become very large when many SSL certificates are included
+
+## Version History
+
+- **v1.0** (2025-10-06): Initial release with SSL certificate backup support

From 78a4ada2407494927f3833102dfff684024f1253 Mon Sep 17 00:00:00 2001
From: vncloudsco
Date: Mon, 6 Oct 2025 07:12:13 +0000
Subject: [PATCH 03/32] feat: Enhance backup import functionality with
 confirmation dialog and detailed results

---
 apps/api/src/controllers/backup.controller.ts | 480 +++++++++++++++---
 apps/web/src/components/pages/Backup.tsx      | 146 +++++-
 apps/web/src/services/backup.service.ts       |  10 +-
 3 files changed, 546 insertions(+), 90 deletions(-)

diff --git a/apps/api/src/controllers/backup.controller.ts b/apps/api/src/controllers/backup.controller.ts
index 2dad9fb..6787635 100644
--- a/apps/api/src/controllers/backup.controller.ts
+++ b/apps/api/src/controllers/backup.controller.ts
@@ -4,8 +4,14 @@ import logger from '../utils/logger';
 import prisma from '../config/database';
 import * as fs from 'fs/promises';
 import * as path from 'path';
+import { exec } from 'child_process';
+import { promisify } from 'util';
+
+const execAsync = promisify(exec);
 
 const BACKUP_DIR = process.env.BACKUP_DIR || '/var/backups/nginx-love';
+const NGINX_SITES_AVAILABLE = '/etc/nginx/sites-available';
+const NGINX_SITES_ENABLED = '/etc/nginx/sites-enabled';
 const SSL_CERTS_PATH = '/etc/nginx/ssl';
 
 /**
@@ -20,6 +26,38 @@ async function ensureBackupDir(): Promise<void> {
   }
 }
 
+/**
+ * Reload nginx configuration
+ */
+async function reloadNginx(): Promise<boolean> {
+  try {
+    // Test nginx configuration first
+    logger.info('Testing nginx configuration...');
+    await execAsync('nginx -t');
+
+    // Reload nginx
+    logger.info('Reloading nginx...');
+    await execAsync('systemctl reload nginx');
+
+    logger.info('Nginx reloaded successfully');
+    return true;
+  } catch (error: any) {
+    logger.error('Failed to reload nginx:', error);
+    logger.error('Nginx test/reload output:', error.stdout || error.stderr);
+
+    // Try alternative reload methods
+    try {
+      logger.info('Trying alternative reload method...');
+      await execAsync('nginx -s reload');
+      logger.info('Nginx reloaded successfully (alternative method)');
+      return true;
+    } catch (altError) {
+      logger.error('Alternative reload also failed:', altError);
+      return false;
+    }
+  }
+}
+
 /**
  * Format bytes to human readable size
  */
@@ -396,45 +434,115 @@ export const importConfig = async (req: AuthRequest, res: Response): Promise<void> => {
+  const domainsWithVhostConfig = await Promise.all(
+    domains.map(async (d) => {
+      const vhostConfig = await readNginxVhostConfig(d.name);
+
+      return {
+        name: d.name,
+        status: d.status,
+        sslEnabled: d.sslEnabled,
+        modsecEnabled: d.modsecEnabled,
+        upstreams: d.upstreams,
+        loadBalancer: d.loadBalancer,
+        // Include nginx vhost configuration file
+        vhostConfig: vhostConfig?.config,
+        vhostEnabled: vhostConfig?.enabled
+      };
+    })
+  );
+
   // Get all SSL certificates with actual certificate files
   const ssl = await prisma.sSLCertificate.findMany({
     include: {
@@ -813,7 +1140,9 @@ async function collectBackupData() {
         commonName: s.commonName,
         sans: s.sans,
         issuer: s.issuer,
-        autoRenew: s.autoRenew
+        autoRenew: s.autoRenew,
+        validFrom: s.validFrom,
+        validTo: s.validTo
       };
     }
 
@@
-825,6 +1154,8 @@ async function collectBackupData() { sans: s.sans, issuer: s.issuer, autoRenew: s.autoRenew, + validFrom: s.validFrom, + validTo: s.validTo, // Include actual certificate files files: sslFiles }; @@ -837,6 +1168,9 @@ async function collectBackupData() { // Get ModSecurity custom rules const modsecCustomRules = await prisma.modSecRule.findMany(); + // Get ModSecurity global settings + const modsecGlobalSettings = await prisma.nginxConfig.findMany(); + // Get ACL rules const aclRules = await prisma.aclRule.findMany(); @@ -854,25 +1188,40 @@ async function collectBackupData() { } }); + // Get all users (excluding passwords for security) + const users = await prisma.user.findMany({ + include: { + profile: true + } + }); + + // Remove password from users + const usersWithoutPassword = users.map(u => { + const { password, ...userWithoutPassword } = u; + return userWithoutPassword; + }); + // Get nginx configs const nginxConfigs = await prisma.nginxConfig.findMany(); return { - version: '1.0', + version: '2.0', // Bumped version for complete backup timestamp: new Date().toISOString(), - domains: domains.map(d => ({ - name: d.name, - status: d.status, - sslEnabled: d.sslEnabled, - modsecEnabled: d.modsecEnabled, - upstreams: d.upstreams, - loadBalancer: d.loadBalancer - })), + + // Domain configurations with vhost files + domains: domainsWithVhostConfig, + + // SSL certificates with actual files ssl: sslWithFiles, + + // ModSecurity configurations modsec: { + globalSettings: modsecGlobalSettings, crsRules: modsecCRSRules, customRules: modsecCustomRules }, + + // ACL rules acl: aclRules.map(r => ({ name: r.name, type: r.type, @@ -884,6 +1233,8 @@ async function collectBackupData() { action: r.action, enabled: r.enabled })), + + // Alert and notification configurations notificationChannels, alertRules: alertRules.map(r => ({ name: r.name, @@ -893,6 +1244,11 @@ async function collectBackupData() { enabled: r.enabled, channels: r.channels.map(c => c.channel.name) })), + + // Users (without passwords) + users: usersWithoutPassword, + + // Global nginx configurations nginxConfigs }; } diff --git a/apps/web/src/components/pages/Backup.tsx b/apps/web/src/components/pages/Backup.tsx index 7df1a96..e970c60 100644 --- a/apps/web/src/components/pages/Backup.tsx +++ b/apps/web/src/components/pages/Backup.tsx @@ -20,6 +20,8 @@ const Backup = () => { const [scheduleToDelete, setScheduleToDelete] = useState(null); const [exportLoading, setExportLoading] = useState(false); const [importLoading, setImportLoading] = useState(false); + const [importConfirmOpen, setImportConfirmOpen] = useState(false); + const [pendingImportFile, setPendingImportFile] = useState(null); const [formData, setFormData] = useState({ name: "", @@ -183,43 +185,56 @@ const Backup = () => { } }; - const handleImportConfig = async () => { + const handleImportConfig = () => { const input = document.createElement('input'); input.type = 'file'; input.accept = 'application/json'; - input.onchange = async (e: Event) => { + input.onchange = (e: Event) => { const file = (e.target as HTMLInputElement).files?.[0]; if (!file) return; - - try { - setImportLoading(true); - const text = await file.text(); - const data = JSON.parse(text); - - const result = await backupService.importConfig(data); - - toast({ - title: "Import successful", - description: `Restored: ${result.domains} domains, ${result.ssl} SSL certs${result.sslFiles ? 
` (${result.sslFiles} with files)` : ''}, ${result.acl} ACL rules, ${result.modsec} ModSec rules`
-        });
-
-        // Reload data
-        loadBackupSchedules();
-      } catch (error: any) {
-        toast({
-          title: "Import failed",
-          description: error.response?.data?.message || "Failed to import configuration. Please check the file format.",
-          variant: "destructive"
-        });
-      } finally {
-        setImportLoading(false);
-      }
+
+      // Show confirmation dialog first
+      setPendingImportFile(file);
+      setImportConfirmOpen(true);
     };
 
     input.click();
   };
 
+  const confirmImport = async () => {
+    if (!pendingImportFile) return;
+
+    try {
+      setImportLoading(true);
+      setImportConfirmOpen(false);
+
+      const text = await pendingImportFile.text();
+      const data = JSON.parse(text);
+
+      const result = await backupService.importConfig(data);
+
+      toast({
+        title: "✅ Restore successful!",
+        description: `Restored: ${result.domains} domains, ${result.vhostConfigs} vhost configs, ${result.upstreams} upstreams, ${result.loadBalancers} LB configs, ${result.ssl} SSL certs (${result.sslFiles} files), ${result.modsecCRS + result.modsecCustom} ModSec rules, ${result.acl} ACL rules, ${result.alertChannels} channels, ${result.alertRules} alerts, ${result.users} users, ${result.nginxConfigs} configs. Nginx has been reloaded.`,
+        duration: 10000
+      });
+
+      // Reload data
+      loadBackupSchedules();
+      setPendingImportFile(null);
+    } catch (error: any) {
+      toast({
+        title: "❌ Restore failed",
+        description: error.response?.data?.message || "Failed to restore configuration. Please check the file format.",
+        variant: "destructive",
+        duration: 8000
+      });
+    } finally {
+      setImportLoading(false);
+    }
+  };
+
   const getStatusColor = (status: string) => {
     switch (status) {
       case 'success': return 'default';
@@ -461,6 +476,85 @@ const Backup = () => {
       </AlertDialog>
+
+      {/* Import/Restore Confirmation Dialog */}
+      <AlertDialog open={importConfirmOpen} onOpenChange={setImportConfirmOpen}>
+        <AlertDialogContent className="max-w-2xl">
+          <AlertDialogHeader>
+            <AlertDialogTitle>⚠️ Confirm Configuration Restore</AlertDialogTitle>
+            <AlertDialogDescription asChild>
+              <div className="space-y-3 text-sm">
+                <p className="font-semibold text-destructive">
+                  🚨 CRITICAL WARNING - Data Replacement
+                </p>
+                <p>
+                  Restoring this backup will REPLACE ALL existing data on this server with data from the backup file.
+                </p>
+                <div>
+                  <p className="font-medium">The following will be REPLACED:</p>
+                  <ul className="list-disc pl-5 space-y-1">
+                    <li>Domains: All domain configurations, upstreams, load balancers</li>
+                    <li>Nginx Configs: Virtual host files in /etc/nginx/sites-available/</li>
+                    <li>SSL Certificates: Certificate files (.crt, .key) in /etc/nginx/ssl/</li>
+                    <li>ModSecurity Rules: CRS rules and custom security rules</li>
+                    <li>ACL Rules: All access control configurations</li>
+                    <li>Alert Settings: Notification channels and alert rules</li>
+                    <li>Users: User accounts (passwords must be reset)</li>
+                    <li>System Configs: Global nginx configurations</li>
+                  </ul>
+                </div>
+                <div>
+                  <p className="font-medium">✅ After Restore:</p>
+                  <ul className="list-disc pl-5 space-y-1">
+                    <li>Nginx will be automatically reloaded</li>
+                    <li>Domains will be immediately accessible with restored configurations</li>
+                    <li>SSL certificates will be active and functional</li>
+                    <li>Users will need to reset their passwords (security measure)</li>
+                  </ul>
+                </div>
+                <p>
+                  💡 Recommendation: Create a backup of your current configuration before proceeding with the restore.
+                </p>
+                <p className="font-semibold">Do you want to proceed with the restore?</p>
+              </div>
+            </AlertDialogDescription>
+          </AlertDialogHeader>
+          <AlertDialogFooter>
+            <AlertDialogCancel onClick={() => setPendingImportFile(null)}>
+              Cancel - Keep Current Data
+            </AlertDialogCancel>
+            <AlertDialogAction onClick={confirmImport} disabled={importLoading}>
+              {importLoading ? (
+                <>
+                  <Loader2 className="mr-2 h-4 w-4 animate-spin" />
+                  Restoring...
+                </>
+              ) : (
+                <>Confirm - Restore Backup</>
+              )}
+            </AlertDialogAction>
+          </AlertDialogFooter>
+        </AlertDialogContent>
+      </AlertDialog>
     </div>
); }; diff --git a/apps/web/src/services/backup.service.ts b/apps/web/src/services/backup.service.ts index cb435e6..5a9783c 100644 --- a/apps/web/src/services/backup.service.ts +++ b/apps/web/src/services/backup.service.ts @@ -40,12 +40,18 @@ export interface UpdateBackupScheduleRequest { export interface ImportResult { domains: number; + vhostConfigs: number; + upstreams: number; + loadBalancers: number; ssl: number; - sslFiles?: number; - modsec: number; + sslFiles: number; + modsecCRS: number; + modsecCustom: number; acl: number; alertChannels: number; alertRules: number; + users: number; + nginxConfigs: number; } export const backupService = { From 3d41e98328861dfac513931ad07aa6494200c5e6 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 07:22:53 +0000 Subject: [PATCH 04/32] feat: Add nginx vhost config generation during backup restore --- apps/api/src/controllers/backup.controller.ts | 236 +++++++++++++++++- 1 file changed, 232 insertions(+), 4 deletions(-) diff --git a/apps/api/src/controllers/backup.controller.ts b/apps/api/src/controllers/backup.controller.ts index 6787635..1ecccf4 100644 --- a/apps/api/src/controllers/backup.controller.ts +++ b/apps/api/src/controllers/backup.controller.ts @@ -531,6 +531,27 @@ export const importConfig = async (req: AuthRequest, res: Response): Promise { + const configPath = path.join(NGINX_SITES_AVAILABLE, `${domain.name}.conf`); + const enabledPath = path.join(NGINX_SITES_ENABLED, `${domain.name}.conf`); + + // Determine if any upstream uses HTTPS + const hasHttpsUpstream = domain.upstreams?.some( + (u: any) => u.protocol === "https" + ) || false; + const upstreamProtocol = hasHttpsUpstream ? "https" : "http"; + + // Generate upstream block + const upstreamBlock = ` +upstream ${domain.name.replace(/\./g, "_")}_backend { + ${domain.loadBalancer?.algorithm === "least_conn" ? "least_conn;" : ""} + ${domain.loadBalancer?.algorithm === "ip_hash" ? "ip_hash;" : ""} + + ${(domain.upstreams || []) + .map( + (u: any) => + `server ${u.host}:${u.port} weight=${u.weight || 1} max_fails=${u.maxFails || 3} fail_timeout=${u.failTimeout || 10}s;` + ) + .join("\n ")} +} +`; + + // HTTP server block (always present) + let httpServerBlock = ` +server { + listen 80; + server_name ${domain.name}; + + # Include ACL rules (IP whitelist/blacklist) + include /etc/nginx/conf.d/acl-rules.conf; + + # Include ACME challenge location for Let's Encrypt + include /etc/nginx/snippets/acme-challenge.conf; + + ${ + domain.sslEnabled + ? ` + # Redirect HTTP to HTTPS + return 301 https://$server_name$request_uri; + ` + : ` + ${domain.modsecEnabled ? "modsecurity on;" : "modsecurity off;"} + + access_log /var/log/nginx/${domain.name}_access.log main; + error_log /var/log/nginx/${domain.name}_error.log warn; + + location / { + proxy_pass ${upstreamProtocol}://${domain.name.replace(/\./g, "_")}_backend; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + ${ + hasHttpsUpstream + ? ` + # HTTPS Backend Settings + ${ + domain.upstreams?.some((u: any) => u.protocol === "https" && !u.sslVerify) + ? "proxy_ssl_verify off;" + : "proxy_ssl_verify on;" + } + proxy_ssl_server_name on; + proxy_ssl_name ${domain.name}; + proxy_ssl_protocols TLSv1.2 TLSv1.3; + ` + : "" + } + + ${ + domain.loadBalancer?.healthCheckEnabled + ? 
` + # Health check settings + proxy_next_upstream error timeout http_502 http_503 http_504; + proxy_next_upstream_tries 3; + proxy_next_upstream_timeout ${domain.loadBalancer.healthCheckTimeout || 5}s; + ` + : "" + } + } + + location /nginx_health { + access_log off; + return 200 "healthy\\n"; + add_header Content-Type text/plain; + } + ` + } +} +`; + + // HTTPS server block (only if SSL enabled) + let httpsServerBlock = ""; + if (domain.sslEnabled && domain.sslCertificate) { + httpsServerBlock = ` +server { + listen 443 ssl http2; + server_name ${domain.name}; + + # Include ACL rules (IP whitelist/blacklist) + include /etc/nginx/conf.d/acl-rules.conf; + + # SSL Certificate Configuration + ssl_certificate /etc/nginx/ssl/${domain.name}.crt; + ssl_certificate_key /etc/nginx/ssl/${domain.name}.key; + ${ + domain.sslCertificate.chain + ? `ssl_trusted_certificate /etc/nginx/ssl/${domain.name}.chain.crt;` + : "" + } + + # SSL Security Settings + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers ECDHE-RSA-AES128-GCM-SHA256:ECDHE-ECDSA-AES128-GCM-SHA256:ECDHE-RSA-AES256-GCM-SHA384:ECDHE-ECDSA-AES256-GCM-SHA384:DHE-RSA-AES128-GCM-SHA256:DHE-DSS-AES128-GCM-SHA256:kEDH+AESGCM:ECDHE-RSA-AES128-SHA256:ECDHE-ECDSA-AES128-SHA256:ECDHE-RSA-AES128-SHA:ECDHE-ECDSA-AES128-SHA:ECDHE-RSA-AES256-SHA384:ECDHE-ECDSA-AES256-SHA384:ECDHE-RSA-AES256-SHA:ECDHE-ECDSA-AES256-SHA:DHE-RSA-AES128-SHA256:DHE-RSA-AES128-SHA:DHE-DSS-AES128-SHA256:DHE-RSA-AES256-SHA256:DHE-DSS-AES256-SHA:DHE-RSA-AES256-SHA:!aNULL:!eNULL:!EXPORT:!DES:!RC4:!3DES:!MD5:!PSK; + ssl_prefer_server_ciphers on; + ssl_session_cache shared:SSL:10m; + ssl_session_timeout 10m; + ssl_stapling on; + ssl_stapling_verify on; + + # Security Headers + add_header Strict-Transport-Security "max-age=31536000; includeSubDomains" always; + add_header X-Frame-Options "SAMEORIGIN" always; + add_header X-Content-Type-Options "nosniff" always; + add_header X-XSS-Protection "1; mode=block" always; + + ${domain.modsecEnabled ? "modsecurity on;" : "modsecurity off;"} + + access_log /var/log/nginx/${domain.name}_ssl_access.log main; + error_log /var/log/nginx/${domain.name}_ssl_error.log warn; + + location / { + proxy_pass ${upstreamProtocol}://${domain.name.replace(/\./g, "_")}_backend; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + + ${ + hasHttpsUpstream + ? ` + # HTTPS Backend Settings + ${ + domain.upstreams?.some((u: any) => u.protocol === "https" && !u.sslVerify) + ? "proxy_ssl_verify off;" + : "proxy_ssl_verify on;" + } + proxy_ssl_server_name on; + proxy_ssl_name ${domain.name}; + proxy_ssl_protocols TLSv1.2 TLSv1.3; + ` + : "" + } + + ${ + domain.loadBalancer?.healthCheckEnabled + ? 
` + # Health check settings + proxy_next_upstream error timeout http_502 http_503 http_504; + proxy_next_upstream_tries 3; + proxy_next_upstream_timeout ${domain.loadBalancer.healthCheckTimeout || 5}s; + ` + : "" + } + } + + location /nginx_health { + access_log off; + return 200 "healthy\\n"; + add_header Content-Type text/plain; + } +} +`; + } + + const fullConfig = upstreamBlock + httpServerBlock + httpsServerBlock; + + // Write configuration file + try { + await fs.mkdir(NGINX_SITES_AVAILABLE, { recursive: true }); + await fs.mkdir(NGINX_SITES_ENABLED, { recursive: true }); + await fs.writeFile(configPath, fullConfig); + + // Create symlink if domain is active + if (domain.status === "active") { + try { + await fs.unlink(enabledPath); + } catch (e) { + // File doesn't exist, ignore + } + await fs.symlink(configPath, enabledPath); + } + + logger.info(`Nginx configuration generated for ${domain.name} during backup restore`); + } catch (error) { + logger.error(`Failed to write nginx config for ${domain.name}:`, error); + throw error; + } +} + /** * Helper function to read nginx vhost configuration file for a domain */ async function readNginxVhostConfig(domainName: string) { try { - const vhostPath = path.join(NGINX_SITES_AVAILABLE, domainName); + const vhostPath = path.join(NGINX_SITES_AVAILABLE, `${domainName}.conf`); const vhostConfig = await fs.readFile(vhostPath, 'utf-8'); // Check if symlink exists in sites-enabled let isEnabled = false; try { - const enabledPath = path.join(NGINX_SITES_ENABLED, domainName); + const enabledPath = path.join(NGINX_SITES_ENABLED, `${domainName}.conf`); await fs.access(enabledPath); isEnabled = true; } catch { @@ -992,13 +1220,13 @@ async function writeNginxVhostConfig(domainName: string, config: string, enabled await fs.mkdir(NGINX_SITES_AVAILABLE, { recursive: true }); await fs.mkdir(NGINX_SITES_ENABLED, { recursive: true }); - const vhostPath = path.join(NGINX_SITES_AVAILABLE, domainName); + const vhostPath = path.join(NGINX_SITES_AVAILABLE, `${domainName}.conf`); await fs.writeFile(vhostPath, config, 'utf-8'); logger.info(`Nginx vhost config written for ${domainName}`); // Create symlink in sites-enabled if enabled if (enabled) { - const enabledPath = path.join(NGINX_SITES_ENABLED, domainName); + const enabledPath = path.join(NGINX_SITES_ENABLED, `${domainName}.conf`); try { await fs.unlink(enabledPath); } catch { From 7cc70bc10ab343c3bc386e59a9125f3d1738a678 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 07:32:06 +0000 Subject: [PATCH 05/32] feat: Add import warning dialog and file upload functionality in Backup component --- .gitignore | 3 +- apps/web/src/components/pages/Backup.tsx | 150 ++++++++++++++++++++++- 2 files changed, 146 insertions(+), 7 deletions(-) diff --git a/.gitignore b/.gitignore index 6705fdf..1b5cfbc 100644 --- a/.gitignore +++ b/.gitignore @@ -44,4 +44,5 @@ yarn.lock landing/* .env .pnpm-store/ -.seeded \ No newline at end of file +.seeded +*.md \ No newline at end of file diff --git a/apps/web/src/components/pages/Backup.tsx b/apps/web/src/components/pages/Backup.tsx index e970c60..c3dfc45 100644 --- a/apps/web/src/components/pages/Backup.tsx +++ b/apps/web/src/components/pages/Backup.tsx @@ -20,8 +20,10 @@ const Backup = () => { const [scheduleToDelete, setScheduleToDelete] = useState(null); const [exportLoading, setExportLoading] = useState(false); const [importLoading, setImportLoading] = useState(false); + const [importWarningOpen, setImportWarningOpen] = useState(false); const [importConfirmOpen, 
setImportConfirmOpen] = useState(false); const [pendingImportFile, setPendingImportFile] = useState(null); + const [isDragging, setIsDragging] = useState(false); const [formData, setFormData] = useState({ name: "", @@ -186,17 +188,55 @@ const Backup = () => { }; const handleImportConfig = () => { + // Open warning dialog first + setImportWarningOpen(true); + }; + + const handleFileSelect = (file: File) => { + if (!file.name.endsWith('.json')) { + toast({ + title: "Invalid file type", + description: "Please select a JSON backup file", + variant: "destructive" + }); + return; + } + + setPendingImportFile(file); + setImportWarningOpen(false); + setImportConfirmOpen(true); + }; + + const handleFileDrop = (e: React.DragEvent) => { + e.preventDefault(); + setIsDragging(false); + + const file = e.dataTransfer.files[0]; + if (file) { + handleFileSelect(file); + } + }; + + const handleDragOver = (e: React.DragEvent) => { + e.preventDefault(); + setIsDragging(true); + }; + + const handleDragLeave = (e: React.DragEvent) => { + e.preventDefault(); + setIsDragging(false); + }; + + const openFileDialog = () => { const input = document.createElement('input'); input.type = 'file'; - input.accept = 'application/json'; + input.accept = 'application/json,.json'; input.onchange = (e: Event) => { const file = (e.target as HTMLInputElement).files?.[0]; - if (!file) return; - - // Show confirmation dialog first - setPendingImportFile(file); - setImportConfirmOpen(true); + if (file) { + handleFileSelect(file); + } }; input.click(); @@ -477,6 +517,104 @@ const Backup = () => { + {/* Import Warning Dialog with File Upload */} + + + + + + Import Configuration Backup + + +
+

+ ⚠️ + CRITICAL WARNING - ALL DATA WILL BE REPLACED +

+

+ Importing a backup will COMPLETELY REPLACE all existing configurations on this server. + This action is IRREVERSIBLE without a prior backup. +

+
+ +
+

+ 📦 What will be replaced: +

+
+
• All domain configurations
+
• Load balancer settings
+
• SSL certificates & files
+
• ModSecurity rules
+
• ACL access rules
+
• Alert configurations
+
• User accounts
+
• Nginx vhost files
+
+
+ +
+

+ 💡 Before you proceed: +

+
    +
  • Export your current configuration as a safety backup
  • +
  • Ensure the backup file is from a trusted source
  • +
  • Verify the backup file is not corrupted
  • +
  • Notify other administrators about the restore
  • +
+
+ + {/* File Upload Zone */} +
+ +
+
+
+ +
+
+

+ {isDragging ? 'Drop file here' : 'Click to browse or drag & drop'} +

+

+ Accepts .json backup files only +

+
+
+ + + Maximum file size: 50MB + +
+
+
+
+
+
+ + + +
+
+ {/* Import/Restore Confirmation Dialog */} From 188b2d62c9c6597466a1dff670a034b65778b522 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 07:41:43 +0000 Subject: [PATCH 06/32] feat: Update .gitignore to exclude documentation files and remove SSL guide --- .gitignore | 3 +- docs/BACKUP_SSL_GUIDE.md | 253 --------------------------------------- 2 files changed, 2 insertions(+), 254 deletions(-) delete mode 100644 docs/BACKUP_SSL_GUIDE.md diff --git a/.gitignore b/.gitignore index 1b5cfbc..467dd2c 100644 --- a/.gitignore +++ b/.gitignore @@ -45,4 +45,5 @@ landing/* .env .pnpm-store/ .seeded -*.md \ No newline at end of file +*.md +/docs/* \ No newline at end of file diff --git a/docs/BACKUP_SSL_GUIDE.md b/docs/BACKUP_SSL_GUIDE.md deleted file mode 100644 index 218e396..0000000 --- a/docs/BACKUP_SSL_GUIDE.md +++ /dev/null @@ -1,253 +0,0 @@ -# Backup & Restore with SSL Certificates - -## Tổng quan - -Hệ thống backup đã được nâng cấp để hỗ trợ **backup và restore đầy đủ SSL certificates**, cho phép bạn di chuyển cấu hình giữa các máy chủ một cách hoàn chỉnh. - -## Những gì được backup - -### 1. **Database Records** -- Domain configurations -- SSL certificate metadata (issuer, validity, SANs) -- ModSecurity rules (CRS + Custom) -- ACL rules -- Alert rules & notification channels -- Nginx configurations - -### 2. **SSL Certificate Files** ✨ (NEW) -Cho mỗi domain có SSL enabled, hệ thống sẽ backup: -- **Certificate file** (.crt) - Public certificate -- **Private key file** (.key) - Private key (được mã hóa an toàn) -- **Certificate chain** (.chain.crt) - Intermediate certificates (nếu có) - -## Cách hoạt động - -### Export/Backup - -Khi bạn export configuration hoặc chạy backup: - -```typescript -// Backend tự động: -1. Đọc metadata SSL từ database -2. Đọc SSL certificate files từ /etc/nginx/ssl/ -3. Include nội dung files vào backup JSON -4. Tạo file backup hoàn chỉnh -``` - -**File backup JSON structure:** -```json -{ - "version": "1.0", - "timestamp": "2025-10-06T10:30:00Z", - "ssl": [ - { - "domainName": "example.com", - "commonName": "example.com", - "sans": ["example.com", "www.example.com"], - "issuer": "Let's Encrypt", - "autoRenew": true, - "files": { - "certificate": "-----BEGIN CERTIFICATE-----\n...", - "privateKey": "-----BEGIN PRIVATE KEY-----\n...", - "chain": "-----BEGIN CERTIFICATE-----\n..." - } - } - ] -} -``` - -### Import/Restore - -Khi bạn import backup trên máy chủ mới: - -```typescript -// Backend tự động: -1. Parse backup JSON -2. Restore domains vào database -3. Restore SSL metadata vào database -4. Write SSL certificate files vào /etc/nginx/ssl/ -5. Set permissions (private key = 600) -6. Restore các cấu hình khác -``` - -## Sử dụng - -### 1. Export Configuration - -**Từ UI:** -``` -Backup & Restore → Export Configuration → Download -``` - -**Kết quả:** File JSON chứa toàn bộ cấu hình + SSL certificates - -### 2. 
Import Configuration
-
-**On the new server:**
-```
-Backup & Restore → Import Configuration → Select file
-```
-
-**The system will:**
-- ✅ Restore domains
-- ✅ Restore SSL certificates (both metadata and files)
-- ✅ Restore ACL rules
-- ✅ Restore ModSecurity rules
-- ✅ Restore alert configurations
-
-**The toast notification will show:**
-```
-Restored: 5 domains, 3 SSL certs (3 with files), 10 ACL rules, 25 ModSec rules
-```
-
-## Security
-
-### Private Keys
-- Private keys are stored in the backup JSON
-- **IMPORTANT:** Protect the backup file the same way you protect your private keys
-- Recommended: encrypt the backup file before storing it
-- Set permission 600 on private keys when restoring
-
-### Best Practices
-
-1. **Store backups securely:**
-   ```bash
-   # Encrypt backup file
-   gpg -c nginx-config-2025-10-06.json
-
-   # Decrypt when needed
-   gpg -d nginx-config-2025-10-06.json.gpg > nginx-config-2025-10-06.json
-   ```
-
-2. **Back up on a schedule:**
-   - Use scheduled backups
-   - Store copies off-site
-   - Verify backups regularly
-
-3. **Test your restores:**
-   - Test the restore in a staging environment
-   - Verify that SSL certificates work
-   - Check all domains
-
-## API Endpoints
-
-### Export
-```bash
-GET /api/backup/export
-Authorization: Bearer <token>
-```
-
-### Import
-```bash
-POST /api/backup/import
-Authorization: Bearer <token>
-Content-Type: application/json
-
-{
-  "version": "1.0",
-  "ssl": [...],
-  "domains": [...],
-  ...
-}
-```
-
-## File Locations
-
-### SSL Certificates
-```
-/etc/nginx/ssl/
-├── example.com.crt
-├── example.com.key (chmod 600)
-├── example.com.chain.crt
-├── another-domain.com.crt
-├── another-domain.com.key (chmod 600)
-└── another-domain.com.chain.crt
-```
-
-### Backups
-```
-/var/backups/nginx-love/
-├── backup-2025-10-06T10-30-00.json
-├── backup-2025-10-05T02-00-00.json
-└── ...
-```
-
-## Troubleshooting
-
-### SSL files are not restored
-
-**Cause:** The domain does not yet exist in the database
-
-**Fix:**
-1. The import automatically creates the domains first
-2. SSL certificates are restored afterwards
-3. Check the logs: `/home/nginx-love-dev/apps/api/logs/combined.log`
-
-### Permission denied during restore
-
-**Cause:** No write permission for /etc/nginx/ssl/
-
-**Fix:**
-```bash
-sudo mkdir -p /etc/nginx/ssl
-sudo chown -R :nginx /etc/nginx/ssl
-sudo chmod 755 /etc/nginx/ssl
-```
-
-### Certificate not valid after restore
-
-**Causes:**
-- The file was corrupted during backup/restore
-- The certificate has expired
-
-**Fix:**
-```bash
-# Verify certificate
-openssl x509 -in /etc/nginx/ssl/example.com.crt -text -noout
-
-# Check private key
-openssl rsa -in /etc/nginx/ssl/example.com.key -check
-
-# Verify cert matches key
-openssl x509 -noout -modulus -in /etc/nginx/ssl/example.com.crt | openssl md5
-openssl rsa -noout -modulus -in /etc/nginx/ssl/example.com.key | openssl md5
-```
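-## Scripted Migration (Sketch)
-
-The export/import endpoints above (and the manual migration steps in the next section) can also be driven from a script. A minimal sketch, assuming valid admin JWTs for both servers and that the export response body is the backup JSON itself; the hosts and tokens below are placeholders, not real values:
-
-```typescript
-import axios from 'axios';
-
-// Placeholders - substitute your real hosts and admin JWTs
-const OLD_API = 'https://old-server:3001/api';
-const NEW_API = 'https://new-server:3001/api';
-const OLD_TOKEN = process.env.OLD_TOKEN ?? '';
-const NEW_TOKEN = process.env.NEW_TOKEN ?? '';
-
-async function migrate(): Promise<void> {
-  // 1. Export the full configuration (including SSL certificate files)
-  //    from the old server
-  const { data: backup } = await axios.get(`${OLD_API}/backup/export`, {
-    headers: { Authorization: `Bearer ${OLD_TOKEN}` }
-  });
-
-  // 2. Import on the new server: this restores the database records,
-  //    writes the SSL files back to /etc/nginx/ssl/ and reloads nginx
-  const { data: result } = await axios.post(`${NEW_API}/backup/import`, backup, {
-    headers: { Authorization: `Bearer ${NEW_TOKEN}` }
-  });
-
-  console.log('Restore result:', result);
-}
-
-migrate().catch((err) => {
-  console.error('Migration failed:', err.message);
-  process.exit(1);
-});
-```
-
-Run this only from a trusted admin host: the backup JSON carries private keys, so treat the process environment and any temporary files with the same care as the keys themselves.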
-## Migration Example
-
-### Old server (Production):
-```bash
-1. Log in to the UI
-2. Navigate to Backup & Restore
-3. Click "Export Configuration"
-4. Download: nginx-config-2025-10-06.json
-5. Encrypt (optional): gpg -c nginx-config-2025-10-06.json
-```
-
-### New server (Staging/New Production):
-```bash
-1. Set up the nginx-love application
-2. Run migrations: npx prisma migrate deploy
-3. Start services
-4. Log in to the UI
-5. Navigate to Backup & Restore
-6. Click "Import Configuration"
-7. Select: nginx-config-2025-10-06.json
-8. Wait for the import to complete
-9. Verify: check domains, SSL certs, and configurations
-10. Test: access the domains over HTTPS
-```
-
-## Notes
-
-- ✅ SSL certificates are backed up **IN FULL** (not just metadata)
-- ✅ Private keys are kept secure inside the backup file
-- ✅ Certificate chains (intermediate certs) are supported
-- ✅ Permissions are set automatically for private keys
-- ✅ Compatible with Let's Encrypt, self-signed, and commercial certificates
-- ⚠️ **Protect backup files the same way you protect private keys**
-- ⚠️ Backup files can get very large if there are many SSL certificates
-
-## Version History
-
-- **v1.0** (2025-10-06): Initial release with SSL certificate backup support

From b63e88c5054751e45cbe4edb2f05d94dd428b571 Mon Sep 17 00:00:00 2001
From: vncloudsco
Date: Mon, 6 Oct 2025 07:55:16 +0000
Subject: [PATCH 07/32] feat: Update backup import process to include hashed passwords for user accounts

---
 apps/api/src/controllers/backup.controller.ts | 65 +++++++++----------
 apps/web/src/components/pages/Backup.tsx      |  4 +-
 2 files changed, 31 insertions(+), 38 deletions(-)

diff --git a/apps/api/src/controllers/backup.controller.ts b/apps/api/src/controllers/backup.controller.ts
index 1ecccf4..752bec2 100644
--- a/apps/api/src/controllers/backup.controller.ts
+++ b/apps/api/src/controllers/backup.controller.ts
@@ -747,7 +747,7 @@ export const importConfig = async (req: AuthRequest, res: Response): Promise<void> => {
 
-      // Get all users (excluding passwords for security)
+      // Get all users (with hashed passwords for complete restore)
       const users = await prisma.user.findMany({
         include: {
           profile: true
         }
       });
 
-      // Remove password from users
-      const usersWithoutPassword = users.map(u => {
-        const { password, ...userWithoutPassword } = u;
-        return userWithoutPassword;
-      });
+      // Keep passwords as they are already hashed (bcrypt)
+      // This allows users to login immediately after restore without password reset
 
       // Get nginx configs
       const nginxConfigs = await prisma.nginxConfig.findMany();
@@ -1473,8 +1466,8 @@ async function collectBackupData() {
       channels: r.channels.map(c => c.channel.name)
     })),
 
-    // Users (without passwords)
-    users: usersWithoutPassword,
+    // Users (with hashed passwords for complete restore)
+    users: users,
 
     // Global nginx configurations
     nginxConfigs
diff --git a/apps/web/src/components/pages/Backup.tsx b/apps/web/src/components/pages/Backup.tsx
index c3dfc45..ce9e35e 100644
--- a/apps/web/src/components/pages/Backup.tsx
+++ b/apps/web/src/components/pages/Backup.tsx
@@ -642,7 +642,7 @@ const Backup = () => {
  • ModSecurity Rules: CRS rules and custom security rules
  • ACL Rules: All access control configurations
  • Alert Settings: Notification channels and alert rules
  • -
  • Users: User accounts (passwords must be reset)
  • +
  • Users: User accounts with passwords (can log in immediately)
  • System Configs: Global nginx configurations
  • @@ -655,7 +655,7 @@ const Backup = () => {
  • Nginx will be automatically reloaded
  • Domains will be immediately accessible with restored configurations
  • SSL certificates will be active and functional
  • -
  • Users will need to reset their passwords (security measure)
  • +
  • Users can log in with their original passwords from backup
  • From a3ea1cb904ef951b2b198a9284a3a1799c3f5c23 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 07:59:40 +0000 Subject: [PATCH 08/32] feat: Enhance user import functionality to upsert users and profiles with hashed passwords from backup --- apps/api/src/controllers/backup.controller.ts | 79 ++++++++++++------- 1 file changed, 52 insertions(+), 27 deletions(-) diff --git a/apps/api/src/controllers/backup.controller.ts b/apps/api/src/controllers/backup.controller.ts index 752bec2..4dcd85e 100644 --- a/apps/api/src/controllers/backup.controller.ts +++ b/apps/api/src/controllers/backup.controller.ts @@ -751,38 +751,63 @@ export const importConfig = async (req: AuthRequest, res: Response): Promise Date: Mon, 6 Oct 2025 08:46:03 +0000 Subject: [PATCH 09/32] feat: Add slave node management features with sync capabilities - Updated SlaveNode interface to include sync configuration and metrics. - Implemented database migration for slave nodes and sync logs. - Created API endpoints for registering, updating, deleting, and syncing slave nodes. - Added middleware for validating slave API keys. - Developed service layer for handling slave node operations. - Implemented query options for fetching slave nodes and their statuses. - Enhanced mock data for slave nodes to include new fields. --- .../migration.sql | 85 ++ apps/api/prisma/schema.prisma | 88 ++ apps/api/src/controllers/slave.controller.ts | 790 ++++++++++++++++++ apps/api/src/middleware/slaveAuth.ts | 80 ++ apps/api/src/routes/index.ts | 2 + apps/api/src/routes/slave.routes.ts | 172 ++++ apps/web/src/components/pages/SlaveNodes.tsx | 466 +++++++---- apps/web/src/mocks/data.ts | 28 +- apps/web/src/queries/slave.query-options.ts | 31 + apps/web/src/services/slave.service.ts | 123 +++ apps/web/src/types/index.ts | 25 +- 11 files changed, 1715 insertions(+), 175 deletions(-) create mode 100644 apps/api/prisma/migrations/20251006084450_add_slave_node_feature/migration.sql create mode 100644 apps/api/src/controllers/slave.controller.ts create mode 100644 apps/api/src/middleware/slaveAuth.ts create mode 100644 apps/api/src/routes/slave.routes.ts create mode 100644 apps/web/src/queries/slave.query-options.ts create mode 100644 apps/web/src/services/slave.service.ts diff --git a/apps/api/prisma/migrations/20251006084450_add_slave_node_feature/migration.sql b/apps/api/prisma/migrations/20251006084450_add_slave_node_feature/migration.sql new file mode 100644 index 0000000..232cb55 --- /dev/null +++ b/apps/api/prisma/migrations/20251006084450_add_slave_node_feature/migration.sql @@ -0,0 +1,85 @@ +-- CreateEnum +CREATE TYPE "SlaveNodeStatus" AS ENUM ('online', 'offline', 'syncing', 'error'); + +-- CreateEnum +CREATE TYPE "SyncLogStatus" AS ENUM ('success', 'failed', 'partial', 'running'); + +-- CreateEnum +CREATE TYPE "SyncLogType" AS ENUM ('full_sync', 'incremental_sync', 'health_check'); + +-- CreateTable +CREATE TABLE "slave_nodes" ( + "id" TEXT NOT NULL, + "name" TEXT NOT NULL, + "host" TEXT NOT NULL, + "port" INTEGER NOT NULL DEFAULT 3001, + "apiKey" TEXT NOT NULL, + "status" "SlaveNodeStatus" NOT NULL DEFAULT 'offline', + "lastSeen" TIMESTAMP(3), + "version" TEXT, + "syncEnabled" BOOLEAN NOT NULL DEFAULT true, + "syncInterval" INTEGER NOT NULL DEFAULT 60, + "configHash" TEXT, + "lastSyncAt" TIMESTAMP(3), + "latency" INTEGER, + "cpuUsage" DOUBLE PRECISION, + "memoryUsage" DOUBLE PRECISION, + "diskUsage" DOUBLE PRECISION, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT 
"slave_nodes_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "sync_logs" ( + "id" TEXT NOT NULL, + "nodeId" TEXT NOT NULL, + "type" "SyncLogType" NOT NULL, + "status" "SyncLogStatus" NOT NULL DEFAULT 'running', + "configHash" TEXT, + "changesCount" INTEGER, + "errorMessage" TEXT, + "startedAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "completedAt" TIMESTAMP(3), + "duration" INTEGER, + + CONSTRAINT "sync_logs_pkey" PRIMARY KEY ("id") +); + +-- CreateTable +CREATE TABLE "config_versions" ( + "id" TEXT NOT NULL, + "version" SERIAL NOT NULL, + "configHash" TEXT NOT NULL, + "configData" JSONB NOT NULL, + "createdBy" TEXT, + "description" TEXT, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + + CONSTRAINT "config_versions_pkey" PRIMARY KEY ("id") +); + +-- CreateIndex +CREATE UNIQUE INDEX "slave_nodes_name_key" ON "slave_nodes"("name"); + +-- CreateIndex +CREATE UNIQUE INDEX "slave_nodes_apiKey_key" ON "slave_nodes"("apiKey"); + +-- CreateIndex +CREATE INDEX "slave_nodes_status_idx" ON "slave_nodes"("status"); + +-- CreateIndex +CREATE INDEX "slave_nodes_lastSeen_idx" ON "slave_nodes"("lastSeen"); + +-- CreateIndex +CREATE INDEX "sync_logs_nodeId_startedAt_idx" ON "sync_logs"("nodeId", "startedAt"); + +-- CreateIndex +CREATE UNIQUE INDEX "config_versions_configHash_key" ON "config_versions"("configHash"); + +-- CreateIndex +CREATE INDEX "config_versions_createdAt_idx" ON "config_versions"("createdAt"); + +-- AddForeignKey +ALTER TABLE "sync_logs" ADD CONSTRAINT "sync_logs_nodeId_fkey" FOREIGN KEY ("nodeId") REFERENCES "slave_nodes"("id") ON DELETE CASCADE ON UPDATE CASCADE; diff --git a/apps/api/prisma/schema.prisma b/apps/api/prisma/schema.prisma index fe145b5..994c1d8 100644 --- a/apps/api/prisma/schema.prisma +++ b/apps/api/prisma/schema.prisma @@ -506,3 +506,91 @@ model BackupFile { @@index([createdAt]) @@map("backup_files") } + +enum SlaveNodeStatus { + online + offline + syncing + error +} + +enum SyncLogStatus { + success + failed + partial + running +} + +enum SyncLogType { + full_sync + incremental_sync + health_check +} + +model SlaveNode { + id String @id @default(cuid()) + name String @unique + host String + port Int @default(3001) + apiKey String @unique // Authentication token for slave + + status SlaveNodeStatus @default(offline) + lastSeen DateTime? + version String? + + // Sync configuration + syncEnabled Boolean @default(true) + syncInterval Int @default(60) // seconds + configHash String? // SHA256 hash of current config + lastSyncAt DateTime? + + // Metrics + latency Int? // milliseconds + cpuUsage Float? + memoryUsage Float? + diskUsage Float? + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + syncLogs SyncLog[] + + @@index([status]) + @@index([lastSeen]) + @@map("slave_nodes") +} + +model SyncLog { + id String @id @default(cuid()) + nodeId String + node SlaveNode @relation(fields: [nodeId], references: [id], onDelete: Cascade) + + type SyncLogType + status SyncLogStatus @default(running) + + configHash String? + changesCount Int? + errorMessage String? @db.Text + + startedAt DateTime @default(now()) + completedAt DateTime? + duration Int? // milliseconds + + @@index([nodeId, startedAt]) + @@map("sync_logs") +} + +model ConfigVersion { + id String @id @default(cuid()) + version Int @default(autoincrement()) + configHash String @unique + configData Json // Serialized config + + createdBy String? + description String? 
+ + createdAt DateTime @default(now()) + + @@index([createdAt]) + @@map("config_versions") +} diff --git a/apps/api/src/controllers/slave.controller.ts b/apps/api/src/controllers/slave.controller.ts new file mode 100644 index 0000000..068bbe3 --- /dev/null +++ b/apps/api/src/controllers/slave.controller.ts @@ -0,0 +1,790 @@ +import { Response } from 'express'; +import { AuthRequest } from '../middleware/auth'; +import { SlaveRequest } from '../middleware/slaveAuth'; +import prisma from '../config/database'; +import logger from '../utils/logger'; +import crypto from 'crypto'; +import axios from 'axios'; + +/** + * Generate SHA256 hash for config data + */ +function generateConfigHash(config: any): string { + return crypto + .createHash('sha256') + .update(JSON.stringify(config)) + .digest('hex'); +} + +/** + * Generate random API key for slave authentication + */ +function generateApiKey(): string { + return crypto.randomBytes(32).toString('hex'); +} + +/** + * Collect current configuration for sync + */ +async function collectCurrentConfig() { + const [domains, ssl, modsec, crsRules, acl, nginxConfigs, alertRules, notificationChannels] = await Promise.all([ + prisma.domain.findMany({ + include: { + upstreams: true, + loadBalancer: true, + sslCertificate: true + } + }), + prisma.sSLCertificate.findMany({ + include: { + domain: true + } + }), + prisma.modSecRule.findMany(), + prisma.modSecCRSRule.findMany(), + prisma.aclRule.findMany(), + prisma.nginxConfig.findMany(), + prisma.alertRule.findMany({ + include: { + channels: { + include: { + channel: true + } + } + } + }), + prisma.notificationChannel.findMany() + ]); + + return { + version: '2.0', + timestamp: new Date().toISOString(), + domains: domains.map(d => ({ + id: d.id, + name: d.name, + status: d.status, + sslEnabled: d.sslEnabled, + modsecEnabled: d.modsecEnabled, + upstreams: d.upstreams, + loadBalancer: d.loadBalancer, + sslCertificateId: d.sslCertificateId + })), + ssl: ssl.map(s => ({ + id: s.id, + commonName: s.commonName, + sans: s.sans, + issuer: s.issuer, + validFrom: s.validFrom, + validTo: s.validTo, + autoRenew: s.autoRenew, + domainId: s.domainId + })), + modsec: { + customRules: modsec, + crsRules: crsRules + }, + acl: acl, + nginxConfigs: nginxConfigs, + alertRules: alertRules.map(r => ({ + id: r.id, + name: r.name, + condition: r.condition, + threshold: r.threshold, + severity: r.severity, + enabled: r.enabled, + channels: r.channels.map(c => c.channelId) + })), + notificationChannels: notificationChannels + }; +} + +// ========================================== +// MASTER API ENDPOINTS +// ========================================== + +/** + * Register new slave node + */ +export const registerSlaveNode = async (req: AuthRequest, res: Response): Promise => { + try { + const { name, host, port, syncInterval } = req.body; + + // Check if node with same name exists + const existing = await prisma.slaveNode.findUnique({ + where: { name } + }); + + if (existing) { + res.status(400).json({ + success: false, + message: 'Node with this name already exists' + }); + return; + } + + // Generate API key + const apiKey = generateApiKey(); + + const node = await prisma.slaveNode.create({ + data: { + name, + host, + port: port || 3001, + apiKey, + syncInterval: syncInterval || 60, + status: 'offline' + } + }); + + logger.info(`Slave node registered: ${name}`, { + userId: req.user?.userId, + nodeId: node.id + }); + + res.status(201).json({ + success: true, + message: 'Slave node registered successfully', + data: { + id: node.id, + 
name: node.name, + host: node.host, + port: node.port, + apiKey: node.apiKey, // Send once during registration + syncInterval: node.syncInterval + } + }); + } catch (error) { + logger.error('Register slave node error:', error); + res.status(500).json({ + success: false, + message: 'Failed to register slave node' + }); + } +}; + +/** + * Get all slave nodes + */ +export const getSlaveNodes = async (req: AuthRequest, res: Response): Promise => { + try { + const nodes = await prisma.slaveNode.findMany({ + include: { + syncLogs: { + take: 1, + orderBy: { + startedAt: 'desc' + } + } + }, + orderBy: { + createdAt: 'desc' + } + }); + + res.json({ + success: true, + data: nodes.map(node => ({ + ...node, + apiKey: undefined, // Don't expose API key in list + lastSync: node.syncLogs[0] || null, + syncLogs: undefined + })) + }); + } catch (error) { + logger.error('Get slave nodes error:', error); + res.status(500).json({ + success: false, + message: 'Failed to get slave nodes' + }); + } +}; + +/** + * Get single slave node + */ +export const getSlaveNode = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + const node = await prisma.slaveNode.findUnique({ + where: { id }, + include: { + syncLogs: { + take: 10, + orderBy: { + startedAt: 'desc' + } + } + } + }); + + if (!node) { + res.status(404).json({ + success: false, + message: 'Slave node not found' + }); + return; + } + + res.json({ + success: true, + data: { + ...node, + apiKey: undefined // Don't expose API key + } + }); + } catch (error) { + logger.error('Get slave node error:', error); + res.status(500).json({ + success: false, + message: 'Failed to get slave node' + }); + } +}; + +/** + * Update slave node + */ +export const updateSlaveNode = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + const { name, host, port, syncEnabled, syncInterval } = req.body; + + const updated = await prisma.slaveNode.update({ + where: { id }, + data: { + ...(name && { name }), + ...(host && { host }), + ...(port && { port }), + ...(syncEnabled !== undefined && { syncEnabled }), + ...(syncInterval && { syncInterval }) + } + }); + + logger.info(`Slave node updated: ${id}`, { + userId: req.user?.userId + }); + + res.json({ + success: true, + message: 'Slave node updated successfully', + data: { + ...updated, + apiKey: undefined + } + }); + } catch (error) { + logger.error('Update slave node error:', error); + res.status(500).json({ + success: false, + message: 'Failed to update slave node' + }); + } +}; + +/** + * Delete slave node + */ +export const deleteSlaveNode = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + await prisma.slaveNode.delete({ + where: { id } + }); + + logger.info(`Slave node deleted: ${id}`, { + userId: req.user?.userId + }); + + res.json({ + success: true, + message: 'Slave node deleted successfully' + }); + } catch (error) { + logger.error('Delete slave node error:', error); + res.status(500).json({ + success: false, + message: 'Failed to delete slave node' + }); + } +}; + +/** + * Sync configuration to specific node + */ +export const syncConfigToNode = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + const { force } = req.body; + + const node = await prisma.slaveNode.findUnique({ where: { id } }); + if (!node) { + res.status(404).json({ + success: false, + message: 'Slave node not found' + }); + return; + } + + if (!node.syncEnabled) { + res.status(400).json({ + 
success: false, + message: 'Sync is disabled for this node' + }); + return; + } + + // Create sync log + const syncLog = await prisma.syncLog.create({ + data: { + nodeId: id, + type: 'full_sync', + status: 'running' + } + }); + + const startTime = Date.now(); + + try { + // Collect config + const config = await collectCurrentConfig(); + const configHash = generateConfigHash(config); + + // Check if sync needed (skip if same hash and not forced) + if (!force && node.configHash === configHash) { + await prisma.syncLog.update({ + where: { id: syncLog.id }, + data: { + status: 'success', + completedAt: new Date(), + duration: Date.now() - startTime, + configHash + } + }); + + return res.json({ + success: true, + message: 'Configuration already up to date', + data: { + skipped: true, + configHash + } + }); + } + + // Store config version + await prisma.configVersion.create({ + data: { + configHash, + configData: config as any, + createdBy: req.user?.userId, + description: `Sync to ${node.name}` + } + }).catch(() => {}); // Ignore if hash already exists + + // Update node status + await prisma.slaveNode.update({ + where: { id }, + data: { status: 'syncing' } + }); + + // Push config to slave via HTTP + const response = await axios.post( + `http://${node.host}:${node.port}/api/slave/sync/apply-config`, + { + config, + configHash, + masterVersion: '2.0' + }, + { + headers: { + 'X-API-Key': node.apiKey, + 'Content-Type': 'application/json' + }, + timeout: 60000 // 60s timeout for large configs + } + ); + + if (response.data.success) { + // Update sync log - success + await prisma.syncLog.update({ + where: { id: syncLog.id }, + data: { + status: 'success', + completedAt: new Date(), + duration: Date.now() - startTime, + configHash, + changesCount: response.data.changesCount + } + }); + + // Update node + await prisma.slaveNode.update({ + where: { id }, + data: { + status: 'online', + configHash, + lastSyncAt: new Date(), + lastSeen: new Date(), + version: response.data.version + } + }); + + logger.info(`Config synced to node: ${node.name}`, { + userId: req.user?.userId, + nodeId: id, + configHash, + duration: Date.now() - startTime + }); + + res.json({ + success: true, + message: 'Configuration synced successfully', + data: { + configHash, + changesCount: response.data.changesCount, + duration: Date.now() - startTime + } + }); + } else { + throw new Error(response.data.message || 'Slave rejected configuration'); + } + } catch (error: any) { + // Update sync log - failed + await prisma.syncLog.update({ + where: { id: syncLog.id }, + data: { + status: 'failed', + completedAt: new Date(), + duration: Date.now() - startTime, + errorMessage: error.message + } + }); + + // Update node status to error + await prisma.slaveNode.update({ + where: { id }, + data: { status: 'error' } + }); + + throw error; + } + } catch (error: any) { + logger.error('Sync config to node error:', error); + res.status(500).json({ + success: false, + message: error.message || 'Sync failed' + }); + } +}; + +/** + * Sync to all active nodes + */ +export const syncConfigToAllNodes = async (req: AuthRequest, res: Response): Promise => { + try { + const nodes = await prisma.slaveNode.findMany({ + where: { + syncEnabled: true, + status: { not: 'error' } + } + }); + + if (nodes.length === 0) { + res.json({ + success: true, + message: 'No active nodes to sync', + data: { total: 0, success: 0, failed: 0 } + }); + return; + } + + const results = await Promise.allSettled( + nodes.map(async (node) => { + const mockReq = { ...req, params: { 
id: node.id }, body: { force: false } } as any; + const mockRes = { + status: () => mockRes, + json: (data: any) => data + } as any; + + await syncConfigToNode(mockReq, mockRes); + return node; + }) + ); + + const summary = { + total: nodes.length, + success: results.filter(r => r.status === 'fulfilled').length, + failed: results.filter(r => r.status === 'rejected').length + }; + + logger.info('Sync to all nodes completed', { + userId: req.user?.userId, + summary + }); + + res.json({ + success: true, + message: `Synced to ${summary.success}/${summary.total} nodes`, + data: summary + }); + } catch (error) { + logger.error('Sync all nodes error:', error); + res.status(500).json({ + success: false, + message: 'Sync failed' + }); + } +}; + +/** + * Get node status (health check from master) + */ +export const getNodeStatus = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + const node = await prisma.slaveNode.findUnique({ where: { id } }); + if (!node) { + res.status(404).json({ + success: false, + message: 'Node not found' + }); + return; + } + + try { + const startTime = Date.now(); + const response = await axios.get( + `http://${node.host}:${node.port}/api/slave/sync/health`, + { + headers: { 'X-API-Key': node.apiKey }, + timeout: 5000 + } + ); + + const latency = Date.now() - startTime; + + // Update node with health check results + await prisma.slaveNode.update({ + where: { id }, + data: { + status: 'online', + lastSeen: new Date(), + version: response.data.version, + latency, + cpuUsage: response.data.metrics?.cpu, + memoryUsage: response.data.metrics?.memory, + diskUsage: response.data.metrics?.disk + } + }); + + res.json({ + success: true, + data: { + status: 'online', + latency, + ...response.data + } + }); + } catch (error) { + // Mark node as offline + await prisma.slaveNode.update({ + where: { id }, + data: { status: 'offline' } + }); + + res.json({ + success: true, + data: { + status: 'offline', + error: 'Failed to reach node' + } + }); + } + } catch (error) { + logger.error('Get node status error:', error); + res.status(500).json({ + success: false, + message: 'Failed to get node status' + }); + } +}; + +/** + * Get node sync history + */ +export const getNodeSyncHistory = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + const { limit = 50 } = req.query; + + const syncLogs = await prisma.syncLog.findMany({ + where: { nodeId: id }, + orderBy: { startedAt: 'desc' }, + take: Number(limit) + }); + + res.json({ + success: true, + data: syncLogs + }); + } catch (error) { + logger.error('Get sync history error:', error); + res.status(500).json({ + success: false, + message: 'Failed to get sync history' + }); + } +}; + +/** + * Regenerate API key for slave node + */ +export const regenerateApiKey = async (req: AuthRequest, res: Response): Promise => { + try { + const { id } = req.params; + + const newApiKey = generateApiKey(); + + const updated = await prisma.slaveNode.update({ + where: { id }, + data: { apiKey: newApiKey } + }); + + logger.warn(`API key regenerated for node: ${updated.name}`, { + userId: req.user?.userId, + nodeId: id + }); + + res.json({ + success: true, + message: 'API key regenerated successfully', + data: { + apiKey: newApiKey // Send new key once + } + }); + } catch (error) { + logger.error('Regenerate API key error:', error); + res.status(500).json({ + success: false, + message: 'Failed to regenerate API key' + }); + } +}; + +// ========================================== +// 
SLAVE API ENDPOINTS +// ========================================== + +/** + * Pull configuration from master (called by slave) + */ +export const pullConfig = async (req: SlaveRequest, res: Response): Promise => { + try { + const config = await collectCurrentConfig(); + const configHash = generateConfigHash(config); + + logger.info('Config pulled by slave', { + nodeId: req.slaveNode?.id, + nodeName: req.slaveNode?.name + }); + + res.json({ + success: true, + data: { + config, + configHash, + timestamp: new Date().toISOString() + } + }); + } catch (error) { + logger.error('Pull config error:', error); + res.status(500).json({ + success: false, + message: 'Failed to pull configuration' + }); + } +}; + +/** + * Report status to master (called by slave) + */ +export const reportStatus = async (req: SlaveRequest, res: Response): Promise => { + try { + const { configHash, metrics } = req.body; + const nodeId = req.slaveNode?.id; + + if (!nodeId) { + res.status(400).json({ + success: false, + message: 'Node ID not found' + }); + return; + } + + // Update node status + await prisma.slaveNode.update({ + where: { id: nodeId }, + data: { + status: 'online', + lastSeen: new Date(), + ...(configHash && { configHash }), + ...(metrics?.cpu !== undefined && { cpuUsage: metrics.cpu }), + ...(metrics?.memory !== undefined && { memoryUsage: metrics.memory }), + ...(metrics?.disk !== undefined && { diskUsage: metrics.disk }) + } + }); + + // Check if config needs sync + const masterConfig = await collectCurrentConfig(); + const masterHash = generateConfigHash(masterConfig); + const needsSync = configHash !== masterHash; + + res.json({ + success: true, + data: { + needsSync, + masterHash: needsSync ? masterHash : undefined + } + }); + } catch (error) { + logger.error('Report status error:', error); + res.status(500).json({ + success: false, + message: 'Failed to report status' + }); + } +}; + +/** + * Health check endpoint (called by master) + */ +export const healthCheck = async (req: SlaveRequest, res: Response): Promise => { + try { + const nodeId = req.slaveNode?.id; + + // Get system metrics (basic implementation) + const metrics = { + cpu: 0, // TODO: Implement actual CPU usage + memory: 0, // TODO: Implement actual memory usage + disk: 0 // TODO: Implement actual disk usage + }; + + res.json({ + success: true, + status: 'healthy', + version: process.env.APP_VERSION || '2.0.0', + nodeId, + timestamp: new Date().toISOString(), + metrics + }); + } catch (error) { + logger.error('Health check error:', error); + res.status(500).json({ + success: false, + message: 'Health check failed' + }); + } +}; diff --git a/apps/api/src/middleware/slaveAuth.ts b/apps/api/src/middleware/slaveAuth.ts new file mode 100644 index 0000000..d07754e --- /dev/null +++ b/apps/api/src/middleware/slaveAuth.ts @@ -0,0 +1,80 @@ +import { Request, Response, NextFunction } from 'express'; +import prisma from '../config/database'; +import logger from '../utils/logger'; + +export interface SlaveRequest extends Request { + slaveNode?: { + id: string; + name: string; + host: string; + port: number; + }; +} + +/** + * Validate Slave API Key + * Used for slave nodes to authenticate with master + */ +export const validateSlaveApiKey = async ( + req: SlaveRequest, + res: Response, + next: NextFunction +): Promise => { + try { + const apiKey = req.headers['x-api-key'] as string; + + if (!apiKey) { + res.status(401).json({ + success: false, + message: 'API key required' + }); + return; + } + + // Find slave node by API key + const slaveNode = 
await prisma.slaveNode.findFirst({ + where: { apiKey }, + select: { + id: true, + name: true, + host: true, + port: true, + syncEnabled: true + } + }); + + if (!slaveNode) { + logger.warn('Invalid slave API key attempt', { apiKey: apiKey.substring(0, 8) + '...' }); + res.status(401).json({ + success: false, + message: 'Invalid API key' + }); + return; + } + + if (!slaveNode.syncEnabled) { + res.status(403).json({ + success: false, + message: 'Node sync is disabled' + }); + return; + } + + // Attach slave node info to request + req.slaveNode = slaveNode; + + // Update last seen + await prisma.slaveNode.update({ + where: { id: slaveNode.id }, + data: { lastSeen: new Date() } + }).catch(() => {}); // Don't fail if update fails + + next(); + } catch (error) { + logger.error('Slave API key validation error:', error); + res.status(500).json({ + success: false, + message: 'Authentication failed' + }); + } +}; diff --git a/apps/api/src/routes/index.ts b/apps/api/src/routes/index.ts index 151a9b7..92c9612 100644 --- a/apps/api/src/routes/index.ts +++ b/apps/api/src/routes/index.ts @@ -12,6 +12,7 @@ import performanceRoutes from './performance.routes'; import userRoutes from './user.routes'; import dashboardRoutes from './dashboard.routes'; import backupRoutes from './backup.routes'; +import slaveRoutes from './slave.routes'; const router = Router(); @@ -38,5 +39,6 @@ router.use('/performance', performanceRoutes); router.use('/users', userRoutes); router.use('/dashboard', dashboardRoutes); router.use('/backup', backupRoutes); +router.use('/slave', slaveRoutes); export default router; diff --git a/apps/api/src/routes/slave.routes.ts b/apps/api/src/routes/slave.routes.ts new file mode 100644 index 0000000..d8881fc --- /dev/null +++ b/apps/api/src/routes/slave.routes.ts @@ -0,0 +1,172 @@ +import { Router } from 'express'; +import { body } from 'express-validator'; +import { authenticate, authorize } from '../middleware/auth'; +import { validateSlaveApiKey } from '../middleware/slaveAuth'; +import { + // Master API endpoints + registerSlaveNode, + getSlaveNodes, + getSlaveNode, + updateSlaveNode, + deleteSlaveNode, + syncConfigToNode, + syncConfigToAllNodes, + getNodeStatus, + getNodeSyncHistory, + regenerateApiKey, + + // Slave API endpoints + pullConfig, + reportStatus, + healthCheck +} from '../controllers/slave.controller'; + +const router = Router(); + +// ========================================== +// MASTER API ENDPOINTS (Authenticated) +// ========================================== + +/** + * @route POST /api/slave/nodes + * @desc Register new slave node + * @access Private (admin) + */ +router.post( + '/nodes', + authenticate, + authorize('admin'), + [ + body('name').notEmpty().withMessage('Name is required'), + body('host').notEmpty().withMessage('Host is required'), + body('port').optional().isInt({ min: 1, max: 65535 }), + body('syncInterval').optional().isInt({ min: 10 }) + ], + registerSlaveNode +); + +/** + * @route GET /api/slave/nodes + * @desc Get all slave nodes + * @access Private (all roles) + */ +router.get('/nodes', authenticate, getSlaveNodes); + +/** + * @route GET /api/slave/nodes/:id + * @desc Get single slave node + * @access Private (all roles) + */ +router.get('/nodes/:id', authenticate, getSlaveNode); + +/** + * @route PUT /api/slave/nodes/:id + * @desc Update slave node + * @access Private (admin, moderator) + */ +router.put( + '/nodes/:id', + authenticate, + authorize('admin', 'moderator'), + [ + body('name').optional().notEmpty(), + body('host').optional().notEmpty(), + 
body('port').optional().isInt({ min: 1, max: 65535 }), + body('syncEnabled').optional().isBoolean(), + body('syncInterval').optional().isInt({ min: 10 }) + ], + updateSlaveNode +); + +/** + * @route DELETE /api/slave/nodes/:id + * @desc Delete slave node + * @access Private (admin) + */ +router.delete('/nodes/:id', authenticate, authorize('admin'), deleteSlaveNode); + +/** + * @route POST /api/slave/nodes/:id/sync + * @desc Sync configuration to specific node + * @access Private (admin, moderator) + */ +router.post( + '/nodes/:id/sync', + authenticate, + authorize('admin', 'moderator'), + [body('force').optional().isBoolean()], + syncConfigToNode +); + +/** + * @route POST /api/slave/nodes/sync-all + * @desc Sync configuration to all active nodes + * @access Private (admin, moderator) + */ +router.post( + '/nodes/sync-all', + authenticate, + authorize('admin', 'moderator'), + syncConfigToAllNodes +); + +/** + * @route GET /api/slave/nodes/:id/status + * @desc Get node status (health check) + * @access Private (all roles) + */ +router.get('/nodes/:id/status', authenticate, getNodeStatus); + +/** + * @route GET /api/slave/nodes/:id/sync-history + * @desc Get node sync history + * @access Private (all roles) + */ +router.get('/nodes/:id/sync-history', authenticate, getNodeSyncHistory); + +/** + * @route POST /api/slave/nodes/:id/regenerate-key + * @desc Regenerate API key for slave node + * @access Private (admin) + */ +router.post( + '/nodes/:id/regenerate-key', + authenticate, + authorize('admin'), + regenerateApiKey +); + +// ========================================== +// SLAVE API ENDPOINTS (API Key Authenticated) +// ========================================== + +/** + * @route POST /api/slave/sync/pull-config + * @desc Pull configuration from master (called by slave) + * @access Slave API Key + */ +router.post('/sync/pull-config', validateSlaveApiKey, pullConfig); + +/** + * @route POST /api/slave/sync/report-status + * @desc Report status to master (called by slave) + * @access Slave API Key + */ +router.post( + '/sync/report-status', + validateSlaveApiKey, + [ + body('configHash').optional().isString(), + body('metrics').optional().isObject() + ], + reportStatus +); + +/** + * @route GET /api/slave/sync/health + * @desc Health check endpoint (called by master) + * @access Slave API Key + */ +router.get('/sync/health', validateSlaveApiKey, healthCheck); + +export default router; diff --git a/apps/web/src/components/pages/SlaveNodes.tsx b/apps/web/src/components/pages/SlaveNodes.tsx index ea8e0d1..fe07a87 100644 --- a/apps/web/src/components/pages/SlaveNodes.tsx +++ b/apps/web/src/components/pages/SlaveNodes.tsx @@ -1,5 +1,5 @@ import { useState } from "react"; -import { useTranslation } from "react-i18next"; +import { useQuery, useMutation, useQueryClient } from "@tanstack/react-query"; import { Card, CardContent, CardDescription, CardHeader, CardTitle } from "@/components/ui/card"; import { Badge } from "@/components/ui/badge"; import { Button } from "@/components/ui/button"; @@ -7,84 +7,145 @@ import { Input } from "@/components/ui/input"; import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"; import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog"; import { Label } from "@/components/ui/label"; -import { Server, RefreshCw, Send, Trash2, CheckCircle2, XCircle, Clock } from "lucide-react"; -import { mockSlaveNodes } from "@/mocks/data"; +import { Server, 
RefreshCw, Trash2, CheckCircle2, XCircle, Clock, AlertCircle, Loader2 } from "lucide-react"; import { SlaveNode } from "@/types"; import { useToast } from "@/hooks/use-toast"; -import { UnderConstructionBanner } from "@/components/ui/under-construction-banner"; +import { slaveNodesQueryOptions } from "@/queries/slave.query-options"; +import { slaveNodeService } from "@/services/slave.service"; const SlaveNodes = () => { - const { t } = useTranslation(); const { toast } = useToast(); - const [nodes, setNodes] = useState(mockSlaveNodes); + const queryClient = useQueryClient(); const [isDialogOpen, setIsDialogOpen] = useState(false); const [formData, setFormData] = useState({ name: "", host: "", - port: 8088 + port: 3001, + syncInterval: 60 + }); + + // Fetch slave nodes + const { data: nodes = [], isLoading } = useQuery(slaveNodesQueryOptions.all); + + // Register mutation + const registerMutation = useMutation({ + mutationFn: slaveNodeService.register, + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); + setIsDialogOpen(false); + resetForm(); + + // Show API key in toast (only shown once) + toast({ + title: "Slave node registered", + description: `API Key: ${data.data.apiKey} (save this, it won't be shown again)`, + duration: 10000 + }); + }, + onError: (error: any) => { + toast({ + title: "Registration failed", + description: error.response?.data?.message || "Failed to register node", + variant: "destructive" + }); + } + }); + + // Delete mutation + const deleteMutation = useMutation({ + mutationFn: slaveNodeService.delete, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); + toast({ title: "Node removed successfully" }); + }, + onError: (error: any) => { + toast({ + title: "Delete failed", + description: error.response?.data?.message || "Failed to delete node", + variant: "destructive" + }); + } + }); + + // Sync mutation + const syncMutation = useMutation({ + mutationFn: ({ id, force }: { id: string; force?: boolean }) => + slaveNodeService.syncToNode(id, { force }), + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); + const message = data.data?.skipped + ? 
"Configuration already up to date" + : `Synced ${data.data?.changesCount || 0} changes in ${data.data?.duration || 0}ms`; + toast({ title: "Sync completed", description: message }); + }, + onError: (error: any) => { + toast({ + title: "Sync failed", + description: error.response?.data?.message || "Failed to sync configuration", + variant: "destructive" + }); + } + }); + + // Sync all mutation + const syncAllMutation = useMutation({ + mutationFn: slaveNodeService.syncToAll, + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); + toast({ + title: "Sync to all nodes completed", + description: `${data.data.success}/${data.data.total} nodes synced successfully` + }); + }, + onError: (error: any) => { + toast({ + title: "Sync all failed", + description: error.response?.data?.message || "Failed to sync to all nodes", + variant: "destructive" + }); + } }); const handleAddNode = () => { - const newNode: SlaveNode = { - id: `node${nodes.length + 1}`, + if (!formData.name || !formData.host) { + toast({ + title: "Validation error", + description: "Name and host are required", + variant: "destructive" + }); + return; + } + + registerMutation.mutate({ name: formData.name, host: formData.host, port: formData.port, - status: 'offline', - lastSeen: new Date().toISOString(), - version: '1.24.0', - syncStatus: { - lastSync: new Date().toISOString(), - configHash: '', - inSync: false - } - }; - setNodes([...nodes, newNode]); - setIsDialogOpen(false); - resetForm(); - toast({ title: "Slave node registered", description: "Node added successfully" }); + syncInterval: formData.syncInterval + }); }; const resetForm = () => { setFormData({ name: "", host: "", - port: 8088 + port: 3001, + syncInterval: 60 }); }; - const handlePushConfig = (nodeId: string) => { - const node = nodes.find(n => n.id === nodeId); - toast({ - title: "Configuration pushed", - description: `Config sync initiated to ${node?.name} (mock mode)` - }); + const handleSync = (nodeId: string) => { + syncMutation.mutate({ id: nodeId, force: false }); }; - const handleSync = (nodeId: string) => { - setNodes(nodes.map(n => - n.id === nodeId - ? { - ...n, - status: 'syncing', - syncStatus: { ...n.syncStatus, lastSync: new Date().toISOString() } - } - : n - )); - setTimeout(() => { - setNodes(nodes.map(n => - n.id === nodeId - ? { ...n, status: 'online', syncStatus: { ...n.syncStatus, inSync: true } } - : n - )); - toast({ title: "Sync completed" }); - }, 2000); + const handleSyncAll = () => { + syncAllMutation.mutate(); }; const handleDelete = (id: string) => { - setNodes(nodes.filter(n => n.id !== id)); - toast({ title: "Node removed" }); + if (confirm("Are you sure you want to remove this node?")) { + deleteMutation.mutate(id); + } }; const getStatusColor = (status: string) => { @@ -92,6 +153,7 @@ const SlaveNodes = () => { case 'online': return 'default'; case 'offline': return 'destructive'; case 'syncing': return 'secondary'; + case 'error': return 'destructive'; default: return 'secondary'; } }; @@ -101,13 +163,30 @@ const SlaveNodes = () => { case 'online': return ; case 'offline': return ; case 'syncing': return ; + case 'error': return ; default: return ; } }; + const isNodeInSync = (node: SlaveNode) => { + // Legacy support for old mock data + if (node.syncStatus?.inSync !== undefined) { + return node.syncStatus.inSync; + } + // New logic: check if configHash exists and lastSyncAt is recent + return !!node.configHash && node.lastSyncAt; + }; + + if (isLoading) { + return ( +
    + +
    + ); + } + return (
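Note: the mutation wiring above condenses to a small reusable hook. A minimal sketch using only names from this diff; the response envelope (`data.data.apiKey`) is inferred from the success handler, and the `onApiKey` callback is illustrative:

  import { useMutation, useQueryClient } from '@tanstack/react-query';
  import { slaveNodeService } from '@/services/slave.service';

  // Sketch: register a slave node, refetch the list, surface the one-time API key.
  export function useRegisterSlaveNode(onApiKey: (key: string) => void) {
    const queryClient = useQueryClient();
    return useMutation({
      mutationFn: slaveNodeService.register,
      onSuccess: (data) => {
        queryClient.invalidateQueries({ queryKey: ['slave-nodes'] });
        onApiKey(data.data.apiKey); // the key is never returned again
      },
    });
  }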
    -
    @@ -118,56 +197,84 @@ const SlaveNodes = () => {

    Manage distributed nginx nodes and configuration sync

    - - - - - - - Register Slave Node - - Add a new slave node to the cluster - - -
    -
    - - setFormData({ ...formData, name: e.target.value })} - placeholder="nginx-slave-04" - /> -
    -
    - - setFormData({ ...formData, host: e.target.value })} - placeholder="10.0.10.14" - /> -
    -
    - - setFormData({ ...formData, port: Number(e.target.value) })} - placeholder="8088" - /> +
    + + + + + + + + Register Slave Node + + Add a new slave node to the cluster + + +
    +
    + + setFormData({ ...formData, name: e.target.value })} + placeholder="nginx-slave-01" + /> +
    +
    + + setFormData({ ...formData, host: e.target.value })} + placeholder="10.0.10.11" + /> +
    +
    + + setFormData({ ...formData, port: Number(e.target.value) })} + placeholder="3001" + /> +
    +
    + + setFormData({ ...formData, syncInterval: Number(e.target.value) })} + placeholder="60" + /> +
    -
    - - - - - -
    + + + + + + +
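For reference, the response envelopes these handlers rely on, reconstructed from the field accesses above (`data.data.apiKey`, `data.data?.skipped`, `data.data.success`/`total`); treat the exact shapes as assumptions, not a published contract:

  // Assumed response shapes, inferred from how the mutations read them.
  interface RegisterResponse {
    success: boolean;
    data: { id: string; name: string; apiKey: string }; // apiKey shown once
  }
  interface SyncNodeResponse {
    success: boolean;
    data?: { skipped?: boolean; changesCount?: number; duration?: number };
  }
  interface SyncAllResponse {
    success: boolean;
    data: { total: number; success: number }; // rendered as "success/total nodes synced"
  }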
    @@ -206,7 +313,7 @@ const SlaveNodes = () => {
    - {nodes.filter(n => n.syncStatus.inSync).length}/{nodes.length} + {nodes.filter(n => isNodeInSync(n)).length}/{nodes.length}

    Nodes in sync @@ -232,66 +339,80 @@ const SlaveNodes = () => { Last Seen Sync Status Config Hash + Enabled Actions - {nodes.map((node) => ( - - {node.name} - {node.host}:{node.port} - - - {getStatusIcon(node.status)} - {node.status} - - - {node.version} - - {new Date(node.lastSeen).toLocaleString()} + {nodes.length === 0 ? ( + + + No slave nodes registered. Click "Register Node" to add one. - - {node.syncStatus.inSync ? ( - - - In Sync + + ) : ( + nodes.map((node) => ( + + {node.name} + {node.host}:{node.port} + + + {getStatusIcon(node.status)} + {node.status} - ) : ( - - - Out of Sync + + {node.version || 'N/A'} + + {node.lastSeen ? new Date(node.lastSeen).toLocaleString() : 'Never'} + + + {isNodeInSync(node) ? ( + + + In Sync + + ) : ( + + + Out of Sync + + )} + + + {node.configHash?.substring(0, 12) || 'N/A'}... + + + + {node.syncEnabled ? 'Yes' : 'No'} - )} - - - {node.syncStatus.configHash || 'N/A'} - - - - - - - - ))} + + + + + + + )) + )}

    @@ -312,24 +433,29 @@ const SlaveNodes = () => {

    Master Node

    Primary

    -
    -
    - {nodes.map((node) => ( -
    -
    - -
    -

    {node.name}

    - - {node.status} - + {nodes.length > 0 && ( + <> +
    +
    + {nodes.map((node) => ( +
    +
    + +
    +

    {node.name}

    + + {node.status} + +
    + ))}
    - ))} -
    + + )}
    diff --git a/apps/web/src/mocks/data.ts b/apps/web/src/mocks/data.ts index 37cc44c..b87e96e 100644 --- a/apps/web/src/mocks/data.ts +++ b/apps/web/src/mocks/data.ts @@ -385,6 +385,16 @@ export const mockSlaveNodes: SlaveNode[] = [ status: 'online', lastSeen: '2025-03-29T14:35:00Z', version: '1.24.0', + syncEnabled: true, + syncInterval: 60, + configHash: 'a1b2c3d4e5f6', + lastSyncAt: '2025-03-29T14:30:00Z', + latency: 15, + cpuUsage: 25.5, + memoryUsage: 45.2, + diskUsage: 60.1, + createdAt: '2025-01-15T10:00:00Z', + updatedAt: '2025-03-29T14:35:00Z', syncStatus: { lastSync: '2025-03-29T14:30:00Z', configHash: 'a1b2c3d4e5f6', @@ -399,6 +409,16 @@ export const mockSlaveNodes: SlaveNode[] = [ status: 'online', lastSeen: '2025-03-29T14:34:55Z', version: '1.24.0', + syncEnabled: true, + syncInterval: 60, + configHash: 'a1b2c3d4e5f5', + lastSyncAt: '2025-03-29T14:00:00Z', + latency: 22, + cpuUsage: 35.8, + memoryUsage: 52.3, + diskUsage: 55.7, + createdAt: '2025-01-20T11:30:00Z', + updatedAt: '2025-03-29T14:34:55Z', syncStatus: { lastSync: '2025-03-29T14:00:00Z', configHash: 'a1b2c3d4e5f5', @@ -413,6 +433,12 @@ export const mockSlaveNodes: SlaveNode[] = [ status: 'offline', lastSeen: '2025-03-28T22:15:00Z', version: '1.23.4', + syncEnabled: false, + syncInterval: 120, + configHash: 'x9y8z7w6v5u4', + lastSyncAt: '2025-03-28T20:00:00Z', + createdAt: '2025-02-01T09:00:00Z', + updatedAt: '2025-03-28T22:15:00Z', syncStatus: { lastSync: '2025-03-28T20:00:00Z', configHash: 'x9y8z7w6v5u4', @@ -423,7 +449,7 @@ export const mockSlaveNodes: SlaveNode[] = [ export const mockPerformanceMetrics: PerformanceMetric[] = Array.from({ length: 20 }, (_, i) => ({ id: `perf${i + 1}`, - domain: ['api.example.com', 'app.production.com', 'cdn.assets.com'][i % 3], + domain: ['api.example.com', 'app.production.com', 'cdn.assets.com'][i % 3] || 'api.example.com', timestamp: new Date(Date.now() - (19 - i) * 300000).toISOString(), responseTime: Math.random() * 200 + 50, throughput: Math.random() * 1000 + 500, diff --git a/apps/web/src/queries/slave.query-options.ts b/apps/web/src/queries/slave.query-options.ts new file mode 100644 index 0000000..6369693 --- /dev/null +++ b/apps/web/src/queries/slave.query-options.ts @@ -0,0 +1,31 @@ +import { queryOptions } from '@tanstack/react-query'; +import { slaveNodeService } from '@/services/slave.service'; + +export const slaveNodesQueryOptions = { + all: queryOptions({ + queryKey: ['slave-nodes', 'list'], + queryFn: () => slaveNodeService.getAll(), + staleTime: 30 * 1000, // 30 seconds + }), + + detail: (id: string) => + queryOptions({ + queryKey: ['slave-nodes', 'detail', id], + queryFn: () => slaveNodeService.getById(id), + staleTime: 30 * 1000, + }), + + status: (id: string) => + queryOptions({ + queryKey: ['slave-nodes', 'status', id], + queryFn: () => slaveNodeService.getStatus(id), + staleTime: 10 * 1000, // 10 seconds + }), + + syncHistory: (id: string, limit: number = 50) => + queryOptions({ + queryKey: ['slave-nodes', 'sync-history', id, limit], + queryFn: () => slaveNodeService.getSyncHistory(id, limit), + staleTime: 30 * 1000, + }), +}; diff --git a/apps/web/src/services/slave.service.ts b/apps/web/src/services/slave.service.ts new file mode 100644 index 0000000..a33957e --- /dev/null +++ b/apps/web/src/services/slave.service.ts @@ -0,0 +1,123 @@ +import axios from 'axios'; +import { SlaveNode } from '@/types'; + +const API_URL = import.meta.env.VITE_API_URL || 'http://localhost:3001/api'; + +export interface RegisterSlaveNodeRequest { + name: string; + host: string; 
+ port?: number; + syncInterval?: number; +} + +export interface UpdateSlaveNodeRequest { + name?: string; + host?: string; + port?: number; + syncEnabled?: boolean; + syncInterval?: number; +} + +export interface SyncConfigRequest { + force?: boolean; +} + +export interface SyncLog { + id: string; + nodeId: string; + type: 'full_sync' | 'incremental_sync' | 'health_check'; + status: 'success' | 'failed' | 'partial' | 'running'; + configHash?: string; + changesCount?: number; + errorMessage?: string; + startedAt: string; + completedAt?: string; + duration?: number; +} + +export interface SlaveNodeWithLogs extends SlaveNode { + syncLogs?: SyncLog[]; +} + +class SlaveNodeService { + private getHeaders() { + const token = localStorage.getItem('accessToken'); + return { + 'Content-Type': 'application/json', + Authorization: token ? `Bearer ${token}` : '', + }; + } + + async getAll(): Promise { + const response = await axios.get(`${API_URL}/slave/nodes`, { + headers: this.getHeaders(), + }); + return response.data.data; + } + + async getById(id: string): Promise { + const response = await axios.get(`${API_URL}/slave/nodes/${id}`, { + headers: this.getHeaders(), + }); + return response.data.data; + } + + async register(data: RegisterSlaveNodeRequest) { + const response = await axios.post(`${API_URL}/slave/nodes`, data, { + headers: this.getHeaders(), + }); + return response.data; + } + + async update(id: string, data: UpdateSlaveNodeRequest) { + const response = await axios.put(`${API_URL}/slave/nodes/${id}`, data, { + headers: this.getHeaders(), + }); + return response.data; + } + + async delete(id: string) { + const response = await axios.delete(`${API_URL}/slave/nodes/${id}`, { + headers: this.getHeaders(), + }); + return response.data; + } + + async syncToNode(id: string, data: SyncConfigRequest = {}) { + const response = await axios.post(`${API_URL}/slave/nodes/${id}/sync`, data, { + headers: this.getHeaders(), + }); + return response.data; + } + + async syncToAll() { + const response = await axios.post(`${API_URL}/slave/nodes/sync-all`, {}, { + headers: this.getHeaders(), + }); + return response.data; + } + + async getStatus(id: string) { + const response = await axios.get(`${API_URL}/slave/nodes/${id}/status`, { + headers: this.getHeaders(), + }); + return response.data; + } + + async getSyncHistory(id: string, limit: number = 50) { + const response = await axios.get(`${API_URL}/slave/nodes/${id}/sync-history`, { + headers: this.getHeaders(), + params: { limit }, + }); + return response.data.data; + } + + async regenerateApiKey(id: string) { + const response = await axios.post(`${API_URL}/slave/nodes/${id}/regenerate-key`, {}, { + headers: this.getHeaders(), + }); + return response.data; + } +} + +export const slaveNodeService = new SlaveNodeService(); diff --git a/apps/web/src/types/index.ts b/apps/web/src/types/index.ts index 9ac70c4..e8dacf2 100644 --- a/apps/web/src/types/index.ts +++ b/apps/web/src/types/index.ts @@ -186,10 +186,27 @@ export interface SlaveNode { name: string; host: string; port: number; - status: 'online' | 'offline' | 'syncing'; - lastSeen: string; - version: string; - syncStatus: { + status: 'online' | 'offline' | 'syncing' | 'error'; + lastSeen?: string; + version?: string; + + // Sync configuration + syncEnabled: boolean; + syncInterval: number; + configHash?: string; + lastSyncAt?: string; + + // Metrics + latency?: number; + cpuUsage?: number; + memoryUsage?: number; + diskUsage?: number; + + createdAt: string; + updatedAt: string; + + // Legacy support for 
old mock data + syncStatus?: { lastSync: string; configHash: string; inSync: boolean; From 5476ea8dcabea05517beae05954fe13ccf96ecf4 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 08:47:33 +0000 Subject: [PATCH 10/32] fix: Correct sslCertificateId to sslCertificate in collectCurrentConfig function --- apps/api/src/controllers/slave.controller.ts | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/apps/api/src/controllers/slave.controller.ts b/apps/api/src/controllers/slave.controller.ts index 068bbe3..efce5d6 100644 --- a/apps/api/src/controllers/slave.controller.ts +++ b/apps/api/src/controllers/slave.controller.ts @@ -67,7 +67,7 @@ async function collectCurrentConfig() { modsecEnabled: d.modsecEnabled, upstreams: d.upstreams, loadBalancer: d.loadBalancer, - sslCertificateId: d.sslCertificateId + sslCertificate: d.sslCertificate })), ssl: ssl.map(s => ({ id: s.id, From 5a2519843ea789e48fd0ec6ce7c5f7420b82adf7 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 08:51:01 +0000 Subject: [PATCH 11/32] fix: Simplify response handling in syncConfigToNode function --- apps/api/src/controllers/slave.controller.ts | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/apps/api/src/controllers/slave.controller.ts b/apps/api/src/controllers/slave.controller.ts index efce5d6..a54464d 100644 --- a/apps/api/src/controllers/slave.controller.ts +++ b/apps/api/src/controllers/slave.controller.ts @@ -363,7 +363,7 @@ export const syncConfigToNode = async (req: AuthRequest, res: Response): Promise } }); - return res.json({ + res.json({ success: true, message: 'Configuration already up to date', data: { @@ -371,6 +371,7 @@ export const syncConfigToNode = async (req: AuthRequest, res: Response): Promise configHash } }); + return; } // Store config version From cd319e54d9fbf9fbc24b571cccc622d051c7f6c1 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 09:06:41 +0000 Subject: [PATCH 12/32] feat: Enhance slave node registration with API key dialog and authentication checks --- apps/web/src/components/pages/SlaveNodes.tsx | 131 +++++++++++++++++-- apps/web/src/services/slave.service.ts | 53 +++++--- 2 files changed, 153 insertions(+), 31 deletions(-) diff --git a/apps/web/src/components/pages/SlaveNodes.tsx b/apps/web/src/components/pages/SlaveNodes.tsx index fe07a87..996ba39 100644 --- a/apps/web/src/components/pages/SlaveNodes.tsx +++ b/apps/web/src/components/pages/SlaveNodes.tsx @@ -24,6 +24,10 @@ const SlaveNodes = () => { port: 3001, syncInterval: 60 }); + const [apiKeyDialog, setApiKeyDialog] = useState<{ open: boolean; apiKey: string }>({ + open: false, + apiKey: '' + }); // Fetch slave nodes const { data: nodes = [], isLoading } = useQuery(slaveNodesQueryOptions.all); @@ -36,18 +40,36 @@ const SlaveNodes = () => { setIsDialogOpen(false); resetForm(); - // Show API key in toast (only shown once) + // Show API key in separate dialog (critical info!) + setApiKeyDialog({ + open: true, + apiKey: data.data.apiKey + }); + + // Also show toast toast({ - title: "Slave node registered", - description: `API Key: ${data.data.apiKey} (save this, it won't be shown again)`, - duration: 10000 + title: "Slave node registered successfully", + description: `Node ${data.data.name} has been registered`, }); }, onError: (error: any) => { + console.error('Registration error:', error); + + let errorMessage = "Failed to register node"; + + if (error.response?.status === 401) { + errorMessage = "Authentication required. 
Please login first."; + } else if (error.response?.data?.message) { + errorMessage = error.response.data.message; + } else if (error.message) { + errorMessage = error.message; + } + toast({ title: "Registration failed", - description: error.response?.data?.message || "Failed to register node", - variant: "destructive" + description: errorMessage, + variant: "destructive", + duration: 5000 }); } }); @@ -117,6 +139,8 @@ const SlaveNodes = () => { return; } + console.log('Registering node:', formData); + registerMutation.mutate({ name: formData.name, host: formData.host, @@ -177,6 +201,9 @@ const SlaveNodes = () => { return !!node.configHash && node.lastSyncAt; }; + // Check authentication + const isAuthenticated = !!localStorage.getItem('accessToken'); + if (isLoading) { return (
    @@ -187,6 +214,25 @@ const SlaveNodes = () => { return (
    + {/* Authentication Warning */} + {!isAuthenticated && ( + + +
    + +
    +

    + Authentication Required +

    +

    + You need to login to register and manage slave nodes. +

    +
    +
    +
    +
    + )} +
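Note that the gate above only tests for the presence of a token string; it says nothing about validity:

  // Mirrors the component's check: truthy if any access token string is stored.
  // Expiry and signature are only verified server-side, so a stale token passes
  // here and surfaces later as a 401 from the register/sync calls.
  const isAuthenticated = Boolean(localStorage.getItem('accessToken'));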
    @@ -202,7 +248,7 @@ const SlaveNodes = () => { size="sm" variant="outline" onClick={handleSyncAll} - disabled={syncAllMutation.isPending || nodes.length === 0} + disabled={syncAllMutation.isPending || nodes.length === 0 || !isAuthenticated} > {syncAllMutation.isPending ? ( @@ -213,7 +259,7 @@ const SlaveNodes = () => { - @@ -267,9 +313,74 @@ const SlaveNodes = () => {
    - + + + + + {/* API Key Dialog - Critical Information */} + setApiKeyDialog({ ...apiKeyDialog, open })}> + + + + + Save Your API Key + + + This is the only time you'll see this API key. Copy it now and store it securely. + + +
    +
    +

    + ⚠️ Important: You will need this API key to configure your slave node. +

    +
    + +
    + +
    + + +
    +
    + +
    +

    Next Steps:

    +
      +
    1. Copy the API key above
    2. Save it in your slave node's environment variables
    3. Configure: SLAVE_API_KEY={apiKeyDialog.apiKey.substring(0, 16)}...
    4. Start your slave node application
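Step 2 above implies an environment entry on the slave host. A hypothetical startup-config sketch; only SLAVE_API_KEY is named by this dialog, the other variable names are illustrative:

  // Hypothetical slave-side config read; only SLAVE_API_KEY comes from this dialog.
  const slaveConfig = {
    apiKey: process.env.SLAVE_API_KEY!,                 // one-time key shown above
    masterHost: process.env.MASTER_HOST ?? '127.0.0.1', // illustrative
    masterPort: Number(process.env.MASTER_PORT ?? 3001) // illustrative
  };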
    +
    +
    + +
    diff --git a/apps/web/src/services/slave.service.ts b/apps/web/src/services/slave.service.ts index a33957e..b58a444 100644 --- a/apps/web/src/services/slave.service.ts +++ b/apps/web/src/services/slave.service.ts @@ -39,74 +39,85 @@ export interface SlaveNodeWithLogs extends SlaveNode { syncLogs?: SyncLog[]; } -class SlaveNodeService { - private getHeaders() { - const token = localStorage.getItem('accessToken'); - return { - 'Content-Type': 'application/json', - Authorization: token ? `Bearer ${token}` : '', - }; - } +// Helper function to get headers +const getHeaders = () => { + const token = localStorage.getItem('accessToken'); + return { + 'Content-Type': 'application/json', + Authorization: token ? `Bearer ${token}` : '', + }; +}; +class SlaveNodeService { async getAll(): Promise { const response = await axios.get(`${API_URL}/slave/nodes`, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data.data; } async getById(id: string): Promise { const response = await axios.get(`${API_URL}/slave/nodes/${id}`, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data.data; } async register(data: RegisterSlaveNodeRequest) { - const response = await axios.post(`${API_URL}/slave/nodes`, data, { - headers: this.getHeaders(), - }); - return response.data; + console.log('SlaveNodeService.register called with:', data); + console.log('API_URL:', API_URL); + console.log('Headers:', getHeaders()); + + try { + const response = await axios.post(`${API_URL}/slave/nodes`, data, { + headers: getHeaders(), + }); + console.log('Register response:', response.data); + return response.data; + } catch (error: any) { + console.error('Register error:', error.response?.data || error.message); + throw error; + } } async update(id: string, data: UpdateSlaveNodeRequest) { const response = await axios.put(`${API_URL}/slave/nodes/${id}`, data, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data; } async delete(id: string) { const response = await axios.delete(`${API_URL}/slave/nodes/${id}`, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data; } async syncToNode(id: string, data: SyncConfigRequest = {}) { const response = await axios.post(`${API_URL}/slave/nodes/${id}/sync`, data, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data; } async syncToAll() { const response = await axios.post(`${API_URL}/slave/nodes/sync-all`, {}, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data; } async getStatus(id: string) { const response = await axios.get(`${API_URL}/slave/nodes/${id}/status`, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data; } async getSyncHistory(id: string, limit: number = 50) { const response = await axios.get(`${API_URL}/slave/nodes/${id}/sync-history`, { - headers: this.getHeaders(), + headers: getHeaders(), params: { limit }, }); return response.data.data; @@ -114,7 +125,7 @@ class SlaveNodeService { async regenerateApiKey(id: string) { const response = await axios.post(`${API_URL}/slave/nodes/${id}/regenerate-key`, {}, { - headers: this.getHeaders(), + headers: getHeaders(), }); return response.data; } From ba7171a565f0918979f56ac783811f3bb9190abd Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 09:53:54 +0000 Subject: [PATCH 13/32] feat: add system configuration management for master/slave node modes - Introduced SystemConfig interface in types for managing node configurations. 
- Created migration for system_configs table with necessary fields and enum for node modes. - Implemented system configuration controller with endpoints to get, update, connect, disconnect, and test connection to master node. - Added routes for system configuration management. - Developed service methods for API interactions related to system configuration. - Implemented query options for fetching system configuration with automatic refetching. --- .../migration.sql | 20 + apps/api/prisma/schema.prisma | 29 + .../controllers/system-config.controller.ts | 341 ++++++ apps/api/src/routes/index.ts | 2 + apps/api/src/routes/system-config.routes.ts | 25 + apps/web/src/components/pages/SlaveNodes.tsx | 994 +++++++++++------- .../queries/system-config.query-options.ts | 10 + .../web/src/services/system-config.service.ts | 88 ++ apps/web/src/types/index.ts | 22 + 9 files changed, 1157 insertions(+), 374 deletions(-) create mode 100644 apps/api/prisma/migrations/20251006092848_add_system_config_and_node_mode/migration.sql create mode 100644 apps/api/src/controllers/system-config.controller.ts create mode 100644 apps/api/src/routes/system-config.routes.ts create mode 100644 apps/web/src/queries/system-config.query-options.ts create mode 100644 apps/web/src/services/system-config.service.ts diff --git a/apps/api/prisma/migrations/20251006092848_add_system_config_and_node_mode/migration.sql b/apps/api/prisma/migrations/20251006092848_add_system_config_and_node_mode/migration.sql new file mode 100644 index 0000000..d43c3d3 --- /dev/null +++ b/apps/api/prisma/migrations/20251006092848_add_system_config_and_node_mode/migration.sql @@ -0,0 +1,20 @@ +-- CreateEnum +CREATE TYPE "NodeMode" AS ENUM ('master', 'slave'); + +-- CreateTable +CREATE TABLE "system_configs" ( + "id" TEXT NOT NULL, + "nodeMode" "NodeMode" NOT NULL DEFAULT 'master', + "masterApiEnabled" BOOLEAN NOT NULL DEFAULT true, + "slaveApiEnabled" BOOLEAN NOT NULL DEFAULT false, + "masterHost" TEXT, + "masterPort" INTEGER, + "masterApiKey" TEXT, + "connected" BOOLEAN NOT NULL DEFAULT false, + "lastConnectedAt" TIMESTAMP(3), + "connectionError" TEXT, + "createdAt" TIMESTAMP(3) NOT NULL DEFAULT CURRENT_TIMESTAMP, + "updatedAt" TIMESTAMP(3) NOT NULL, + + CONSTRAINT "system_configs_pkey" PRIMARY KEY ("id") +); diff --git a/apps/api/prisma/schema.prisma b/apps/api/prisma/schema.prisma index 994c1d8..3fa98fb 100644 --- a/apps/api/prisma/schema.prisma +++ b/apps/api/prisma/schema.prisma @@ -527,6 +527,11 @@ enum SyncLogType { health_check } +enum NodeMode { + master + slave +} + model SlaveNode { id String @id @default(cuid()) name String @unique @@ -560,6 +565,30 @@ model SlaveNode { @@map("slave_nodes") } +model SystemConfig { + id String @id @default(cuid()) + nodeMode NodeMode @default(master) // master or slave + + // Master mode settings + masterApiEnabled Boolean @default(true) + + // Slave mode settings + slaveApiEnabled Boolean @default(false) + masterHost String? // IP of master node + masterPort Int? // Port of master node + masterApiKey String? // API key to connect to master + + // Connection status (for slave mode) + connected Boolean @default(false) + lastConnectedAt DateTime? + connectionError String? 
+
+  createdAt        DateTime @default(now())
+  updatedAt        DateTime @updatedAt
+
+  @@map("system_configs")
+}
+
 model SyncLog {
   id              String        @id @default(cuid())
   nodeId          String
diff --git a/apps/api/src/controllers/system-config.controller.ts b/apps/api/src/controllers/system-config.controller.ts
new file mode 100644
index 0000000..a681cb4
--- /dev/null
+++ b/apps/api/src/controllers/system-config.controller.ts
@@ -0,0 +1,341 @@
+import { Response } from 'express';
+import { AuthRequest } from '../middleware/auth';
+import prisma from '../config/database';
+import logger from '../utils/logger';
+import axios from 'axios';
+
+/**
+ * Get system configuration (node mode, master/slave settings)
+ */
+export const getSystemConfig = async (req: AuthRequest, res: Response) => {
+  try {
+    let config = await prisma.systemConfig.findFirst();
+
+    // Create default config if not exists
+    if (!config) {
+      config = await prisma.systemConfig.create({
+        data: {
+          nodeMode: 'master',
+          masterApiEnabled: true,
+          slaveApiEnabled: false
+        }
+      });
+    }
+
+    res.json({
+      success: true,
+      data: config
+    });
+  } catch (error) {
+    logger.error('Get system config error:', error);
+    res.status(500).json({
+      success: false,
+      message: 'Failed to get system configuration'
+    });
+  }
+};
+
+/**
+ * Update node mode (master or slave)
+ */
+export const updateNodeMode = async (req: AuthRequest, res: Response) => {
+  try {
+    const { nodeMode } = req.body;
+
+    if (!['master', 'slave'].includes(nodeMode)) {
+      return res.status(400).json({
+        success: false,
+        message: 'Invalid node mode. Must be "master" or "slave"'
+      });
+    }
+
+    let config = await prisma.systemConfig.findFirst();
+
+    if (!config) {
+      config = await prisma.systemConfig.create({
+        data: {
+          nodeMode: nodeMode as any,
+          masterApiEnabled: nodeMode === 'master',
+          slaveApiEnabled: nodeMode === 'slave'
+        }
+      });
+    } else {
+      // Build update data
+      const updateData: any = {
+        nodeMode: nodeMode as any,
+        masterApiEnabled: nodeMode === 'master',
+        slaveApiEnabled: nodeMode === 'slave'
+      };
+
+      // Reset slave connection if switching to master
+      if (nodeMode === 'master') {
+        updateData.masterHost = null;
+        updateData.masterPort = null;
+        updateData.masterApiKey = null;
+        updateData.connected = false;
+        updateData.connectionError = null;
+        updateData.lastConnectedAt = null;
+      }
+
+      config = await prisma.systemConfig.update({
+        where: { id: config.id },
+        data: updateData
+      });
+    }
+
+    logger.info(`Node mode changed to: ${nodeMode}`, {
+      userId: req.user?.userId,
+      configId: config.id
+    });
+
+    res.json({
+      success: true,
+      data: config,
+      message: `Node mode changed to ${nodeMode}`
+    });
+  } catch (error) {
+    logger.error('Update node mode error:', error);
+    res.status(500).json({
+      success: false,
+      message: 'Failed to update node mode'
+    });
+  }
+};
+
+/**
+ * Connect to master node (for slave mode)
+ */
+export const connectToMaster = async (req: AuthRequest, res: Response) => {
+  try {
+    const { masterHost, masterPort, masterApiKey } = req.body;
+
+    if (!masterHost || !masterPort || !masterApiKey) {
+      return res.status(400).json({
+        success: false,
+        message: 'Master host, port, and API key are required'
+      });
+    }
+
+    // Get current config
+    let config = await prisma.systemConfig.findFirst();
+
+    if (!config) {
+      return res.status(400).json({
+        success: false,
+        message: 'System config not found. Please set node mode first.'
+ }); + } + + if (config.nodeMode !== 'slave') { + return res.status(400).json({ + success: false, + message: 'Cannot connect to master. Node mode must be "slave".' + }); + } + + // Test connection to master + try { + logger.info('Testing connection to master...', { masterHost, masterPort }); + + const response = await axios.get( + `http://${masterHost}:${masterPort}/api/slave/health`, + { + headers: { + 'X-API-Key': masterApiKey + }, + timeout: 10000 + } + ); + + if (!response.data.success) { + throw new Error('Master health check failed'); + } + + // Connection successful, update config + config = await prisma.systemConfig.update({ + where: { id: config.id }, + data: { + masterHost, + masterPort: parseInt(masterPort.toString()), + masterApiKey, + connected: true, + lastConnectedAt: new Date(), + connectionError: null + } + }); + + logger.info('Successfully connected to master', { + userId: req.user?.userId, + masterHost, + masterPort + }); + + res.json({ + success: true, + data: config, + message: 'Successfully connected to master node' + }); + + } catch (connectionError: any) { + // Connection failed, update config with error + const errorMessage = connectionError.response?.data?.message || + connectionError.message || + 'Failed to connect to master'; + + config = await prisma.systemConfig.update({ + where: { id: config.id }, + data: { + masterHost, + masterPort: parseInt(masterPort.toString()), + masterApiKey, + connected: false, + connectionError: errorMessage + } + }); + + logger.error('Failed to connect to master:', { + error: errorMessage, + masterHost, + masterPort + }); + + return res.status(400).json({ + success: false, + message: errorMessage, + data: config + }); + } + + } catch (error: any) { + logger.error('Connect to master error:', error); + res.status(500).json({ + success: false, + message: error.message || 'Failed to connect to master' + }); + } +}; + +/** + * Disconnect from master node (for slave mode) + */ +export const disconnectFromMaster = async (req: AuthRequest, res: Response) => { + try { + let config = await prisma.systemConfig.findFirst(); + + if (!config) { + return res.status(400).json({ + success: false, + message: 'System config not found' + }); + } + + config = await prisma.systemConfig.update({ + where: { id: config.id }, + data: { + masterHost: null, + masterPort: null, + masterApiKey: null, + connected: false, + lastConnectedAt: null, + connectionError: null + } + }); + + logger.info('Disconnected from master', { + userId: req.user?.userId + }); + + res.json({ + success: true, + data: config, + message: 'Disconnected from master node' + }); + + } catch (error) { + logger.error('Disconnect from master error:', error); + res.status(500).json({ + success: false, + message: 'Failed to disconnect from master' + }); + } +}; + +/** + * Test connection to master (for slave mode) + */ +export const testMasterConnection = async (req: AuthRequest, res: Response) => { + try { + const config = await prisma.systemConfig.findFirst(); + + if (!config) { + return res.status(400).json({ + success: false, + message: 'System config not found' + }); + } + + if (!config.masterHost || !config.masterPort || !config.masterApiKey) { + return res.status(400).json({ + success: false, + message: 'Master connection not configured' + }); + } + + // Test connection + const startTime = Date.now(); + const response = await axios.get( + `http://${config.masterHost}:${config.masterPort}/api/slave/health`, + { + headers: { + 'X-API-Key': config.masterApiKey + }, + timeout: 10000 + } + ); 
+ const latency = Date.now() - startTime; + + // Update config + await prisma.systemConfig.update({ + where: { id: config.id }, + data: { + connected: true, + lastConnectedAt: new Date(), + connectionError: null + } + }); + + res.json({ + success: true, + message: 'Connection to master successful', + data: { + latency, + masterVersion: response.data.version, + masterStatus: response.data.status + } + }); + + } catch (error: any) { + logger.error('Test master connection error:', error); + + // Update config with error + const config = await prisma.systemConfig.findFirst(); + if (config) { + await prisma.systemConfig.update({ + where: { id: config.id }, + data: { + connected: false, + connectionError: error.message + } + }); + } + + res.status(400).json({ + success: false, + message: error.response?.data?.message || error.message || 'Connection test failed' + }); + } +}; diff --git a/apps/api/src/routes/index.ts b/apps/api/src/routes/index.ts index 92c9612..8486848 100644 --- a/apps/api/src/routes/index.ts +++ b/apps/api/src/routes/index.ts @@ -13,6 +13,7 @@ import userRoutes from './user.routes'; import dashboardRoutes from './dashboard.routes'; import backupRoutes from './backup.routes'; import slaveRoutes from './slave.routes'; +import systemConfigRoutes from './system-config.routes'; const router = Router(); @@ -40,5 +41,6 @@ router.use('/users', userRoutes); router.use('/dashboard', dashboardRoutes); router.use('/backup', backupRoutes); router.use('/slave', slaveRoutes); +router.use('/system-config', systemConfigRoutes); export default router; diff --git a/apps/api/src/routes/system-config.routes.ts b/apps/api/src/routes/system-config.routes.ts new file mode 100644 index 0000000..6932570 --- /dev/null +++ b/apps/api/src/routes/system-config.routes.ts @@ -0,0 +1,25 @@ +import { Router } from 'express'; +import { authenticate } from '../middleware/auth'; +import { + getSystemConfig, + updateNodeMode, + connectToMaster, + disconnectFromMaster, + testMasterConnection +} from '../controllers/system-config.controller'; + +const router = Router(); + +// All routes require authentication +router.use(authenticate); + +// System configuration routes +router.get('/', getSystemConfig); +router.put('/node-mode', updateNodeMode); + +// Slave mode routes +router.post('/connect-master', connectToMaster); +router.post('/disconnect-master', disconnectFromMaster); +router.post('/test-master-connection', testMasterConnection); + +export default router; diff --git a/apps/web/src/components/pages/SlaveNodes.tsx b/apps/web/src/components/pages/SlaveNodes.tsx index 996ba39..313b308 100644 --- a/apps/web/src/components/pages/SlaveNodes.tsx +++ b/apps/web/src/components/pages/SlaveNodes.tsx @@ -7,38 +7,86 @@ import { Input } from "@/components/ui/input"; import { Table, TableBody, TableCell, TableHead, TableHeader, TableRow } from "@/components/ui/table"; import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, DialogTitle, DialogTrigger } from "@/components/ui/dialog"; import { Label } from "@/components/ui/label"; -import { Server, RefreshCw, Trash2, CheckCircle2, XCircle, Clock, AlertCircle, Loader2 } from "lucide-react"; +import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; +import { Alert, AlertDescription } from "@/components/ui/alert"; +import { Server, RefreshCw, Trash2, CheckCircle2, XCircle, Clock, AlertCircle, Loader2, Power, Link as LinkIcon, KeyRound } from "lucide-react"; import { SlaveNode } from "@/types"; import { useToast } from 
"@/hooks/use-toast"; import { slaveNodesQueryOptions } from "@/queries/slave.query-options"; +import { systemConfigQueryOptions } from "@/queries/system-config.query-options"; import { slaveNodeService } from "@/services/slave.service"; +import { systemConfigService } from "@/services/system-config.service"; const SlaveNodes = () => { const { toast } = useToast(); const queryClient = useQueryClient(); const [isDialogOpen, setIsDialogOpen] = useState(false); + const [isMasterDialogOpen, setIsMasterDialogOpen] = useState(false); - const [formData, setFormData] = useState({ + // Form data for Register Slave Node (Master mode) + const [slaveFormData, setSlaveFormData] = useState({ name: "", host: "", port: 3001, syncInterval: 60 }); + + // Form data for Connect to Master (Slave mode) + const [masterFormData, setMasterFormData] = useState({ + masterHost: "", + masterPort: 3001, + masterApiKey: "" + }); + const [apiKeyDialog, setApiKeyDialog] = useState<{ open: boolean; apiKey: string }>({ open: false, apiKey: '' }); - // Fetch slave nodes - const { data: nodes = [], isLoading } = useQuery(slaveNodesQueryOptions.all); + // Confirm mode change dialog + const [modeChangeDialog, setModeChangeDialog] = useState<{ open: boolean; newMode: 'master' | 'slave' | null }>({ + open: false, + newMode: null + }); + + // Fetch system configuration + const { data: systemConfigData, isLoading: isConfigLoading } = useQuery(systemConfigQueryOptions.all); + const systemConfig = systemConfigData?.data; - // Register mutation + // Fetch slave nodes (only in master mode) + const { data: nodes = [], isLoading: isNodesLoading } = useQuery({ + ...slaveNodesQueryOptions.all, + enabled: systemConfig?.nodeMode === 'master' + }); + + // Update node mode mutation + const updateNodeModeMutation = useMutation({ + mutationFn: systemConfigService.updateNodeMode, + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['system-config'] }); + queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); + + toast({ + title: "Node mode changed", + description: `Node is now in ${data.data.nodeMode} mode`, + }); + }, + onError: (error: any) => { + toast({ + title: "Failed to change mode", + description: error.response?.data?.message || "An error occurred", + variant: "destructive" + }); + } + }); + + // Register slave node mutation (Master mode) const registerMutation = useMutation({ mutationFn: slaveNodeService.register, onSuccess: (data) => { queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); setIsDialogOpen(false); - resetForm(); + resetSlaveForm(); // Show API key in separate dialog (critical info!) setApiKeyDialog({ @@ -46,7 +94,6 @@ const SlaveNodes = () => { apiKey: data.data.apiKey }); - // Also show toast toast({ title: "Slave node registered successfully", description: `Node ${data.data.name} has been registered`, @@ -55,25 +102,77 @@ const SlaveNodes = () => { onError: (error: any) => { console.error('Registration error:', error); - let errorMessage = "Failed to register node"; - - if (error.response?.status === 401) { - errorMessage = "Authentication required. 
Please login first."; - } else if (error.response?.data?.message) { - errorMessage = error.response.data.message; - } else if (error.message) { - errorMessage = error.message; - } - toast({ title: "Registration failed", - description: errorMessage, + description: error.response?.data?.message || "Failed to register node", variant: "destructive", duration: 5000 }); } }); + // Connect to master mutation (Slave mode) + const connectToMasterMutation = useMutation({ + mutationFn: systemConfigService.connectToMaster, + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['system-config'] }); + setIsMasterDialogOpen(false); + resetMasterForm(); + + toast({ + title: "Connected to master", + description: `Successfully connected to ${data.data.masterHost}:${data.data.masterPort}`, + }); + }, + onError: (error: any) => { + toast({ + title: "Connection failed", + description: error.response?.data?.message || "Failed to connect to master", + variant: "destructive" + }); + } + }); + + // Disconnect from master mutation + const disconnectMutation = useMutation({ + mutationFn: systemConfigService.disconnectFromMaster, + onSuccess: () => { + queryClient.invalidateQueries({ queryKey: ['system-config'] }); + + toast({ + title: "Disconnected", + description: "Disconnected from master node", + }); + }, + onError: (error: any) => { + toast({ + title: "Disconnect failed", + description: error.response?.data?.message || "Failed to disconnect", + variant: "destructive" + }); + } + }); + + // Test master connection mutation + const testConnectionMutation = useMutation({ + mutationFn: systemConfigService.testMasterConnection, + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['system-config'] }); + + toast({ + title: "Connection test successful", + description: `Latency: ${data.data.latency}ms | Master: ${data.data.masterStatus}`, + }); + }, + onError: (error: any) => { + toast({ + title: "Connection test failed", + description: error.response?.data?.message || "Failed to connect", + variant: "destructive" + }); + } + }); + // Delete mutation const deleteMutation = useMutation({ mutationFn: slaveNodeService.delete, @@ -129,8 +228,8 @@ const SlaveNodes = () => { } }); - const handleAddNode = () => { - if (!formData.name || !formData.host) { + const handleRegisterSlave = () => { + if (!slaveFormData.name || !slaveFormData.host) { toast({ title: "Validation error", description: "Name and host are required", @@ -139,18 +238,33 @@ const SlaveNodes = () => { return; } - console.log('Registering node:', formData); - registerMutation.mutate({ - name: formData.name, - host: formData.host, - port: formData.port, - syncInterval: formData.syncInterval + name: slaveFormData.name, + host: slaveFormData.host, + port: slaveFormData.port, + syncInterval: slaveFormData.syncInterval + }); + }; + + const handleConnectToMaster = () => { + if (!masterFormData.masterHost || !masterFormData.masterApiKey) { + toast({ + title: "Validation error", + description: "Master host and API key are required", + variant: "destructive" + }); + return; + } + + connectToMasterMutation.mutate({ + masterHost: masterFormData.masterHost, + masterPort: masterFormData.masterPort, + masterApiKey: masterFormData.masterApiKey }); }; - const resetForm = () => { - setFormData({ + const resetSlaveForm = () => { + setSlaveFormData({ name: "", host: "", port: 3001, @@ -158,6 +272,14 @@ const SlaveNodes = () => { }); }; + const resetMasterForm = () => { + setMasterFormData({ + masterHost: "", + masterPort: 3001, + masterApiKey: "" + 
}); + }; + const handleSync = (nodeId: string) => { syncMutation.mutate({ id: nodeId, force: false }); }; @@ -172,6 +294,23 @@ const SlaveNodes = () => { } }; + const handleModeChange = (newMode: 'master' | 'slave') => { + if (systemConfig?.nodeMode === newMode) return; + + // Show custom dialog instead of browser confirm + setModeChangeDialog({ + open: true, + newMode + }); + }; + + const confirmModeChange = () => { + if (modeChangeDialog.newMode) { + updateNodeModeMutation.mutate(modeChangeDialog.newMode); + setModeChangeDialog({ open: false, newMode: null }); + } + }; + const getStatusColor = (status: string) => { switch (status) { case 'online': return 'default'; @@ -192,19 +331,7 @@ const SlaveNodes = () => { } }; - const isNodeInSync = (node: SlaveNode) => { - // Legacy support for old mock data - if (node.syncStatus?.inSync !== undefined) { - return node.syncStatus.inSync; - } - // New logic: check if configHash exists and lastSyncAt is recent - return !!node.configHash && node.lastSyncAt; - }; - - // Check authentication - const isAuthenticated = !!localStorage.getItem('accessToken'); - - if (isLoading) { + if (isConfigLoading || isNodesLoading) { return (
    @@ -212,364 +339,483 @@ const SlaveNodes = () => { ); } + const currentMode = systemConfig?.nodeMode || 'master'; + const isMasterMode = currentMode === 'master'; + const isSlaveMode = currentMode === 'slave'; + return (
    - {/* Authentication Warning */} - {!isAuthenticated && ( - - -
    - -
    -

    - Authentication Required -

    -

    - You need to login to register and manage slave nodes. -

    -
    -
    -
    -
    - )} - + {/* Header */}
    -

    Slave Nodes

    -

    Manage distributed nginx nodes and configuration sync

    +

    Node Synchronization

    +

    Manage master-slave node configuration

    -
    - - - - - - - - Register Slave Node - - Add a new slave node to the cluster - - -
    -
    - - setFormData({ ...formData, name: e.target.value })} - placeholder="nginx-slave-01" - /> -
    -
    - - setFormData({ ...formData, host: e.target.value })} - placeholder="10.0.10.11" - /> -
    -
    - - setFormData({ ...formData, port: Number(e.target.value) })} - placeholder="3001" - /> -
    -
    - - setFormData({ ...formData, syncInterval: Number(e.target.value) })} - placeholder="60" - /> -
    +
    + + {/* Node Mode Status Card */} + + +
    +
    + {isMasterMode ? ( + + ) : ( + + )} +
    +

    + Current Mode: + {isMasterMode ? 'MASTER' : 'SLAVE'} + +

    +

    + {isMasterMode ? 'This node can register and manage slave nodes' : 'This node is connected to a master node'} +

    - - - - - -
    - - {/* API Key Dialog - Critical Information */} - setApiKeyDialog({ ...apiKeyDialog, open })}> - - - - - Save Your API Key - - - This is the only time you'll see this API key. Copy it now and store it securely. - - -
    -
    -

    - ⚠️ Important: You will need this API key to configure your slave node. +

    + {isSlaveMode && systemConfig?.connected && ( + + + Connected to Master + + )} +
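The status card above renders from the SystemConfig record; a trimmed view of the fields it touches (the full type is added to types/index.ts later in this patch):

  // Trimmed view of SystemConfig as consumed by this component.
  interface SystemConfigView {
    nodeMode: 'master' | 'slave';
    connected: boolean;              // slave mode: attached to a master?
    masterHost?: string | null;      // set while connected
    masterPort?: number | null;
    lastConnectedAt?: string | null; // ISO timestamp
  }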
    + + + + {/* Main Tabs */} + handleModeChange(value as 'master' | 'slave')}> + + + + Master Mode + + + + Slave Mode + + + + {/* MASTER MODE TAB */} + + + + Master Node Configuration + + Register slave nodes and manage distributed configuration sync + + + +
    +
    +

    Registered Slave Nodes

    +

    + {nodes.length} slave node(s) registered

    - -
    - -
    - - -
    +
    + + + + + + + + Register Slave Node + + Add a new slave node to receive configuration updates + + +
    +
    + + setSlaveFormData({ ...slaveFormData, name: e.target.value })} + placeholder="slave-node-01" + /> +
    +
    + + setSlaveFormData({ ...slaveFormData, host: e.target.value })} + placeholder="Enter slave node IP address" + /> +
    +
    + + setSlaveFormData({ ...slaveFormData, port: Number(e.target.value) })} + placeholder="3001" + /> +
    +
    + + setSlaveFormData({ ...slaveFormData, syncInterval: Number(e.target.value) })} + placeholder="60" + /> +
    +
    + + + + +
    +
    +
    -
    -

    Next Steps:

    -
      -
-      1. Copy the API key above
-      2. Save it in your slave node's environment variables
-      3. Configure: SLAVE_API_KEY={apiKeyDialog.apiKey.substring(0, 16)}...
-      4. Start your slave node application
    -
    + {/* Slave Nodes Table */} +
    + + + + Name + Host:Port + Status + Last Seen + Config Hash + Actions + + + + {nodes.length === 0 ? ( + + + No slave nodes registered. Click "Register Slave Node" to add one. + + + ) : ( + nodes.map((node) => ( + + {node.name} + {node.host}:{node.port} + + + {getStatusIcon(node.status)} + {node.status} + + + + {node.lastSeen ? new Date(node.lastSeen).toLocaleString() : 'Never'} + + + {node.configHash?.substring(0, 12) || 'N/A'}... + + + + + + + )) + )} + +
    - - - - -
    -
    -
    + + + -
    - - - Total Nodes - - - -
    {nodes.length}
    -

    - Registered slave nodes -

    -
    -
    - - - - Online Nodes - - - -
    - {nodes.filter(n => n.status === 'online').length} -
    -

    - Active and healthy -

    -
    -
    - - - - Sync Status - - - -
    - {nodes.filter(n => isNodeInSync(n)).length}/{nodes.length} -
    -

    - Nodes in sync -

    -
    -
    -
    + {/* SLAVE MODE TAB */} + + + + Slave Node Configuration + + Connect to a master node to receive configuration updates + + + + {!systemConfig?.connected ? ( +
    + + + + You are in Slave Mode but not connected to any master node. + Click "Connect to Master" to configure the connection. + + - - - Registered Nodes ({nodes.length}) - View and manage slave node cluster - - -
    - - - - Name - Host:Port - Status - Version - Last Seen - Sync Status - Config Hash - Enabled - Actions - - - - {nodes.length === 0 ? ( - - - No slave nodes registered. Click "Register Node" to add one. - - - ) : ( - nodes.map((node) => ( - - {node.name} - {node.host}:{node.port} - - - {getStatusIcon(node.status)} - {node.status} - - - {node.version || 'N/A'} - - {node.lastSeen ? new Date(node.lastSeen).toLocaleString() : 'Never'} - - - {isNodeInSync(node) ? ( - - - In Sync - - ) : ( - - - Out of Sync - - )} - - - {node.configHash?.substring(0, 12) || 'N/A'}... - - - - {node.syncEnabled ? 'Yes' : 'No'} - - - - + + + + Connect to Master Node + + Enter the master node details and API key to establish connection + + +
    +
    + + setMasterFormData({ ...masterFormData, masterHost: e.target.value })} + placeholder="Enter master node IP address" + /> +
    +
    + + setMasterFormData({ ...masterFormData, masterPort: Number(e.target.value) })} + placeholder="3001" + /> +
    +
    + + setMasterFormData({ ...masterFormData, masterApiKey: e.target.value })} + placeholder="Enter API key from master node" + /> +

    + Get this API key from the master node when registering this slave +

    +
    +
    + + - -
    -
    - )) - )} -
    -
    -
    -
    -
    + + +
    +
    + ) : ( +
    + + +
    +
    +
    + + Connected to Master +
    + + Active + +
    +
    +
    + Master Host: + {systemConfig.masterHost}:{systemConfig.masterPort} +
    + {systemConfig.lastConnectedAt && ( +
    + Last Connected: + {new Date(systemConfig.lastConnectedAt).toLocaleString()} +
    + )} +
    +
    + + +
    +
    +
    +
    +
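Behind the Test Connection button above, the controller shown earlier in this patch performs a timed health probe against the master; condensed to its essentials:

  import axios from 'axios';

  // Condensed from system-config.controller.ts: timed probe of the master's
  // health endpoint using the stored API key.
  async function probeMaster(cfg: { masterHost: string; masterPort: number; masterApiKey: string }) {
    const start = Date.now();
    const res = await axios.get(
      `http://${cfg.masterHost}:${cfg.masterPort}/api/slave/health`,
      { headers: { 'X-API-Key': cfg.masterApiKey }, timeout: 10000 }
    );
    return { latency: Date.now() - start, masterStatus: res.data.status }; // fed to the toast
  }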
    + )} + + + + + + {/* Mode Change Confirmation Dialog */} + setModeChangeDialog({ ...modeChangeDialog, open })}> + + + + + Confirm Mode Change + + + {modeChangeDialog.newMode === 'slave' + ? "Switching to Slave mode will disable the ability to register slave nodes. You will need to connect to a master node." + : "Switching to Master mode will disconnect from the current master and allow you to register slave nodes."} + + + + + + + + + + {/* API Key Dialog */} + setApiKeyDialog({ ...apiKeyDialog, open })}> + + + + + Slave Node API Key + + + Save this API key! You'll need it to connect the slave node to this master. + + +
    + + + + This API key will only be shown once. Copy it now and store it securely. + + - - - Cluster Topology - Visual representation of node cluster - - -
    -
    -
    - +
    + +
    + +
    -

    Master Node

    -

    Primary

    - {nodes.length > 0 && ( - <> -
    -
    - {nodes.map((node) => ( -
    -
    - -
    -

    {node.name}

    - - {node.status} - -
    - ))} -
    - - )} + +
    +

    Next Steps:

    +
      +
    1. Go to the slave node web interface
    2. Switch to Slave Mode
    3. Click "Connect to Master Node"
    4. Enter this API key along with master host/port
    5. Click "Connect" to establish synchronization
    +
    - - + + + + +
    ); }; diff --git a/apps/web/src/queries/system-config.query-options.ts b/apps/web/src/queries/system-config.query-options.ts new file mode 100644 index 0000000..6c6a651 --- /dev/null +++ b/apps/web/src/queries/system-config.query-options.ts @@ -0,0 +1,10 @@ +import { queryOptions } from '@tanstack/react-query'; +import { systemConfigService } from '@/services/system-config.service'; + +export const systemConfigQueryOptions = { + all: queryOptions({ + queryKey: ['system-config'], + queryFn: systemConfigService.getConfig, + refetchInterval: 30000, // Refetch every 30s + }), +}; diff --git a/apps/web/src/services/system-config.service.ts b/apps/web/src/services/system-config.service.ts new file mode 100644 index 0000000..efb20b1 --- /dev/null +++ b/apps/web/src/services/system-config.service.ts @@ -0,0 +1,88 @@ +import axios from 'axios'; +import { SystemConfig, ApiResponse } from '@/types'; + +const API_URL = import.meta.env.VITE_API_URL || 'http://localhost:3001/api'; + +const getHeaders = () => { + const token = localStorage.getItem('accessToken'); + return { + 'Content-Type': 'application/json', + Authorization: token ? `Bearer ${token}` : '', + }; +}; + +export const systemConfigService = { + /** + * Get system configuration + */ + getConfig: async (): Promise> => { + const response = await axios.get(`${API_URL}/system-config`, { + headers: getHeaders(), + }); + return response.data; + }, + + /** + * Update node mode (master or slave) + */ + updateNodeMode: async (nodeMode: 'master' | 'slave'): Promise> => { + const response = await axios.put( + `${API_URL}/system-config/node-mode`, + { nodeMode }, + { + headers: getHeaders(), + } + ); + return response.data; + }, + + /** + * Connect to master node (for slave mode) + */ + connectToMaster: async (params: { + masterHost: string; + masterPort: number; + masterApiKey: string; + }): Promise> => { + const response = await axios.post( + `${API_URL}/system-config/connect-master`, + params, + { + headers: getHeaders(), + } + ); + return response.data; + }, + + /** + * Disconnect from master node + */ + disconnectFromMaster: async (): Promise> => { + const response = await axios.post( + `${API_URL}/system-config/disconnect-master`, + {}, + { + headers: getHeaders(), + } + ); + return response.data; + }, + + /** + * Test connection to master + */ + testMasterConnection: async (): Promise> => { + const response = await axios.post( + `${API_URL}/system-config/test-master-connection`, + {}, + { + headers: getHeaders(), + } + ); + return response.data; + }, +}; diff --git a/apps/web/src/types/index.ts b/apps/web/src/types/index.ts index e8dacf2..0158688 100644 --- a/apps/web/src/types/index.ts +++ b/apps/web/src/types/index.ts @@ -278,3 +278,25 @@ export interface ApiResponse { message?: string; pagination?: Pagination; } + +export interface SystemConfig { + id: string; + nodeMode: 'master' | 'slave'; + + // Master mode settings + masterApiEnabled: boolean; + + // Slave mode settings + slaveApiEnabled: boolean; + masterHost?: string | null; + masterPort?: number | null; + masterApiKey?: string | null; + + // Connection status (for slave mode) + connected: boolean; + lastConnectedAt?: string | null; + connectionError?: string | null; + + createdAt: string; + updatedAt: string; +} From d3841de5aa2530fcb501d68b54023372ab9ec7cd Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 10:01:42 +0000 Subject: [PATCH 14/32] feat: add health check endpoint for slave nodes --- apps/api/src/routes/slave.routes.ts | 7 +++++++ 1 file changed, 7 
insertions(+) diff --git a/apps/api/src/routes/slave.routes.ts b/apps/api/src/routes/slave.routes.ts index d8881fc..2f8acbc 100644 --- a/apps/api/src/routes/slave.routes.ts +++ b/apps/api/src/routes/slave.routes.ts @@ -140,6 +140,13 @@ router.post( // SLAVE API ENDPOINTS (API Key Authenticated) // ========================================== +/** + * @route GET /api/slave/health + * @desc Health check endpoint (called by master to verify slave is alive) + * @access Slave API Key + */ +router.get('/health', validateSlaveApiKey, healthCheck); + /** * @route POST /api/slave/sync/pull-config * @desc Pull configuration from master (called by slave) From 846fd22a2533ce9ffa9e4e8e25f87eb1c8f9bd0b Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 10:10:10 +0000 Subject: [PATCH 15/32] feat: implement sync configuration from master for slave nodes --- apps/web/src/components/pages/SlaveNodes.tsx | 96 ++++++------------- .../web/src/services/system-config.service.ts | 17 ++++ 2 files changed, 47 insertions(+), 66 deletions(-) diff --git a/apps/web/src/components/pages/SlaveNodes.tsx b/apps/web/src/components/pages/SlaveNodes.tsx index 313b308..3396e4c 100644 --- a/apps/web/src/components/pages/SlaveNodes.tsx +++ b/apps/web/src/components/pages/SlaveNodes.tsx @@ -9,7 +9,7 @@ import { Dialog, DialogContent, DialogDescription, DialogFooter, DialogHeader, D import { Label } from "@/components/ui/label"; import { Tabs, TabsContent, TabsList, TabsTrigger } from "@/components/ui/tabs"; import { Alert, AlertDescription } from "@/components/ui/alert"; -import { Server, RefreshCw, Trash2, CheckCircle2, XCircle, Clock, AlertCircle, Loader2, Power, Link as LinkIcon, KeyRound } from "lucide-react"; +import { Server, RefreshCw, Trash2, CheckCircle2, XCircle, Clock, AlertCircle, Loader2, Link as LinkIcon, KeyRound } from "lucide-react"; import { SlaveNode } from "@/types"; import { useToast } from "@/hooks/use-toast"; import { slaveNodesQueryOptions } from "@/queries/slave.query-options"; @@ -173,56 +173,37 @@ const SlaveNodes = () => { } }); - // Delete mutation - const deleteMutation = useMutation({ - mutationFn: slaveNodeService.delete, - onSuccess: () => { - queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); - toast({ title: "Node removed successfully" }); - }, - onError: (error: any) => { + // Sync from master mutation (slave pulls config) + const syncFromMasterMutation = useMutation({ + mutationFn: systemConfigService.syncWithMaster, + onSuccess: (data) => { + queryClient.invalidateQueries({ queryKey: ['system-config'] }); + toast({ - title: "Delete failed", - description: error.response?.data?.message || "Failed to delete node", - variant: "destructive" + title: "Sync completed", + description: `${data.data.changesApplied} changes applied from master`, }); - } - }); - - // Sync mutation - const syncMutation = useMutation({ - mutationFn: ({ id, force }: { id: string; force?: boolean }) => - slaveNodeService.syncToNode(id, { force }), - onSuccess: (data) => { - queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); - const message = data.data?.skipped - ? 
"Configuration already up to date" - : `Synced ${data.data?.changesCount || 0} changes in ${data.data?.duration || 0}ms`; - toast({ title: "Sync completed", description: message }); }, onError: (error: any) => { toast({ title: "Sync failed", - description: error.response?.data?.message || "Failed to sync configuration", + description: error.response?.data?.message || "Failed to sync with master", variant: "destructive" }); } }); - // Sync all mutation - const syncAllMutation = useMutation({ - mutationFn: slaveNodeService.syncToAll, - onSuccess: (data) => { + // Delete mutation + const deleteMutation = useMutation({ + mutationFn: slaveNodeService.delete, + onSuccess: () => { queryClient.invalidateQueries({ queryKey: ['slave-nodes'] }); - toast({ - title: "Sync to all nodes completed", - description: `${data.data.success}/${data.data.total} nodes synced successfully` - }); + toast({ title: "Node removed successfully" }); }, onError: (error: any) => { toast({ - title: "Sync all failed", - description: error.response?.data?.message || "Failed to sync to all nodes", + title: "Delete failed", + description: error.response?.data?.message || "Failed to delete node", variant: "destructive" }); } @@ -280,14 +261,6 @@ const SlaveNodes = () => { }); }; - const handleSync = (nodeId: string) => { - syncMutation.mutate({ id: nodeId, force: false }); - }; - - const handleSyncAll = () => { - syncAllMutation.mutate(); - }; - const handleDelete = (id: string) => { if (confirm("Are you sure you want to remove this node?")) { deleteMutation.mutate(id); @@ -416,23 +389,10 @@ const SlaveNodes = () => {

    Registered Slave Nodes

    - {nodes.length} slave node(s) registered + {nodes.length} slave node(s) registered - Slaves will pull config automatically

    -
    +
    -
    - - setSlaveFormData({ ...slaveFormData, syncInterval: Number(e.target.value) })} - placeholder="60" - /> -
    +
    + + setMasterFormData({ ...masterFormData, syncInterval: Number(e.target.value) })} + placeholder="60" + /> +

    + How often to pull configuration from master (minimum: 10 seconds) +

    +
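The interval field above feeds the slave's pull scheduler, which this diff does not include; a hedged sketch of what such a loop could look like (names illustrative, 10-second floor matching the hint text):

  // Illustrative pull loop honoring syncInterval; the real scheduler is not
  // part of this patch.
  function startPullLoop(intervalSeconds: number, pullOnce: () => Promise<void>) {
    const period = Math.max(10, intervalSeconds) * 1000; // enforce 10s minimum
    const timer = setInterval(() => {
      pullOnce().catch((err) => console.error('config pull failed', err));
    }, period);
    return () => clearInterval(timer); // stop handle
  }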
    + + + + + + {/* Disconnect Confirmation Dialog */} + + + + + + Confirm Disconnect + + + Are you sure you want to disconnect from the master node? You will need to reconnect manually. + + + + + + + + ); }; diff --git a/apps/web/src/services/system-config.service.ts b/apps/web/src/services/system-config.service.ts index c6c37fa..3ef8810 100644 --- a/apps/web/src/services/system-config.service.ts +++ b/apps/web/src/services/system-config.service.ts @@ -43,6 +43,7 @@ export const systemConfigService = { masterHost: string; masterPort: number; masterApiKey: string; + syncInterval?: number; }): Promise> => { const response = await axios.post( `${API_URL}/system-config/connect-master`, diff --git a/apps/web/src/types/index.ts b/apps/web/src/types/index.ts index 0158688..f25c029 100644 --- a/apps/web/src/types/index.ts +++ b/apps/web/src/types/index.ts @@ -291,6 +291,7 @@ export interface SystemConfig { masterHost?: string | null; masterPort?: number | null; masterApiKey?: string | null; + syncInterval: number; // Sync interval in seconds // Connection status (for slave mode) connected: boolean; From 71157240b3dd500b8c051a5392f8c07a295766e9 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 11:10:06 +0000 Subject: [PATCH 17/32] feat: Update slave node configuration and status during API key validation --- .../src/controllers/node-sync.controller.ts | 13 +++++ apps/api/src/middleware/slaveAuth.ts | 56 ++++++++++++++++--- 2 files changed, 62 insertions(+), 7 deletions(-) diff --git a/apps/api/src/controllers/node-sync.controller.ts b/apps/api/src/controllers/node-sync.controller.ts index fffdac3..0484210 100644 --- a/apps/api/src/controllers/node-sync.controller.ts +++ b/apps/api/src/controllers/node-sync.controller.ts @@ -22,6 +22,19 @@ export const exportForSync = async (req: SlaveRequest, res: Response): Promise { + logger.warn('[NODE-SYNC] Failed to update configHash', { + nodeId: req.slaveNode?.id, + error: err.message + }); + }); + } + res.json({ success: true, data: { diff --git a/apps/api/src/middleware/slaveAuth.ts b/apps/api/src/middleware/slaveAuth.ts index 2e12cba..5d693fd 100644 --- a/apps/api/src/middleware/slaveAuth.ts +++ b/apps/api/src/middleware/slaveAuth.ts @@ -82,10 +82,10 @@ export const validateSlaveApiKey = async ( /** * Validate Master API Key for Node Sync * Used when slave nodes pull config from master - * Checks against SystemConfig.masterApiKey (which is the key slaves use) + * Updates slave node status when they connect */ export const validateMasterApiKey = async ( - req: Request, + req: SlaveRequest, res: Response, next: NextFunction ): Promise => { @@ -100,17 +100,59 @@ export const validateMasterApiKey = async ( return; } - // Check if any slave has this API key configured (master can accept any slave) - // For now, just validate format (non-empty, min length) - if (apiKey.length < 10) { + // Find slave node by API key + const slaveNode = await prisma.slaveNode.findFirst({ + where: { apiKey }, + select: { + id: true, + name: true, + host: true, + port: true, + syncEnabled: true + } + }); + + if (!slaveNode) { + logger.warn('[NODE-SYNC] Invalid slave API key attempt', { + apiKey: apiKey.substring(0, 8) + '...' 
+ }); res.status(401).json({ success: false, - message: 'Invalid API key format' + message: 'Invalid API key' }); return; } - // API key is valid, continue + if (!slaveNode.syncEnabled) { + res.status(403).json({ + success: false, + message: 'Node sync is disabled' + }); + return; + } + + // Attach slave node info to request + req.slaveNode = slaveNode; + + // Update last seen and status to online + await prisma.slaveNode.update({ + where: { id: slaveNode.id }, + data: { + lastSeen: new Date(), + status: 'online' + } + }).catch((err) => { + logger.warn('[NODE-SYNC] Failed to update slave node status', { + nodeId: slaveNode.id, + error: err.message + }); + }); + + logger.info('[NODE-SYNC] Slave node authenticated', { + nodeId: slaveNode.id, + nodeName: slaveNode.name + }); + next(); } catch (error: any) { logger.error('[SLAVE-AUTH] Validate master API key error:', error); From 152f4d9820bca2707d597b786091991b067a340e Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 11:11:46 +0000 Subject: [PATCH 18/32] feat: Add slave node status checker and integrate with application lifecycle --- apps/api/src/index.ts | 11 ++++ apps/api/src/utils/slave-status-checker.ts | 68 ++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100644 apps/api/src/utils/slave-status-checker.ts diff --git a/apps/api/src/index.ts b/apps/api/src/index.ts index 66404c1..f19f1c6 100644 --- a/apps/api/src/index.ts +++ b/apps/api/src/index.ts @@ -10,9 +10,11 @@ import logger from './utils/logger'; import { initializeNginxForSSL } from './utils/nginx-setup'; import { initializeModSecurityConfig } from './utils/modsec-setup'; import { startAlertMonitoring, stopAlertMonitoring } from './utils/alert-monitoring.service'; +import { startSlaveNodeStatusCheck, stopSlaveNodeStatusCheck } from './utils/slave-status-checker'; const app: Application = express(); let monitoringTimer: NodeJS.Timeout | null = null; +let slaveStatusTimer: NodeJS.Timeout | null = null; // Security middleware app.use(helmet()); @@ -65,6 +67,9 @@ const server = app.listen(PORT, () => { // Start alert monitoring service (global scan every 10 seconds) // Each rule has its own checkInterval for when to actually check monitoringTimer = startAlertMonitoring(10); + + // Start slave node status checker (check every minute) + slaveStatusTimer = startSlaveNodeStatusCheck(); }); // Graceful shutdown @@ -73,6 +78,9 @@ process.on('SIGTERM', () => { if (monitoringTimer) { stopAlertMonitoring(monitoringTimer); } + if (slaveStatusTimer) { + stopSlaveNodeStatusCheck(slaveStatusTimer); + } server.close(() => { logger.info('HTTP server closed'); process.exit(0); @@ -84,6 +92,9 @@ process.on('SIGINT', () => { if (monitoringTimer) { stopAlertMonitoring(monitoringTimer); } + if (slaveStatusTimer) { + stopSlaveNodeStatusCheck(slaveStatusTimer); + } server.close(() => { logger.info('HTTP server closed'); process.exit(0); diff --git a/apps/api/src/utils/slave-status-checker.ts b/apps/api/src/utils/slave-status-checker.ts new file mode 100644 index 0000000..590057f --- /dev/null +++ b/apps/api/src/utils/slave-status-checker.ts @@ -0,0 +1,68 @@ +import prisma from '../config/database'; +import logger from './logger'; + +/** + * Check slave nodes and mark as offline if not seen for 5 minutes + */ +export async function checkSlaveNodeStatus() { + try { + const fiveMinutesAgo = new Date(Date.now() - 5 * 60 * 1000); + + // Find nodes that haven't been seen in 5 minutes and are currently online + const staleNodes = await prisma.slaveNode.findMany({ + where: { + 
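+      // "Stale" = still flagged online but silent for the entire five-minute
+      // window computed above (lastSeen < fiveMinutesAgo).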
status: 'online', + lastSeen: { + lt: fiveMinutesAgo + } + }, + select: { + id: true, + name: true, + lastSeen: true + } + }); + + if (staleNodes.length > 0) { + logger.info('[SLAVE-STATUS] Marking stale nodes as offline', { + count: staleNodes.length, + nodes: staleNodes.map(n => n.name) + }); + + // Update to offline + await prisma.slaveNode.updateMany({ + where: { + id: { + in: staleNodes.map(n => n.id) + } + }, + data: { + status: 'offline' + } + }); + } + } catch (error: any) { + logger.error('[SLAVE-STATUS] Check slave status error:', error); + } +} + +/** + * Start background job to check slave node status every 1 minute + */ +export function startSlaveNodeStatusCheck(): NodeJS.Timeout { + logger.info('[SLAVE-STATUS] Starting slave node status checker (interval: 60s)'); + + // Run immediately on start + checkSlaveNodeStatus(); + + // Then run every minute + return setInterval(checkSlaveNodeStatus, 60 * 1000); +} + +/** + * Stop background job + */ +export function stopSlaveNodeStatusCheck(timer: NodeJS.Timeout) { + logger.info('[SLAVE-STATUS] Stopping slave node status checker'); + clearInterval(timer); +} From f3415d325ad44935f591ccafb62be02466d287a9 Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 12:44:17 +0000 Subject: [PATCH 19/32] feat: Add endpoint to get current config hash for slave nodes --- apps/api/prisma/schema.prisma | 600 +++++++++--------- .../src/controllers/node-sync.controller.ts | 108 ++-- .../controllers/system-config.controller.ts | 55 +- apps/api/src/routes/node-sync.routes.ts | 8 +- 4 files changed, 408 insertions(+), 363 deletions(-) diff --git a/apps/api/prisma/schema.prisma b/apps/api/prisma/schema.prisma index 3ee7b86..247fa99 100644 --- a/apps/api/prisma/schema.prisma +++ b/apps/api/prisma/schema.prisma @@ -31,21 +31,21 @@ enum ActivityType { } model User { - id String @id @default(cuid()) - username String @unique - email String @unique - password String - fullName String - role UserRole @default(viewer) - status UserStatus @default(active) - avatar String? - phone String? - timezone String @default("Asia/Ho_Chi_Minh") - language String @default("en") - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt - lastLogin DateTime? - + id String @id @default(cuid()) + username String @unique + email String @unique + password String + fullName String + role UserRole @default(viewer) + status UserStatus @default(active) + avatar String? + phone String? + timezone String @default("Asia/Ho_Chi_Minh") + language String @default("en") + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + lastLogin DateTime? + // Relations profile UserProfile? twoFactor TwoFactorAuth? @@ -57,15 +57,15 @@ model User { } model UserProfile { - id String @id @default(cuid()) - userId String @unique - user User @relation(fields: [userId], references: [id], onDelete: Cascade) - + id String @id @default(cuid()) + userId String @unique + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + // Additional profile fields can be added here - bio String? - location String? - website String? - + bio String? + location String? + website String? + createdAt DateTime @default(now()) updatedAt DateTime @updatedAt @@ -73,34 +73,34 @@ model UserProfile { } model TwoFactorAuth { - id String @id @default(cuid()) - userId String @unique - user User @relation(fields: [userId], references: [id], onDelete: Cascade) - - enabled Boolean @default(false) - method String @default("totp") // totp, sms - secret String? 
- backupCodes String[] // Encrypted backup codes - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + id String @id @default(cuid()) + userId String @unique + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + enabled Boolean @default(false) + method String @default("totp") // totp, sms + secret String? + backupCodes String[] // Encrypted backup codes + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@map("two_factor_auth") } model ActivityLog { - id String @id @default(cuid()) - userId String - user User @relation(fields: [userId], references: [id], onDelete: Cascade) - - action String - type ActivityType - ip String - userAgent String @db.Text - details String? @db.Text - success Boolean @default(true) - - timestamp DateTime @default(now()) + id String @id @default(cuid()) + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + action String + type ActivityType + ip String + userAgent String @db.Text + details String? @db.Text + success Boolean @default(true) + + timestamp DateTime @default(now()) @@index([userId, timestamp]) @@index([type, timestamp]) @@ -108,35 +108,35 @@ model ActivityLog { } model RefreshToken { - id String @id @default(cuid()) - userId String - user User @relation(fields: [userId], references: [id], onDelete: Cascade) - - token String @unique + id String @id @default(cuid()) + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + token String @unique expiresAt DateTime - createdAt DateTime @default(now()) + createdAt DateTime @default(now()) revokedAt DateTime? - + @@index([userId]) @@index([token]) @@map("refresh_tokens") } model UserSession { - id String @id @default(cuid()) - userId String - user User @relation(fields: [userId], references: [id], onDelete: Cascade) - - sessionId String @unique - ip String - userAgent String @db.Text - device String? - location String? - - lastActive DateTime @default(now()) - expiresAt DateTime - createdAt DateTime @default(now()) - + id String @id @default(cuid()) + userId String + user User @relation(fields: [userId], references: [id], onDelete: Cascade) + + sessionId String @unique + ip String + userAgent String @db.Text + device String? + location String? + + lastActive DateTime @default(now()) + expiresAt DateTime + createdAt DateTime @default(now()) + @@index([userId]) @@index([sessionId]) @@map("user_sessions") @@ -169,22 +169,22 @@ enum SSLStatus { } model Domain { - id String @id @default(cuid()) - name String @unique - status DomainStatus @default(inactive) - sslEnabled Boolean @default(false) - sslExpiry DateTime? - modsecEnabled Boolean @default(true) - + id String @id @default(cuid()) + name String @unique + status DomainStatus @default(inactive) + sslEnabled Boolean @default(false) + sslExpiry DateTime? + modsecEnabled Boolean @default(true) + // Relations - upstreams Upstream[] - loadBalancer LoadBalancerConfig? - sslCertificate SSLCertificate? - modsecCRSRules ModSecCRSRule[] - modsecRules ModSecRule[] - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + upstreams Upstream[] + loadBalancer LoadBalancerConfig? + sslCertificate SSLCertificate? 
+ modsecCRSRules ModSecCRSRule[] + modsecRules ModSecRule[] + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@index([name]) @@index([status]) @@ -192,62 +192,62 @@ model Domain { } model Upstream { - id String @id @default(cuid()) - domainId String - domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) - + id String @id @default(cuid()) + domainId String + domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) + host String port Int - protocol String @default("http") // http or https - sslVerify Boolean @default(true) // proxy_ssl_verify on/off - weight Int @default(1) - maxFails Int @default(3) - failTimeout Int @default(10) // seconds - status UpstreamStatus @default(checking) - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + protocol String @default("http") // http or https + sslVerify Boolean @default(true) // proxy_ssl_verify on/off + weight Int @default(1) + maxFails Int @default(3) + failTimeout Int @default(10) // seconds + status UpstreamStatus @default(checking) + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@index([domainId]) @@map("upstreams") } model LoadBalancerConfig { - id String @id @default(cuid()) - domainId String @unique - domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) - - algorithm LoadBalancerAlgorithm @default(round_robin) - healthCheckEnabled Boolean @default(true) - healthCheckInterval Int @default(30) // seconds - healthCheckTimeout Int @default(5) // seconds - healthCheckPath String @default("/") - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + id String @id @default(cuid()) + domainId String @unique + domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) + + algorithm LoadBalancerAlgorithm @default(round_robin) + healthCheckEnabled Boolean @default(true) + healthCheckInterval Int @default(30) // seconds + healthCheckTimeout Int @default(5) // seconds + healthCheckPath String @default("/") + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@map("load_balancer_configs") } model SSLCertificate { - id String @id @default(cuid()) - domainId String @unique - domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) - + id String @id @default(cuid()) + domainId String @unique + domain Domain @relation(fields: [domainId], references: [id], onDelete: Cascade) + commonName String - sans String[] // Subject Alternative Names + sans String[] // Subject Alternative Names issuer String - certificate String @db.Text // PEM format - privateKey String @db.Text // PEM format - chain String? @db.Text // PEM format - - validFrom DateTime - validTo DateTime - autoRenew Boolean @default(true) - status SSLStatus @default(valid) - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + certificate String @db.Text // PEM format + privateKey String @db.Text // PEM format + chain String? @db.Text // PEM format + + validFrom DateTime + validTo DateTime + autoRenew Boolean @default(true) + status SSLStatus @default(valid) + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@index([domainId]) @@index([validTo]) @@ -258,19 +258,19 @@ model SSLCertificate { // Only stores metadata and enabled status // Actual rules come from CRS files model ModSecCRSRule { - id String @id @default(cuid()) - domainId String? - domain Domain? 
@relation(fields: [domainId], references: [id], onDelete: Cascade) - - ruleFile String // e.g., "REQUEST-942-APPLICATION-ATTACK-SQLI.conf" + id String @id @default(cuid()) + domainId String? + domain Domain? @relation(fields: [domainId], references: [id], onDelete: Cascade) + + ruleFile String // e.g., "REQUEST-942-APPLICATION-ATTACK-SQLI.conf" name String category String - description String? @db.Text - enabled Boolean @default(true) - paranoia Int @default(1) // Paranoia level 1-4 - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + description String? @db.Text + enabled Boolean @default(true) + paranoia Int @default(1) // Paranoia level 1-4 + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@unique([ruleFile, domainId]) @@index([domainId]) @@ -281,18 +281,18 @@ model ModSecCRSRule { // ModSecurity Custom Rules (kept from original, renamed table) // Stores full rule content for user-defined rules model ModSecRule { - id String @id @default(cuid()) - domainId String? - domain Domain? @relation(fields: [domainId], references: [id], onDelete: Cascade) - + id String @id @default(cuid()) + domainId String? + domain Domain? @relation(fields: [domainId], references: [id], onDelete: Cascade) + name String category String - ruleContent String @db.Text - enabled Boolean @default(true) - description String? @db.Text - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + ruleContent String @db.Text + enabled Boolean @default(true) + description String? @db.Text + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@index([domainId]) @@index([category]) @@ -300,30 +300,30 @@ model ModSecRule { } model NginxConfig { - id String @id @default(cuid()) - configType String // main, site, upstream, etc. - name String - content String @db.Text - enabled Boolean @default(true) - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + id String @id @default(cuid()) + configType String // main, site, upstream, etc. + name String + content String @db.Text + enabled Boolean @default(true) + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@index([configType]) @@map("nginx_configs") } model InstallationStatus { - id String @id @default(cuid()) - component String @unique // nginx, modsecurity, etc. - status String // pending, running, completed, failed - step String? - message String? @db.Text - progress Int @default(0) // 0-100 - - startedAt DateTime @default(now()) + id String @id @default(cuid()) + component String @unique // nginx, modsecurity, etc. + status String // pending, running, completed, failed + step String? + message String? @db.Text + progress Int @default(0) // 0-100 + + startedAt DateTime @default(now()) completedAt DateTime? - updatedAt DateTime @updatedAt + updatedAt DateTime @updatedAt @@map("installation_status") } @@ -340,46 +340,46 @@ enum AlertSeverity { } model NotificationChannel { - id String @id @default(cuid()) - name String - type NotificationChannelType - enabled Boolean @default(true) - config Json // { email?, chatId?, botToken? } - + id String @id @default(cuid()) + name String + type NotificationChannelType + enabled Boolean @default(true) + config Json // { email?, chatId?, botToken? 
} + alertRules AlertRuleChannel[] - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@map("notification_channels") } model AlertRule { - id String @id @default(cuid()) - name String - condition String // cpu > threshold, upstream_status == down, etc. + id String @id @default(cuid()) + name String + condition String // cpu > threshold, upstream_status == down, etc. threshold Int severity AlertSeverity - enabled Boolean @default(true) - checkInterval Int @default(60) // Check interval in seconds (default: 60s) - - channels AlertRuleChannel[] - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + enabled Boolean @default(true) + checkInterval Int @default(60) // Check interval in seconds (default: 60s) + + channels AlertRuleChannel[] + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@map("alert_rules") } model AlertRuleChannel { - id String @id @default(cuid()) + id String @id @default(cuid()) ruleId String channelId String - - rule AlertRule @relation(fields: [ruleId], references: [id], onDelete: Cascade) - channel NotificationChannel @relation(fields: [channelId], references: [id], onDelete: Cascade) - - createdAt DateTime @default(now()) + + rule AlertRule @relation(fields: [ruleId], references: [id], onDelete: Cascade) + channel NotificationChannel @relation(fields: [channelId], references: [id], onDelete: Cascade) + + createdAt DateTime @default(now()) @@unique([ruleId, channelId]) @@index([ruleId]) @@ -388,16 +388,16 @@ model AlertRuleChannel { } model AlertHistory { - id String @id @default(cuid()) - severity AlertSeverity - message String @db.Text - source String - acknowledged Boolean @default(false) + id String @id @default(cuid()) + severity AlertSeverity + message String @db.Text + source String + acknowledged Boolean @default(false) acknowledgedBy String? acknowledgedAt DateTime? - - timestamp DateTime @default(now()) - createdAt DateTime @default(now()) + + timestamp DateTime @default(now()) + createdAt DateTime @default(now()) @@index([severity]) @@index([acknowledged]) @@ -440,27 +440,27 @@ model AclRule { conditionValue String action AclAction enabled Boolean @default(true) - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@map("acl_rules") } model PerformanceMetric { - id String @id @default(cuid()) - domain String - timestamp DateTime @default(now()) - responseTime Float - throughput Float - errorRate Float - requestCount Int - - createdAt DateTime @default(now()) + id String @id @default(cuid()) + domain String + timestamp DateTime @default(now()) + responseTime Float + throughput Float + errorRate Float + requestCount Int + + createdAt DateTime @default(now()) - @@map("performance_metrics") @@index([domain, timestamp]) @@index([timestamp]) + @@map("performance_metrics") } enum BackupStatus { @@ -471,37 +471,37 @@ enum BackupStatus { } model BackupSchedule { - id String @id @default(cuid()) - name String - schedule String // Cron expression - enabled Boolean @default(true) - lastRun DateTime? - nextRun DateTime? - status BackupStatus @default(pending) - - backups BackupFile[] - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt + id String @id @default(cuid()) + name String + schedule String // Cron expression + enabled Boolean @default(true) + lastRun DateTime? + nextRun DateTime? 
+ status BackupStatus @default(pending) + + backups BackupFile[] + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt @@map("backup_schedules") } model BackupFile { - id String @id @default(cuid()) - scheduleId String? - schedule BackupSchedule? @relation(fields: [scheduleId], references: [id], onDelete: SetNull) - - filename String - filepath String - size BigInt // Size in bytes - status BackupStatus @default(success) - type String @default("full") // full, incremental, manual - - metadata Json? // Additional metadata (domains count, rules count, etc.) - - createdAt DateTime @default(now()) - + id String @id @default(cuid()) + scheduleId String? + schedule BackupSchedule? @relation(fields: [scheduleId], references: [id], onDelete: SetNull) + + filename String + filepath String + size BigInt // Size in bytes + status BackupStatus @default(success) + type String @default("full") // full, incremental, manual + + metadata Json? // Additional metadata (domains count, rules count, etc.) + + createdAt DateTime @default(now()) + @@index([scheduleId]) @@index([createdAt]) @@map("backup_files") @@ -533,95 +533,95 @@ enum NodeMode { } model SlaveNode { - id String @id @default(cuid()) - name String @unique - host String - port Int @default(3001) - apiKey String @unique // Authentication token for slave - - status SlaveNodeStatus @default(offline) - lastSeen DateTime? - version String? - + id String @id @default(cuid()) + name String @unique + host String + port Int @default(3001) + apiKey String @unique // Authentication token for slave + + status SlaveNodeStatus @default(offline) + lastSeen DateTime? + version String? + // Sync configuration - syncEnabled Boolean @default(true) - syncInterval Int @default(60) // seconds - configHash String? // SHA256 hash of current config - lastSyncAt DateTime? - + syncEnabled Boolean @default(true) + syncInterval Int @default(60) // seconds + configHash String? // SHA256 hash of current config + lastSyncAt DateTime? + // Metrics - latency Int? // milliseconds - cpuUsage Float? - memoryUsage Float? - diskUsage Float? - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt - - syncLogs SyncLog[] - + latency Int? // milliseconds + cpuUsage Float? + memoryUsage Float? + diskUsage Float? + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + + syncLogs SyncLog[] + @@index([status]) @@index([lastSeen]) @@map("slave_nodes") } model SystemConfig { - id String @id @default(cuid()) - nodeMode NodeMode @default(master) // master or slave - + id String @id @default(cuid()) + nodeMode NodeMode @default(master) // master or slave + // Master mode settings - masterApiEnabled Boolean @default(true) - + masterApiEnabled Boolean @default(true) + // Slave mode settings - slaveApiEnabled Boolean @default(false) - masterHost String? // IP of master node - masterPort Int? // Port of master node - masterApiKey String? // API key to connect to master - syncInterval Int @default(60) // Sync interval in seconds (for slave mode) - lastSyncHash String? // Hash of last synced config (for change detection) - + slaveApiEnabled Boolean @default(false) + masterHost String? // IP of master node + masterPort Int? // Port of master node + masterApiKey String? // API key to connect to master + syncInterval Int @default(60) // Sync interval in seconds (for slave mode) + lastSyncHash String? // Hash of last synced config (for change detection) + // Connection status (for slave mode) - connected Boolean @default(false) - lastConnectedAt DateTime? 
- connectionError String? - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt - + connected Boolean @default(false) + lastConnectedAt DateTime? + connectionError String? + + createdAt DateTime @default(now()) + updatedAt DateTime @updatedAt + @@map("system_configs") } model SyncLog { - id String @id @default(cuid()) - nodeId String - node SlaveNode @relation(fields: [nodeId], references: [id], onDelete: Cascade) - - type SyncLogType - status SyncLogStatus @default(running) - - configHash String? - changesCount Int? - errorMessage String? @db.Text - - startedAt DateTime @default(now()) - completedAt DateTime? - duration Int? // milliseconds - + id String @id @default(cuid()) + nodeId String + node SlaveNode @relation(fields: [nodeId], references: [id], onDelete: Cascade) + + type SyncLogType + status SyncLogStatus @default(running) + + configHash String? + changesCount Int? + errorMessage String? @db.Text + + startedAt DateTime @default(now()) + completedAt DateTime? + duration Int? // milliseconds + @@index([nodeId, startedAt]) @@map("sync_logs") } model ConfigVersion { - id String @id @default(cuid()) - version Int @default(autoincrement()) - configHash String @unique - configData Json // Serialized config - + id String @id @default(cuid()) + version Int @default(autoincrement()) + configHash String @unique + configData Json // Serialized config + createdBy String? description String? - - createdAt DateTime @default(now()) - + + createdAt DateTime @default(now()) + @@index([createdAt]) @@map("config_versions") } diff --git a/apps/api/src/controllers/node-sync.controller.ts b/apps/api/src/controllers/node-sync.controller.ts index 0484210..8999d1e 100644 --- a/apps/api/src/controllers/node-sync.controller.ts +++ b/apps/api/src/controllers/node-sync.controller.ts @@ -125,37 +125,59 @@ export const importFromMaster = async (req: AuthRequest, res: Response): Promise }; /** - * Collect sync data (NO timestamps, NO IDs - only actual config) + * Get current config hash of slave node + */ +export const getCurrentConfigHash = async (req: AuthRequest, res: Response) => { + try { + const currentConfig = await collectSyncData(); + const configString = JSON.stringify(currentConfig); + const hash = crypto.createHash('sha256').update(configString).digest('hex'); + + logger.info('[NODE-SYNC] Current config hash calculated', { hash }); + + res.json({ + success: true, + data: { hash } + }); + } catch (error: any) { + logger.error('[NODE-SYNC] Get current hash error:', error); + res.status(500).json({ + success: false, + message: error.message || 'Failed to calculate current config hash' + }); + } +}; + +/** + * Collect sync data (NO timestamps for stable hash) */ async function collectSyncData() { - const [domains, ssl, modsecCRS, modsecCustom, acl, users] = await Promise.all([ - prisma.domain.findMany({ - include: { - upstreams: true, - loadBalancer: true - } - }), - prisma.sSLCertificate.findMany({ - include: { - domain: true - } - }), - prisma.modSecCRSRule.findMany(), - prisma.modSecRule.findMany(), - prisma.aclRule.findMany(), - prisma.user.findMany() - ]); + const domains = await prisma.domain.findMany({ + include: { + upstreams: true, + loadBalancer: true + } + }); + + const ssl = await prisma.sSLCertificate.findMany({ + include: { + domain: true + } + }); + + const modsecCRS = await prisma.modSecCRSRule.findMany(); + const modsecCustom = await prisma.modSecRule.findMany(); + const acl = await prisma.aclRule.findMany(); + const users = await prisma.user.findMany(); return { - 
version: '1.0-sync', - // Domains (NO timestamps, NO IDs) domains: domains.map(d => ({ name: d.name, status: d.status, sslEnabled: d.sslEnabled, modsecEnabled: d.modsecEnabled, - upstreams: d.upstreams?.map(u => ({ + upstreams: d.upstreams.map(u => ({ host: u.host, port: u.port, protocol: u.protocol, @@ -163,7 +185,7 @@ async function collectSyncData() { weight: u.weight, maxFails: u.maxFails, failTimeout: u.failTimeout - })) || [], + })), loadBalancer: d.loadBalancer ? { algorithm: d.loadBalancer.algorithm, healthCheckEnabled: d.loadBalancer.healthCheckEnabled, @@ -172,7 +194,7 @@ async function collectSyncData() { healthCheckTimeout: d.loadBalancer.healthCheckTimeout } : null })), - + // SSL Certificates (NO timestamps, NO IDs) ssl: ssl.map(s => ({ domainName: s.domain?.name, @@ -182,12 +204,12 @@ async function collectSyncData() { certificate: s.certificate, privateKey: s.privateKey, chain: s.chain, - validFrom: s.validFrom, - validTo: s.validTo, - autoRenew: s.autoRenew + autoRenew: s.autoRenew, + validFrom: s.validFrom.toISOString(), + validTo: s.validTo.toISOString() })), - - // ModSecurity CRS Rules + + // ModSecurity CRS Rules (NO timestamps, NO IDs) modsecCRS: modsecCRS.map(r => ({ ruleFile: r.ruleFile, name: r.name, @@ -196,27 +218,27 @@ async function collectSyncData() { enabled: r.enabled, paranoia: r.paranoia })), - - // ModSecurity Custom Rules + + // ModSecurity Custom Rules (NO timestamps, NO IDs) modsecCustom: modsecCustom.map(r => ({ name: r.name, category: r.category, ruleContent: r.ruleContent, - enabled: r.enabled, - description: r.description - })), - - // ACL Rules - acl: acl.map(r => ({ - name: r.name, - type: r.type, - conditionField: r.conditionField, - conditionOperator: r.conditionOperator, - conditionValue: r.conditionValue, - action: r.action, + description: r.description, enabled: r.enabled })), - + + // ACL (NO timestamps, NO IDs) + acl: acl.map(a => ({ + name: a.name, + type: a.type, + conditionField: a.conditionField, + conditionOperator: a.conditionOperator, + conditionValue: a.conditionValue, + action: a.action, + enabled: a.enabled + })), + // Users (NO timestamps, NO IDs, keep password hashes) users: users.map(u => ({ email: u.email, diff --git a/apps/api/src/controllers/system-config.controller.ts b/apps/api/src/controllers/system-config.controller.ts index 50d09dd..b11ef85 100644 --- a/apps/api/src/controllers/system-config.controller.ts +++ b/apps/api/src/controllers/system-config.controller.ts @@ -388,41 +388,57 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { throw new Error(response.data.message || 'Failed to export config from master'); } - const { hash, config: masterConfig } = response.data.data; + const { hash: masterHash, config: masterConfig } = response.data.data; - logger.info('Downloaded config from master', { - hash, - lastKnownHash: config.lastSyncHash || 'none' + // Calculate CURRENT hash of slave's config (to detect data loss) + const slaveCurrentConfigResponse = await axios.get( + `http://localhost:${process.env.PORT || 3001}/api/node-sync/current-hash`, + { + headers: { + 'Authorization': req.headers.authorization || '' + } + } + ); + + const slaveCurrentHash = slaveCurrentConfigResponse.data.data?.hash || null; + + logger.info('Comparing slave current config with master', { + masterHash, + slaveCurrentHash, + lastSyncHash: config.lastSyncHash || 'none' }); - // Check if config changed (compare hashes) - if (config.lastSyncHash && config.lastSyncHash === hash) { - logger.info('Config unchanged (hash 
match), skipping import'); + // Compare CURRENT slave hash with master hash + if (slaveCurrentHash && slaveCurrentHash === masterHash) { + logger.info('Config identical (hash match), skipping import'); - // Update lastConnectedAt anyway + // Update lastConnectedAt and lastSyncHash await prisma.systemConfig.update({ where: { id: config.id }, data: { - lastConnectedAt: new Date() + lastConnectedAt: new Date(), + lastSyncHash: masterHash } }); return res.json({ success: true, - message: 'Configuration already up to date (no changes detected)', + message: 'Configuration already synchronized (no changes detected)', data: { imported: false, - hash, + masterHash, + slaveHash: slaveCurrentHash, changesApplied: 0, lastSyncAt: new Date().toISOString() } }); } - // Hash changed → Import config - logger.info('Config changed (hash mismatch), importing...', { - oldHash: config.lastSyncHash || 'none', - newHash: hash + // Hash different → Force sync (data loss or master updated) + logger.info('Config mismatch detected, force syncing...', { + masterHash, + slaveCurrentHash: slaveCurrentHash || 'null', + reason: !slaveCurrentHash ? 'slave_empty' : 'data_mismatch' }); // Extract JWT token from request @@ -433,7 +449,7 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { const importResponse = await axios.post( `http://localhost:${process.env.PORT || 3001}/api/node-sync/import`, { - hash, + hash: masterHash, config: masterConfig }, { @@ -453,7 +469,7 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { await prisma.systemConfig.update({ where: { id: config.id }, data: { - lastSyncHash: hash, + lastSyncHash: masterHash, lastConnectedAt: new Date() } }); @@ -462,10 +478,11 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { res.json({ success: true, - message: 'Sync completed successfully', + message: 'Configuration synchronized successfully', data: { imported: true, - hash, + masterHash, + slaveHash: slaveCurrentHash, changesApplied: importData.changes, details: importData.details, lastSyncAt: new Date().toISOString() diff --git a/apps/api/src/routes/node-sync.routes.ts b/apps/api/src/routes/node-sync.routes.ts index cfb7a14..4d8dc08 100644 --- a/apps/api/src/routes/node-sync.routes.ts +++ b/apps/api/src/routes/node-sync.routes.ts @@ -1,5 +1,5 @@ import express from 'express'; -import { exportForSync, importFromMaster } from '../controllers/node-sync.controller'; +import { exportForSync, importFromMaster, getCurrentConfigHash } from '../controllers/node-sync.controller'; import { authenticate } from '../middleware/auth'; import { validateMasterApiKey } from '../middleware/slaveAuth'; @@ -17,4 +17,10 @@ router.get('/export', validateMasterApiKey, exportForSync); */ router.post('/import', authenticate, importFromMaster); +/** + * Get current config hash (requires user auth) + * GET /api/node-sync/current-hash + */ +router.get('/current-hash', authenticate, getCurrentConfigHash); + export default router; From 48e87ef7b02233653105d794a054a4cf4f69dcad Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 13:16:29 +0000 Subject: [PATCH 20/32] feat: Enhance SSRF protection with URL validation and response checks --- .../controllers/system-config.controller.ts | 69 ++++-- apps/api/src/utils/url-validator.ts | 203 ++++++++++++++++++ 2 files changed, 252 insertions(+), 20 deletions(-) create mode 100644 apps/api/src/utils/url-validator.ts diff --git a/apps/api/src/controllers/system-config.controller.ts 
b/apps/api/src/controllers/system-config.controller.ts index b11ef85..317e41d 100644 --- a/apps/api/src/controllers/system-config.controller.ts +++ b/apps/api/src/controllers/system-config.controller.ts @@ -3,6 +3,7 @@ import { AuthRequest } from '../middleware/auth'; import prisma from '../config/database'; import logger from '../utils/logger'; import axios from 'axios'; +import { validateMasterUrl, constructSafeUrl, validateHealthResponse, validateConfigExportResponse } from '../utils/url-validator'; /** * Get system configuration (node mode, }); @@ -137,19 +138,32 @@ export const connectToMaster = async (req: AuthRequest, res: Response) => { }); } + // Validate master URL format (prevent injection, allow LAN IPs) + const urlValidation = validateMasterUrl(masterHost, parseInt(masterPort.toString())); + if (!urlValidation.isValid) { + return res.status(400).json({ + success: false, + message: `Invalid master configuration: ${urlValidation.error}` + }); + } + // Test connection to master try { logger.info('Testing connection to master...', { masterHost, masterPort }); - const response = await axios.get( - `http://${masterHost}:${masterPort}/api/slave/health`, - { - headers: { - 'X-API-Key': masterApiKey - }, - timeout: 10000 - } - ); + const healthUrl = constructSafeUrl(masterHost, parseInt(masterPort.toString()), '/api/slave/health'); + const response = await axios.get(healthUrl!, { + headers: { + 'X-API-Key': masterApiKey + }, + timeout: 10000 + }); + + // SSRF Protection: Validate response data + const responseValidation = validateHealthResponse(response.data); + if (!responseValidation.isValid) { + throw new Error(`Invalid response from master: ${responseValidation.error}`); + } if (!response.data.success) { throw new Error('Master health check failed'); @@ -287,17 +301,22 @@ export const testMasterConnection = async (req: AuthRequest, res: Response) => { // Test connection const startTime = Date.now(); - const response = await axios.get( - `http://${config.masterHost}:${config.masterPort}/api/slave/health`, - { - headers: { - 'X-API-Key': config.masterApiKey - }, - timeout: 10000 - } - ); + + const healthUrl = constructSafeUrl(config.masterHost!, config.masterPort!, '/api/slave/health'); + const response = await axios.get(healthUrl!, { + headers: { + 'X-API-Key': config.masterApiKey + }, + timeout: 10000 + }); const latency = Date.now() - startTime; + // SSRF Protection: Validate response data + const responseValidation = validateHealthResponse(response.data); + if (!responseValidation.isValid) { + throw new Error(`Invalid response from master: ${responseValidation.error}`); + } + // Update config await prisma.systemConfig.update({ where: { id: config.id }, @@ -375,9 +394,13 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { }); // Download config from master using new node-sync API - const masterUrl = `http://${config.masterHost}:${config.masterPort || 3001}/api/node-sync/export`; + const masterUrl = constructSafeUrl( + config.masterHost!, + config.masterPort || 3001, + '/api/node-sync/export' + ); - const response = await axios.get(masterUrl, { + const response = await axios.get(masterUrl!, { headers: { 'X-Slave-API-Key': config.masterApiKey }, @@ -388,6 +411,12 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { throw new Error(response.data.message || 'Failed to export config from master'); } + // SSRF Protection: Validate response data structure + const exportValidation = validateConfigExportResponse(response.data.data); + if 
(!exportValidation.isValid) { + throw new Error(`Invalid config export response: ${exportValidation.error}`); + } + const { hash: masterHash, config: masterConfig } = response.data.data; // Calculate CURRENT hash of slave's config (to detect data loss) diff --git a/apps/api/src/utils/url-validator.ts b/apps/api/src/utils/url-validator.ts new file mode 100644 index 0000000..85b8675 --- /dev/null +++ b/apps/api/src/utils/url-validator.ts @@ -0,0 +1,203 @@ +import logger from './logger'; + +/** + * SSRF Protection Strategy: + * - Allow ALL IPs (including private/LAN for internal deployments) + * - Validate RESPONSE data instead of blocking IPs + * - Use strict timeout and response schema validation + * + * Why not block private IPs? + * - LAN deployments use 192.168.x.x, 10.x.x.x + * - Cannot predict IP when deploying new servers + * - LAN is faster and safer than WAN + */ + +/** + * Validate host format (allow all valid IPs including private) + * Only block obvious injection attempts + */ +export function validateHost(host: string): boolean { + if (!host || typeof host !== 'string') { + return false; + } + + // Remove whitespace + host = host.trim(); + + // Block only URL injection attempts (not private IPs) + const injectionPatterns = [ + /@/, // username@host injection + /\s/, // whitespace injection + /javascript:/i, // javascript: protocol + /data:/i, // data: protocol + /file:/i, // file: protocol + /[<>'"]/, // HTML/SQL injection + ]; + + for (const pattern of injectionPatterns) { + if (pattern.test(host)) { + logger.warn('[URL-VALIDATOR] Blocked injection attempt', { host }); + return false; + } + } + + // Validate format: IPv4, IPv6, or domain name + const ipv4Pattern = /^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/; + const ipv6Pattern = /^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}$/i; + const domainPattern = /^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)*[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?$/i; + + if (!ipv4Pattern.test(host) && !ipv6Pattern.test(host) && !domainPattern.test(host)) { + logger.warn('[URL-VALIDATOR] Invalid host format', { host }); + return false; + } + + return true; +} + +/** + * Validate port number + */ +export function validatePort(port: number): boolean { + return Number.isInteger(port) && port > 0 && port <= 65535; +} + +/** + * Construct safe URL with validation + */ +export function constructSafeUrl( + host: string, + port: number, + path: string = '' +): string | null { + // Validate host format (not blocking private IPs) + if (!validateHost(host)) { + logger.error('[URL-VALIDATOR] Invalid host format', { host }); + throw new Error('Invalid host format'); + } + + // Validate port + if (!validatePort(port)) { + logger.error('[URL-VALIDATOR] Invalid port', { port }); + throw new Error('Invalid port number'); + } + + // Sanitize path (prevent path traversal) + if (path) { + path = path.replace(/\.\./g, '').replace(/\/\//g, '/'); + if (!path.startsWith('/')) { + path = '/' + path; + } + } + + const url = `http://${host}:${port}${path}`; + logger.debug('[URL-VALIDATOR] URL constructed', { url }); + + return url; +} + +/** + * Validate master URL and return validation result + */ +export function validateMasterUrl( + host: string, + port: number +): { isValid: boolean; error?: string } { + try { + if (!validateHost(host)) { + return { + isValid: false, + error: 'Invalid host format or injection attempt detected' + }; + } + + if (!validatePort(port)) { + return { + isValid: false, + error: 'Port must be between 1 and 65535' + 
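+      // validatePort() above also rejects NaN and floats via Number.isInteger,
+      // so only real TCP ports (1-65535) ever reach URL construction.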
}; + } + + return { isValid: true }; + } catch (error) { + return { + isValid: false, + error: error instanceof Error ? error.message : 'Validation failed' + }; + } +} + +/** + * Validate config export response from master + * This is the MAIN SSRF protection - validate response data + */ +export function validateConfigExportResponse(data: any): { isValid: boolean; error?: string } { + try { + // Must be object + if (!data || typeof data !== 'object') { + return { isValid: false, error: 'Response must be an object' }; + } + + // Must have required fields + if (!data.hasOwnProperty('hash') || !data.hasOwnProperty('timestamp') || !data.hasOwnProperty('config')) { + return { isValid: false, error: 'Missing required fields: hash, timestamp, config' }; + } + + // Hash must be string (SHA256 = 64 chars) + if (typeof data.hash !== 'string' || data.hash.length !== 64) { + return { isValid: false, error: 'Invalid hash format' }; + } + + // Timestamp must be valid date string + if (typeof data.timestamp !== 'string' || isNaN(Date.parse(data.timestamp))) { + return { isValid: false, error: 'Invalid timestamp format' }; + } + + // Config must be object with expected structure + const config = data.config; + if (!config || typeof config !== 'object') { + return { isValid: false, error: 'Config must be an object' }; + } + + // Config must have array fields + const requiredArrays = ['domains', 'modsecCRSRules', 'modsecCustomRules', 'aclRules', 'sslCertificates']; + for (const field of requiredArrays) { + if (!Array.isArray(config[field])) { + return { isValid: false, error: `Config.${field} must be an array` }; + } + } + + return { isValid: true }; + } catch (error) { + return { + isValid: false, + error: error instanceof Error ? error.message : 'Response validation failed' + }; + } +} + +/** + * Validate health check response + */ +export function validateHealthResponse(data: any): { isValid: boolean; error?: string } { + try { + if (!data || typeof data !== 'object') { + return { isValid: false, error: 'Response must be an object' }; + } + + // Health check should return status and message + if (!data.hasOwnProperty('status')) { + return { isValid: false, error: 'Missing status field' }; + } + + if (typeof data.status !== 'string') { + return { isValid: false, error: 'Status must be a string' }; + } + + return { isValid: true }; + } catch (error) { + return { + isValid: false, + error: error instanceof Error ? 
error.message : 'Health response validation failed' + }; + } +} From 335bf710c5aa723b366c67aa7e4a4e6d2431487a Mon Sep 17 00:00:00 2001 From: vncloudsco Date: Mon, 6 Oct 2025 14:14:57 +0000 Subject: [PATCH 21/32] refactor: Rename variables for SSL certificates, ModSecurity rules, and ACLs for clarity; remove unused URL validation utility --- .../src/controllers/node-sync.controller.ts | 24 +-- .../controllers/system-config.controller.ts | 72 +++---- apps/api/src/utils/url-validator.ts | 203 ------------------ 3 files changed, 37 insertions(+), 262 deletions(-) delete mode 100644 apps/api/src/utils/url-validator.ts diff --git a/apps/api/src/controllers/node-sync.controller.ts b/apps/api/src/controllers/node-sync.controller.ts index 8999d1e..e837509 100644 --- a/apps/api/src/controllers/node-sync.controller.ts +++ b/apps/api/src/controllers/node-sync.controller.ts @@ -196,7 +196,7 @@ async function collectSyncData() { })), // SSL Certificates (NO timestamps, NO IDs) - ssl: ssl.map(s => ({ + sslCertificates: ssl.map(s => ({ domainName: s.domain?.name, commonName: s.commonName, sans: s.sans, @@ -210,7 +210,7 @@ async function collectSyncData() { })), // ModSecurity CRS Rules (NO timestamps, NO IDs) - modsecCRS: modsecCRS.map(r => ({ + modsecCRSRules: modsecCRS.map(r => ({ ruleFile: r.ruleFile, name: r.name, category: r.category, @@ -220,7 +220,7 @@ async function collectSyncData() { })), // ModSecurity Custom Rules (NO timestamps, NO IDs) - modsecCustom: modsecCustom.map(r => ({ + modsecCustomRules: modsecCustom.map(r => ({ name: r.name, category: r.category, ruleContent: r.ruleContent, @@ -229,7 +229,7 @@ async function collectSyncData() { })), // ACL (NO timestamps, NO IDs) - acl: acl.map(a => ({ + aclRules: acl.map(a => ({ name: a.name, type: a.type, conditionField: a.conditionField, @@ -337,8 +337,8 @@ async function importSyncConfig(config: any) { } // 2. Import SSL Certificates - if (config.ssl && Array.isArray(config.ssl)) { - for (const sslData of config.ssl) { + if (config.sslCertificates && Array.isArray(config.sslCertificates)) { + for (const sslData of config.sslCertificates) { try { const domain = await prisma.domain.findUnique({ where: { name: sslData.domainName } @@ -380,10 +380,10 @@ async function importSyncConfig(config: any) { } // 3. Import ModSecurity CRS Rules - if (config.modsecCRS && Array.isArray(config.modsecCRS)) { + if (config.modsecCRSRules && Array.isArray(config.modsecCRSRules)) { await prisma.modSecCRSRule.deleteMany({}); - for (const rule of config.modsecCRS) { + for (const rule of config.modsecCRSRules) { await prisma.modSecCRSRule.create({ data: { ruleFile: rule.ruleFile, @@ -399,10 +399,10 @@ async function importSyncConfig(config: any) { } // 4. Import ModSecurity Custom Rules - if (config.modsecCustom && Array.isArray(config.modsecCustom)) { + if (config.modsecCustomRules && Array.isArray(config.modsecCustomRules)) { await prisma.modSecRule.deleteMany({}); - for (const rule of config.modsecCustom) { + for (const rule of config.modsecCustomRules) { await prisma.modSecRule.create({ data: { name: rule.name, @@ -417,10 +417,10 @@ async function importSyncConfig(config: any) { } // 5. 
Import ACL Rules - if (config.acl && Array.isArray(config.acl)) { + if (config.aclRules && Array.isArray(config.aclRules)) { await prisma.aclRule.deleteMany({}); - for (const rule of config.acl) { + for (const rule of config.aclRules) { await prisma.aclRule.create({ data: { name: rule.name, diff --git a/apps/api/src/controllers/system-config.controller.ts b/apps/api/src/controllers/system-config.controller.ts index 317e41d..f546db2 100644 --- a/apps/api/src/controllers/system-config.controller.ts +++ b/apps/api/src/controllers/system-config.controller.ts @@ -3,7 +3,6 @@ import { AuthRequest } from '../middleware/auth'; import prisma from '../config/database'; import logger from '../utils/logger'; import axios from 'axios'; -import { validateMasterUrl, constructSafeUrl, validateHealthResponse, validateConfigExportResponse } from '../utils/url-validator'; /** * Get system configuration (node mode, }); @@ -138,32 +137,19 @@ export const connectToMaster = async (req: AuthRequest, res: Response) => { }); } - // Validate master URL format (prevent injection, allow LAN IPs) - const urlValidation = validateMasterUrl(masterHost, parseInt(masterPort.toString())); - if (!urlValidation.isValid) { - return res.status(400).json({ - success: false, - message: `Invalid master configuration: ${urlValidation.error}` - }); - } - // Test connection to master try { logger.info('Testing connection to master...', { masterHost, masterPort }); - const healthUrl = constructSafeUrl(masterHost, parseInt(masterPort.toString()), '/api/slave/health'); - const response = await axios.get(healthUrl!, { - headers: { - 'X-API-Key': masterApiKey - }, - timeout: 10000 - }); - - // SSRF Protection: Validate response data - const responseValidation = validateHealthResponse(response.data); - if (!responseValidation.isValid) { - throw new Error(`Invalid response from master: ${responseValidation.error}`); - } + const response = await axios.get( + `http://${masterHost}:${masterPort}/api/slave/health`, + { + headers: { + 'X-API-Key': masterApiKey + }, + timeout: 10000 + } + ); if (!response.data.success) { throw new Error('Master health check failed'); @@ -301,22 +287,17 @@ export const testMasterConnection = async (req: AuthRequest, res: Response) => { // Test connection const startTime = Date.now(); - - const healthUrl = constructSafeUrl(config.masterHost!, config.masterPort!, '/api/slave/health'); - const response = await axios.get(healthUrl!, { - headers: { - 'X-API-Key': config.masterApiKey - }, - timeout: 10000 - }); + const response = await axios.get( + `http://${config.masterHost}:${config.masterPort}/api/slave/health`, + { + headers: { + 'X-API-Key': config.masterApiKey + }, + timeout: 10000 + } + ); const latency = Date.now() - startTime; - // SSRF Protection: Validate response data - const responseValidation = validateHealthResponse(response.data); - if (!responseValidation.isValid) { - throw new Error(`Invalid response from master: ${responseValidation.error}`); - } - // Update config await prisma.systemConfig.update({ where: { id: config.id }, @@ -365,6 +346,8 @@ export const testMasterConnection = async (req: AuthRequest, res: Response) => { */ export const syncWithMaster = async (req: AuthRequest, res: Response) => { try { + logger.info('========== SYNC WITH MASTER CALLED =========='); + const config = await prisma.systemConfig.findFirst(); if (!config) { @@ -394,13 +377,9 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { }); // Download config from master using new node-sync API - const 
masterUrl = constructSafeUrl( - config.masterHost!, - config.masterPort || 3001, - '/api/node-sync/export' - ); + const masterUrl = `http://${config.masterHost}:${config.masterPort || 3001}/api/node-sync/export`; - const response = await axios.get(masterUrl!, { + const response = await axios.get(masterUrl, { headers: { 'X-Slave-API-Key': config.masterApiKey }, @@ -411,10 +390,9 @@ export const syncWithMaster = async (req: AuthRequest, res: Response) => { throw new Error(response.data.message || 'Failed to export config from master'); } - // SSRF Protection: Validate response data structure - const exportValidation = validateConfigExportResponse(response.data.data); - if (!exportValidation.isValid) { - throw new Error(`Invalid config export response: ${exportValidation.error}`); + // Basic validation: check if response has required structure + if (!response.data.data || !response.data.data.hash || !response.data.data.config) { + throw new Error('Invalid response structure from master'); } const { hash: masterHash, config: masterConfig } = response.data.data; diff --git a/apps/api/src/utils/url-validator.ts b/apps/api/src/utils/url-validator.ts deleted file mode 100644 index 85b8675..0000000 --- a/apps/api/src/utils/url-validator.ts +++ /dev/null @@ -1,203 +0,0 @@ -import logger from './logger'; - -/** - * SSRF Protection Strategy: - * - Allow ALL IPs (including private/LAN for internal deployments) - * - Validate RESPONSE data instead of blocking IPs - * - Use strict timeout and response schema validation - * - * Why not block private IPs? - * - LAN deployments use 192.168.x.x, 10.x.x.x - * - Cannot predict IP when deploying new servers - * - LAN is faster and safer than WAN - */ - -/** - * Validate host format (allow all valid IPs including private) - * Only block obvious injection attempts - */ -export function validateHost(host: string): boolean { - if (!host || typeof host !== 'string') { - return false; - } - - // Remove whitespace - host = host.trim(); - - // Block only URL injection attempts (not private IPs) - const injectionPatterns = [ - /@/, // username@host injection - /\s/, // whitespace injection - /javascript:/i, // javascript: protocol - /data:/i, // data: protocol - /file:/i, // file: protocol - /[<>'"]/, // HTML/SQL injection - ]; - - for (const pattern of injectionPatterns) { - if (pattern.test(host)) { - logger.warn('[URL-VALIDATOR] Blocked injection attempt', { host }); - return false; - } - } - - // Validate format: IPv4, IPv6, or domain name - const ipv4Pattern = /^(?:(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)\.){3}(?:25[0-5]|2[0-4][0-9]|[01]?[0-9][0-9]?)$/; - const ipv6Pattern = /^([0-9a-f]{0,4}:){2,7}[0-9a-f]{0,4}$/i; - const domainPattern = /^(?:[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?\.)*[a-z0-9](?:[a-z0-9-]{0,61}[a-z0-9])?$/i; - - if (!ipv4Pattern.test(host) && !ipv6Pattern.test(host) && !domainPattern.test(host)) { - logger.warn('[URL-VALIDATOR] Invalid host format', { host }); - return false; - } - - return true; -} - -/** - * Validate port number - */ -export function validatePort(port: number): boolean { - return Number.isInteger(port) && port > 0 && port <= 65535; -} - -/** - * Construct safe URL with validation - */ -export function constructSafeUrl( - host: string, - port: number, - path: string = '' -): string | null { - // Validate host format (not blocking private IPs) - if (!validateHost(host)) { - logger.error('[URL-VALIDATOR] Invalid host format', { host }); - throw new Error('Invalid host format'); - } - - // Validate port - if (!validatePort(port)) { - 
logger.error('[URL-VALIDATOR] Invalid port', { port }); - throw new Error('Invalid port number'); - } - - // Sanitize path (prevent path traversal) - if (path) { - path = path.replace(/\.\./g, '').replace(/\/\//g, '/'); - if (!path.startsWith('/')) { - path = '/' + path; - } - } - - const url = `http://${host}:${port}${path}`; - logger.debug('[URL-VALIDATOR] URL constructed', { url }); - - return url; -} - -/** - * Validate master URL and return validation result - */ -export function validateMasterUrl( - host: string, - port: number -): { isValid: boolean; error?: string } { - try { - if (!validateHost(host)) { - return { - isValid: false, - error: 'Invalid host format or injection attempt detected' - }; - } - - if (!validatePort(port)) { - return { - isValid: false, - error: 'Port must be between 1 and 65535' - }; - } - - return { isValid: true }; - } catch (error) { - return { - isValid: false, - error: error instanceof Error ? error.message : 'Validation failed' - }; - } -} - -/** - * Validate config export response from master - * This is the MAIN SSRF protection - validate response data - */ -export function validateConfigExportResponse(data: any): { isValid: boolean; error?: string } { - try { - // Must be object - if (!data || typeof data !== 'object') { - return { isValid: false, error: 'Response must be an object' }; - } - - // Must have required fields - if (!data.hasOwnProperty('hash') || !data.hasOwnProperty('timestamp') || !data.hasOwnProperty('config')) { - return { isValid: false, error: 'Missing required fields: hash, timestamp, config' }; - } - - // Hash must be string (SHA256 = 64 chars) - if (typeof data.hash !== 'string' || data.hash.length !== 64) { - return { isValid: false, error: 'Invalid hash format' }; - } - - // Timestamp must be valid date string - if (typeof data.timestamp !== 'string' || isNaN(Date.parse(data.timestamp))) { - return { isValid: false, error: 'Invalid timestamp format' }; - } - - // Config must be object with expected structure - const config = data.config; - if (!config || typeof config !== 'object') { - return { isValid: false, error: 'Config must be an object' }; - } - - // Config must have array fields - const requiredArrays = ['domains', 'modsecCRSRules', 'modsecCustomRules', 'aclRules', 'sslCertificates']; - for (const field of requiredArrays) { - if (!Array.isArray(config[field])) { - return { isValid: false, error: `Config.${field} must be an array` }; - } - } - - return { isValid: true }; - } catch (error) { - return { - isValid: false, - error: error instanceof Error ? error.message : 'Response validation failed' - }; - } -} - -/** - * Validate health check response - */ -export function validateHealthResponse(data: any): { isValid: boolean; error?: string } { - try { - if (!data || typeof data !== 'object') { - return { isValid: false, error: 'Response must be an object' }; - } - - // Health check should return status and message - if (!data.hasOwnProperty('status')) { - return { isValid: false, error: 'Missing status field' }; - } - - if (typeof data.status !== 'string') { - return { isValid: false, error: 'Status must be a string' }; - } - - return { isValid: true }; - } catch (error) { - return { - isValid: false, - error: error instanceof Error ? 
error.message : 'Health response validation failed' - }; - } -} From 29b012c0433c02a09d9eb6d004f3ba5e712f19e8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Mon, 6 Oct 2025 23:23:36 +0700 Subject: [PATCH 22/32] feat: Add comprehensive update script for Nginx Love UI, including service checks, dependency updates, and health checks --- scripts/update.sh | 280 ++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 280 insertions(+) create mode 100644 scripts/update.sh diff --git a/scripts/update.sh b/scripts/update.sh new file mode 100644 index 0000000..7980c8d --- /dev/null +++ b/scripts/update.sh @@ -0,0 +1,280 @@ +#!/bin/bash + +################################################################################ +# Nginx Love UI - Update Script +# Description: Update source code, rebuild and restart services +# Version: 1.0.0 +################################################################################ + +set -e + +# Colors +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +BLUE='\033[0;34m' +NC='\033[0m' + +# Configuration +SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +PROJECT_DIR="$(cd "$SCRIPT_DIR/.." && pwd)" +BACKEND_DIR="$PROJECT_DIR/apps/api" +FRONTEND_DIR="$PROJECT_DIR/apps/web" +LOG_FILE="/var/log/nginx-love-ui-update.log" + +# Database configuration +DB_CONTAINER_NAME="nginx-love-postgres" + +# Logging functions +log() { + echo -e "${GREEN}[$(date '+%Y-%m-%d %H:%M:%S')]${NC} $1" | tee -a "$LOG_FILE" +} + +error() { + echo -e "${RED}[ERROR]${NC} $1" | tee -a "$LOG_FILE" + exit 1 +} + +warn() { + echo -e "${YELLOW}[WARN]${NC} $1" | tee -a "$LOG_FILE" +} + +info() { + echo -e "${BLUE}[INFO]${NC} $1" | tee -a "$LOG_FILE" +} + +# Check if running as root +if [[ "${EUID}" -ne 0 ]]; then + error "This script must be run as root (use sudo)" +fi + +log "==================================" +log "Nginx Love UI Update Started" +log "==================================" + +# Check if services exist +if ! systemctl list-unit-files | grep -q nginx-love-backend.service; then + error "Backend service not found. Please run deploy.sh first." +fi + +if ! systemctl list-unit-files | grep -q nginx-love-frontend.service; then + error "Frontend service not found. Please run deploy.sh first." +fi + +# Check if database container exists +if ! docker ps -a | grep -q "${DB_CONTAINER_NAME}"; then + error "Database container '${DB_CONTAINER_NAME}' not found. Please run deploy.sh first." +fi + +# Step 1: Check prerequisites +log "Step 1/6: Checking prerequisites..." + +# Check Node.js +if ! command -v node &> /dev/null; then + error "Node.js not found. Please install Node.js 18+ first." +fi + +# Check pnpm +if ! command -v pnpm &> /dev/null; then + error "pnpm not found. Please install pnpm first." +fi + +# Check Docker +if ! command -v docker &> /dev/null; then + error "Docker not found. Please install Docker first." +fi + +log "✓ Prerequisites check passed" + +# Step 2: Stop services before update +log "Step 2/6: Stopping services for update..." 
+ +# Stop backend service +if systemctl is-active --quiet nginx-love-backend.service; then + systemctl stop nginx-love-backend.service + log "✓ Backend service stopped" +else + warn "Backend service was not running" +fi + +# Stop frontend service +if systemctl is-active --quiet nginx-love-frontend.service; then + systemctl stop nginx-love-frontend.service + log "✓ Frontend service stopped" +else + warn "Frontend service was not running" +fi + +# Step 3: Update dependencies and build backend +log "Step 3/6: Building backend..." + +cd "${PROJECT_DIR}" + +# Update monorepo dependencies +log "Updating monorepo dependencies..." +pnpm install >> "${LOG_FILE}" 2>&1 || error "Failed to update monorepo dependencies" + +cd "${BACKEND_DIR}" + +# Generate Prisma client (in case schema changed) +log "Generating Prisma client..." +pnpm prisma:generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client" + +# Run database migrations (only creates new tables, doesn't overwrite existing data) +log "Running database migrations..." +pnpm exec prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations" + +# Seed database (only adds missing data, doesn't overwrite existing) +log "Seeding database for missing tables/data..." +pnpm prisma:seed >> "$LOG_FILE" 2>&1 || warn "Failed to seed database (this is normal if data already exists)" + +# Build backend +log "Building backend..." +cd "${PROJECT_DIR}" +pnpm --filter @nginx-love/api build >> "${LOG_FILE}" 2>&1 || error "Failed to build backend" + +log "✓ Backend build completed" + +# Step 4: Build frontend +log "Step 4/6: Building frontend..." + +cd "${FRONTEND_DIR}" + +# Clean previous build +if [ -d "dist" ]; then + log "Cleaning previous frontend build..." + rm -rf dist +fi + +# Build frontend +log "Building frontend..." +cd "${PROJECT_DIR}" +pnpm --filter @nginx-love/web build >> "${LOG_FILE}" 2>&1 || error "Failed to build frontend" + +# Get public IP for CSP update +PUBLIC_IP=$(curl -s ifconfig.me || curl -s icanhazip.com || curl -s ipinfo.io/ip || echo "localhost") + +# Update CSP in built index.html to use public IP +log "Updating Content Security Policy with public IP: ${PUBLIC_IP}..." +sed -i "s|__API_URL__|http://${PUBLIC_IP}:3001 http://localhost:3001|g" "${FRONTEND_DIR}/dist/index.html" +sed -i "s|__WS_URL__|ws://${PUBLIC_IP}:* ws://localhost:*|g" "${FRONTEND_DIR}/dist/index.html" + +log "✓ Frontend build completed" + +# Step 5: Restart services +log "Step 5/6: Starting services..." + +# Ensure database container is running +if ! docker ps | grep -q "${DB_CONTAINER_NAME}"; then + log "Starting database container..." + docker start "${DB_CONTAINER_NAME}" >> "${LOG_FILE}" 2>&1 || error "Failed to start database container" + + # Wait for database to be ready + log "Waiting for database to be ready..." + sleep 5 + for i in {1..30}; do + if docker exec "${DB_CONTAINER_NAME}" pg_isready > /dev/null 2>&1; then + log "✓ Database is ready" + break + fi + if [ "${i}" -eq 30 ]; then + error "Database failed to start" + fi + sleep 1 + done +else + log "✓ Database container is already running" +fi + +# Start backend service +systemctl start nginx-love-backend.service || error "Failed to start backend service" +sleep 3 +if ! systemctl is-active --quiet nginx-love-backend.service; then + error "Backend service failed to start. 
Check logs: journalctl -u nginx-love-backend.service"
+fi
+log "✓ Backend service started"
+
+# Start frontend service
+systemctl start nginx-love-frontend.service || error "Failed to start frontend service"
+sleep 3
+if ! systemctl is-active --quiet nginx-love-frontend.service; then
+    error "Frontend service failed to start. Check logs: journalctl -u nginx-love-frontend.service"
+fi
+log "✓ Frontend service started"
+
+# Ensure nginx is running
+if ! systemctl is-active --quiet nginx; then
+    systemctl start nginx || error "Failed to start nginx"
+fi
+log "✓ Nginx is running"
+
+# Step 6: Health check and summary
+log "Step 6/6: Performing health checks..."
+
+# Health check with retries
+log "Performing health checks..."
+sleep 5
+
+# Backend health check
+BACKEND_HEALTHY=false
+for i in {1..10}; do
+    if curl -s http://localhost:3001/api/health | grep -q "success"; then
+        BACKEND_HEALTHY=true
+        break
+    fi
+    sleep 2
+done
+
+if [ "$BACKEND_HEALTHY" = true ]; then
+    log "✅ Backend health check: PASSED"
+else
+    warn "⚠️ Backend health check: FAILED (check logs: tail -f /var/log/nginx-love-backend.log)"
+fi
+
+# Frontend health check
+FRONTEND_HEALTHY=false
+for i in {1..5}; do
+    if curl -s http://localhost:8080 | grep -q "

Date: Mon, 6 Oct 2025 23:43:54 +0700
Subject: [PATCH 23/32] feat: Improve update script by ensuring database container is running before generating Prisma client and running migrations; streamline logging

---
 scripts/update.sh | 42 +++++++++++++++++-------------------------
 1 file changed, 17 insertions(+), 25 deletions(-)

diff --git a/scripts/update.sh b/scripts/update.sh
index 7980c8d..193b4b4 100644
--- a/scripts/update.sh
+++ b/scripts/update.sh
@@ -116,17 +116,24 @@ pnpm install >> "${LOG_FILE}" 2>&1 || error "Failed to update monorepo dependenc
 cd "${BACKEND_DIR}"
 
-# Generate Prisma client (in case schema changed)
+# Start database if not running
+if ! docker ps | grep -q "${DB_CONTAINER_NAME}"; then
+    log "Starting database container..."
+    docker start "${DB_CONTAINER_NAME}" > /dev/null 2>&1
+    sleep 3
+fi
+
+# Generate Prisma client
 log "Generating Prisma client..."
-pnpm prisma:generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client"
+npx prisma generate > /dev/null 2>&1 || pnpm exec prisma generate > /dev/null 2>&1
 
-# Run database migrations (only creates new tables, doesn't overwrite existing data)
+# Run database migrations
 log "Running database migrations..."
-pnpm exec prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations"
+npx prisma migrate deploy > /dev/null 2>&1 || pnpm exec prisma migrate deploy > /dev/null 2>&1
 
-# Seed database (only adds missing data, doesn't overwrite existing)
-log "Seeding database for missing tables/data..."
-pnpm prisma:seed >> "$LOG_FILE" 2>&1 || warn "Failed to seed database (this is normal if data already exists)"
+# Seed database
+log "Seeding database..."
+npx ts-node prisma/seed.ts > /dev/null 2>&1 || pnpm exec ts-node prisma/seed.ts > /dev/null 2>&1 || warn "Seeding skipped"
 
 # Build backend
 log "Building backend..."
@@ -164,26 +171,11 @@ log "✓ Frontend build completed"
 
 # Step 5: Restart services
 log "Step 5/6: Starting services..."
 
-# Ensure database container is running
+# Database should already be running from Step 3, just verify
 if ! docker ps | grep -q "${DB_CONTAINER_NAME}"; then
-    log "Starting database container..."
- docker start "${DB_CONTAINER_NAME}" >> "${LOG_FILE}" 2>&1 || error "Failed to start database container" - - # Wait for database to be ready - log "Waiting for database to be ready..." - sleep 5 - for i in {1..30}; do - if docker exec "${DB_CONTAINER_NAME}" pg_isready > /dev/null 2>&1; then - log "✓ Database is ready" - break - fi - if [ "${i}" -eq 30 ]; then - error "Database failed to start" - fi - sleep 1 - done + error "Database container stopped unexpectedly. Please check Docker status." else - log "✓ Database container is already running" + log "✓ Database container is running" fi # Start backend service From 1a07007d891df8a5939a2a24dbeafa095f63b585 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Mon, 6 Oct 2025 23:48:36 +0700 Subject: [PATCH 24/32] feat: Improve update script by ensuring database container starts correctly and enhance logging for Prisma client generation, migrations, and seeding --- scripts/update.sh | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/scripts/update.sh b/scripts/update.sh index 193b4b4..ebdcf31 100644 --- a/scripts/update.sh +++ b/scripts/update.sh @@ -117,23 +117,23 @@ pnpm install >> "${LOG_FILE}" 2>&1 || error "Failed to update monorepo dependenc cd "${BACKEND_DIR}" # Start database if not running -if ! docker ps | grep -q "${DB_CONTAINER_NAME}"; then +if ! docker ps | grep -q "${DB_CONTAINER_NAME}" 2>/dev/null; then log "Starting database container..." - docker start "${DB_CONTAINER_NAME}" > /dev/null 2>&1 + docker start "${DB_CONTAINER_NAME}" 2>/dev/null || warn "Could not start database container" sleep 3 fi # Generate Prisma client log "Generating Prisma client..." -npx prisma generate > /dev/null 2>&1 || pnpm exec prisma generate > /dev/null 2>&1 +pnpm exec prisma generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client" # Run database migrations log "Running database migrations..." -npx prisma migrate deploy > /dev/null 2>&1 || pnpm exec prisma migrate deploy > /dev/null 2>&1 +pnpm exec prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations" # Seed database log "Seeding database..." -npx ts-node prisma/seed.ts > /dev/null 2>&1 || pnpm exec ts-node prisma/seed.ts > /dev/null 2>&1 || warn "Seeding skipped" +pnpm exec ts-node prisma/seed.ts >> "$LOG_FILE" 2>&1 || warn "Failed to seed database (this is normal if data already exists)" # Build backend log "Building backend..." From 6cf1faaaffea430467111b313fe73c47564633b1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Mon, 6 Oct 2025 23:51:05 +0700 Subject: [PATCH 25/32] feat: Update Prisma client generation and migration steps in the update script; streamline backend and frontend build processes --- scripts/update.sh | 16 +++++++++------- 1 file changed, 9 insertions(+), 7 deletions(-) diff --git a/scripts/update.sh b/scripts/update.sh index ebdcf31..140b2eb 100644 --- a/scripts/update.sh +++ b/scripts/update.sh @@ -125,20 +125,22 @@ fi # Generate Prisma client log "Generating Prisma client..." -pnpm exec prisma generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client" +pnpm prisma generate >> "$LOG_FILE" 2>&1 || error "Failed to generate Prisma client" # Run database migrations log "Running database migrations..." 
-pnpm exec prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations" +cd "${BACKEND_DIR}" +pnpm prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations" # Seed database log "Seeding database..." -pnpm exec ts-node prisma/seed.ts >> "$LOG_FILE" 2>&1 || warn "Failed to seed database (this is normal if data already exists)" +cd "${BACKEND_DIR}" +pnpm ts-node prisma/seed.ts >> "$LOG_FILE" 2>&1 || warn "Failed to seed database (this is normal if data already exists)" # Build backend log "Building backend..." -cd "${PROJECT_DIR}" -pnpm --filter @nginx-love/api build >> "${LOG_FILE}" 2>&1 || error "Failed to build backend" +cd "${BACKEND_DIR}" +pnpm build >> "${LOG_FILE}" 2>&1 || error "Failed to build backend" log "✓ Backend build completed" @@ -155,8 +157,8 @@ fi # Build frontend log "Building frontend..." -cd "${PROJECT_DIR}" -pnpm --filter @nginx-love/web build >> "${LOG_FILE}" 2>&1 || error "Failed to build frontend" +cd "${FRONTEND_DIR}" +pnpm build >> "${LOG_FILE}" 2>&1 || error "Failed to build frontend" # Get public IP for CSP update PUBLIC_IP=$(curl -s ifconfig.me || curl -s icanhazip.com || curl -s ipinfo.io/ip || echo "localhost") From a90c75e1f2c846cd27e9747588f59bdb54b2f32b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Mon, 6 Oct 2025 23:53:38 +0700 Subject: [PATCH 26/32] refactor: Remove BackupStatus, BackupSchedule, and BackupFile models from Prisma schema --- apps/api/prisma/schema.prisma | 44 ----------------------------------- 1 file changed, 44 deletions(-) diff --git a/apps/api/prisma/schema.prisma b/apps/api/prisma/schema.prisma index 372931c..247fa99 100644 --- a/apps/api/prisma/schema.prisma +++ b/apps/api/prisma/schema.prisma @@ -625,47 +625,3 @@ model ConfigVersion { @@index([createdAt]) @@map("config_versions") } - -enum BackupStatus { - success - failed - running - pending -} - -model BackupSchedule { - id String @id @default(cuid()) - name String - schedule String // Cron expression - enabled Boolean @default(true) - lastRun DateTime? - nextRun DateTime? - status BackupStatus @default(pending) - - backups BackupFile[] - - createdAt DateTime @default(now()) - updatedAt DateTime @updatedAt - - @@map("backup_schedules") -} - -model BackupFile { - id String @id @default(cuid()) - scheduleId String? - schedule BackupSchedule? @relation(fields: [scheduleId], references: [id], onDelete: SetNull) - - filename String - filepath String - size BigInt // Size in bytes - status BackupStatus @default(success) - type String @default("full") // full, incremental, manual - - metadata Json? // Additional metadata (domains count, rules count, etc.) 
- - createdAt DateTime @default(now()) - - @@index([scheduleId]) - @@index([createdAt]) - @@map("backup_files") -} From 2fcb08d074f6a17cfe58ad7d8f01dfb12ef1bdd9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Tue, 7 Oct 2025 10:52:33 +0700 Subject: [PATCH 27/32] feat: Enhance database seeding process with safe seeding script and improved logging; update update script steps --- apps/api/prisma/seed-safe.ts | 253 +++++++++++++++++++++++++++++++++++ scripts/update.sh | 16 +-- 2 files changed, 261 insertions(+), 8 deletions(-) create mode 100644 apps/api/prisma/seed-safe.ts diff --git a/apps/api/prisma/seed-safe.ts b/apps/api/prisma/seed-safe.ts new file mode 100644 index 0000000..e09681d --- /dev/null +++ b/apps/api/prisma/seed-safe.ts @@ -0,0 +1,253 @@ +import { PrismaClient } from '@prisma/client'; +import { hashPassword } from '../src/utils/password'; + +const prisma = new PrismaClient(); + +async function main() { + console.log('🌱 Starting safe database seed...'); + console.log('ℹ️ This script will only create data that doesn\'t exist yet'); + + // Check if users already exist + const existingUsers = await prisma.user.count(); + console.log(`Found ${existingUsers} existing users`); + + if (existingUsers === 0) { + console.log('Creating default users...'); + + // Create admin user (password: admin123) + const adminPassword = await hashPassword('admin123'); + const admin = await prisma.user.create({ + data: { + username: 'admin', + email: 'admin@example.com', + password: adminPassword, + fullName: 'System Administrator', + role: 'admin', + status: 'active', + avatar: 'https://api.dicebear.com/7.x/avataaars/svg?seed=admin', + phone: '+84 123 456 789', + timezone: 'Asia/Ho_Chi_Minh', + language: 'vi', + lastLogin: new Date(), + profile: { + create: { + bio: 'System administrator with full access', + }, + }, + }, + }); + + // Create moderator user (password: operator123) + const operatorPassword = await hashPassword('operator123'); + const operator = await prisma.user.create({ + data: { + username: 'operator', + email: 'operator@example.com', + password: operatorPassword, + fullName: 'System Operator', + role: 'moderator', + status: 'active', + avatar: 'https://api.dicebear.com/7.x/avataaars/svg?seed=operator', + phone: '+84 987 654 321', + timezone: 'Asia/Ho_Chi_Minh', + language: 'en', + lastLogin: new Date(Date.now() - 86400000), // 1 day ago + profile: { + create: { + bio: 'System operator', + }, + }, + }, + }); + + // Create viewer user (password: viewer123) + const viewerPassword = await hashPassword('viewer123'); + const viewer = await prisma.user.create({ + data: { + username: 'viewer', + email: 'viewer@example.com', + password: viewerPassword, + fullName: 'Read Only User', + role: 'viewer', + status: 'active', + avatar: 'https://api.dicebear.com/7.x/avataaars/svg?seed=viewer', + timezone: 'Asia/Singapore', + language: 'en', + lastLogin: new Date(Date.now() - 172800000), // 2 days ago + profile: { + create: { + bio: 'Read-only access user', + }, + }, + }, + }); + + console.log('✅ Default users created successfully!'); + + // Create sample activity logs for new admin user + console.log('Creating initial activity logs...'); + await prisma.activityLog.createMany({ + data: [ + { + userId: admin.id, + action: 'User logged in', + type: 'login', + ip: '192.168.1.100', + userAgent: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36', + timestamp: new Date(Date.now() - 3600000), // 1 hour ago + success: true, + }, + { + userId: admin.id, + 
action: 'System initialized', + type: 'system', + ip: '192.168.1.100', + userAgent: 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36', + timestamp: new Date(), + details: 'Initial system setup completed', + success: true, + }, + ], + }); + } else { + console.log('ℹ️ Users already exist, skipping user creation'); + } + + // Check and create ModSecurity CRS rules if they don't exist + const existingCRSRules = await prisma.modSecCRSRule.count(); + console.log(`Found ${existingCRSRules} existing CRS rules`); + + if (existingCRSRules === 0) { + console.log('Creating ModSecurity CRS rules...'); + + // Create OWASP CRS rule configurations (metadata only) + await prisma.modSecCRSRule.createMany({ + data: [ + { + ruleFile: 'REQUEST-942-APPLICATION-ATTACK-SQLI.conf', + name: 'SQL Injection Protection', + category: 'SQLi', + description: 'Detects SQL injection attempts using OWASP CRS detection rules', + enabled: true, + paranoia: 1 + }, + { + ruleFile: 'REQUEST-941-APPLICATION-ATTACK-XSS.conf', + name: 'XSS Attack Prevention', + category: 'XSS', + description: 'Blocks cross-site scripting attacks', + enabled: true, + paranoia: 1 + }, + { + ruleFile: 'REQUEST-932-APPLICATION-ATTACK-RCE.conf', + name: 'RCE Detection', + category: 'RCE', + description: 'Remote code execution prevention', + enabled: true, + paranoia: 1 + }, + { + ruleFile: 'REQUEST-930-APPLICATION-ATTACK-LFI.conf', + name: 'LFI Protection', + category: 'LFI', + description: 'Local file inclusion prevention', + enabled: false, + paranoia: 1 + }, + { + ruleFile: 'REQUEST-943-APPLICATION-ATTACK-SESSION-FIXATION.conf', + name: 'Session Fixation', + category: 'SESSION-FIXATION', + description: 'Prevents session fixation attacks', + enabled: true, + paranoia: 1 + }, + { + ruleFile: 'REQUEST-933-APPLICATION-ATTACK-PHP.conf', + name: 'PHP Attacks', + category: 'PHP', + description: 'PHP-specific attack prevention', + enabled: true, + paranoia: 1 + }, + { + ruleFile: 'REQUEST-920-PROTOCOL-ENFORCEMENT.conf', + name: 'Protocol Attacks', + category: 'PROTOCOL-ATTACK', + description: 'HTTP protocol attack prevention', + enabled: true, + paranoia: 1 + }, + { + ruleFile: 'RESPONSE-950-DATA-LEAKAGES.conf', + name: 'Data Leakage', + category: 'DATA-LEAKAGES', + description: 'Prevents sensitive data leakage', + enabled: false, + paranoia: 1 + }, + { + ruleFile: 'REQUEST-934-APPLICATION-ATTACK-GENERIC.conf', + name: 'SSRF Protection', + category: 'SSRF', + description: 'Server-side request forgery prevention (part of generic attacks)', + enabled: true, + paranoia: 1 + }, + { + ruleFile: 'RESPONSE-955-WEB-SHELLS.conf', + name: 'Web Shell Detection', + category: 'WEB-SHELL', + description: 'Detects web shell uploads', + enabled: true, + paranoia: 1 + }, + ], + }); + + console.log('✅ ModSecurity CRS rules created successfully!'); + } else { + console.log('ℹ️ CRS rules already exist, skipping CRS rule creation'); + } + + console.log('\n✅ Safe database seed completed successfully!'); + console.log('ℹ️ All existing data has been preserved'); + + // Show current user count + const totalUsers = await prisma.user.count(); + const totalCRSRules = await prisma.modSecCRSRule.count(); + console.log(`\n📊 Current database state:`); + console.log(` • Users: ${totalUsers}`); + console.log(` • CRS Rules: ${totalCRSRules}`); + + if (existingUsers === 0) { + console.log('\n📝 Default Test Credentials (only if created):'); + console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━'); + console.log('Admin:'); + console.log(' Username: admin'); + console.log(' 
Password: admin123'); + console.log(' Email: admin@example.com'); + console.log(' Role: admin'); + console.log('\nOperator:'); + console.log(' Username: operator'); + console.log(' Password: operator123'); + console.log(' Email: operator@example.com'); + console.log(' Role: moderator'); + console.log('\nViewer:'); + console.log(' Username: viewer'); + console.log(' Password: viewer123'); + console.log(' Email: viewer@example.com'); + console.log(' Role: viewer'); + console.log('━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n'); + } +} + +main() + .catch((e) => { + console.error('❌ Error seeding database:', e); + process.exit(1); + }) + .finally(async () => { + await prisma.$disconnect(); + }); \ No newline at end of file diff --git a/scripts/update.sh b/scripts/update.sh index 140b2eb..af5051e 100644 --- a/scripts/update.sh +++ b/scripts/update.sh @@ -132,10 +132,10 @@ log "Running database migrations..." cd "${BACKEND_DIR}" pnpm prisma migrate deploy >> "$LOG_FILE" 2>&1 || error "Failed to run migrations" -# Seed database -log "Seeding database..." +# Seed database safely (only create missing data, preserve existing) +log "Seeding database safely..." cd "${BACKEND_DIR}" -pnpm ts-node prisma/seed.ts >> "$LOG_FILE" 2>&1 || warn "Failed to seed database (this is normal if data already exists)" +pnpm ts-node prisma/seed-safe.ts >> "$LOG_FILE" 2>&1 || warn "Failed to seed database safely" # Build backend log "Building backend..." @@ -170,8 +170,8 @@ sed -i "s|__WS_URL__|ws://${PUBLIC_IP}:* ws://localhost:*|g" "${FRONTEND_DIR}/di log "✓ Frontend build completed" -# Step 5: Restart services -log "Step 5/6: Starting services..." +# Step 6: Restart services +log "Step 6/7: Starting services..." # Database should already be running from Step 3, just verify if ! docker ps | grep -q "${DB_CONTAINER_NAME}"; then @@ -202,8 +202,8 @@ if ! systemctl is-active --quiet nginx; then fi log "✓ Nginx is running" -# Step 6: Health check and summary -log "Step 6/6: Performing health checks..." +# Step 7: Health check and summary +log "Step 7/7: Performing health checks..." # Health check with retries log "Performing health checks..." @@ -248,9 +248,9 @@ log "Update Completed Successfully!" log "==================================" log "" log "📋 Updated Components:" +log " • Database: Backup created, migrations applied, missing data created (existing data preserved)" log " • Backend API: Rebuilt and restarted" log " • Frontend UI: Rebuilt and restarted" -log " • Database: Migrations applied, new tables seeded" log "" log "🌐 Services Status:" log " • Backend API: http://${PUBLIC_IP}:3001" From 2395c1887a00d92ed7937e456c36edafa0401090 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Tue, 7 Oct 2025 10:52:59 +0700 Subject: [PATCH 28/32] fix: Correct step numbering in update script for service restart and health checks --- scripts/update.sh | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/scripts/update.sh b/scripts/update.sh index af5051e..8283f62 100644 --- a/scripts/update.sh +++ b/scripts/update.sh @@ -170,8 +170,8 @@ sed -i "s|__WS_URL__|ws://${PUBLIC_IP}:* ws://localhost:*|g" "${FRONTEND_DIR}/di log "✓ Frontend build completed" -# Step 6: Restart services -log "Step 6/7: Starting services..." +# Step 5: Restart services +log "Step 5/6: Starting services..." # Database should already be running from Step 3, just verify if ! docker ps | grep -q "${DB_CONTAINER_NAME}"; then @@ -202,8 +202,8 @@ if ! 
systemctl is-active --quiet nginx; then fi log "✓ Nginx is running" -# Step 7: Health check and summary -log "Step 7/7: Performing health checks..." +# Step 6: Health check and summary +log "Step 6/6: Performing health checks..." # Health check with retries log "Performing health checks..." From bb3f94faca326c1d3c2b1119006dc2eab61c0e62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Tue, 7 Oct 2025 10:53:09 +0700 Subject: [PATCH 29/32] fix: Update log message for database migration status in update script --- scripts/update.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/scripts/update.sh b/scripts/update.sh index 8283f62..a96a88a 100644 --- a/scripts/update.sh +++ b/scripts/update.sh @@ -248,9 +248,9 @@ log "Update Completed Successfully!" log "==================================" log "" log "📋 Updated Components:" -log " • Database: Backup created, migrations applied, missing data created (existing data preserved)" log " • Backend API: Rebuilt and restarted" log " • Frontend UI: Rebuilt and restarted" +log " • Database: Migrations applied, missing data created (existing data preserved)" log "" log "🌐 Services Status:" log " • Backend API: http://${PUBLIC_IP}:3001" From 3eba91d255848eeda2b34eb849f08bb57a782fdb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Tue, 7 Oct 2025 11:35:45 +0700 Subject: [PATCH 30/32] feat: Improve toast notifications for password and 2FA actions with enhanced messages and icons --- apps/web/src/components/pages/Account.tsx | 50 ++++++++++++++++++----- 1 file changed, 40 insertions(+), 10 deletions(-) diff --git a/apps/web/src/components/pages/Account.tsx b/apps/web/src/components/pages/Account.tsx index d9b1033..3e369b8 100644 --- a/apps/web/src/components/pages/Account.tsx +++ b/apps/web/src/components/pages/Account.tsx @@ -145,8 +145,14 @@ const Account = () => { try { await accountService.changePassword(passwordForm); toast({ - title: "Password changed", - description: "Your password has been changed successfully" + title: ( +
+
+          Password Changed Successfully
+
    + ), + description: "Your password has been updated. Please login again with your new password.", + className: "border-green-200 dark:border-green-800" }); setPasswordForm({ currentPassword: "", @@ -173,8 +179,14 @@ const Account = () => { setTwoFactorEnabled(false); setTwoFactorSetup(null); toast({ - title: "2FA disabled", - description: "Two-factor authentication has been disabled" + title: ( +
+
+          2FA Disabled
+
    + ), + description: "Two-factor authentication has been disabled for your account.", + className: "border-orange-200 dark:border-orange-800" }); } catch (error: any) { toast({ @@ -189,8 +201,14 @@ const Account = () => { const setup = await accountService.setup2FA(); setTwoFactorSetup(setup); toast({ - title: "2FA Setup", - description: "Scan the QR code with your authenticator app" + title: ( +
+
+          2FA Setup Ready
+
    + ), + description: "Scan the QR code with your authenticator app to complete setup.", + className: "border-blue-200 dark:border-blue-800" }); } catch (error: any) { toast({ @@ -218,8 +236,14 @@ const Account = () => { setTwoFactorSetup(null); setVerificationToken(""); toast({ - title: "2FA enabled", - description: "Two-factor authentication has been enabled successfully" + title: ( +
+
+          2FA Enabled Successfully
+
    + ), + description: "Two-factor authentication is now active. Your account is more secure!", + className: "border-green-200 dark:border-green-800" }); loadProfile(); } catch (error: any) { @@ -234,8 +258,14 @@ const Account = () => { const copyBackupCode = (code: string) => { navigator.clipboard.writeText(code); toast({ - title: "Copied", - description: "Backup code copied to clipboard" + title: ( +
+
+          Code Copied
+
    + ), + description: "Backup code has been copied to your clipboard.", + className: "border-blue-200 dark:border-blue-800" }); }; From a9f466729dd0eed5a77192d9eb5a64cdcd92624d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Tue, 7 Oct 2025 11:39:08 +0700 Subject: [PATCH 31/32] feat: Update toast notifications for password and 2FA actions with improved messages and background colors --- apps/web/src/components/pages/Account.tsx | 45 +++++------------------ 1 file changed, 10 insertions(+), 35 deletions(-) diff --git a/apps/web/src/components/pages/Account.tsx b/apps/web/src/components/pages/Account.tsx index 3e369b8..7e4de7a 100644 --- a/apps/web/src/components/pages/Account.tsx +++ b/apps/web/src/components/pages/Account.tsx @@ -145,14 +145,9 @@ const Account = () => { try { await accountService.changePassword(passwordForm); toast({ - title: ( -
-
-          Password Changed Successfully
-
    - ), + title: "✅ Password Changed Successfully", description: "Your password has been updated. Please login again with your new password.", - className: "border-green-200 dark:border-green-800" + className: "border-green-200 dark:border-green-800 bg-green-50 dark:bg-green-950" }); setPasswordForm({ currentPassword: "", @@ -179,14 +174,9 @@ const Account = () => { setTwoFactorEnabled(false); setTwoFactorSetup(null); toast({ - title: ( -
-
-          2FA Disabled
-
    - ), + title: "⚠️ 2FA Disabled", description: "Two-factor authentication has been disabled for your account.", - className: "border-orange-200 dark:border-orange-800" + className: "border-orange-200 dark:border-orange-800 bg-orange-50 dark:bg-orange-950" }); } catch (error: any) { toast({ @@ -201,14 +191,9 @@ const Account = () => { const setup = await accountService.setup2FA(); setTwoFactorSetup(setup); toast({ - title: ( -
-
-          2FA Setup Ready
-
    - ), + title: "📱 2FA Setup Ready", description: "Scan the QR code with your authenticator app to complete setup.", - className: "border-blue-200 dark:border-blue-800" + className: "border-blue-200 dark:border-blue-800 bg-blue-50 dark:bg-blue-950" }); } catch (error: any) { toast({ @@ -236,14 +221,9 @@ const Account = () => { setTwoFactorSetup(null); setVerificationToken(""); toast({ - title: ( -
-
-          2FA Enabled Successfully
-
    - ), + title: "🛡️ 2FA Enabled Successfully", description: "Two-factor authentication is now active. Your account is more secure!", - className: "border-green-200 dark:border-green-800" + className: "border-green-200 dark:border-green-800 bg-green-50 dark:bg-green-950" }); loadProfile(); } catch (error: any) { @@ -258,14 +238,9 @@ const Account = () => { const copyBackupCode = (code: string) => { navigator.clipboard.writeText(code); toast({ - title: ( -
-
-          Code Copied
-
    - ), + title: "📋 Code Copied", description: "Backup code has been copied to your clipboard.", - className: "border-blue-200 dark:border-blue-800" + className: "border-blue-200 dark:border-blue-800 bg-blue-50 dark:bg-blue-950" }); }; From 007f3f7db3a0fe009b5cafc157d72593a8b398ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?M=E1=BA=A1nh=20T=C6=B0=E1=BB=9Dng=20Solo?= Date: Tue, 7 Oct 2025 11:43:38 +0700 Subject: [PATCH 32/32] refactor: Replace custom toast implementation with sonner for improved notifications --- apps/web/src/components/pages/Account.tsx | 90 ++++++++--------------- 1 file changed, 30 insertions(+), 60 deletions(-) diff --git a/apps/web/src/components/pages/Account.tsx b/apps/web/src/components/pages/Account.tsx index 7e4de7a..be43d35 100644 --- a/apps/web/src/components/pages/Account.tsx +++ b/apps/web/src/components/pages/Account.tsx @@ -29,12 +29,11 @@ import { Loader2 } from "lucide-react"; import { UserProfile, ActivityLog } from "@/types"; -import { useToast } from "@/hooks/use-toast"; +import { toast } from "sonner"; import { accountService } from "@/services/auth.service"; const Account = () => { const { t } = useTranslation(); - const { toast } = useToast(); const [profile, setProfile] = useState(null); const [twoFactorEnabled, setTwoFactorEnabled] = useState(false); @@ -89,10 +88,8 @@ const Account = () => { setProfile(data); setTwoFactorEnabled(data.twoFactorEnabled); } catch (error: any) { - toast({ - title: "Error", - description: error.response?.data?.message || "Failed to load profile", - variant: "destructive" + toast.error("Error", { + description: error.response?.data?.message || "Failed to load profile" }); } }; @@ -110,44 +107,35 @@ const Account = () => { try { const updatedProfile = await accountService.updateProfile(profileForm); setProfile(updatedProfile); - toast({ - title: "Profile updated", + toast.success("Profile updated", { description: "Your profile information has been updated successfully" }); } catch (error: any) { - toast({ - title: "Error", - description: error.response?.data?.message || "Failed to update profile", - variant: "destructive" + toast.error("Error", { + description: error.response?.data?.message || "Failed to update profile" }); } }; const handlePasswordChange = async () => { if (passwordForm.newPassword !== passwordForm.confirmPassword) { - toast({ - title: "Password mismatch", - description: "New password and confirm password do not match", - variant: "destructive" + toast.error("Password mismatch", { + description: "New password and confirm password do not match" }); return; } if (passwordForm.newPassword.length < 8) { - toast({ - title: "Weak password", - description: "Password must be at least 8 characters long", - variant: "destructive" + toast.error("Weak password", { + description: "Password must be at least 8 characters long" }); return; } try { await accountService.changePassword(passwordForm); - toast({ - title: "✅ Password Changed Successfully", - description: "Your password has been updated. Please login again with your new password.", - className: "border-green-200 dark:border-green-800 bg-green-50 dark:bg-green-950" + toast.success("✅ Password Changed Successfully", { + description: "Your password has been updated. Please login again with your new password." 
}); setPasswordForm({ currentPassword: "", @@ -155,10 +143,8 @@ const Account = () => { confirmPassword: "" }); } catch (error: any) { - toast({ - title: "Error", - description: error.response?.data?.message || "Failed to change password", - variant: "destructive" + toast.error("Error", { + description: error.response?.data?.message || "Failed to change password" }); } }; @@ -173,16 +159,12 @@ const Account = () => { await accountService.disable2FA(password); setTwoFactorEnabled(false); setTwoFactorSetup(null); - toast({ - title: "⚠️ 2FA Disabled", - description: "Two-factor authentication has been disabled for your account.", - className: "border-orange-200 dark:border-orange-800 bg-orange-50 dark:bg-orange-950" + toast.warning("⚠️ 2FA Disabled", { + description: "Two-factor authentication has been disabled for your account." }); } catch (error: any) { - toast({ - title: "Error", - description: error.response?.data?.message || "Failed to disable 2FA", - variant: "destructive" + toast.error("Error", { + description: error.response?.data?.message || "Failed to disable 2FA" }); } } else { @@ -190,16 +172,12 @@ const Account = () => { try { const setup = await accountService.setup2FA(); setTwoFactorSetup(setup); - toast({ - title: "📱 2FA Setup Ready", - description: "Scan the QR code with your authenticator app to complete setup.", - className: "border-blue-200 dark:border-blue-800 bg-blue-50 dark:bg-blue-950" + toast.info("📱 2FA Setup Ready", { + description: "Scan the QR code with your authenticator app to complete setup." }); } catch (error: any) { - toast({ - title: "Error", - description: error.response?.data?.message || "Failed to setup 2FA", - variant: "destructive" + toast.error("Error", { + description: error.response?.data?.message || "Failed to setup 2FA" }); } } @@ -207,10 +185,8 @@ const Account = () => { const handleVerify2FA = async () => { if (!verificationToken || verificationToken.length !== 6) { - toast({ - title: "Invalid token", - description: "Please enter a 6-digit code", - variant: "destructive" + toast.error("Invalid token", { + description: "Please enter a 6-digit code" }); return; } @@ -220,27 +196,21 @@ const Account = () => { setTwoFactorEnabled(true); setTwoFactorSetup(null); setVerificationToken(""); - toast({ - title: "🛡️ 2FA Enabled Successfully", - description: "Two-factor authentication is now active. Your account is more secure!", - className: "border-green-200 dark:border-green-800 bg-green-50 dark:bg-green-950" + toast.success("🛡️ 2FA Enabled Successfully", { + description: "Two-factor authentication is now active. Your account is more secure!" }); loadProfile(); } catch (error: any) { - toast({ - title: "Error", - description: error.response?.data?.message || "Invalid verification code", - variant: "destructive" + toast.error("Error", { + description: error.response?.data?.message || "Invalid verification code" }); } }; const copyBackupCode = (code: string) => { navigator.clipboard.writeText(code); - toast({ - title: "📋 Code Copied", - description: "Backup code has been copied to your clipboard.", - className: "border-blue-200 dark:border-blue-800 bg-blue-50 dark:bg-blue-950" + toast.success("📋 Code Copied", { + description: "Backup code has been copied to your clipboard." }); };
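
A note on the sonner migration in the last patch: the diff only rewrites the call sites. sonner's toast.success()/toast.error()/toast.warning()/toast.info() helpers render through a single <Toaster /> component mounted once near the app root, which this patch series does not show. Below is a minimal sketch of such a mount point, assuming a hypothetical AppRoot component; the richColors and position options are illustrative choices, not taken from this repository.

    import type { ReactNode } from "react";
    import { Toaster, toast } from "sonner";

    // Hypothetical app root: <Toaster /> must be mounted once so that the
    // toast.* calls made in components like Account.tsx have somewhere to render.
    export function AppRoot({ children }: { children: ReactNode }) {
      return (
        <>
          {/* richColors styles success/warning/error toasts automatically,
              standing in for the manual border/bg classNames removed above */}
          <Toaster richColors position="top-right" />
          {children}
        </>
      );
    }

    // Example call, mirroring the migrated call sites:
    export const demoToast = () =>
      toast.success("✅ Password Changed Successfully", {
        description: "Your password has been updated. Please login again with your new password.",
      });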