diff --git a/backend/Cargo.lock b/backend/Cargo.lock index d4ccb85c..ebf0c98f 100644 --- a/backend/Cargo.lock +++ b/backend/Cargo.lock @@ -482,6 +482,7 @@ dependencies = [ "bytes", "cookie", "futures-util", + "headers", "http", "http-body", "http-body-util", @@ -1884,6 +1885,30 @@ dependencies = [ "hashbrown 0.15.5", ] +[[package]] +name = "headers" +version = "0.4.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b3314d5adb5d94bcdf56771f2e50dbbc80bb4bdf88967526706205ac9eff24eb" +dependencies = [ + "base64 0.22.1", + "bytes", + "headers-core", + "http", + "httpdate", + "mime", + "sha1", +] + +[[package]] +name = "headers-core" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "54b4a22553d4242c49fddb9ba998a99962b5cc6f22cb5a3482bec22522403ce4" +dependencies = [ + "http", +] + [[package]] name = "heck" version = "0.5.0" diff --git a/backend/Cargo.toml b/backend/Cargo.toml index a85519bf..4b38dd81 100644 --- a/backend/Cargo.toml +++ b/backend/Cargo.toml @@ -131,7 +131,7 @@ argon2 = "0.5.3" password-hash = "0.5.0" lazy_static = "1.5.0" rand_core = "0.9.3" -axum-extra = {version = "0.10.3", features = ["cookie"]} +axum-extra = { version = "0.10.3", features = ["cookie", "typed-header"] } time = "0.3.44" tower-sessions-sqlx-store = { version = "0.15", features = ["postgres"] } secrecy = "0.10.3" diff --git a/backend/migrations/20251118225043_save-topology.sql b/backend/migrations/20251118225043_save-topology.sql new file mode 100644 index 00000000..c04a9e96 --- /dev/null +++ b/backend/migrations/20251118225043_save-topology.sql @@ -0,0 +1,36 @@ +CREATE TABLE topologies ( + id UUID PRIMARY KEY DEFAULT gen_random_uuid(), + network_id UUID NOT NULL REFERENCES networks(id) ON DELETE CASCADE, + name TEXT NOT NULL, + edges JSONB NOT NULL, + nodes JSONB NOT NULL, + options JSONB NOT NULL, + hosts JSONB NOT NULL, + subnets JSONB NOT NULL, + services JSONB NOT NULL, + groups JSONB NOT NULL, + is_stale 
BOOLEAN, + last_refreshed TIMESTAMPTZ NOT NULL DEFAULT NOW(), + is_locked BOOLEAN, + locked_at TIMESTAMPTZ, + locked_by UUID, + removed_hosts UUID[], + removed_services UUID[], + removed_subnets UUID[], + removed_groups UUID[], + parent_id UUID, + created_at TIMESTAMPTZ NOT NULL DEFAULT NOW(), + updated_at TIMESTAMPTZ NOT NULL DEFAULT NOW() +); + +CREATE INDEX IF NOT EXISTS idx_topologies_network ON topologies(network_id); + +-- Migration to change hosts.services from JSONB to UUID[] +-- Converts JSONB array to UUID array, handles NULL and non-array cases + +ALTER TABLE hosts + ALTER COLUMN services TYPE UUID[] + USING CASE + WHEN services IS NULL THEN NULL + ELSE translate(services::text, '[]"', '{}')::UUID[] + END; \ No newline at end of file diff --git a/backend/src/bin/server.rs b/backend/src/bin/server.rs index bdff4a23..c0b27a06 100644 --- a/backend/src/bin/server.rs +++ b/backend/src/bin/server.rs @@ -1,4 +1,4 @@ -use std::{sync::Arc, time::Duration}; +use std::{net::SocketAddr, sync::Arc, time::Duration}; use axum::{ Extension, Router, @@ -6,6 +6,7 @@ use axum::{ }; use clap::Parser; use netvisor::server::{ + auth::middleware::AuthenticatedEntity, billing::types::base::{BillingPlan, BillingRate, Price}, config::{AppState, CliArgs, ServerConfig}, organizations::r#impl::base::{Organization, OrganizationBase}, @@ -273,7 +274,12 @@ async fn main() -> anyhow::Result<()> { // Spawn server in background tokio::spawn(async move { - axum::serve(listener, app).await.unwrap(); + axum::serve( + listener, + app.into_make_service_with_connect_info::(), + ) + .await + .unwrap(); }); // Start cron for discovery scheduler @@ -312,17 +318,23 @@ async fn main() -> anyhow::Result<()> { // First load - populate user and org if all_users.is_empty() { let organization = organization_service - .create(Organization::new(OrganizationBase { - stripe_customer_id: None, - plan: None, - plan_status: None, - name: "My Organization".to_string(), - is_onboarded: false, - })) + .create( + 
Organization::new(OrganizationBase { + stripe_customer_id: None, + plan: None, + plan_status: None, + name: "My Organization".to_string(), + is_onboarded: false, + }), + AuthenticatedEntity::System, + ) .await?; user_service - .create_user(User::new(UserBase::new_seed(organization.id))) + .create( + User::new(UserBase::new_seed(organization.id)), + AuthenticatedEntity::System, + ) .await?; } else { tracing::debug!("Server already has data, skipping seed data"); diff --git a/backend/src/daemon/discovery/service/network.rs b/backend/src/daemon/discovery/service/network.rs index 9c9f476e..921d3f16 100644 --- a/backend/src/daemon/discovery/service/network.rs +++ b/backend/src/daemon/discovery/service/network.rs @@ -314,7 +314,7 @@ impl DiscoveryRunner { total_ips = %total_ips, scanned = %scanned, discovered = %successful_discoveries.len(), - "📊 Scan complete" + "Scan complete" ); Ok(successful_discoveries) @@ -356,10 +356,10 @@ impl DiscoveryRunner { Ok((open_ports, endpoint_responses)) => { if !open_ports.is_empty() || !endpoint_responses.is_empty() { tracing::info!( - "Processing host {} with {} open ports and {} endpoint responses", - ip, - open_ports.len(), - endpoint_responses.len() + ip = %ip, + open_port_count = %open_ports.len(), + endpoint_response_count = %endpoint_responses.len(), + "Processing host", ); // Check cancellation before processing diff --git a/backend/src/daemon/discovery/types/base.rs b/backend/src/daemon/discovery/types/base.rs index 0cd393e3..a513e007 100644 --- a/backend/src/daemon/discovery/types/base.rs +++ b/backend/src/daemon/discovery/types/base.rs @@ -4,7 +4,7 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use uuid::Uuid; -#[derive(Debug, Clone, Serialize, Deserialize, Copy)] +#[derive(Debug, Clone, Serialize, Deserialize, Copy, PartialEq, Eq, Hash)] pub enum DiscoveryPhase { Pending, // Initial state, set by server; all subsequent states until Finished are set by Daemon Starting, diff --git 
a/backend/src/daemon/utils/base.rs b/backend/src/daemon/utils/base.rs index 44276133..d3d6d9be 100644 --- a/backend/src/daemon/utils/base.rs +++ b/backend/src/daemon/utils/base.rs @@ -226,22 +226,20 @@ pub trait DaemonUtils { } else { // Use automatic tracing::info!( - "Using automatic concurrent_scans={} with port_batch={} per host \ - (FD limit: {}, available: {}, FDs per host: {})", - optimal_concurrent, - port_batch_bounded, - fd_limit, - available, - fds_per_host + concurrent_scans = %optimal_concurrent, + port_batch = %port_batch_bounded, + fd_limit = %fd_limit, + fd_available = %available, + fds_per_host = %fds_per_host, + "Using automatic concurrent_scans", ); optimal_concurrent }; - if result == 1 { + if result < 5 { tracing::warn!( - "Very low concurrency (1 host). File descriptor limit is {}. \ - Consider increasing for better performance.", - fd_limit + fd_limit = %fd_limit, + "Very low concurrency. Consider increasing for better performance.", ); } diff --git a/backend/src/server/api_keys/handlers.rs b/backend/src/server/api_keys/handlers.rs index e78b90b1..b80f9a79 100644 --- a/backend/src/server/api_keys/handlers.rs +++ b/backend/src/server/api_keys/handlers.rs @@ -3,7 +3,9 @@ use crate::server::{ auth::middleware::RequireMember, config::AppState, shared::{ - handlers::traits::{CrudHandlers, delete_handler, get_all_handler, get_by_id_handler}, + handlers::traits::{ + CrudHandlers, bulk_delete_handler, delete_handler, get_all_handler, get_by_id_handler, + }, services::traits::CrudService, types::api::{ApiError, ApiResponse, ApiResult}, }, @@ -24,6 +26,7 @@ pub fn create_router() -> Router> { .route("/{id}", put(update_handler)) .route("/{id}", delete(delete_handler::)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) } pub async fn create_handler( @@ -39,21 +42,17 @@ pub async fn create_handler( ); let service = ApiKey::get_service(&state); - let api_key = service.create(api_key).await.map_err(|e| { - 
tracing::error!( - error = %e, - user_id = %user.user_id, - "Failed to create API key" - ); - ApiError::internal_error(&e.to_string()) - })?; - - tracing::info!( - api_key_id = %api_key.id, - api_key_name = %api_key.base.name, - user_id = %user.user_id, - "API key created via API (key shown to user)" - ); + let api_key = service + .create(api_key, user.clone().into()) + .await + .map_err(|e| { + tracing::error!( + error = %e, + user_id = %user.user_id, + "Failed to create API key" + ); + ApiError::internal_error(&e.to_string()) + })?; Ok(Json(ApiResponse::success(ApiKeyResponse { key: api_key.base.key.clone(), @@ -66,28 +65,25 @@ pub async fn rotate_key_handler( RequireMember(user): RequireMember, Path(api_key_id): Path, ) -> ApiResult>> { - tracing::info!( + tracing::debug!( api_key_id = %api_key_id, user_id = %user.user_id, "API key rotation request received" ); let service = ApiKey::get_service(&state); - let key = service.rotate_key(api_key_id).await.map_err(|e| { - tracing::error!( - api_key_id = %api_key_id, - user_id = %user.user_id, - error = %e, - "Failed to rotate API key" - ); - ApiError::internal_error(&e.to_string()) - })?; - - tracing::info!( - api_key_id = %api_key_id, - user_id = %user.user_id, - "API key rotated via API (new key shown to user)" - ); + let key = service + .rotate_key(api_key_id, user.clone().into()) + .await + .map_err(|e| { + tracing::error!( + api_key_id = %api_key_id, + user_id = %user.user_id, + error = %e, + "Failed to rotate API key" + ); + ApiError::internal_error(&e.to_string()) + })?; Ok(Json(ApiResponse::success(key))) } @@ -131,22 +127,18 @@ pub async fn update_handler( // Preserve the key - don't allow it to be changed via update request.base.key = existing.base.key; - let updated = service.update(&mut request).await.map_err(|e| { - tracing::error!( - api_key_id = %id, - user_id = %user.user_id, - error = %e, - "Failed to update API key" - ); - ApiError::internal_error(&e.to_string()) - })?; - - tracing::info!( - 
api_key_id = %id, - api_key_name = %updated.base.name, - user_id = %user.user_id, - "API key updated via API" - ); + let updated = service + .update(&mut request, user.clone().into()) + .await + .map_err(|e| { + tracing::error!( + api_key_id = %id, + user_id = %user.user_id, + error = %e, + "Failed to update API key" + ); + ApiError::internal_error(&e.to_string()) + })?; Ok(Json(ApiResponse::success(updated))) } diff --git a/backend/src/server/api_keys/impl/base.rs b/backend/src/server/api_keys/impl/base.rs index 5eb805ea..0fa01868 100644 --- a/backend/src/server/api_keys/impl/base.rs +++ b/backend/src/server/api_keys/impl/base.rs @@ -4,7 +4,9 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize, Serializer}; use uuid::Uuid; -#[derive(Debug, Clone, Serialize, Deserialize)] +use crate::server::shared::entities::ChangeTriggersTopologyStaleness; + +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct ApiKeyBase { #[serde(serialize_with = "serialize_api_key_status")] pub key: String, @@ -22,7 +24,7 @@ where serializer.serialize_str("***REDACTED***") } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct ApiKey { pub id: Uuid, pub updated_at: DateTime, @@ -36,3 +38,9 @@ impl Display for ApiKey { write!(f, "{}: {}", self.base.name, self.id) } } + +impl ChangeTriggersTopologyStaleness for ApiKey { + fn triggers_staleness(&self, _other: Option) -> bool { + false + } +} diff --git a/backend/src/server/api_keys/service.rs b/backend/src/server/api_keys/service.rs index d6792067..d8fc7518 100644 --- a/backend/src/server/api_keys/service.rs +++ b/backend/src/server/api_keys/service.rs @@ -1,12 +1,19 @@ use anyhow::{Result, anyhow}; use async_trait::async_trait; +use chrono::Utc; use std::sync::Arc; use uuid::Uuid; use crate::server::{ api_keys::r#impl::base::{ApiKey, ApiKeyBase}, + auth::middleware::AuthenticatedEntity, shared::{ - services::traits::CrudService, 
+ entities::ChangeTriggersTopologyStaleness, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + services::traits::{CrudService, EventBusService}, storage::{ generic::GenericPostgresStorage, traits::{StorableEntity, Storage}, @@ -16,25 +23,29 @@ use crate::server::{ pub struct ApiKeyService { storage: Arc>, + event_bus: Arc, } -#[async_trait] -impl CrudService for ApiKeyService { - fn storage(&self) -> &Arc> { - &self.storage +impl EventBusService for ApiKeyService { + fn event_bus(&self) -> &Arc { + &self.event_bus } -} -impl ApiKeyService { - pub fn new(storage: Arc>) -> Self { - Self { storage } + fn get_network_id(&self, entity: &ApiKey) -> Option { + Some(entity.base.network_id) + } + fn get_organization_id(&self, _entity: &ApiKey) -> Option { + None } +} - pub fn generate_api_key(&self) -> String { - Uuid::new_v4().simple().to_string() +#[async_trait] +impl CrudService for ApiKeyService { + fn storage(&self) -> &Arc> { + &self.storage } - pub async fn create(&self, api_key: ApiKey) -> Result { + async fn create(&self, api_key: ApiKey, authentication: AuthenticatedEntity) -> Result { let key = self.generate_api_key(); tracing::debug!( @@ -53,18 +64,42 @@ impl ApiKeyService { }); let created = self.storage.create(&api_key).await?; - - tracing::info!( - api_key_id = %created.id, - api_key_name = %created.base.name, - network_id = %created.base.network_id, - "API key created" - ); + let trigger_stale = created.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_type: created.clone().into(), + entity_id: created.id(), + network_id: self.get_network_id(&created), + organization_id: self.get_organization_id(&created), + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; Ok(created) } +} + +impl ApiKeyService { + pub fn new(storage: Arc>, event_bus: Arc) -> Self { + Self 
{ storage, event_bus } + } - pub async fn rotate_key(&self, api_key_id: Uuid) -> Result { + pub fn generate_api_key(&self) -> String { + Uuid::new_v4().simple().to_string() + } + + pub async fn rotate_key( + &self, + api_key_id: Uuid, + authentication: AuthenticatedEntity, + ) -> Result { tracing::info!( api_key_id = %api_key_id, "Rotating API key" @@ -75,13 +110,7 @@ impl ApiKeyService { api_key.base.key = new_key.clone(); - self.update(&mut api_key).await?; - - tracing::info!( - api_key_id = %api_key_id, - api_key_name = %api_key.base.name, - "API key rotated successfully" - ); + let _updated = self.update(&mut api_key, authentication).await?; Ok(new_key) } else { diff --git a/backend/src/server/auth/handlers.rs b/backend/src/server/auth/handlers.rs index 72ca1d19..1b89ec68 100644 --- a/backend/src/server/auth/handlers.rs +++ b/backend/src/server/auth/handlers.rs @@ -5,8 +5,8 @@ use crate::server::{ ForgotPasswordRequest, LoginRequest, OidcAuthorizeParams, OidcCallbackParams, RegisterRequest, ResetPasswordRequest, UpdateEmailPasswordRequest, }, + middleware::AuthenticatedUser, oidc::OidcPendingAuth, - service::hash_password, }, config::AppState, organizations::handlers::process_pending_invite, @@ -18,11 +18,12 @@ use crate::server::{ }; use axum::{ Router, - extract::{Query, State}, + extract::{ConnectInfo, Query, State}, response::{Json, Redirect}, routing::{get, post}, }; -use std::sync::Arc; +use axum_extra::{TypedHeader, headers::UserAgent}; +use std::{net::SocketAddr, sync::Arc}; use tower_sessions::Session; use url::Url; use uuid::Uuid; @@ -44,6 +45,8 @@ pub fn create_router() -> Router> { async fn register( State(state): State>, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, session: Session, Json(request): Json, ) -> ApiResult>> { @@ -51,6 +54,9 @@ async fn register( return Err(ApiError::forbidden("User registration is disabled")); } + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); + let (org_id, permissions) = match 
process_pending_invite(&state, &session).await { Ok(Some((org_id, permissions))) => (Some(org_id), Some(permissions)), Ok(_) => (None, None), @@ -65,7 +71,7 @@ async fn register( let user = state .services .auth_service - .register(request, org_id, permissions) + .register(request, org_id, permissions, ip, user_agent) .await?; session @@ -78,10 +84,19 @@ async fn register( async fn login( State(state): State>, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, session: Session, Json(request): Json, ) -> ApiResult>> { - let user = state.services.auth_service.login(request).await?; + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); + + let user = state + .services + .auth_service + .login(request, ip, user_agent) + .await?; session .insert("user_id", user.id) @@ -91,7 +106,23 @@ async fn login( Ok(Json(ApiResponse::success(user))) } -async fn logout(session: Session) -> ApiResult>> { +async fn logout( + State(state): State>, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, + session: Session, +) -> ApiResult>> { + if let Ok(Some(user_id)) = session.get::("user_id").await { + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); + + state + .services + .auth_service + .logout(user_id, ip, user_agent) + .await?; + } + session .delete() .await @@ -123,6 +154,9 @@ async fn get_current_user( async fn update_password_auth( State(state): State>, session: Session, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, + auth_user: AuthenticatedUser, Json(request): Json, ) -> ApiResult>> { let user_id: Uuid = session @@ -131,34 +165,43 @@ async fn update_password_auth( .map_err(|e| ApiError::internal_error(&format!("Failed to read session: {}", e)))? .ok_or_else(|| ApiError::unauthorized("Not authenticated".to_string()))?; - let mut user = state - .services - .user_service - .get_by_id(&user_id) - .await? 
- .ok_or_else(|| ApiError::not_found("User not found".to_string()))?; - - if let Some(password) = request.password { - user.set_password(hash_password(&password)?); - } - - if let Some(email) = request.email { - user.base.email = email - } + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); - state.services.user_service.update(&mut user).await?; + let user = state + .services + .auth_service + .update_password( + user_id, + request.password, + request.email, + ip, + user_agent, + auth_user, + ) + .await?; Ok(Json(ApiResponse::success(user))) } async fn forgot_password( State(state): State>, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, Json(request): Json, ) -> ApiResult>> { + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); + state .services .auth_service - .initiate_password_reset(&request.email, state.config.public_url.clone()) + .initiate_password_reset( + &request.email, + state.config.public_url.clone(), + ip, + user_agent, + ) .await?; Ok(Json(ApiResponse::success(()))) @@ -166,13 +209,18 @@ async fn forgot_password( async fn reset_password( State(state): State>, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, session: Session, Json(request): Json, ) -> ApiResult>> { + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); + let user = state .services .auth_service - .complete_password_reset(&request.token, &request.password) + .complete_password_reset(&request.token, &request.password, ip, user_agent) .await?; session @@ -224,8 +272,13 @@ async fn oidc_authorize( async fn oidc_callback( State(state): State>, session: Session, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, Query(params): Query, ) -> Result { + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); + let oidc_service = match state.services.oidc_service.as_ref() { Some(service) => service, None => { @@ -298,7 +351,7 @@ async fn oidc_callback( })?; match oidc_service - 
.link_to_user(&user_id, ¶ms.code, pending_auth) + .link_to_user(&user_id, ¶ms.code, pending_auth, ip, user_agent) .await { Ok(_) => { @@ -335,7 +388,14 @@ async fn oidc_callback( }; match oidc_service - .login_or_register(¶ms.code, pending_auth, org_id, permissions) + .login_or_register( + ¶ms.code, + pending_auth, + org_id, + permissions, + ip, + user_agent, + ) .await { Ok(user) => { @@ -370,7 +430,12 @@ async fn oidc_callback( async fn unlink_oidc_account( State(state): State>, session: Session, + ConnectInfo(addr): ConnectInfo, + user_agent: Option>, ) -> ApiResult>> { + let ip = addr.ip(); + let user_agent = user_agent.map(|u| u.to_string()); + let oidc_service = state .services .oidc_service @@ -384,7 +449,7 @@ async fn unlink_oidc_account( .ok_or_else(|| ApiError::unauthorized("Not authenticated".to_string()))?; let updated_user = oidc_service - .unlink_from_user(&user_id) + .unlink_from_user(&user_id, ip, user_agent) .await .map_err(|e| ApiError::internal_error(&format!("Failed to unlink OIDC: {}", e)))?; diff --git a/backend/src/server/auth/middleware.rs b/backend/src/server/auth/middleware.rs index 71349163..5ec0a7cd 100644 --- a/backend/src/server/auth/middleware.rs +++ b/backend/src/server/auth/middleware.rs @@ -1,9 +1,11 @@ +use std::fmt::Display; + use crate::server::{ billing::types::base::BillingPlan, config::AppState, organizations::r#impl::base::Organization, shared::{services::traits::CrudService, storage::filter::EntityFilter, types::api::ApiError}, - users::r#impl::permissions::UserOrgPermissions, + users::r#impl::{base::User, permissions::UserOrgPermissions}, }; use axum::{ extract::FromRequestParts, @@ -11,6 +13,8 @@ use axum::{ response::{IntoResponse, Response}, }; use chrono::Utc; +use serde::Deserialize; +use serde::Serialize; use tower_sessions::Session; use uuid::Uuid; @@ -23,7 +27,7 @@ impl IntoResponse for AuthError { } /// Represents either an authenticated user or daemon -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Serialize, 
Deserialize, PartialEq, Eq)] pub enum AuthenticatedEntity { User { user_id: Uuid, @@ -31,7 +35,23 @@ pub enum AuthenticatedEntity { permissions: UserOrgPermissions, network_ids: Vec, }, - Daemon(Uuid), // network_id + Daemon { + network_id: Uuid, + api_key_id: Uuid, + }, // network_id + System, + Anonymous, +} + +impl Display for AuthenticatedEntity { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + AuthenticatedEntity::Anonymous => write!(f, "Anonymous"), + AuthenticatedEntity::System => write!(f, "System"), + AuthenticatedEntity::Daemon { .. } => write!(f, "Daemon"), + AuthenticatedEntity::User { .. } => write!(f, "User"), + } + } } impl AuthenticatedEntity { @@ -46,15 +66,25 @@ impl AuthenticatedEntity { pub fn entity_id(&self) -> String { match self { AuthenticatedEntity::User { user_id, .. } => user_id.to_string(), - AuthenticatedEntity::Daemon(network_id) => format!("Daemon for network {}", network_id), + AuthenticatedEntity::Daemon { + network_id, + api_key_id, + } => format!( + "Daemon for network {} using API key {}", + network_id, api_key_id + ), + AuthenticatedEntity::System => "System".to_string(), + AuthenticatedEntity::Anonymous => "Anonymous".to_string(), } } /// Get network_ids that daemon / user have access to pub fn network_ids(&self) -> Vec { match self { - AuthenticatedEntity::Daemon(id) => vec![*id], + AuthenticatedEntity::Daemon { network_id, .. } => vec![*network_id], AuthenticatedEntity::User { network_ids, .. } => network_ids.clone(), + AuthenticatedEntity::System => vec![], + AuthenticatedEntity::Anonymous => vec![], } } @@ -65,7 +95,18 @@ impl AuthenticatedEntity { /// Check if this is a daemon pub fn is_daemon(&self) -> bool { - matches!(self, AuthenticatedEntity::Daemon(_)) + matches!(self, AuthenticatedEntity::Daemon { .. 
}) + } +} + +impl From for AuthenticatedEntity { + fn from(value: User) -> Self { + AuthenticatedEntity::User { + user_id: value.id, + organization_id: value.base.organization_id, + permissions: value.base.permissions, + network_ids: vec![], + } } } @@ -94,7 +135,7 @@ where { let network_id = api_key.base.network_id; let service = app_state.services.api_key_service.clone(); - + let api_key_id = api_key.id; // Check expiration if let Some(expires_at) = api_key.base.expires_at && chrono::Utc::now() > expires_at @@ -102,7 +143,9 @@ where // Update enabled asynchronously (don't block auth) api_key.base.is_enabled = false; tokio::spawn(async move { - let _ = service.update(&mut api_key).await; + let _ = service + .update(&mut api_key, AuthenticatedEntity::System) + .await; }); return Err(AuthError(ApiError::unauthorized( "API key has expired".to_string(), @@ -118,10 +161,15 @@ where // Update last used asynchronously (don't block auth) api_key.base.last_used = Some(Utc::now()); tokio::spawn(async move { - let _ = service.update(&mut api_key).await; + let _ = service + .update(&mut api_key, AuthenticatedEntity::System) + .await; }); - return Ok(AuthenticatedEntity::Daemon(network_id)); + return Ok(AuthenticatedEntity::Daemon { + network_id, + api_key_id, + }); } // Invalid API key return Err(AuthError(ApiError::unauthorized( @@ -169,6 +217,7 @@ where } /// Extractor that only accepts authenticated users (rejects daemons) +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct AuthenticatedUser { pub user_id: Uuid, pub organization_id: Uuid, @@ -208,7 +257,7 @@ where permissions, network_ids, }), - AuthenticatedEntity::Daemon(_) => Err(AuthError(ApiError::unauthorized( + _ => Err(AuthError(ApiError::unauthorized( "User authentication required".to_string(), ))), } @@ -216,11 +265,18 @@ where } /// Extractor that only accepts authenticated daemons (rejects users) -pub struct AuthenticatedDaemon(pub Uuid); +#[derive(Debug, Clone, Serialize, Deserialize, 
PartialEq, Eq)] +pub struct AuthenticatedDaemon { + pub network_id: Uuid, + pub api_key_id: Uuid, +} impl From for AuthenticatedEntity { fn from(value: AuthenticatedDaemon) -> Self { - AuthenticatedEntity::Daemon(value.0) + AuthenticatedEntity::Daemon { + network_id: value.network_id, + api_key_id: value.api_key_id, + } } } @@ -234,8 +290,14 @@ where let entity = AuthenticatedEntity::from_request_parts(parts, state).await?; match entity { - AuthenticatedEntity::Daemon(network_id) => Ok(AuthenticatedDaemon(network_id)), - AuthenticatedEntity::User { .. } => Err(AuthError(ApiError::unauthorized( + AuthenticatedEntity::Daemon { + network_id, + api_key_id, + } => Ok(AuthenticatedDaemon { + network_id, + api_key_id, + }), + _ => Err(AuthError(ApiError::unauthorized( "Daemon authentication required".to_string(), ))), } @@ -270,13 +332,16 @@ where entity: user.into(), }) } - AuthenticatedEntity::Daemon(network_id) => { + AuthenticatedEntity::Daemon { network_id, .. } => { // Daemons only have access to their single network Ok(MemberOrDaemon { network_ids: vec![network_id], entity, }) } + _ => Err(AuthError(ApiError::forbidden( + "Member or Daemon permission required", + ))), } } } diff --git a/backend/src/server/auth/oidc.rs b/backend/src/server/auth/oidc.rs index 84ed3b43..0a5a1bb4 100644 --- a/backend/src/server/auth/oidc.rs +++ b/backend/src/server/auth/oidc.rs @@ -1,4 +1,5 @@ use anyhow::{Error, Result, anyhow}; +use chrono::Utc; use email_address::EmailAddress; use openidconnect::{ AuthenticationFlow, AuthorizationCode, ClientId, ClientSecret, CsrfToken, IssuerUrl, Nonce, @@ -7,12 +8,22 @@ use openidconnect::{ reqwest::Client as ReqwestClient, }; use serde::{Deserialize, Serialize}; -use std::{str::FromStr, sync::Arc}; +use std::{net::IpAddr, str::FromStr, sync::Arc}; use uuid::Uuid; use crate::server::{ - auth::service::AuthService, - users::r#impl::{base::User, permissions::UserOrgPermissions}, + auth::{middleware::AuthenticatedEntity, service::AuthService}, + 
shared::{ + events::{ + bus::EventBus, + types::{AuthEvent, AuthOperation}, + }, + services::traits::CrudService, + }, + users::{ + r#impl::{base::User, permissions::UserOrgPermissions}, + service::UserService, + }, }; #[derive(Debug, Serialize, Deserialize)] @@ -32,31 +43,19 @@ pub struct OidcPendingAuth { #[derive(Clone)] pub struct OidcService { - issuer_url: String, - client_id: String, - client_secret: String, - redirect_url: String, - provider_name: String, - auth_service: Arc, + pub issuer_url: String, + pub client_id: String, + pub client_secret: String, + pub redirect_url: String, + pub provider_name: String, + pub auth_service: Arc, + pub user_service: Arc, + pub event_bus: Arc, } impl OidcService { - pub fn new( - issuer_url: String, - client_id: String, - client_secret: String, - redirect_url: String, - provider_name: String, - auth_service: Arc, - ) -> Self { - Self { - issuer_url, - client_id, - client_secret, - redirect_url, - provider_name, - auth_service, - } + pub fn new(params: OidcService) -> Self { + params } /// Generate authorization URL for user to visit @@ -155,6 +154,8 @@ impl OidcService { user_id: &Uuid, code: &str, pending_auth: OidcPendingAuth, + ip: IpAddr, + user_agent: Option, ) -> Result { let user_info = self.exchange_code(code, pending_auth).await?; @@ -174,11 +175,36 @@ impl OidcService { return Ok(existing_user); } - // Link OIDC to current user - self.auth_service + let mut user = self .user_service - .link_oidc(user_id, user_info.subject, self.provider_name.clone()) - .await + .get_by_id(user_id) + .await? 
+ .ok_or_else(|| anyhow::anyhow!("User not found"))?; + + user.base.oidc_provider = Some(self.provider_name.clone()); + user.base.oidc_subject = Some(user_info.subject); + user.base.oidc_linked_at = Some(chrono::Utc::now()); + + let authentication: AuthenticatedEntity = user.clone().into(); + + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::OidcLinked, + ip_address: ip, + user_agent, + metadata: serde_json::json!({ + "method": "oidc", + "provider": self.provider_name + }), + authentication: authentication.clone(), + }) + .await?; + + self.user_service.update(&mut user, authentication).await } /// Login or register user via OIDC @@ -188,16 +214,35 @@ impl OidcService { pending_auth: OidcPendingAuth, org_id: Option, permissions: Option, + ip: IpAddr, + user_agent: Option, ) -> Result { let user_info = self.exchange_code(code, pending_auth).await?; - // Check if user exists with this OIDC account + // Check if user exists with this OIDC account, login if so if let Some(user) = self .auth_service .user_service .get_user_by_oidc(&user_info.subject) .await? 
{ + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::LoginSuccess, + ip_address: ip, + user_agent, + metadata: serde_json::json!({ + "method": "oidc", + "provider": self.provider_name + }), + authentication: user.clone().into(), + }) + .await?; + return Ok(user); } @@ -212,20 +257,83 @@ impl OidcService { Ok::(EmailAddress::new_unchecked(fallback_email_str)) })?; - // Register new user via OIDC - self.auth_service - .register_with_oidc( + // Register new user + let user = self + .auth_service + .provision_user( email, - user_info.subject, - self.provider_name.clone(), + None, + Some(user_info.subject), + Some(self.provider_name.clone()), org_id, permissions, ) - .await + .await?; + + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::Register, + ip_address: ip, + user_agent, + metadata: serde_json::json!({ + "method": "oidc", + "provider": self.provider_name + }), + authentication: user.clone().into(), + }) + .await?; + + Ok(user) } /// Unlink OIDC from user - pub async fn unlink_from_user(&self, user_id: &Uuid) -> Result { - self.auth_service.user_service.unlink_oidc(user_id).await + pub async fn unlink_from_user( + &self, + user_id: &Uuid, + ip: IpAddr, + user_agent: Option, + ) -> Result { + let mut user = self + .user_service + .get_by_id(user_id) + .await? + .ok_or_else(|| anyhow::anyhow!("User not found"))?; + + // Require password before unlinking + if user.base.password_hash.is_none() { + return Err(anyhow::anyhow!( + "Cannot unlink OIDC - no password set. Set a password first." 
+ )); + } + + user.base.oidc_provider = None; + user.base.oidc_subject = None; + user.base.oidc_linked_at = None; + user.updated_at = chrono::Utc::now(); + + let authentication: AuthenticatedEntity = user.clone().into(); + + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::OidcUnlinked, + ip_address: ip, + user_agent, + metadata: serde_json::json!({ + "method": "oidc", + "provider": self.provider_name + }), + authentication: authentication.clone(), + }) + .await?; + + self.user_service.update(&mut user, authentication).await } } diff --git a/backend/src/server/auth/service.rs b/backend/src/server/auth/service.rs index edcfe786..2bc1ce2b 100644 --- a/backend/src/server/auth/service.rs +++ b/backend/src/server/auth/service.rs @@ -1,16 +1,26 @@ use crate::server::{ - auth::r#impl::api::{LoginRequest, RegisterRequest}, + auth::{ + r#impl::api::{LoginRequest, RegisterRequest}, + middleware::{AuthenticatedEntity, AuthenticatedUser}, + }, email::service::EmailService, organizations::{ r#impl::base::{Organization, OrganizationBase}, service::OrganizationService, }, shared::{ + events::{ + bus::EventBus, + types::{AuthEvent, AuthOperation}, + }, services::traits::CrudService, storage::{filter::EntityFilter, traits::StorableEntity}, }, users::{ - r#impl::{base::User, permissions::UserOrgPermissions}, + r#impl::{ + base::{User, UserBase}, + permissions::UserOrgPermissions, + }, service::UserService, }, }; @@ -19,8 +29,9 @@ use argon2::{ Argon2, password_hash::{PasswordHash, PasswordHasher, PasswordVerifier, SaltString, rand_core::OsRng}, }; +use chrono::Utc; use email_address::EmailAddress; -use std::{collections::HashMap, sync::Arc, time::Instant}; +use std::{collections::HashMap, net::IpAddr, sync::Arc, time::Instant}; use tokio::sync::RwLock; use uuid::Uuid; use validator::Validate; @@ -31,6 +42,7 @@ pub struct AuthService { 
email_service: Option>, login_attempts: Arc>>, password_reset_tokens: Arc>>, + event_bus: Arc, } impl AuthService { @@ -41,6 +53,7 @@ impl AuthService { user_service: Arc, organization_service: Arc, email_service: Option>, + event_bus: Arc, ) -> Self { Self { user_service, @@ -48,6 +61,7 @@ impl AuthService { email_service, login_attempts: Arc::new(RwLock::new(HashMap::new())), password_reset_tokens: Arc::new(RwLock::new(HashMap::new())), + event_bus, } } @@ -57,6 +71,8 @@ impl AuthService { request: RegisterRequest, org_id: Option, permissions: Option, + ip: IpAddr, + user_agent: Option, ) -> Result { request .validate() @@ -73,40 +89,38 @@ impl AuthService { } // Provision user with password - self.provision_user( - request.email, - Some(hash_password(&request.password)?), - None, - None, - org_id, - permissions, - ) - .await - } + let user = self + .provision_user( + request.email, + Some(hash_password(&request.password)?), + None, + None, + org_id, + permissions, + ) + .await?; - /// Register a new user with OIDC - pub async fn register_with_oidc( - &self, - email: EmailAddress, - oidc_subject: String, - oidc_provider: String, - org_id: Option, - permissions: Option, - ) -> Result { - // Provision user with OIDC - self.provision_user( - email, - None, - Some(oidc_subject), - Some(oidc_provider), - org_id, - permissions, - ) - .await + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::Register, + ip_address: ip, + user_agent, + metadata: serde_json::json!({ + "method": "password", + }), + authentication: user.clone().into(), + }) + .await?; + + Ok(user) } /// Core user provisioning logic - handles both password and OIDC registration - async fn provision_user( + pub async fn provision_user( &self, email: EmailAddress, password_hash: Option, @@ -141,22 +155,27 @@ impl AuthService { seed_user.base.oidc_linked_at = 
Some(chrono::Utc::now()); } - self.user_service.update(&mut seed_user).await + self.user_service + .update(&mut seed_user, AuthenticatedEntity::System) + .await } else { // If being invited, use provied org ID, otherwise create a new one - let org_id = if let Some(org_id) = org_id { + let organization_id = if let Some(org_id) = org_id { org_id } else { // Create new organization for this user let organization = self .organization_service - .create(Organization::new(OrganizationBase { - stripe_customer_id: None, - name: "My Organization".to_string(), - plan: None, - plan_status: None, - is_onboarded: false, - })) + .create( + Organization::new(OrganizationBase { + stripe_customer_id: None, + name: "My Organization".to_string(), + plan: None, + plan_status: None, + is_onboarded: false, + }), + AuthenticatedEntity::System, + ) .await?; organization.id }; @@ -167,11 +186,28 @@ impl AuthService { // Create user based on auth method if let Some(hash) = password_hash { self.user_service - .create_user_with_password(email, hash, org_id, permissions) + .create( + User::new(UserBase::new_password( + email, + hash, + organization_id, + permissions, + )), + AuthenticatedEntity::System, + ) .await - } else if let Some(subject) = oidc_subject { + } else if let Some(oidc_subject) = oidc_subject { self.user_service - .create_user_with_oidc(email, subject, oidc_provider, org_id, permissions) + .create( + User::new(UserBase::new_oidc( + email, + oidc_subject, + oidc_provider, + organization_id, + permissions, + )), + AuthenticatedEntity::System, + ) .await } else { Err(anyhow!("Must provide either password or OIDC credentials")) @@ -180,7 +216,12 @@ impl AuthService { } /// Login with username and password - pub async fn login(&self, request: LoginRequest) -> Result { + pub async fn login( + &self, + request: LoginRequest, + ip: IpAddr, + user_agent: Option, + ) -> Result { request .validate() .map_err(|e| anyhow!("Validation failed: {}", e))?; @@ -196,11 +237,45 @@ impl AuthService 
{ Ok(user) => { // Success - clear attempts self.login_attempts.write().await.remove(&request.email); - tracing::info!("User {} logged in successfully", user.id); + + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::LoginSuccess, + ip_address: ip, + user_agent, + metadata: serde_json::json!({ + "method": "password", + }), + authentication: user.clone().into(), + }) + .await?; + Ok(user) } Err(e) => { // Failure - increment attempts + + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: None, + organization_id: None, + timestamp: Utc::now(), + operation: AuthOperation::LoginFailed, + ip_address: ip, + user_agent, + metadata: serde_json::json!({ + "method": "password", + "email": request.email + }), + authentication: AuthenticatedEntity::Anonymous, + }) + .await?; + let mut attempts = self.login_attempts.write().await; let entry = attempts .entry(request.email.clone()) @@ -237,6 +312,7 @@ impl AuthService { .user_service .get_all(EntityFilter::unfiltered()) .await?; + let user = all_users .iter() .find(|u| u.base.email == request.email) @@ -255,8 +331,56 @@ impl AuthService { Ok(user.clone()) } + pub async fn update_password( + &self, + user_id: Uuid, + password: Option, + email: Option, + ip: IpAddr, + user_agent: Option, + authentication: AuthenticatedUser, + ) -> Result { + let mut user = self + .user_service + .get_by_id(&user_id) + .await? 
+ .ok_or_else(|| anyhow::anyhow!("User not found".to_string()))?; + + if let Some(password) = password { + user.set_password(hash_password(&password)?); + } + + if let Some(email) = email { + user.base.email = email + } + + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::PasswordChanged, + ip_address: ip, + user_agent, + metadata: serde_json::json!({}), + authentication: authentication.clone().into(), + }) + .await?; + + self.user_service + .update(&mut user, authentication.into()) + .await + } + /// Initiate password reset process - generates a token - pub async fn initiate_password_reset(&self, email: &EmailAddress, url: String) -> Result<()> { + pub async fn initiate_password_reset( + &self, + email: &EmailAddress, + url: String, + ip: IpAddr, + user_agent: Option, + ) -> Result<()> { let email_service = self .email_service .as_ref() @@ -278,6 +402,20 @@ impl AuthService { } }; + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::PasswordResetRequested, + ip_address: ip, + user_agent, + metadata: serde_json::json!({}), + authentication: AuthenticatedEntity::Anonymous, + }) + .await?; + let token = Uuid::new_v4().to_string(); let mut tokens = self.password_reset_tokens.write().await; tokens.insert(token.clone(), (user.id, Instant::now())); @@ -297,7 +435,13 @@ impl AuthService { } /// Reset password using token - pub async fn complete_password_reset(&self, token: &str, new_password: &str) -> Result { + pub async fn complete_password_reset( + &self, + token: &str, + new_password: &str, + ip: IpAddr, + user_agent: Option, + ) -> Result { let mut tokens = self.password_reset_tokens.write().await; let (user_id, created_at) = tokens .remove(token) @@ -315,14 +459,55 @@ impl 
AuthService { .await? .ok_or_else(|| anyhow!("User not found"))?; + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::PasswordResetCompleted, + ip_address: ip, + user_agent, + metadata: serde_json::json!({}), + authentication: user.clone().into(), + }) + .await?; + // Update password let hashed_password = hash_password(new_password)?; user.set_password(hashed_password); - self.user_service.update(&mut user).await?; + self.user_service + .update(&mut user, AuthenticatedEntity::System) + .await?; Ok(user.clone()) } + pub async fn logout( + &self, + user_id: Uuid, + ip: IpAddr, + user_agent: Option, + ) -> Result<()> { + if let Ok(Some(user)) = self.user_service.get_by_id(&user_id).await { + self.event_bus + .publish_auth(AuthEvent { + id: Uuid::new_v4(), + user_id: Some(user.id), + organization_id: Some(user.base.organization_id), + timestamp: Utc::now(), + operation: AuthOperation::LoggedOut, + ip_address: ip, + user_agent, + metadata: serde_json::json!({}), + authentication: user.into(), + }) + .await?; + } + + Ok(()) + } + /// Cleanup old login attempts (called periodically from background task) pub async fn cleanup_old_login_attempts(&self) { let mut attempts = self.login_attempts.write().await; diff --git a/backend/src/server/billing/handlers.rs b/backend/src/server/billing/handlers.rs index c9e89b7f..525c8ec0 100644 --- a/backend/src/server/billing/handlers.rs +++ b/backend/src/server/billing/handlers.rs @@ -51,7 +51,13 @@ async fn create_checkout_session( } let session = billing_service - .create_checkout_session(user.organization_id, request.plan, success_url, cancel_url) + .create_checkout_session( + user.organization_id, + request.plan, + success_url, + cancel_url, + user.into(), + ) .await?; Ok(Json(ApiResponse::success(session.url.unwrap()))) diff --git a/backend/src/server/billing/service.rs 
b/backend/src/server/billing/service.rs index 15b6f12c..1bb47208 100644 --- a/backend/src/server/billing/service.rs +++ b/backend/src/server/billing/service.rs @@ -1,3 +1,4 @@ +use crate::server::auth::middleware::AuthenticatedEntity; use crate::server::billing::types::base::BillingPlan; use crate::server::networks::service::NetworkService; use crate::server::organizations::service::OrganizationService; @@ -157,9 +158,12 @@ impl BillingService { plan: BillingPlan, success_url: String, cancel_url: String, + authentication: AuthenticatedEntity, ) -> Result { // Get or create Stripe customer - let customer_id = self.get_or_create_customer(organization_id).await?; + let customer_id = self + .get_or_create_customer(organization_id, authentication) + .await?; tracing::info!( organization_id = %organization_id, @@ -227,7 +231,11 @@ impl BillingService { } /// Get existing customer or create new one - async fn get_or_create_customer(&self, organization_id: Uuid) -> Result { + async fn get_or_create_customer( + &self, + organization_id: Uuid, + authentication: AuthenticatedEntity, + ) -> Result { // Check if org already has stripe_customer_id let mut organization = self .organization_service @@ -264,7 +272,9 @@ impl BillingService { organization.base.stripe_customer_id = Some(customer.id.to_string()); - self.organization_service.update(&mut organization).await?; + self.organization_service + .update(&mut organization, authentication) + .await?; Ok(customer.id) } @@ -356,7 +366,9 @@ impl BillingService { for network in networks { if !keep_ids.contains(&network.id) { - self.network_service.delete(&network.id).await?; + self.network_service + .delete(&network.id, AuthenticatedEntity::System) + .await?; tracing::info!( organization_id = %org_id, network_id = %network.id, @@ -375,7 +387,9 @@ impl BillingService { for user in &mut users { if user.base.permissions != UserOrgPermissions::Owner { user.base.permissions = UserOrgPermissions::None; - 
self.user_service.update(user).await?; + self.user_service + .update(user, AuthenticatedEntity::System) + .await?; } } } @@ -387,7 +401,9 @@ impl BillingService { for user in &mut users { if user.base.permissions != UserOrgPermissions::Owner { user.base.permissions = UserOrgPermissions::Visualizer; - self.user_service.update(user).await?; + self.user_service + .update(user, AuthenticatedEntity::System) + .await?; } } } @@ -395,10 +411,12 @@ impl BillingService { BillingPlan::Community { .. } => {} } - organization.base.plan_status = Some(sub.status); + organization.base.plan_status = Some(sub.status.to_string()); organization.base.plan = Some(plan); - self.organization_service.update(&mut organization).await?; + self.organization_service + .update(&mut organization, AuthenticatedEntity::System) + .await?; tracing::info!( "Updated organization {} subscription status to {}", @@ -425,9 +443,11 @@ impl BillingService { .revoke_org_invites(&organization.id) .await?; - organization.base.plan_status = Some(SubscriptionStatus::Canceled); + organization.base.plan_status = Some(SubscriptionStatus::Canceled.to_string()); - self.organization_service.update(&mut organization).await?; + self.organization_service + .update(&mut organization, AuthenticatedEntity::System) + .await?; tracing::info!( organization_id = %org_id, diff --git a/backend/src/server/billing/types/base.rs b/backend/src/server/billing/types/base.rs index e7f8022d..c18605d1 100644 --- a/backend/src/server/billing/types/base.rs +++ b/backend/src/server/billing/types/base.rs @@ -1,12 +1,21 @@ +use crate::server::shared::types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}; use serde::{Deserialize, Serialize}; use std::fmt::Display; +use std::hash::Hash; use stripe_product::price::CreatePriceRecurringInterval; use strum::{Display, EnumDiscriminants, EnumIter, IntoStaticStr}; -use crate::server::shared::types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}; - #[derive( - Debug, 
Clone, Copy, Serialize, Deserialize, Display, IntoStaticStr, EnumIter, EnumDiscriminants, + Debug, + Clone, + Copy, + Serialize, + Deserialize, + Display, + IntoStaticStr, + EnumIter, + EnumDiscriminants, + Eq, )] #[serde(tag = "type")] pub enum BillingPlan { @@ -22,6 +31,13 @@ impl PartialEq for BillingPlan { } } +impl Hash for BillingPlan { + fn hash(&self, state: &mut H) { + self.price().hash(state); + self.trial_days().hash(state); + } +} + impl Default for BillingPlan { fn default() -> Self { BillingPlan::Community { @@ -34,12 +50,19 @@ impl Default for BillingPlan { } } -#[derive(Debug, Clone, Serialize, Deserialize, Default, Copy)] +#[derive(Debug, Clone, Serialize, Deserialize, Default, Copy, Eq)] pub struct Price { pub cents: i64, pub rate: BillingRate, } +impl Hash for Price { + fn hash(&self, state: &mut H) { + self.cents.hash(state); + self.rate.hash(state); + } +} + impl PartialEq for Price { fn eq(&self, other: &Self) -> bool { self.cents == other.cents && self.rate == other.rate @@ -61,7 +84,7 @@ impl Display for Price { } } -#[derive(Debug, Clone, Serialize, Deserialize, Display, Default, Copy, PartialEq)] +#[derive(Debug, Clone, Serialize, Deserialize, Display, Default, Copy, PartialEq, Eq, Hash)] pub enum BillingRate { #[default] Month, diff --git a/backend/src/server/daemons/handlers.rs b/backend/src/server/daemons/handlers.rs index 72e814be..f25f5dd3 100644 --- a/backend/src/server/daemons/handlers.rs +++ b/backend/src/server/daemons/handlers.rs @@ -1,5 +1,5 @@ use crate::server::{ - auth::middleware::AuthenticatedDaemon, + auth::middleware::{AuthenticatedDaemon, AuthenticatedEntity}, config::AppState, daemons::r#impl::{ api::{ @@ -15,7 +15,8 @@ use crate::server::{ hosts::r#impl::base::{Host, HostBase}, shared::{ handlers::traits::{ - create_handler, delete_handler, get_all_handler, get_by_id_handler, update_handler, + bulk_delete_handler, create_handler, delete_handler, get_all_handler, + get_by_id_handler, update_handler, }, 
services::traits::CrudService, storage::traits::StorableEntity, @@ -39,6 +40,7 @@ pub fn create_router() -> Router> { .route("/{id}", put(update_handler::)) .route("/{id}", delete(delete_handler::)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) .route("/register", post(register_daemon)) .route("/{id}/heartbeat", post(receive_heartbeat)) .route("/{id}/update-capabilities", post(update_capabilities)) @@ -50,7 +52,7 @@ const DAILY_MIDNIGHT_CRON: &str = "0 0 0 * * *"; /// Register a new daemon async fn register_daemon( State(state): State>, - _daemon: AuthenticatedDaemon, + auth_daemon: AuthenticatedDaemon, Json(request): Json, ) -> ApiResult>> { let service = &state.services.daemon_service; @@ -63,7 +65,7 @@ async fn register_daemon( let (host, _) = state .services .host_service - .create_host_with_services(dummy_host, Vec::new()) + .create_host_with_services(dummy_host, Vec::new(), auth_daemon.clone().into()) .await?; let mut daemon = Daemon::new(DaemonBase { @@ -79,69 +81,82 @@ async fn register_daemon( daemon.id = request.daemon_id; let registered_daemon = service - .create(daemon) + .create(daemon, auth_daemon.into()) .await .map_err(|e| ApiError::internal_error(&format!("Failed to register daemon: {}", e)))?; let discovery_service = state.services.discovery_service.clone(); let self_report_discovery = discovery_service - .create_discovery(Discovery::new(DiscoveryBase { - run_type: RunType::Scheduled { - cron_schedule: DAILY_MIDNIGHT_CRON.to_string(), - last_run: None, - enabled: true, - }, - discovery_type: DiscoveryType::SelfReport { host_id: host.id }, - name: format!("Self Report @ {}", request.daemon_ip), - daemon_id: request.daemon_id, - network_id: request.network_id, - })) + .create_discovery( + Discovery::new(DiscoveryBase { + run_type: RunType::Scheduled { + cron_schedule: DAILY_MIDNIGHT_CRON.to_string(), + last_run: None, + enabled: true, + }, + discovery_type: DiscoveryType::SelfReport { host_id: 
host.id }, + name: format!("Self Report @ {}", request.daemon_ip), + daemon_id: request.daemon_id, + network_id: request.network_id, + }), + AuthenticatedEntity::System, + ) .await?; discovery_service - .start_session(self_report_discovery) + .start_session(self_report_discovery, AuthenticatedEntity::System) .await?; if request.capabilities.has_docker_socket { let docker_discovery = discovery_service - .create_discovery(Discovery::new(DiscoveryBase { + .create_discovery( + Discovery::new(DiscoveryBase { + run_type: RunType::Scheduled { + cron_schedule: DAILY_MIDNIGHT_CRON.to_string(), + last_run: None, + enabled: true, + }, + discovery_type: DiscoveryType::Docker { + host_id: host.id, + host_naming_fallback: HostNamingFallback::BestService, + }, + name: format!("Docker @ {}", request.daemon_ip), + daemon_id: request.daemon_id, + network_id: request.network_id, + }), + AuthenticatedEntity::System, + ) + .await?; + + discovery_service + .start_session(docker_discovery, AuthenticatedEntity::System) + .await?; + } + + let network_discovery = discovery_service + .create_discovery( + Discovery::new(DiscoveryBase { run_type: RunType::Scheduled { cron_schedule: DAILY_MIDNIGHT_CRON.to_string(), last_run: None, enabled: true, }, - discovery_type: DiscoveryType::Docker { - host_id: host.id, + discovery_type: DiscoveryType::Network { + subnet_ids: None, host_naming_fallback: HostNamingFallback::BestService, }, - name: format!("Docker @ {}", request.daemon_ip), + name: format!("Network Scan @ {}", request.daemon_ip), daemon_id: request.daemon_id, network_id: request.network_id, - })) - .await?; - - discovery_service.start_session(docker_discovery).await?; - } - - let network_discovery = discovery_service - .create_discovery(Discovery::new(DiscoveryBase { - run_type: RunType::Scheduled { - cron_schedule: DAILY_MIDNIGHT_CRON.to_string(), - last_run: None, - enabled: true, - }, - discovery_type: DiscoveryType::Network { - subnet_ids: None, - host_naming_fallback: 
HostNamingFallback::BestService, - }, - name: format!("Network Scan @ {}", request.daemon_ip), - daemon_id: request.daemon_id, - network_id: request.network_id, - })) + }), + AuthenticatedEntity::System, + ) .await?; - discovery_service.start_session(network_discovery).await?; + discovery_service + .start_session(network_discovery, AuthenticatedEntity::System) + .await?; Ok(Json(ApiResponse::success(DaemonRegistrationResponse { daemon: registered_daemon, @@ -151,7 +166,7 @@ async fn register_daemon( async fn update_capabilities( State(state): State>, - _daemon: AuthenticatedDaemon, + auth_daemon: AuthenticatedDaemon, Path(id): Path, Json(updated_capabilities): Json, ) -> ApiResult>> { @@ -170,7 +185,7 @@ async fn update_capabilities( daemon.base.capabilities = updated_capabilities; - service.update(&mut daemon).await?; + service.update(&mut daemon, auth_daemon.into()).await?; Ok(Json(ApiResponse::success(()))) } @@ -178,7 +193,7 @@ async fn update_capabilities( /// Receive heartbeat from daemon async fn receive_heartbeat( State(state): State>, - _daemon: AuthenticatedDaemon, + auth_daemon: AuthenticatedDaemon, Path(id): Path, ) -> ApiResult>> { let service = &state.services.daemon_service; @@ -192,7 +207,7 @@ async fn receive_heartbeat( daemon.base.last_seen = Utc::now(); service - .update(&mut daemon) + .update(&mut daemon, auth_daemon.into()) .await .map_err(|e| ApiError::internal_error(&format!("Failed to update heartbeat: {}", e)))?; @@ -201,7 +216,7 @@ async fn receive_heartbeat( async fn receive_work_request( State(state): State>, - _daemon: AuthenticatedDaemon, + auth_daemon: AuthenticatedDaemon, Path(id): Path, Json(daemon_id): Json, ) -> ApiResult, bool)>>> { @@ -216,7 +231,7 @@ async fn receive_work_request( daemon.base.last_seen = Utc::now(); service - .update(&mut daemon) + .update(&mut daemon, auth_daemon.clone().into()) .await .map_err(|e| ApiError::internal_error(&format!("Failed to update heartbeat: {}", e)))?; @@ -225,14 +240,23 @@ async fn 
receive_work_request( .discovery_service .get_sessions_for_daemon(&daemon_id) .await; - let cancel = state + let (cancel, session_id_to_cancel) = state .services .discovery_service .pull_cancellation_for_daemon(&daemon_id) .await; - Ok(Json(ApiResponse::success(( - sessions.first().cloned(), - cancel, - )))) + let next_session = sessions.first().cloned(); + + service + .receive_work_request( + daemon, + cancel, + session_id_to_cancel, + next_session.clone(), + auth_daemon.into(), + ) + .await?; + + Ok(Json(ApiResponse::success((next_session, cancel)))) } diff --git a/backend/src/server/daemons/impl/api.rs b/backend/src/server/daemons/impl/api.rs index c149ea1b..38dccec4 100644 --- a/backend/src/server/daemons/impl/api.rs +++ b/backend/src/server/daemons/impl/api.rs @@ -14,7 +14,7 @@ use serde::{Deserialize, Serialize}; use uuid::Uuid; /// Daemon registration request from daemon to server -#[derive(Debug, Clone, Serialize, Deserialize, Default)] +#[derive(Debug, Clone, Serialize, Deserialize, Default, PartialEq, Eq, Hash)] pub struct DaemonCapabilities { #[serde(default)] pub has_docker_socket: bool, @@ -73,7 +73,7 @@ pub struct DaemonDiscoveryResponse { } /// Progress update from daemon to server during discovery -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct DiscoveryUpdatePayload { pub session_id: Uuid, pub daemon_id: Uuid, diff --git a/backend/src/server/daemons/impl/base.rs b/backend/src/server/daemons/impl/base.rs index 72a1374e..507032c3 100644 --- a/backend/src/server/daemons/impl/base.rs +++ b/backend/src/server/daemons/impl/base.rs @@ -6,9 +6,11 @@ use serde::{Deserialize, Serialize}; use strum::Display; use uuid::Uuid; -use crate::server::daemons::r#impl::api::DaemonCapabilities; +use crate::server::{ + daemons::r#impl::api::DaemonCapabilities, shared::entities::ChangeTriggersTopologyStaleness, +}; -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, 
Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct DaemonBase { pub host_id: Uuid, pub network_id: Uuid, @@ -20,7 +22,7 @@ pub struct DaemonBase { pub mode: DaemonMode, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Daemon { pub id: Uuid, pub updated_at: DateTime, @@ -36,10 +38,16 @@ impl Display for Daemon { } #[derive( - Debug, Display, Copy, Clone, Serialize, Deserialize, Default, PartialEq, Eq, ValueEnum, + Debug, Display, Copy, Clone, Serialize, Deserialize, Default, PartialEq, Eq, ValueEnum, Hash, )] pub enum DaemonMode { #[default] Push, Pull, } + +impl ChangeTriggersTopologyStaleness for Daemon { + fn triggers_staleness(&self, _other: Option) -> bool { + false + } +} diff --git a/backend/src/server/daemons/service.rs b/backend/src/server/daemons/service.rs index ae38b304..e51ed59c 100644 --- a/backend/src/server/daemons/service.rs +++ b/backend/src/server/daemons/service.rs @@ -1,26 +1,47 @@ use crate::{ daemon::runtime::types::InitializeDaemonRequest, server::{ + auth::middleware::AuthenticatedEntity, daemons::r#impl::{ - api::{DaemonDiscoveryRequest, DaemonDiscoveryResponse}, + api::{DaemonDiscoveryRequest, DaemonDiscoveryResponse, DiscoveryUpdatePayload}, base::Daemon, }, hosts::r#impl::ports::PortBase, services::r#impl::endpoints::{ApplicationProtocol, Endpoint}, shared::{ - services::traits::CrudService, storage::generic::GenericPostgresStorage, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + services::traits::{CrudService, EventBusService}, + storage::generic::GenericPostgresStorage, types::api::ApiResponse, }, }, }; use anyhow::{Error, Result}; use async_trait::async_trait; +use chrono::Utc; use std::sync::Arc; use uuid::Uuid; pub struct DaemonService { daemon_storage: Arc>, client: reqwest::Client, + event_bus: Arc, +} + +impl EventBusService for DaemonService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn 
get_network_id(&self, entity: &Daemon) -> Option { + Some(entity.base.network_id) + } + fn get_organization_id(&self, _entity: &Daemon) -> Option { + None + } } #[async_trait] @@ -31,10 +52,14 @@ impl CrudService for DaemonService { } impl DaemonService { - pub fn new(daemon_storage: Arc>) -> Self { + pub fn new( + daemon_storage: Arc>, + event_bus: Arc, + ) -> Self { Self { daemon_storage, client: reqwest::Client::new(), + event_bus, } } @@ -43,6 +68,7 @@ impl DaemonService { &self, daemon_id: &Uuid, request: DaemonDiscoveryRequest, + authentication: AuthenticatedEntity, ) -> Result<(), Error> { let daemon = self .get_by_id(daemon_id) @@ -80,18 +106,32 @@ impl DaemonService { ); } - tracing::info!( - "Discovery request sent to daemon {} for session {}", - daemon.id, - request.session_id - ); + let daemon_ref = &daemon; + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: *daemon_id, + network_id: self.get_network_id(daemon_ref), + organization_id: self.get_organization_id(daemon_ref), + entity_type: daemon.into(), + operation: EntityOperation::DiscoveryStarted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "session_id": request.session_id + }), + authentication, + }) + .await?; + Ok(()) } pub async fn send_discovery_cancellation( &self, - daemon: &Daemon, + daemon: Daemon, session_id: Uuid, + authentication: AuthenticatedEntity, ) -> Result<(), anyhow::Error> { let endpoint = Endpoint { ip: Some(daemon.base.ip), @@ -115,6 +155,69 @@ impl DaemonService { ); } + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: daemon.id, + network_id: self.get_network_id(&daemon), + organization_id: self.get_organization_id(&daemon), + entity_type: daemon.into(), + operation: EntityOperation::DiscoveryCancelled, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "session_id": session_id + }), + authentication, + }) + .await?; + + Ok(()) + } + + pub async fn receive_work_request( + &self, + 
daemon: Daemon, + cancel: bool, + cancellation_session_id: Uuid, + next_session: Option, + authentication: AuthenticatedEntity, + ) -> Result<(), Error> { + if cancel { + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: daemon.id, + network_id: self.get_network_id(&daemon), + organization_id: self.get_organization_id(&daemon), + entity_type: daemon.clone().into(), + operation: EntityOperation::DiscoveryCancelled, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "session_id": cancellation_session_id + }), + authentication: authentication.clone(), + }) + .await?; + } + + if let Some(session) = next_session { + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: daemon.id, + network_id: self.get_network_id(&daemon), + organization_id: self.get_organization_id(&daemon), + entity_type: daemon.into(), + operation: EntityOperation::DiscoveryStarted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "session_id": session.session_id + }), + authentication, + }) + .await?; + } + Ok(()) } diff --git a/backend/src/server/discovery/handlers.rs b/backend/src/server/discovery/handlers.rs index 7e170de5..5a7eb17f 100644 --- a/backend/src/server/discovery/handlers.rs +++ b/backend/src/server/discovery/handlers.rs @@ -5,7 +5,8 @@ use crate::server::{ discovery::r#impl::{base::Discovery, types::RunType}, shared::{ handlers::traits::{ - create_handler, delete_handler, get_all_handler, get_by_id_handler, update_handler, + bulk_delete_handler, create_handler, delete_handler, get_all_handler, + get_by_id_handler, update_handler, }, services::traits::CrudService, types::api::{ApiError, ApiResponse, ApiResult}, @@ -32,6 +33,7 @@ pub fn create_router() -> Router> { .route("/", get(get_all_handler::)) .route("/{id}", put(update_handler::)) .route("/{id}", delete(delete_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) .route("/{id}", get(get_by_id_handler::)) .route("/start-session", 
post(start_session)) .route("/active-sessions", get(get_active_sessions)) @@ -59,7 +61,7 @@ async fn receive_discovery_update( /// Endpoint to start a discovery session async fn start_session( State(state): State>, - RequireMember(_user): RequireMember, + RequireMember(user): RequireMember, Json(discovery_id): Json, ) -> ApiResult>> { let mut discovery = state @@ -85,13 +87,13 @@ async fn start_session( let update = state .services .discovery_service - .start_session(discovery.clone()) + .start_session(discovery.clone(), user.clone().into()) .await?; state .services .discovery_service - .update_discovery(discovery) + .update_discovery(discovery, user.into()) .await?; Ok(Json(ApiResponse::success(update))) @@ -139,13 +141,13 @@ async fn get_active_sessions( /// Cancel an active discovery session async fn cancel_discovery( State(state): State>, - RequireMember(_user): RequireMember, + RequireMember(user): RequireMember, Path(session_id): Path, ) -> ApiResult>> { state .services .discovery_service - .cancel_session(session_id) + .cancel_session(session_id, user.into()) .await?; tracing::info!("Discovery session was {} cancelled", session_id); diff --git a/backend/src/server/discovery/impl/base.rs b/backend/src/server/discovery/impl/base.rs index 782e16cf..c540210e 100644 --- a/backend/src/server/discovery/impl/base.rs +++ b/backend/src/server/discovery/impl/base.rs @@ -4,9 +4,12 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use uuid::Uuid; -use crate::server::discovery::r#impl::types::{DiscoveryType, RunType}; +use crate::server::{ + discovery::r#impl::types::{DiscoveryType, RunType}, + shared::entities::ChangeTriggersTopologyStaleness, +}; -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Hash, PartialEq, Eq)] pub struct DiscoveryBase { pub discovery_type: DiscoveryType, pub run_type: RunType, @@ -15,7 +18,7 @@ pub struct DiscoveryBase { pub network_id: Uuid, } -#[derive(Debug, Clone, Serialize, 
Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Discovery { pub id: Uuid, pub created_at: DateTime, @@ -41,3 +44,9 @@ impl Display for Discovery { write!(f, "Discovery {}: {}", self.base.name, self.id) } } + +impl ChangeTriggersTopologyStaleness for Discovery { + fn triggers_staleness(&self, _other: Option) -> bool { + false + } +} diff --git a/backend/src/server/discovery/impl/types.rs b/backend/src/server/discovery/impl/types.rs index 342e5fb5..bb3d38e1 100644 --- a/backend/src/server/discovery/impl/types.rs +++ b/backend/src/server/discovery/impl/types.rs @@ -4,12 +4,10 @@ use serde::Serialize; use strum::{Display, EnumDiscriminants, EnumIter, IntoStaticStr}; use uuid::Uuid; +use crate::server::shared::entities::EntityDiscriminants; use crate::server::{ daemons::r#impl::api::DiscoveryUpdatePayload, - shared::{ - entities::Entity, - types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, - }, + shared::types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, }; #[derive( @@ -50,7 +48,7 @@ pub enum HostNamingFallback { BestService, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] #[serde(tag = "type")] pub enum RunType { Scheduled { @@ -74,11 +72,11 @@ impl HasId for DiscoveryType { impl EntityMetadataProvider for DiscoveryType { fn color(&self) -> &'static str { - Entity::Discovery.color() + EntityDiscriminants::Discovery.color() } fn icon(&self) -> &'static str { - Entity::Discovery.icon() + EntityDiscriminants::Discovery.icon() } } diff --git a/backend/src/server/discovery/service.rs b/backend/src/server/discovery/service.rs index 9424b7e8..3703d5ab 100644 --- a/backend/src/server/discovery/service.rs +++ b/backend/src/server/discovery/service.rs @@ -1,6 +1,10 @@ +use crate::server::auth::middleware::AuthenticatedEntity; use crate::server::daemons::r#impl::base::DaemonMode; use 
crate::server::discovery::r#impl::types::RunType; -use crate::server::shared::services::traits::CrudService; +use crate::server::shared::entities::ChangeTriggersTopologyStaleness; +use crate::server::shared::events::bus::EventBus; +use crate::server::shared::events::types::{EntityEvent, EntityOperation}; +use crate::server::shared::services::traits::{CrudService, EventBusService}; use crate::server::shared::storage::filter::EntityFilter; use crate::server::shared::storage::generic::GenericPostgresStorage; use crate::server::shared::storage::traits::{StorableEntity, Storage}; @@ -28,9 +32,23 @@ pub struct DiscoveryService { daemon_service: Arc, sessions: RwLock>, // session_id -> session state mapping daemon_sessions: RwLock>>, // daemon_id -> session_id mapping - daemon_pull_cancellations: RwLock>, // daemon_id -> boolean mapping for pull mode cancellations of current session on daemon + daemon_pull_cancellations: RwLock>, // daemon_id -> (boolean, session_id) mapping for pull mode cancellations of current session on daemon update_tx: broadcast::Sender, scheduler: Option>>, + event_bus: Arc, +} + +impl EventBusService for DiscoveryService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn get_network_id(&self, entity: &Discovery) -> Option { + Some(entity.base.network_id) + } + fn get_organization_id(&self, _entity: &Discovery) -> Option { + None + } } #[async_trait] @@ -44,6 +62,7 @@ impl DiscoveryService { pub async fn new( discovery_storage: Arc>, daemon_service: Arc, + event_bus: Arc, ) -> Result> { let (tx, _rx) = broadcast::channel(100); // Buffer 100 messages let scheduler = JobScheduler::new().await?; @@ -56,11 +75,59 @@ impl DiscoveryService { daemon_pull_cancellations: RwLock::new(HashMap::new()), update_tx: tx, scheduler: Some(Arc::new(RwLock::new(scheduler))), + event_bus, })) } + /// Expose stream to handler + pub fn subscribe(&self) -> broadcast::Receiver { + self.update_tx.subscribe() + } + + /// Get session state + pub async fn 
get_session(&self, session_id: &Uuid) -> Option { + self.sessions.read().await.get(session_id).cloned() + } + + /// Get session state + pub async fn get_all_sessions(&self, network_ids: &[Uuid]) -> Vec { + let all_sessions = self.sessions.read().await; + all_sessions + .values() + .filter(|v| network_ids.contains(&v.network_id)) + .cloned() + .collect() + } + + pub async fn get_sessions_for_daemon(&self, daemon_id: &Uuid) -> Vec { + let daemon_session_ids = self.daemon_sessions.read().await; + let session_ids = daemon_session_ids + .get(daemon_id) + .cloned() + .unwrap_or_default(); + + let all_sessions = self.sessions.read().await; + + all_sessions + .iter() + .filter(|(session_id, _)| session_ids.contains(session_id)) + .map(|(_, session)| session.clone()) + .collect() + } + + pub async fn pull_cancellation_for_daemon(&self, daemon_id: &Uuid) -> (bool, Uuid) { + let mut daemon_cancellation_ids = self.daemon_pull_cancellations.write().await; + daemon_cancellation_ids + .remove(daemon_id) + .unwrap_or((false, Uuid::nil())) + } + /// Create a new scheduled discovery - pub async fn create_discovery(self: &Arc, discovery: Discovery) -> Result { + pub async fn create_discovery( + self: &Arc, + discovery: Discovery, + authentication: AuthenticatedEntity, + ) -> Result { let mut created_discovery = if discovery.id == Uuid::nil() { self.discovery_storage .create(&Discovery::new(discovery.base)) @@ -89,11 +156,24 @@ impl DiscoveryService { return Ok(disabled_discovery); } - tracing::info!( - "Created discovery {}: {}", - created_discovery.base.name, - created_discovery.id - ); + let trigger_stale = created_discovery.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: created_discovery.id(), + network_id: self.get_network_id(&created_discovery), + organization_id: self.get_organization_id(&created_discovery), + entity_type: created_discovery.clone().into(), + operation: EntityOperation::Created, + timestamp: 
Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + Ok(created_discovery) } @@ -101,11 +181,15 @@ impl DiscoveryService { pub async fn update_discovery( self: &Arc, mut discovery: Discovery, + authentication: AuthenticatedEntity, ) -> Result { - discovery.updated_at = Utc::now(); + let current = self + .get_by_id(&discovery.id) + .await? + .ok_or_else(|| anyhow::anyhow!("Could not find discovery {}", discovery))?; // If it's a scheduled discovery, need to reschedule - if matches!(discovery.base.run_type, RunType::Scheduled { .. }) { + let updated = if matches!(discovery.base.run_type, RunType::Scheduled { .. }) { // Remove old schedule first if let Some(scheduler) = &self.scheduler { let _ = scheduler.write().await.remove(&discovery.id).await; @@ -125,26 +209,43 @@ impl DiscoveryService { disabled_discovery.id, e ); - - return Ok(disabled_discovery); } - tracing::info!( - "Updated and rescheduled discovery {}: {}", - updated.base.name, - updated.id - ); - Ok(updated) + updated } else { // For non-scheduled, just update let updated = self.discovery_storage.update(&mut discovery).await?; tracing::info!("Updated discovery {}: {}", updated.base.name, updated.id); - Ok(updated) - } + updated + }; + + let trigger_stale = updated.triggers_staleness(Some(current)); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: updated.id(), + network_id: self.get_network_id(&updated), + organization_id: self.get_organization_id(&updated), + entity_type: updated.clone().into(), + operation: EntityOperation::Updated, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + + Ok(updated) } /// Delete group - pub async fn delete_discovery(self: &Arc, id: &Uuid) -> Result<(), Error> { + pub async fn delete_discovery( + self: &Arc, + id: &Uuid, + authentication: AuthenticatedEntity, + ) -> Result<(), Error> { let 
discovery = self .get_by_id(id) .await? @@ -159,11 +260,24 @@ impl DiscoveryService { } self.discovery_storage.delete(id).await?; - tracing::info!( - "Deleted discovery {}: {}", - discovery.base.name, - discovery.id - ); + + let trigger_stale = discovery.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: discovery.id(), + network_id: self.get_network_id(&discovery), + organization_id: self.get_organization_id(&discovery), + entity_type: discovery.into(), + operation: EntityOperation::Deleted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; Ok(()) } @@ -254,7 +368,10 @@ impl DiscoveryService { Box::pin(async move { tracing::info!("Running scheduled discovery {}", &discovery.id); - match service.start_session(discovery.clone()).await { + match service + .start_session(discovery.clone(), AuthenticatedEntity::System) + .await + { Ok(_) => { // Update last_run if let RunType::Scheduled { @@ -285,51 +402,11 @@ impl DiscoveryService { Ok(job_id) } - /// Expose stream to handler - pub fn subscribe(&self) -> broadcast::Receiver { - self.update_tx.subscribe() - } - - /// Get session state - pub async fn get_session(&self, session_id: &Uuid) -> Option { - self.sessions.read().await.get(session_id).cloned() - } - - /// Get session state - pub async fn get_all_sessions(&self, network_ids: &[Uuid]) -> Vec { - let all_sessions = self.sessions.read().await; - all_sessions - .values() - .filter(|v| network_ids.contains(&v.network_id)) - .cloned() - .collect() - } - - pub async fn get_sessions_for_daemon(&self, daemon_id: &Uuid) -> Vec { - let daemon_session_ids = self.daemon_sessions.read().await; - let session_ids = daemon_session_ids - .get(daemon_id) - .cloned() - .unwrap_or_default(); - - let all_sessions = self.sessions.read().await; - - all_sessions - .iter() - .filter(|(session_id, _)| session_ids.contains(session_id)) - .map(|(_, 
session)| session.clone()) - .collect() - } - - pub async fn pull_cancellation_for_daemon(&self, daemon_id: &Uuid) -> bool { - let mut daemon_cancellation_ids = self.daemon_pull_cancellations.write().await; - daemon_cancellation_ids.remove(daemon_id).unwrap_or(false) - } - /// Create a new discovery session pub async fn start_session( &self, discovery: Discovery, + authentication: AuthenticatedEntity, ) -> Result { let session_id = Uuid::new_v4(); @@ -382,17 +459,13 @@ impl DiscoveryService { discovery_type: discovery.base.discovery_type, session_id, }, + authentication, ) .await?; } let _ = self.update_tx.send(session_payload.clone()); - tracing::info!( - "Created discovery session {} for daemon {}", - session_id, - discovery.base.daemon_id - ); Ok(session_payload) } @@ -408,11 +481,11 @@ impl DiscoveryService { let daemon_id = session.daemon_id; tracing::debug!( - "Updated session {}: {} ({}/{})", - update.session_id, - update.phase, - update.processed, - update.total_to_process + session_id = %update.session_id, + phase = %update.phase, + processed = %update.processed, + total_to_process = %update.total_to_process, + "Updated session", ); let _ = self.update_tx.send(update.clone()); @@ -452,11 +525,21 @@ impl DiscoveryService { e ); } else { - tracing::debug!( - "Created historical discovery record {} for session {}", - historical_discovery.id, - session.session_id - ); + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: historical_discovery.id(), + network_id: self.get_network_id(&historical_discovery), + organization_id: self.get_organization_id(&historical_discovery), + entity_type: historical_discovery.into(), + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "type": "historical" + }), + authentication: AuthenticatedEntity::System, + }) + .await?; } // Get next session info BEFORE trying to send request @@ -507,6 +590,7 @@ impl DiscoveryService { discovery_type, session_id, }, 
+ AuthenticatedEntity::System, ) .await?; } @@ -515,7 +599,11 @@ impl DiscoveryService { Ok(()) } - pub async fn cancel_session(&self, session_id: Uuid) -> Result<(), Error> { + pub async fn cancel_session( + &self, + session_id: Uuid, + authentication: AuthenticatedEntity, + ) -> Result<(), Error> { // Get the session let session = match self.get_session(&session_id).await { Some(session) => session, @@ -576,7 +664,7 @@ impl DiscoveryService { match daemon.base.mode { DaemonMode::Push => { self.daemon_service - .send_discovery_cancellation(&daemon, session_id) + .send_discovery_cancellation(daemon, session_id, authentication) .await .map_err(|e| { anyhow!( @@ -600,7 +688,7 @@ impl DiscoveryService { .write() .await .entry(daemon_id) - .insert_entry(true); + .insert_entry((true, session_id)); tracing::info!( "Marked session {} for cancellation on next pull by daemon {}", diff --git a/backend/src/server/groups/handlers.rs b/backend/src/server/groups/handlers.rs index 65cffc2f..99670429 100644 --- a/backend/src/server/groups/handlers.rs +++ b/backend/src/server/groups/handlers.rs @@ -4,7 +4,8 @@ use axum::routing::{delete, get, post, put}; use crate::server::config::AppState; use crate::server::groups::r#impl::base::Group; use crate::server::shared::handlers::traits::{ - create_handler, delete_handler, get_all_handler, get_by_id_handler, update_handler, + bulk_delete_handler, create_handler, delete_handler, get_all_handler, get_by_id_handler, + update_handler, }; use std::sync::Arc; @@ -15,4 +16,5 @@ pub fn create_router() -> Router> { .route("/{id}", put(update_handler::)) .route("/{id}", delete(delete_handler::)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) } diff --git a/backend/src/server/groups/impl/base.rs b/backend/src/server/groups/impl/base.rs index 79996a85..ae106f9e 100644 --- a/backend/src/server/groups/impl/base.rs +++ b/backend/src/server/groups/impl/base.rs @@ -1,5 +1,6 @@ use std::fmt::Display; +use 
crate::server::shared::entities::ChangeTriggersTopologyStaleness; use crate::server::shared::types::entities::EntitySource; use crate::server::topology::types::edges::EdgeStyle; use crate::server::{ @@ -10,7 +11,7 @@ use serde::{Deserialize, Serialize}; use uuid::Uuid; use validator::Validate; -#[derive(Debug, Clone, Serialize, Validate, Deserialize)] +#[derive(Debug, Clone, Serialize, Validate, Deserialize, PartialEq, Eq, Hash)] pub struct GroupBase { #[validate(length(min = 0, max = 100))] pub name: String, @@ -26,7 +27,7 @@ pub struct GroupBase { pub edge_style: EdgeStyle, } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Group { pub id: Uuid, pub created_at: DateTime, @@ -40,3 +41,22 @@ impl Display for Group { write!(f, "Group {}: {}", self.base.name, self.id) } } + +impl Group { + pub fn bindings(&self) -> Vec { + match &self.base.group_type { + GroupType::HubAndSpoke { service_bindings } => service_bindings.to_vec(), + GroupType::RequestPath { service_bindings } => service_bindings.to_vec(), + } + } +} + +impl ChangeTriggersTopologyStaleness for Group { + fn triggers_staleness(&self, other: Option) -> bool { + if let Some(other_group) = other { + self.bindings() != other_group.bindings() + } else { + true + } + } +} diff --git a/backend/src/server/groups/impl/types.rs b/backend/src/server/groups/impl/types.rs index f210b83b..e5617f2d 100644 --- a/backend/src/server/groups/impl/types.rs +++ b/backend/src/server/groups/impl/types.rs @@ -1,4 +1,4 @@ -use crate::server::shared::entities::Entity; +use crate::server::shared::entities::EntityDiscriminants; use crate::server::shared::types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}; use serde::{Deserialize, Serialize}; use strum_macros::{EnumDiscriminants, EnumIter, IntoStaticStr}; @@ -37,8 +37,8 @@ impl HasId for GroupTypeDiscriminants { impl EntityMetadataProvider for GroupTypeDiscriminants { fn color(&self) -> 
&'static str { match self { - GroupTypeDiscriminants::RequestPath => Entity::Group.color(), - GroupTypeDiscriminants::HubAndSpoke => Entity::Group.color(), + GroupTypeDiscriminants::RequestPath => EntityDiscriminants::Group.color(), + GroupTypeDiscriminants::HubAndSpoke => EntityDiscriminants::Group.color(), } } diff --git a/backend/src/server/groups/service.rs b/backend/src/server/groups/service.rs index dee4c31d..e2f4d532 100644 --- a/backend/src/server/groups/service.rs +++ b/backend/src/server/groups/service.rs @@ -1,13 +1,41 @@ use async_trait::async_trait; +use chrono::Utc; use std::sync::Arc; +use uuid::Uuid; use crate::server::{ + auth::middleware::AuthenticatedEntity, groups::r#impl::base::Group, - shared::{services::traits::CrudService, storage::generic::GenericPostgresStorage}, + shared::{ + entities::ChangeTriggersTopologyStaleness, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + services::traits::{CrudService, EventBusService}, + storage::{ + generic::GenericPostgresStorage, + traits::{StorableEntity, Storage}, + }, + }, }; pub struct GroupService { group_storage: Arc>, + event_bus: Arc, +} + +impl EventBusService for GroupService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn get_network_id(&self, entity: &Group) -> Option { + Some(entity.base.network_id) + } + fn get_organization_id(&self, _entity: &Group) -> Option { + None + } } #[async_trait] @@ -15,10 +43,48 @@ impl CrudService for GroupService { fn storage(&self) -> &Arc> { &self.group_storage } + + async fn update( + &self, + updates: &mut Group, + authentication: AuthenticatedEntity, + ) -> Result { + let current = self + .get_by_id(&updates.id) + .await? 
+ .ok_or_else(|| anyhow::anyhow!("Could not find group to update"))?; + + let updated = self.storage().update(updates).await?; + let trigger_stale = updated.triggers_staleness(Some(current)); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: updated.id(), + network_id: self.get_network_id(&updated), + organization_id: self.get_organization_id(&updated), + entity_type: updated.clone().into(), + operation: EntityOperation::Updated, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + + Ok(updated) + } } impl GroupService { - pub fn new(group_storage: Arc>) -> Self { - Self { group_storage } + pub fn new( + group_storage: Arc>, + event_bus: Arc, + ) -> Self { + Self { + group_storage, + event_bus, + } } } diff --git a/backend/src/server/hosts/handlers.rs b/backend/src/server/hosts/handlers.rs index ee606c75..cae682ec 100644 --- a/backend/src/server/hosts/handlers.rs +++ b/backend/src/server/hosts/handlers.rs @@ -1,5 +1,7 @@ use crate::server::auth::middleware::{MemberOrDaemon, RequireMember}; -use crate::server::shared::handlers::traits::{CrudHandlers, get_all_handler, get_by_id_handler}; +use crate::server::shared::handlers::traits::{ + CrudHandlers, bulk_delete_handler, get_all_handler, get_by_id_handler, +}; use crate::server::shared::services::traits::CrudService; use crate::server::shared::storage::filter::EntityFilter; use crate::server::shared::storage::traits::StorableEntity; @@ -17,7 +19,6 @@ use axum::{ routing::{post, put}, }; use futures::future::try_join_all; -use itertools::{Either, Itertools}; use std::sync::Arc; use uuid::Uuid; use validator::Validate; @@ -29,6 +30,7 @@ pub fn create_router() -> Router> { .route("/{id}", get(get_by_id_handler::)) .route("/", post(create_host)) .route("/{id}", put(update_host)) + .route("/bulk-delete", post(bulk_delete_handler::)) .route( "/{destination_host}/consolidate/{other_host}", 
put(consolidate_hosts), @@ -37,7 +39,7 @@ pub fn create_router() -> Router> { async fn create_host( State(state): State>, - MemberOrDaemon { .. }: MemberOrDaemon, + MemberOrDaemon { entity, .. }: MemberOrDaemon, Json(request): Json, ) -> ApiResult>> { let host_service = &state.services.host_service; @@ -55,7 +57,7 @@ async fn create_host( } let (host, services) = host_service - .create_host_with_services(request.host, request.services.unwrap_or_default()) + .create_host_with_services(request.host, request.services.unwrap_or_default(), entity) .await?; Ok(Json(ApiResponse::success(HostWithServicesRequest { @@ -66,7 +68,7 @@ async fn create_host( async fn update_host( State(state): State>, - RequireMember(_user): RequireMember, + RequireMember(user): RequireMember, Json(mut request): Json, ) -> ApiResult>> { let host_service = &state.services.host_service; @@ -74,34 +76,40 @@ async fn update_host( // If services is None, don't update services if let Some(services) = request.services { - let (create_futures, update_futures): (Vec<_>, Vec<_>) = - services.into_iter().partition_map(|s| { - if s.id == Uuid::nil() { - let service = Service::new(s.base); - Either::Left(service_service.create_service(service)) - } else { - Either::Right(service_service.update_service(s)) - } - }); - + let mut created_service_ids = Vec::new(); + let mut updated_service_ids = Vec::new(); + let mut create_futures = Vec::new(); + + for mut s in services { + let user = user.clone(); + if s.id == Uuid::nil() { + let service = Service::new(s.base); + create_futures.push(service_service.create(service, user.into())); + } else { + // Execute updates sequentially + let updated = service_service.update(&mut s, user.into()).await?; + updated_service_ids.push(updated.id); + } + } + + // Execute creates concurrently let created_services = try_join_all(create_futures).await?; - let updated_services = try_join_all(update_futures).await?; + created_service_ids.extend(created_services.iter().map(|s| s.id)); 
- request.host.base.services = created_services - .iter() - .chain(updated_services.iter()) - .map(|s| s.id) + request.host.base.services = created_service_ids + .into_iter() + .chain(updated_service_ids) .collect(); } - let updated_host = host_service.update_host(request.host).await?; + let updated_host = host_service.update(&mut request.host, user.into()).await?; Ok(Json(ApiResponse::success(updated_host))) } async fn consolidate_hosts( State(state): State>, - RequireMember(_user): RequireMember, + RequireMember(user): RequireMember, Path((destination_host_id, other_host_id)): Path<(Uuid, Uuid)>, ) -> ApiResult>> { let host_service = &state.services.host_service; @@ -126,7 +134,7 @@ async fn consolidate_hosts( })?; let updated_host = host_service - .consolidate_hosts(destination_host, other_host) + .consolidate_hosts(destination_host, other_host, user.into()) .await?; Ok(Json(ApiResponse::success(updated_host))) @@ -134,7 +142,7 @@ async fn consolidate_hosts( pub async fn delete_handler( State(state): State>, - RequireMember(_user): RequireMember, + RequireMember(user): RequireMember, Path(id): Path, ) -> ApiResult>> { let service = Host::get_service(&state); @@ -156,7 +164,7 @@ pub async fn delete_handler( .ok_or_else(|| ApiError::not_found(format!("Host '{}' not found", id)))?; service - .delete(&id) + .delete(&id, user.into()) .await .map_err(|e| ApiError::internal_error(&e.to_string()))?; diff --git a/backend/src/server/hosts/impl/base.rs b/backend/src/server/hosts/impl/base.rs index 6bee7fc0..d3593b0d 100644 --- a/backend/src/server/hosts/impl/base.rs +++ b/backend/src/server/hosts/impl/base.rs @@ -1,4 +1,5 @@ use crate::server::hosts::r#impl::virtualization::HostVirtualization; +use crate::server::shared::entities::ChangeTriggersTopologyStaleness; use crate::server::shared::types::api::deserialize_empty_string_as_none; use crate::server::shared::types::entities::EntitySource; use crate::server::subnets::r#impl::base::Subnet; @@ -169,3 +170,17 @@ impl Host { 
self.base.services.push(service_id); } } + +impl ChangeTriggersTopologyStaleness for Host { + fn triggers_staleness(&self, other: Option) -> bool { + if let Some(other_host) = other { + self.base.services != other_host.base.services + || self.base.hostname != other_host.base.hostname + || self.base.interfaces != other_host.base.interfaces + || self.base.virtualization != other_host.base.virtualization + || self.base.hidden != other_host.base.hidden + } else { + true + } + } +} diff --git a/backend/src/server/hosts/impl/ports.rs b/backend/src/server/hosts/impl/ports.rs index d2e62838..ec950ddd 100644 --- a/backend/src/server/hosts/impl/ports.rs +++ b/backend/src/server/hosts/impl/ports.rs @@ -6,10 +6,8 @@ use strum_macros::{Display, EnumDiscriminants, EnumIter, IntoStaticStr}; use uuid::Uuid; use validator::Validate; -use crate::server::shared::{ - entities::Entity, - types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, -}; +use crate::server::shared::entities::EntityDiscriminants; +use crate::server::shared::types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}; #[derive( Copy, @@ -472,10 +470,10 @@ impl HasId for PortBase { impl EntityMetadataProvider for PortBase { fn color(&self) -> &'static str { - Entity::Port.color() + EntityDiscriminants::Port.color() } fn icon(&self) -> &'static str { - Entity::Port.icon() + EntityDiscriminants::Port.icon() } } diff --git a/backend/src/server/hosts/impl/storage.rs b/backend/src/server/hosts/impl/storage.rs index 8fc5e608..eae64632 100644 --- a/backend/src/server/hosts/impl/storage.rs +++ b/backend/src/server/hosts/impl/storage.rs @@ -114,9 +114,6 @@ impl StorableEntity for Host { fn from_row(row: &PgRow) -> Result { // Parse JSON fields safely - let services: Vec = - serde_json::from_value(row.get::("services")) - .map_err(|e| anyhow::anyhow!("Failed to deserialize services: {}", e))?; let interfaces: Vec = serde_json::from_value(row.get::("interfaces")) .map_err(|e| 
anyhow::anyhow!("Failed to deserialize interfaces: {}", e))?; @@ -143,7 +140,7 @@ impl StorableEntity for Host { hostname: row.get("hostname"), target, hidden: row.get("hidden"), - services, + services: row.get("services"), ports, virtualization, interfaces, diff --git a/backend/src/server/hosts/impl/virtualization.rs b/backend/src/server/hosts/impl/virtualization.rs index 8b48c709..b1e7f27f 100644 --- a/backend/src/server/hosts/impl/virtualization.rs +++ b/backend/src/server/hosts/impl/virtualization.rs @@ -5,7 +5,7 @@ use uuid::Uuid; use validator::Validate; use crate::server::shared::{ - entities::Entity, + concepts::Concept, types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, }; @@ -30,10 +30,10 @@ impl HasId for HostVirtualization { impl EntityMetadataProvider for HostVirtualization { fn color(&self) -> &'static str { - Entity::Virtualization.color() + Concept::Virtualization.color() } fn icon(&self) -> &'static str { - Entity::Virtualization.icon() + Concept::Virtualization.icon() } } diff --git a/backend/src/server/hosts/mod.rs b/backend/src/server/hosts/mod.rs index d86b9b5e..4b18718d 100644 --- a/backend/src/server/hosts/mod.rs +++ b/backend/src/server/hosts/mod.rs @@ -1,5 +1,6 @@ pub mod handlers; pub mod r#impl; pub mod service; +pub mod subscriber; #[cfg(test)] pub mod tests; diff --git a/backend/src/server/hosts/service.rs b/backend/src/server/hosts/service.rs index 04f189fd..983c9308 100644 --- a/backend/src/server/hosts/service.rs +++ b/backend/src/server/hosts/service.rs @@ -1,17 +1,27 @@ use crate::server::{ + auth::middleware::AuthenticatedEntity, daemons::service::DaemonService, hosts::r#impl::base::Host, services::{r#impl::base::Service, service::ServiceService}, shared::{ - services::traits::CrudService, - storage::{filter::EntityFilter, generic::GenericPostgresStorage, traits::Storage}, + entities::ChangeTriggersTopologyStaleness, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + 
services::traits::{CrudService, EventBusService}, + storage::{ + filter::EntityFilter, + generic::GenericPostgresStorage, + traits::{StorableEntity, Storage}, + }, types::entities::{EntitySource, EntitySourceDiscriminants}, }, }; use anyhow::{Error, Result, anyhow}; use async_trait::async_trait; +use chrono::Utc; use futures::future::{join_all, try_join_all}; -use itertools::{Either, Itertools}; use std::{collections::HashMap, sync::Arc}; use strum::IntoDiscriminant; use tokio::sync::Mutex; @@ -22,6 +32,20 @@ pub struct HostService { service_service: Arc, daemon_service: Arc, host_locks: Arc>>>>, + event_bus: Arc, +} + +impl EventBusService for HostService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn get_network_id(&self, entity: &Host) -> Option { + Some(entity.base.network_id) + } + fn get_organization_id(&self, _entity: &Host) -> Option { + None + } } #[async_trait] @@ -29,6 +53,107 @@ impl CrudService for HostService { fn storage(&self) -> &Arc> { &self.storage } + + /// Create a new host + async fn create(&self, host: Host, authentication: AuthenticatedEntity) -> Result { + // Manually created and needs actual UUID + let host = if host.id == Uuid::nil() { + Host::new(host.base.clone()) + } else { + host + }; + + let lock = self.get_host_lock(&host.id).await; + let _guard = lock.lock().await; + + tracing::trace!("Creating host {:?}", host); + + let filter = EntityFilter::unfiltered().network_ids(&[host.base.network_id]); + let all_hosts = self.storage.get_all(filter).await?; + + let host_from_storage = match all_hosts.into_iter().find(|h| host.eq(h)) { + // If both are from discovery, or if they have the same ID, upsert data + Some(existing_host) + if (host.base.source.discriminant() == EntitySourceDiscriminants::Discovery + && existing_host.base.source.discriminant() + == EntitySourceDiscriminants::Discovery) + || host.id == existing_host.id => + { + tracing::warn!( + "Duplicate host for {}: {} found, {}: {} - upserting discovery data...", + 
host.base.name, + host.id, + existing_host.base.name, + existing_host.id + ); + + self.upsert_host(existing_host, host, authentication) + .await? + } + _ => { + let created = self.storage.create(&host).await?; + let trigger_stale = created.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: created.id(), + network_id: self.get_network_id(&created), + organization_id: self.get_organization_id(&created), + entity_type: created.into(), + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + + host + } + }; + + Ok(host_from_storage) + } + + async fn update( + &self, + updates: &mut Host, + authentication: AuthenticatedEntity, + ) -> Result { + let lock = self.get_host_lock(&updates.id).await; + let _guard = lock.lock().await; + + let current_host = self + .get_by_id(&updates.id) + .await? + .ok_or_else(|| anyhow!("Host '{}' not found", updates.id))?; + + self.update_host_services(¤t_host, updates, authentication.clone()) + .await?; + + let updated = self.storage.update(updates).await?; + let trigger_stale = updated.triggers_staleness(Some(current_host)); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: updated.id(), + network_id: self.get_network_id(&updated), + organization_id: self.get_organization_id(&updated), + entity_type: updated.clone().into(), + operation: EntityOperation::Updated, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + + Ok(updated) + } } impl HostService { @@ -36,12 +161,14 @@ impl HostService { storage: Arc>, service_service: Arc, daemon_service: Arc, + event_bus: Arc, ) -> Self { Self { storage, service_service, daemon_service, host_locks: Arc::new(Mutex::new(HashMap::new())), + event_bus, } } @@ -57,9 +184,10 @@ impl HostService { &self, host: Host, 
services: Vec, + authentication: AuthenticatedEntity, ) -> Result<(Host, Vec)> { // Create host first (handles duplicates via upsert_host) - let mut created_host = self.create_host(host.clone()).await?; + let mut created_host = self.create(host.clone(), authentication.clone()).await?; // Create services, handling case where created_host was upserted instead of created anew (ie during discovery), which means that host ID + interfaces/port IDs // are different from what's mapped to the service and they need to be updated @@ -72,7 +200,7 @@ impl HostService { let create_service_futures: Vec<_> = transferred_services .into_iter() - .map(|s| self.service_service.create_service(s)) + .map(|s| self.service_service.create(s, authentication.clone())) .collect(); let created_services = try_join_all(create_service_futures).await?; @@ -99,80 +227,20 @@ impl HostService { Ok((host_with_final_services, created_services)) } - /// Create a new host - pub async fn create_host(&self, host: Host) -> Result { - // Manually created and needs actual UUID - let host = if host.id == Uuid::nil() { - Host::new(host.base.clone()) - } else { - host - }; - - let lock = self.get_host_lock(&host.id).await; - let _guard = lock.lock().await; - - tracing::trace!("Creating host {:?}", host); - - let filter = EntityFilter::unfiltered().network_ids(&[host.base.network_id]); - let all_hosts = self.storage.get_all(filter).await?; - - let host_from_storage = match all_hosts.into_iter().find(|h| host.eq(h)) { - // If both are from discovery, or if they have the same ID, upsert data - Some(existing_host) - if (host.base.source.discriminant() == EntitySourceDiscriminants::Discovery - && existing_host.base.source.discriminant() - == EntitySourceDiscriminants::Discovery) - || host.id == existing_host.id => - { - tracing::warn!( - "Duplicate host for {}: {} found, {}: {} - upserting discovery data...", - host.base.name, - host.id, - existing_host.base.name, - existing_host.id - ); - - 
self.upsert_host(existing_host, host).await? - } - _ => { - self.storage.create(&host).await?; - tracing::info!("Created host {}: {}", host.base.name, host.id); - tracing::trace!("Result: {:?}", host); - host - } - }; - - Ok(host_from_storage) - } - - pub async fn update_host(&self, mut host: Host) -> Result { - let lock = self.get_host_lock(&host.id).await; - let _guard = lock.lock().await; - - tracing::trace!("Updating host {:?}", host); - - let current_host = self - .get_by_id(&host.id) - .await? - .ok_or_else(|| anyhow!("Host '{}' not found", host.id))?; - - self.update_host_services(¤t_host, &host).await?; - - self.storage.update(&mut host).await?; - - tracing::info!("Updated host {:?}: {:?}", host.base.name, host.id); - tracing::trace!("Result: {:?}", host); - - Ok(host) - } - /// Merge new discovery data with existing host - async fn upsert_host(&self, mut existing_host: Host, new_host_data: Host) -> Result { + async fn upsert_host( + &self, + mut existing_host: Host, + new_host_data: Host, + authentication: AuthenticatedEntity, + ) -> Result { let mut interface_updates = 0; let mut port_updates = 0; let mut hostname_update = false; let mut description_update = false; + let host_before_updates = existing_host.clone(); + tracing::trace!( "Upserting new host data {:?} to host {:?}", new_host_data, @@ -262,13 +330,23 @@ impl HostService { } if !data.is_empty() { - tracing::info!( - host_id = %existing_host.id, - host_name = %existing_host.base.name, - updates = %data.join(", "), - "Upserted discovery data to host" - ); - tracing::trace!("Result: {:?}", existing_host); + let trigger_stale = existing_host.triggers_staleness(Some(host_before_updates)); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: existing_host.id(), + network_id: self.get_network_id(&existing_host), + organization_id: self.get_organization_id(&existing_host), + entity_type: existing_host.clone().into(), + operation: EntityOperation::Updated, + timestamp: 
Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; } else { tracing::debug!( "No new data to upsert from host {} to {}", @@ -284,6 +362,7 @@ impl HostService { &self, destination_host: Host, other_host: Host, + authentication: AuthenticatedEntity, ) -> Result { if destination_host.id == other_host.id { return Err(anyhow!("Can't consolidate a host with itself")); @@ -313,12 +392,18 @@ impl HostService { if let Some(mut other_host_daemon) = other_host_daemon { other_host_daemon.base.host_id = destination_host.id; - self.daemon_service.update(&mut other_host_daemon).await?; + self.daemon_service + .update(&mut other_host_daemon, authentication.clone()) + .await?; } // Add bindings, interfaces, sources from old host to new let updated_host = self - .upsert_host(destination_host.clone(), other_host.clone()) + .upsert_host( + destination_host.clone(), + other_host.clone(), + authentication.clone(), + ) .await?; // Update host_id, network_id, and interface/port binding IDs to what's available on new host @@ -337,33 +422,49 @@ impl HostService { let prepped_for_transfer_services: Vec = join_all(service_transfer_futures).await; - let ((upsert_futures, delete_futures), update_futures): ((Vec<_>, Vec<_>), Vec<_>) = - prepped_for_transfer_services + // First, execute updates sequentially + for prepped_service in &prepped_for_transfer_services { + if !destination_host_services .iter() - .partition_map(|prepped_service| { - // If there's an existing service on the host, upsert the transferred service so to avoid duplicates - // If not, just update the transferred service - if let Some(existing_service) = destination_host_services - .iter() - .find(|s| *s == prepped_service) - { - Either::Left(( + .any(|s| s == prepped_service) + { + let mut owned_service = prepped_service.clone(); + self.service_service + .update(&mut owned_service, authentication.clone()) + .await?; + } + } + + // Then collect upsert/delete 
futures for concurrent execution + let (upsert_futures, delete_futures): (Vec<_>, Vec<_>) = prepped_for_transfer_services + .iter() + .filter_map(|prepped_service| { + destination_host_services + .iter() + .find(|s| *s == prepped_service) + .map(|existing_service| { + ( + self.service_service.upsert_service( + existing_service.clone(), + prepped_service.clone(), + authentication.clone(), + ), self.service_service - .upsert_service(existing_service.clone(), prepped_service.clone()), - self.service_service.delete_service(&prepped_service.id), - )) - } else { - Either::Right(self.service_service.update_service(prepped_service.clone())) - } - }); - - // Save the updated services to DB - let _upserted_services = try_join_all(upsert_futures).await?; - let _deleted_services = try_join_all(delete_futures).await?; - let _updated_services = try_join_all(update_futures).await?; + .delete(&prepped_service.id, authentication.clone()), + ) + }) + }) + .unzip(); + + // Execute upsert/delete concurrently + let (_, _) = tokio::join!( + futures::future::join_all(upsert_futures), + futures::future::join_all(delete_futures), + ); // Delete host, ignore services because they are just being moved to other host - self.delete_host(&other_host.id, false).await?; + self.delete_host(&other_host.id, false, authentication) + .await?; tracing::info!( source_host_id = %other_host.id, source_host_name = %other_host.base.name, @@ -376,7 +477,12 @@ impl HostService { Ok(updated_host) } - async fn update_host_services(&self, current_host: &Host, updates: &Host) -> Result<(), Error> { + async fn update_host_services( + &self, + current_host: &Host, + updates: &Host, + authentication: AuthenticatedEntity, + ) -> Result<(), Error> { let host_filter = EntityFilter::unfiltered().host_id(¤t_host.id); let services = self.service_service.get_all(host_filter).await?; @@ -394,7 +500,7 @@ impl HostService { let delete_service_futures = delete_services .iter() - .map(|s| 
self.service_service.delete_service(&s.id)); + .map(|s| self.service_service.delete(&s.id, authentication.clone())); try_join_all(delete_service_futures).await?; @@ -402,11 +508,12 @@ impl HostService { let service_service = self.service_service.clone(); let current_host = current_host.clone(); let updates = updates.clone(); + let authentication = authentication.clone(); async move { - let updated = service_service + let mut updated = service_service .reassign_service_interface_bindings(service, ¤t_host, &updates) .await; - service_service.update_service(updated).await + service_service.update(&mut updated, authentication).await } }); @@ -429,7 +536,12 @@ impl HostService { Ok(()) } - pub async fn delete_host(&self, id: &Uuid, delete_services: bool) -> Result<()> { + pub async fn delete_host( + &self, + id: &Uuid, + delete_services: bool, + authentication: AuthenticatedEntity, + ) -> Result<()> { let host_filter = EntityFilter::unfiltered().host_id(id); if self.daemon_service.get_one(host_filter).await?.is_some() { return Err(anyhow!( @@ -447,18 +559,33 @@ impl HostService { if delete_services { for service_id in &host.base.services { - let _ = self.service_service.delete_service(service_id).await; + let _ = self + .service_service + .delete(service_id, authentication.clone()) + .await; } } self.storage.delete(id).await?; - tracing::info!( - host_id = %host.id, - host_name = %host.base.name, - service_count = %host.base.services.len(), - deleted_services = %delete_services, - "Host deleted" - ); + + let trigger_stale = host.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: host.id(), + network_id: self.get_network_id(&host), + organization_id: self.get_organization_id(&host), + entity_type: host.into(), + operation: EntityOperation::Deleted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + Ok(()) } } diff --git 
a/backend/src/server/hosts/subscriber.rs b/backend/src/server/hosts/subscriber.rs new file mode 100644 index 00000000..34284b6f --- /dev/null +++ b/backend/src/server/hosts/subscriber.rs @@ -0,0 +1,104 @@ +use std::collections::HashMap; + +use anyhow::Error; +use async_trait::async_trait; + +use crate::server::{ + auth::middleware::AuthenticatedEntity, + hosts::service::HostService, + shared::{ + entities::EntityDiscriminants, + events::{ + bus::{EventFilter, EventSubscriber}, + types::{EntityOperation, Event}, + }, + services::traits::CrudService, + storage::filter::EntityFilter, + }, +}; + +#[async_trait] +impl EventSubscriber for HostService { + fn event_filter(&self) -> EventFilter { + EventFilter::entity_only(HashMap::from([( + EntityDiscriminants::Subnet, + Some(vec![EntityOperation::Deleted]), + )])) + } + + async fn handle_events(&self, events: Vec) -> Result<(), Error> { + if events.is_empty() { + return Ok(()); + } + + // Collect all deleted subnet IDs and affected network IDs + let mut deleted_subnets = std::collections::HashSet::new(); + let mut network_ids = std::collections::HashSet::new(); + + for event in events { + if let Event::Entity(entity_event) = event { + deleted_subnets.insert(entity_event.entity_id); + if let Some(network_id) = entity_event.network_id { + network_ids.insert(network_id); + } + + tracing::debug!( + entity_type = %entity_event.entity_type, + entity_operation = %entity_event.operation, + subnet_id = %entity_event.entity_id, + "Host subscriber handling subnet deletion event", + ); + } + } + + // Process all affected networks + for network_id in network_ids { + let filter = EntityFilter::unfiltered().network_ids(&[network_id]); + let hosts = self.get_all(filter).await?; + + let mut updated_count = 0; + + for mut host in hosts { + // Check if host has interfaces referencing any deleted subnet + let has_deleted_subnet = host + .base + .interfaces + .iter() + .any(|i| deleted_subnets.contains(&i.base.subnet_id)); + + if 
has_deleted_subnet { + // Remove interfaces for all deleted subnets in this batch + host.base.interfaces = host + .base + .interfaces + .iter() + .filter(|i| !deleted_subnets.contains(&i.base.subnet_id)) + .cloned() + .collect(); + + self.update(&mut host, AuthenticatedEntity::System).await?; + updated_count += 1; + } + } + + if updated_count > 0 { + tracing::info!( + deleted_subnets = deleted_subnets.len(), + affected_hosts = updated_count, + network_id = %network_id, + "Cleaned up host interfaces referencing deleted subnets" + ); + } + } + + Ok(()) + } + + fn debounce_window_ms(&self) -> u64 { + 50 // Small window to batch multiple subnet deletions + } + + fn name(&self) -> &str { + "subnet_deleted_interface_removal" + } +} diff --git a/backend/src/server/hosts/tests.rs b/backend/src/server/hosts/tests.rs index 7ecb8191..7ff7e68f 100644 --- a/backend/src/server/hosts/tests.rs +++ b/backend/src/server/hosts/tests.rs @@ -2,6 +2,7 @@ use serial_test::serial; use crate::{ server::{ + auth::middleware::AuthenticatedEntity, services::r#impl::bindings::Binding, shared::{ services::traits::CrudService, @@ -19,12 +20,12 @@ async fn test_host_deduplication_on_create() { let organization = services .organization_service - .create(organization()) + .create(organization(), AuthenticatedEntity::System) .await .unwrap(); let network = services .network_service - .create(network(&organization.id)) + .create(network(&organization.id), AuthenticatedEntity::System) .await .unwrap(); @@ -39,7 +40,7 @@ async fn test_host_deduplication_on_create() { }; let (created1, _) = services .host_service - .create_host_with_services(host1.clone(), vec![]) + .create_host_with_services(host1.clone(), vec![], AuthenticatedEntity::System) .await .unwrap(); @@ -50,7 +51,7 @@ async fn test_host_deduplication_on_create() { }; let (created2, _) = services .host_service - .create_host_with_services(host2.clone(), vec![]) + .create_host_with_services(host2.clone(), vec![], AuthenticatedEntity::System) 
.await .unwrap(); @@ -69,12 +70,12 @@ async fn test_host_upsert_merges_new_data() { let organization = services .organization_service - .create(organization()) + .create(organization(), AuthenticatedEntity::System) .await .unwrap(); let network = services .network_service - .create(network(&organization.id)) + .create(network(&organization.id), AuthenticatedEntity::System) .await .unwrap(); @@ -86,14 +87,14 @@ async fn test_host_upsert_merges_new_data() { let subnet1 = subnet(&network.id); services .subnet_service - .create(subnet1.clone()) + .create(subnet1.clone(), AuthenticatedEntity::System) .await .unwrap(); host1.base.interfaces = vec![interface(&subnet1.id)]; let (created, _) = services .host_service - .create_host_with_services(host1.clone(), vec![]) + .create_host_with_services(host1.clone(), vec![], AuthenticatedEntity::System) .await .unwrap(); @@ -105,14 +106,14 @@ async fn test_host_upsert_merges_new_data() { let subnet2 = subnet(&network.id); services .subnet_service - .create(subnet2.clone()) + .create(subnet2.clone(), AuthenticatedEntity::System) .await .unwrap(); host2.base.interfaces = vec![interface(&subnet1.id), interface(&subnet2.id)]; let (upserted, _) = services .host_service - .create_host_with_services(host2.clone(), vec![]) + .create_host_with_services(host2.clone(), vec![], AuthenticatedEntity::System) .await .unwrap(); @@ -133,19 +134,19 @@ async fn test_host_consolidation() { let organization = services .organization_service - .create(organization()) + .create(organization(), AuthenticatedEntity::System) .await .unwrap(); let network = services .network_service - .create(network(&organization.id)) + .create(network(&organization.id), AuthenticatedEntity::System) .await .unwrap(); let subnet_obj = subnet(&network.id); services .subnet_service - .create(subnet_obj.clone()) + .create(subnet_obj.clone(), AuthenticatedEntity::System) .await .unwrap(); @@ -154,7 +155,7 @@ async fn test_host_consolidation() { let (created1, _) = services 
.host_service - .create_host_with_services(host1.clone(), vec![]) + .create_host_with_services(host1.clone(), vec![], AuthenticatedEntity::System) .await .unwrap(); @@ -169,7 +170,7 @@ async fn test_host_consolidation() { let (created2, created_svcs) = services .host_service - .create_host_with_services(host2.clone(), vec![svc]) + .create_host_with_services(host2.clone(), vec![svc], AuthenticatedEntity::System) .await .unwrap(); @@ -178,7 +179,11 @@ async fn test_host_consolidation() { // Consolidate host2 into host1 let consolidated = services .host_service - .consolidate_hosts(created1.clone(), created2.clone()) + .consolidate_hosts( + created1.clone(), + created2.clone(), + AuthenticatedEntity::System, + ) .await .unwrap(); diff --git a/backend/src/server/logging/mod.rs b/backend/src/server/logging/mod.rs new file mode 100644 index 00000000..97c7fa9b --- /dev/null +++ b/backend/src/server/logging/mod.rs @@ -0,0 +1,2 @@ +pub mod service; +pub mod subscriber; diff --git a/backend/src/server/logging/service.rs b/backend/src/server/logging/service.rs new file mode 100644 index 00000000..3270e68c --- /dev/null +++ b/backend/src/server/logging/service.rs @@ -0,0 +1,14 @@ +#[derive(Clone)] +pub struct LoggingService {} + +impl LoggingService { + pub fn new() -> Self { + Self {} + } +} + +impl Default for LoggingService { + fn default() -> Self { + Self::new() + } +} diff --git a/backend/src/server/logging/subscriber.rs b/backend/src/server/logging/subscriber.rs new file mode 100644 index 00000000..d47c8730 --- /dev/null +++ b/backend/src/server/logging/subscriber.rs @@ -0,0 +1,35 @@ +use anyhow::Error; +use async_trait::async_trait; + +use crate::server::{ + logging::service::LoggingService, + shared::events::{ + bus::{EventFilter, EventSubscriber}, + types::Event, + }, +}; + +#[async_trait] +impl EventSubscriber for LoggingService { + fn event_filter(&self) -> EventFilter { + EventFilter::all() + } + + async fn handle_events(&self, events: Vec) -> Result<(), Error> { 
+ // Log each event individually + for event in events { + event.log(); + tracing::debug!("{}", event); + } + + Ok(()) + } + + fn debounce_window_ms(&self) -> u64 { + 0 // No batching for logging - we want immediate logs + } + + fn name(&self) -> &str { + "logging" + } +} diff --git a/backend/src/server/mod.rs b/backend/src/server/mod.rs index b3dace28..5cb92c2c 100644 --- a/backend/src/server/mod.rs +++ b/backend/src/server/mod.rs @@ -8,6 +8,7 @@ pub mod email; pub mod github; pub mod groups; pub mod hosts; +pub mod logging; pub mod networks; pub mod organizations; pub mod services; diff --git a/backend/src/server/networks/handlers.rs b/backend/src/server/networks/handlers.rs index 1bc60a0f..d217e178 100644 --- a/backend/src/server/networks/handlers.rs +++ b/backend/src/server/networks/handlers.rs @@ -1,6 +1,6 @@ use crate::server::auth::middleware::{AuthenticatedUser, RequireMember}; use crate::server::shared::handlers::traits::{ - CrudHandlers, delete_handler, get_by_id_handler, update_handler, + CrudHandlers, bulk_delete_handler, delete_handler, get_by_id_handler, update_handler, }; use crate::server::shared::types::api::ApiError; use crate::server::{ @@ -28,6 +28,7 @@ pub fn create_router() -> Router> { .route("/{id}", put(update_handler::)) .route("/{id}", delete(delete_handler::)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) } pub async fn create_handler( @@ -67,7 +68,7 @@ pub async fn create_handler( let service = Network::get_service(&state); let created = service - .create(request) + .create(request, user.into()) .await .map_err(|e| ApiError::internal_error(&e.to_string()))?; diff --git a/backend/src/server/networks/impl.rs b/backend/src/server/networks/impl.rs index b583ffbf..dbf73eeb 100644 --- a/backend/src/server/networks/impl.rs +++ b/backend/src/server/networks/impl.rs @@ -1,6 +1,9 @@ use std::fmt::Display; -use crate::server::{networks::service::NetworkService, 
shared::handlers::traits::CrudHandlers}; +use crate::server::{ + networks::service::NetworkService, + shared::{entities::ChangeTriggersTopologyStaleness, handlers::traits::CrudHandlers}, +}; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use sqlx::Row; @@ -10,7 +13,7 @@ use validator::Validate; use crate::server::shared::storage::traits::{SqlValue, StorableEntity}; -#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[derive(Debug, Clone, Serialize, Deserialize, Validate, PartialEq, Eq, Hash)] pub struct NetworkBase { #[validate(length(min = 0, max = 100))] pub name: String, @@ -28,7 +31,7 @@ impl NetworkBase { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Network { pub id: Uuid, pub created_at: DateTime, @@ -51,6 +54,12 @@ impl CrudHandlers for Network { } } +impl ChangeTriggersTopologyStaleness for Network { + fn triggers_staleness(&self, _other: Option) -> bool { + false + } +} + impl StorableEntity for Network { type BaseData = NetworkBase; diff --git a/backend/src/server/networks/service.rs b/backend/src/server/networks/service.rs index df1a7318..1146cf4f 100644 --- a/backend/src/server/networks/service.rs +++ b/backend/src/server/networks/service.rs @@ -1,8 +1,10 @@ use crate::server::{ + auth::middleware::AuthenticatedEntity, hosts::service::HostService, networks::r#impl::Network, shared::{ - services::traits::CrudService, + events::bus::EventBus, + services::traits::{CrudService, EventBusService}, storage::{ generic::GenericPostgresStorage, seed_data::{ @@ -22,6 +24,20 @@ pub struct NetworkService { network_storage: Arc>, host_service: Arc, subnet_service: Arc, + event_bus: Arc, +} + +impl EventBusService for NetworkService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn get_network_id(&self, _entity: &Network) -> Option { + None + } + fn get_organization_id(&self, entity: &Network) -> Option { + Some(entity.id) + } } 
#[async_trait] @@ -36,15 +52,21 @@ impl NetworkService { network_storage: Arc>, host_service: Arc, subnet_service: Arc, + event_bus: Arc, ) -> Self { Self { network_storage, host_service, subnet_service, + event_bus, } } - pub async fn seed_default_data(&self, network_id: Uuid) -> Result<()> { + pub async fn seed_default_data( + &self, + network_id: Uuid, + authenticated: AuthenticatedEntity, + ) -> Result<()> { tracing::info!("Seeding default data..."); let wan_subnet = create_wan_subnet(network_id); @@ -53,16 +75,20 @@ impl NetworkService { let (web_host, web_service) = create_internet_connectivity_host(&wan_subnet, network_id); let (remote_host, client_service) = create_remote_host(&remote_subnet, network_id); - self.subnet_service.create(wan_subnet).await?; - self.subnet_service.create(remote_subnet).await?; + self.subnet_service + .create(wan_subnet, authenticated.clone()) + .await?; + self.subnet_service + .create(remote_subnet, authenticated.clone()) + .await?; self.host_service - .create_host_with_services(dns_host, vec![dns_service]) + .create_host_with_services(dns_host, vec![dns_service], authenticated.clone()) .await?; self.host_service - .create_host_with_services(web_host, vec![web_service]) + .create_host_with_services(web_host, vec![web_service], authenticated.clone()) .await?; self.host_service - .create_host_with_services(remote_host, vec![client_service]) + .create_host_with_services(remote_host, vec![client_service], authenticated.clone()) .await?; tracing::info!("Default data seeded successfully"); diff --git a/backend/src/server/organizations/handlers.rs b/backend/src/server/organizations/handlers.rs index bb50a483..7b066f7c 100644 --- a/backend/src/server/organizations/handlers.rs +++ b/backend/src/server/organizations/handlers.rs @@ -1,10 +1,11 @@ +use crate::server::auth::middleware::AuthenticatedEntity; use crate::server::auth::middleware::{ AuthenticatedUser, InviteUsersFeature, RequireFeature, RequireMember, }; use 
crate::server::config::AppState; use crate::server::organizations::r#impl::api::CreateInviteRequest; use crate::server::organizations::r#impl::base::Organization; -use crate::server::organizations::r#impl::invites::OrganizationInvite; +use crate::server::organizations::r#impl::invites::Invite; use crate::server::shared::handlers::traits::{CrudHandlers, update_handler}; use crate::server::shared::services::traits::CrudService; use crate::server::shared::types::api::ApiError; @@ -55,7 +56,7 @@ async fn create_invite( RequireMember(user): RequireMember, RequireFeature { plan, .. }: RequireFeature, Json(request): Json, -) -> ApiResult>> { +) -> ApiResult>> { // We know they have either team_members or share_views enabled if !plan.features().team_members && request.permissions > UserOrgPermissions::Visualizer { return Err(ApiError::forbidden( @@ -77,6 +78,7 @@ async fn create_invite( user.organization_id, user.user_id, state.config.public_url.clone(), + user.into(), ) .await .map_err(|e| ApiError::internal_error(&e.to_string()))?; @@ -88,12 +90,12 @@ async fn create_invite( async fn get_invite( State(state): State>, RequireMember(_user): RequireMember, - Path(token): Path, -) -> ApiResult>> { + Path(id): Path, +) -> ApiResult>> { let invite = state .services .organization_service - .get_invite(&token) + .get_invite(id) .await .map_err(|e| ApiError::bad_request(&e.to_string()))?; @@ -104,7 +106,7 @@ async fn get_invite( async fn get_invites( State(state): State>, RequireMember(user): RequireMember, -) -> ApiResult>>> { +) -> ApiResult>>> { // Show user invites that they created or created for users with permissions lower than them let invites = state .services @@ -122,13 +124,13 @@ async fn get_invites( async fn revoke_invite( State(state): State>, RequireMember(user): RequireMember, - Path(token): Path, + Path(id): Path, ) -> ApiResult>> { // Get the invite to verify ownership let invite = state .services .organization_service - .get_invite(&token) + .get_invite(id) 
.await .map_err(|e| ApiError::bad_request(&e.to_string()))?; @@ -148,7 +150,7 @@ async fn revoke_invite( state .services .organization_service - .revoke_invite(&token) + .revoke_invite(id, user.into()) .await .map_err(|e| ApiError::internal_error(&e.to_string()))?; @@ -159,10 +161,10 @@ async fn revoke_invite( async fn accept_invite_link( State(state): State>, session: Session, - Path(token): Path, + Path(id): Path, ) -> Result { // Validate the invite and get organization_id - let invite = match state.services.organization_service.get_invite(&token).await { + let invite = match state.services.organization_service.get_invite(id).await { Ok(invite) => invite, Err(e) => { tracing::warn!("Invalid invite token: {}", e); @@ -202,7 +204,7 @@ async fn accept_invite_link( ))); } - if let Err(e) = session.insert("pending_invite_token", token.clone()).await { + if let Err(e) = session.insert("pending_invite_id", id).await { tracing::error!("Failed to save invite token to session: {}", e); return Err(Redirect::to(&format!( "/?error={}", @@ -261,7 +263,7 @@ async fn accept_invite_link( state .services .user_service - .update(&mut user) + .update(&mut user, AuthenticatedEntity::System) .await .map_err(|_| { Redirect::to(&format!( @@ -295,7 +297,7 @@ pub async fn process_pending_invite( _ => return Ok(None), // No pending invite }; - let invite_token = match session.get::("pending_invite_token").await { + let invite_id = match session.get::("pending_invite_id").await { Ok(Some(token)) => token, _ => return Ok(None), // No token stored }; @@ -317,7 +319,7 @@ pub async fn process_pending_invite( if let Err(e) = state .services .organization_service - .use_invite(&invite_token) + .use_invite(invite_id) .await { tracing::error!("Failed to mark invite as used: {}", e); @@ -325,7 +327,7 @@ pub async fn process_pending_invite( // Clear session data let _ = session.remove::("pending_org_invite").await; - let _ = session.remove::("pending_invite_token").await; + let _ = 
session.remove::("pending_invite_id").await; let _ = session.remove::("pending_invite_permissions").await; Ok(Some((pending_org_id, permissions))) diff --git a/backend/src/server/organizations/impl/base.rs b/backend/src/server/organizations/impl/base.rs index b6d77970..c21da5dd 100644 --- a/backend/src/server/organizations/impl/base.rs +++ b/backend/src/server/organizations/impl/base.rs @@ -1,23 +1,24 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use std::fmt::Display; -use stripe_billing::SubscriptionStatus; use uuid::Uuid; use validator::Validate; -use crate::server::billing::types::base::BillingPlan; +use crate::server::{ + billing::types::base::BillingPlan, shared::entities::ChangeTriggersTopologyStaleness, +}; -#[derive(Debug, Clone, Serialize, Validate, Deserialize, Default)] +#[derive(Debug, Clone, Serialize, Validate, Deserialize, Default, PartialEq, Eq, Hash)] pub struct OrganizationBase { pub stripe_customer_id: Option, #[validate(length(min = 0, max = 100))] pub name: String, pub plan: Option, - pub plan_status: Option, + pub plan_status: Option, pub is_onboarded: bool, } -#[derive(Debug, Clone, Validate, Serialize, Deserialize)] +#[derive(Debug, Clone, Validate, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct Organization { pub id: Uuid, pub created_at: DateTime, @@ -32,3 +33,9 @@ impl Display for Organization { write!(f, "{:?}: {:?}", self.base.name, self.id) } } + +impl ChangeTriggersTopologyStaleness for Organization { + fn triggers_staleness(&self, _other: Option) -> bool { + false + } +} diff --git a/backend/src/server/organizations/impl/invites.rs b/backend/src/server/organizations/impl/invites.rs index 88f1d7dd..68ae04f4 100644 --- a/backend/src/server/organizations/impl/invites.rs +++ b/backend/src/server/organizations/impl/invites.rs @@ -2,11 +2,14 @@ use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; use uuid::Uuid; -use crate::server::users::r#impl::permissions::UserOrgPermissions; +use 
crate::server::{ + shared::entities::ChangeTriggersTopologyStaleness, + users::r#impl::permissions::UserOrgPermissions, +}; -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct OrganizationInvite { - pub token: String, +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct Invite { + pub id: Uuid, pub organization_id: Uuid, pub permissions: UserOrgPermissions, pub url: String, @@ -15,7 +18,7 @@ pub struct OrganizationInvite { pub expires_at: DateTime, } -impl OrganizationInvite { +impl Invite { pub fn new( organization_id: Uuid, url: String, @@ -25,7 +28,7 @@ impl OrganizationInvite { ) -> Self { let now = Utc::now(); Self { - token: Self::generate_token(), + id: Uuid::new_v4(), organization_id, permissions, created_by, @@ -45,8 +48,10 @@ impl OrganizationInvite { true } +} - fn generate_token() -> String { - nanoid::nanoid!(32) +impl ChangeTriggersTopologyStaleness for Invite { + fn triggers_staleness(&self, _other: Option) -> bool { + false } } diff --git a/backend/src/server/organizations/impl/storage.rs b/backend/src/server/organizations/impl/storage.rs index 4e1674dd..cf9ccde8 100644 --- a/backend/src/server/organizations/impl/storage.rs +++ b/backend/src/server/organizations/impl/storage.rs @@ -1,7 +1,6 @@ use chrono::{DateTime, Utc}; use sqlx::Row; use sqlx::postgres::PgRow; -use stripe_billing::SubscriptionStatus; use uuid::Uuid; use crate::server::{ @@ -81,7 +80,7 @@ impl StorableEntity for Organization { SqlValue::String(name), SqlValue::OptionalString(stripe_customer_id), SqlValue::OptionBillingPlan(plan), - SqlValue::OptionBillingPlanStatus(plan_status), + SqlValue::OptionalString(plan_status), SqlValue::Bool(is_onboarded), ], )) @@ -93,11 +92,6 @@ impl StorableEntity for Organization { .unwrap_or(None) .and_then(|v| serde_json::from_value(v).ok()); - let plan_status: Option = row - .try_get::, _>("plan") - .unwrap_or(None) - .and_then(|v| serde_json::from_str(&v).ok()); - Ok(Organization { id: row.get("id"), 
created_at: row.get("created_at"), @@ -106,7 +100,7 @@ impl StorableEntity for Organization { name: row.get("name"), stripe_customer_id: row.get("stripe_customer_id"), plan, - plan_status, + plan_status: row.get("plan_status"), is_onboarded: row.get("is_onboarded"), }, }) diff --git a/backend/src/server/organizations/service.rs b/backend/src/server/organizations/service.rs index 19581198..da03a7ee 100644 --- a/backend/src/server/organizations/service.rs +++ b/backend/src/server/organizations/service.rs @@ -1,4 +1,9 @@ -use crate::server::organizations::r#impl::invites::OrganizationInvite; +use crate::server::auth::middleware::AuthenticatedEntity; +use crate::server::organizations::r#impl::invites::Invite; +use crate::server::shared::entities::ChangeTriggersTopologyStaleness; +use crate::server::shared::events::bus::EventBus; +use crate::server::shared::events::types::{EntityEvent, EntityOperation}; +use crate::server::shared::services::traits::EventBusService; use crate::server::{ organizations::r#impl::{api::CreateInviteRequest, base::Organization}, shared::{services::traits::CrudService, storage::generic::GenericPostgresStorage}, @@ -13,7 +18,21 @@ use uuid::Uuid; pub struct OrganizationService { storage: Arc>, - invites: Arc>>, + invites: Arc>>, + event_bus: Arc, +} + +impl EventBusService for OrganizationService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn get_network_id(&self, _entity: &Organization) -> Option { + None + } + fn get_organization_id(&self, entity: &Organization) -> Option { + Some(entity.id) + } } #[async_trait] @@ -24,18 +43,22 @@ impl CrudService for OrganizationService { } impl OrganizationService { - pub fn new(storage: Arc>) -> Self { + pub fn new( + storage: Arc>, + event_bus: Arc, + ) -> Self { Self { storage, invites: Arc::new(RwLock::new(HashMap::new())), + event_bus, } } - pub async fn get_invite(&self, token: &str) -> Result { + pub async fn get_invite(&self, id: Uuid) -> Result { let invites = 
self.invites.read().await; let invite = invites - .get(token) + .get(&id) .ok_or_else(|| anyhow!("Invalid or expired invite link"))?; if !invite.is_valid() { @@ -45,11 +68,11 @@ impl OrganizationService { Ok(invite.clone()) } - pub async fn use_invite(&self, token: &str) -> Result { + pub async fn use_invite(&self, id: Uuid) -> Result { let mut invites = self.invites.write().await; let invite = invites - .get_mut(token) + .get_mut(&id) .ok_or_else(|| anyhow!("Invalid or expired invite link"))?; if !invite.is_valid() { @@ -58,7 +81,27 @@ impl OrganizationService { let organization_id = invite.organization_id; - invites.remove(token); + let invite = invites + .remove(&id) + .ok_or_else(|| anyhow!("Invite not found"))?; + + let trigger_stale = invite.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: invite.id, + organization_id: Some(invite.organization_id), + entity_type: invite.into(), + network_id: None, + operation: EntityOperation::Deleted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication: AuthenticatedEntity::System, + }) + .await?; Ok(organization_id) } @@ -81,10 +124,11 @@ impl OrganizationService { organization_id: Uuid, user_id: Uuid, url: String, - ) -> Result { + authentication: AuthenticatedEntity, + ) -> Result { let expiration_hours = request.expiration_hours.unwrap_or(168); // Default 7 days - let invite = OrganizationInvite::new( + let invite = Invite::new( organization_id, url, user_id, @@ -93,22 +137,59 @@ impl OrganizationService { ); // Store invite - self.invites - .write() - .await - .insert(invite.token.clone(), invite.clone()); + self.invites.write().await.insert(invite.id, invite.clone()); + + let trigger_stale = invite.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: invite.id, + organization_id: Some(invite.organization_id), + entity_type: 
invite.clone().into(), + network_id: None, + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; Ok(invite) } /// Revoke a specific invite - pub async fn revoke_invite(&self, token: &str) -> Result<(), Error> { + pub async fn revoke_invite( + &self, + id: Uuid, + authentication: AuthenticatedEntity, + ) -> Result<(), Error> { let mut invites = self.invites.write().await; - invites - .remove(token) + let invite = invites + .remove(&id) .ok_or_else(|| anyhow!("Invite not found"))?; + let trigger_stale = invite.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: invite.id, + organization_id: Some(invite.organization_id), + entity_type: invite.into(), + network_id: None, + operation: EntityOperation::Deleted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + Ok(()) } @@ -122,7 +203,7 @@ impl OrganizationService { } /// List all active invites for an organization - pub async fn list_invites(&self, organization_id: &Uuid) -> Vec { + pub async fn list_invites(&self, organization_id: &Uuid) -> Vec { let invites = self.invites.read().await; invites diff --git a/backend/src/server/services/definitions/elasticsearch.rs b/backend/src/server/services/definitions/elasticsearch.rs deleted file mode 100644 index 8b137891..00000000 --- a/backend/src/server/services/definitions/elasticsearch.rs +++ /dev/null @@ -1 +0,0 @@ - diff --git a/backend/src/server/services/definitions/influxdb.rs b/backend/src/server/services/definitions/influxdb.rs index 51524d9d..346b973f 100644 --- a/backend/src/server/services/definitions/influxdb.rs +++ b/backend/src/server/services/definitions/influxdb.rs @@ -1,4 +1,5 @@ use crate::server::hosts::r#impl::ports::PortBase; +use crate::server::services::definitions::{ServiceDefinitionFactory, 
create_service}; use crate::server::services::r#impl::categories::ServiceCategory; use crate::server::services::r#impl::definitions::ServiceDefinition; use crate::server::services::r#impl::patterns::Pattern; @@ -17,7 +18,7 @@ impl ServiceDefinition for InfluxDB { ServiceCategory::Database } fn discovery_pattern(&self) -> Pattern<'_> { - Pattern::Endpoint(PortBase::new_tcp(8086), "/ping", "", None) + Pattern::Port(PortBase::InfluxDb) } fn logo_url(&self) -> &'static str { "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/influxdb.svg" @@ -25,4 +26,9 @@ impl ServiceDefinition for InfluxDB { fn logo_needs_white_background(&self) -> bool { true } + fn is_generic(&self) -> bool { + true + } } + +inventory::submit!(ServiceDefinitionFactory::new(create_service::)); diff --git a/backend/src/server/services/definitions/mod.rs b/backend/src/server/services/definitions/mod.rs index 208c601f..3eb3ce5d 100644 --- a/backend/src/server/services/definitions/mod.rs +++ b/backend/src/server/services/definitions/mod.rs @@ -67,6 +67,7 @@ pub mod google_nest_repeater; pub mod google_nest_router; pub mod tp_link_eap; pub mod unifi_access_point; +pub mod unifi_controller; // NetworkSecurity pub mod crowdsec; @@ -128,6 +129,7 @@ pub mod veeam; // Media pub mod audiobookshelf; +pub mod bazarr; pub mod emby; pub mod immich; pub mod jellyfin; @@ -136,6 +138,7 @@ pub mod jellystat; pub mod komga; pub mod overseerr; pub mod plex; +pub mod sabnzbd; pub mod slskd; pub mod tautulli; @@ -164,7 +167,6 @@ pub mod rancher; // Database pub mod cassandra; pub mod couchdb; -pub mod elasticsearch; pub mod influxdb; pub mod mariadb; pub mod mongodb; @@ -229,6 +231,7 @@ pub mod linkstack; pub mod proxmox_datacenter_manager; // Monitoring +pub mod apc; pub mod coolercontrol; pub mod elastic_apm; pub mod gatus; diff --git a/backend/src/server/services/definitions/next_cloud.rs b/backend/src/server/services/definitions/next_cloud.rs index dcd25557..35c0ed5c 100644 --- 
a/backend/src/server/services/definitions/next_cloud.rs +++ b/backend/src/server/services/definitions/next_cloud.rs @@ -19,20 +19,12 @@ impl ServiceDefinition for NextCloud { } fn discovery_pattern(&self) -> Pattern<'_> { - Pattern::AnyOf(vec![ - Pattern::Endpoint( - PortBase::Http, - "/core/css/server.css", - "Nextcloud GmbH", - None, - ), - Pattern::Endpoint( - PortBase::Https, - "/core/css/server.css", - "Nextcloud GmbH", - None, - ), - ]) + Pattern::Endpoint( + PortBase::Http, + "/core/css/server.css", + "Nextcloud GmbH", + None, + ) } fn logo_url(&self) -> &'static str { diff --git a/backend/src/server/services/definitions/sabnzbd.rs b/backend/src/server/services/definitions/sabnzbd.rs new file mode 100644 index 00000000..1159601f --- /dev/null +++ b/backend/src/server/services/definitions/sabnzbd.rs @@ -0,0 +1,35 @@ +use crate::server::hosts::r#impl::ports::PortBase; +use crate::server::services::definitions::{ServiceDefinitionFactory, create_service}; +use crate::server::services::r#impl::categories::ServiceCategory; +use crate::server::services::r#impl::definitions::ServiceDefinition; +use crate::server::services::r#impl::patterns::Pattern; + +#[derive(Default, Clone, Eq, PartialEq, Hash)] +pub struct SABnzbd; + +impl ServiceDefinition for SABnzbd { + fn name(&self) -> &'static str { + "SABnzbd" + } + fn description(&self) -> &'static str { + "A NZB Files Downloader." 
+ } + fn category(&self) -> ServiceCategory { + ServiceCategory::Media + } + + fn discovery_pattern(&self) -> Pattern<'_> { + Pattern::Endpoint( + PortBase::Http8080, + "/Content/manifest.json", + "SABnzbd", + None, + ) + } + + fn logo_url(&self) -> &'static str { + "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/sabnzbd.svg" + } +} + +inventory::submit!(ServiceDefinitionFactory::new(create_service::)); diff --git a/backend/src/server/services/definitions/unifi_controller.rs b/backend/src/server/services/definitions/unifi_controller.rs index 2fc043bd..0df6f2e6 100644 --- a/backend/src/server/services/definitions/unifi_controller.rs +++ b/backend/src/server/services/definitions/unifi_controller.rs @@ -19,7 +19,7 @@ impl ServiceDefinition for UnifiController { } fn discovery_pattern(&self) -> Pattern<'_> { - Pattern::Endpoint(PortBase::new_tcp(8443, None), "/manage", "UniFi") + Pattern::Endpoint(PortBase::Https8443, "/manage", "UniFi", None) } fn logo_url(&self) -> &'static str { diff --git a/backend/src/server/services/handlers.rs b/backend/src/server/services/handlers.rs index bda27107..818820fc 100644 --- a/backend/src/server/services/handlers.rs +++ b/backend/src/server/services/handlers.rs @@ -1,5 +1,6 @@ use crate::server::shared::handlers::traits::{ - create_handler, delete_handler, get_all_handler, get_by_id_handler, update_handler, + bulk_delete_handler, create_handler, delete_handler, get_all_handler, get_by_id_handler, + update_handler, }; use crate::server::{config::AppState, services::r#impl::base::Service}; use axum::Router; @@ -13,4 +14,5 @@ pub fn create_router() -> Router> { .route("/{id}", put(update_handler::)) .route("/{id}", delete(delete_handler::)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) } diff --git a/backend/src/server/services/impl/base.rs b/backend/src/server/services/impl/base.rs index 62855cce..e135d1cc 100644 --- a/backend/src/server/services/impl/base.rs +++ 
b/backend/src/server/services/impl/base.rs @@ -10,6 +10,7 @@ use crate::server::services::r#impl::patterns::{MatchConfidence, MatchReason, Ma use crate::server::services::r#impl::virtualization::{ DockerVirtualization, ServiceVirtualization, }; +use crate::server::shared::entities::ChangeTriggersTopologyStaleness; use crate::server::shared::storage::traits::StorableEntity; use crate::server::shared::types::entities::{DiscoveryMetadata, EntitySource}; use crate::server::subnets::r#impl::base::Subnet; @@ -47,7 +48,19 @@ impl Default for ServiceBase { } } -#[derive(Debug, Clone, Validate, Serialize, Deserialize)] +impl ChangeTriggersTopologyStaleness for Service { + fn triggers_staleness(&self, other: Option) -> bool { + if let Some(other_service) = other { + self.base.bindings != other_service.base.bindings + || self.base.host_id != other_service.base.host_id + || self.base.virtualization != other_service.base.virtualization + } else { + true + } + } +} + +#[derive(Debug, Clone, Validate, Serialize, Deserialize, Eq)] pub struct Service { pub id: Uuid, pub created_at: DateTime, diff --git a/backend/src/server/services/impl/categories.rs b/backend/src/server/services/impl/categories.rs index ae991932..a8f8c4fd 100644 --- a/backend/src/server/services/impl/categories.rs +++ b/backend/src/server/services/impl/categories.rs @@ -2,7 +2,7 @@ use serde::{Deserialize, Serialize}; use strum_macros::{Display, EnumDiscriminants, EnumIter, IntoStaticStr}; use crate::server::shared::{ - entities::Entity, + concepts::Concept, types::metadata::{EntityMetadataProvider, HasId}, }; @@ -81,21 +81,21 @@ impl EntityMetadataProvider for ServiceCategory { ServiceCategory::Storage => "HardDrive", ServiceCategory::Media => "PlayCircle", ServiceCategory::HomeAutomation => "Home", - ServiceCategory::Virtualization => Entity::Virtualization.icon(), + ServiceCategory::Virtualization => Concept::Virtualization.icon(), ServiceCategory::FileSharing => "Folder", // Network Services - 
ServiceCategory::DNS => Entity::Dns.icon(), - ServiceCategory::VPN => Entity::Vpn.icon(), + ServiceCategory::DNS => Concept::Dns.icon(), + ServiceCategory::VPN => Concept::Vpn.icon(), ServiceCategory::Monitoring => "Activity", ServiceCategory::AdBlock => "ShieldCheck", ServiceCategory::Backup => "DatabaseBackup", - ServiceCategory::ReverseProxy => Entity::ReverseProxy.icon(), + ServiceCategory::ReverseProxy => Concept::ReverseProxy.icon(), // End devices ServiceCategory::Workstation => "Monitor", ServiceCategory::Mobile => "Smartphone", - ServiceCategory::IoT => Entity::IoT.icon(), + ServiceCategory::IoT => Concept::IoT.icon(), ServiceCategory::Printer => "Printer", // Application @@ -126,21 +126,21 @@ impl EntityMetadataProvider for ServiceCategory { ServiceCategory::Storage => "green", ServiceCategory::Media => "blue", ServiceCategory::HomeAutomation => "blue", - ServiceCategory::Virtualization => Entity::Virtualization.color(), + ServiceCategory::Virtualization => Concept::Virtualization.color(), ServiceCategory::Backup => "gray", ServiceCategory::FileSharing => "blue", // Network Services - ServiceCategory::DNS => Entity::Dns.color(), - ServiceCategory::VPN => Entity::Vpn.color(), + ServiceCategory::DNS => Concept::Dns.color(), + ServiceCategory::VPN => Concept::Vpn.color(), ServiceCategory::Monitoring => "orange", - ServiceCategory::AdBlock => Entity::Dns.color(), - ServiceCategory::ReverseProxy => Entity::ReverseProxy.color(), + ServiceCategory::AdBlock => Concept::Dns.color(), + ServiceCategory::ReverseProxy => Concept::ReverseProxy.color(), // End devices ServiceCategory::Workstation => "green", ServiceCategory::Mobile => "blue", - ServiceCategory::IoT => Entity::IoT.color(), + ServiceCategory::IoT => Concept::IoT.color(), ServiceCategory::Printer => "gray", // Application diff --git a/backend/src/server/services/impl/definitions.rs b/backend/src/server/services/impl/definitions.rs index afa1d3cb..6dd52934 100644 --- 
a/backend/src/server/services/impl/definitions.rs +++ b/backend/src/server/services/impl/definitions.rs @@ -197,9 +197,20 @@ impl<'de> Deserialize<'de> for Box { D: serde::Deserializer<'de>, { let id = String::deserialize(deserializer)?; - ServiceDefinitionRegistry::find_by_id(&id).ok_or_else(|| { - serde::de::Error::custom(format!("Service definition not found: {}", id)) - }) + match ServiceDefinitionRegistry::find_by_id(&id) { + Some(def) => Ok(def), + None => { + // Log a warning but don't fail deserialization + tracing::warn!( + "Service definition not found: '{}'. Using UnknownServiceDefinition as fallback. \ + This may indicate a missing module declaration in mod.rs or a renamed service.", + id + ); + + // Return Default instead of failing + Ok(Box::new(DefaultServiceDefinition)) + } + } } } @@ -208,10 +219,10 @@ pub struct DefaultServiceDefinition; impl ServiceDefinition for DefaultServiceDefinition { fn name(&self) -> &'static str { - "Default Service" + "Missing Service" } fn description(&self) -> &'static str { - "Default service implementation" + "If you are seeing this, a service definition was removed. Please create an issue." } fn category(&self) -> ServiceCategory { ServiceCategory::Unknown @@ -220,474 +231,3 @@ impl ServiceDefinition for DefaultServiceDefinition { Pattern::None } } - -#[cfg(test)] -mod tests { - use strum::{IntoDiscriminant, IntoEnumIterator}; - - use crate::server::{ - hosts::r#impl::ports::PortBase, - services::{ - definitions::ServiceDefinitionRegistry, - r#impl::{definitions::ServiceDefinition, patterns::Pattern}, - }, - }; - use std::{ - collections::{HashMap, HashSet}, - fs::File, - io::BufReader, - path::PathBuf, - }; - - #[test] - fn test_all_service_definitions_register() { - // Get all registered services using inventory - let registry = ServiceDefinitionRegistry::all_service_definitions(); - - // Verify at least some services are registered - assert!( - !registry.is_empty(), - "No service definitions registered! 
Check inventory setup." - ); - - // Verify no duplicate names - let names: HashSet<_> = registry.iter().map(|s| s.name()).collect(); - assert_eq!( - names.len(), - registry.len(), - "Duplicate service definition names found!" - ); - - // Print registered services for debugging - println!("Registered {} services:", registry.len()); - for service in ®istry { - println!(" - {}", ServiceDefinition::name(service)); - } - } - - #[test] - fn test_service_definition_has_required_fields() { - let registry = ServiceDefinitionRegistry::all_service_definitions(); - - for service in registry { - // Every service must have non-empty name - assert!( - !ServiceDefinition::name(&service).is_empty(), - "Service has empty name" - ); - - // Name should be reasonable length (< 40 chars) - assert!( - service.name().len() < 40, - "Service name '{}' is too long; must be < 40 characters", - service.name() - ); - - // Every service must have description - assert!( - !service.description().is_empty(), - "Service '{}' has empty description", - service.name() - ); - - // Description should be reasonable length - assert!( - service.description().len() < 100, - "Service '{}' description is too long; must be < 100 characters", - service.name() - ); - } - } - - #[test] - fn test_service_patterns_use_appropriate_port_types() { - let registry = ServiceDefinitionRegistry::all_service_definitions(); - - // Build map of port numbers to their PortBase names by iterating - let well_known_ports: std::collections::HashMap = PortBase::iter() - .filter_map(|port_base| { - // Skip Custom variants - if matches!(port_base, PortBase::Custom(_)) { - None - } else { - Some((port_base, format!("PortBase::{}", port_base.discriminant()))) - } - }) - .collect(); - - for service in registry { - let pattern = service.discovery_pattern(); - let service_name = ServiceDefinition::name(&service); - - check_port_usage(&pattern, &well_known_ports, service_name); - } - } - - fn check_port_usage( - pattern: &Pattern, - 
well_known_ports: &std::collections::HashMap, - service_name: &str, - ) { - match pattern { - Pattern::Port(port_base) | Pattern::Endpoint(port_base, .., None) => { - if let PortBase::Custom(_) = port_base { - if let Some(named_constant) = well_known_ports.get(&port_base) { - panic!( - "Service '{}' uses custom port {} but should use {} instead", - service_name, port_base, named_constant - ); - } - } - } - Pattern::AnyOf(patterns) | Pattern::AllOf(patterns) => { - for p in patterns { - check_port_usage(p, well_known_ports, service_name); - } - } - Pattern::Not(p) => { - check_port_usage(p, well_known_ports, service_name); - } - _ => {} - } - } - - #[tokio::test] - async fn test_service_patterns_are_specific_enough() { - let registry = ServiceDefinitionRegistry::all_service_definitions(); - let words_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) - .join("src") - .join("tests") - .join("words.json"); - - // Ensure the words file exists, download if necessary - if !words_path.exists() { - eprintln!("Words dictionary not found, downloading..."); - let url = - "https://raw.githubusercontent.com/dwyl/english-words/master/words_dictionary.json"; - - // Create the directory if it doesn't exist - if let Some(parent) = words_path.parent() { - std::fs::create_dir_all(parent).unwrap(); - } - - // Download and save the file - use async client instead - let response = reqwest::get(url) - .await - .expect("Failed to download words dictionary"); - let content = response.text().await.expect("Failed to read response body"); - std::fs::write(&words_path, content).expect("Failed to write words.json"); - eprintln!("Downloaded words dictionary to {:?}", words_path); - } - - let words_file = File::open(&words_path).unwrap(); - let reader = BufReader::new(words_file); - let words_map: HashMap = - serde_json::from_reader(reader).unwrap(); - let words: HashSet = words_map.into_keys().collect(); - - // Get all non-custom PortBase variants by iterating - let common_ports: Vec = 
PortBase::iter() - .filter_map(|port_base| { - // Skip Custom variants - if matches!(port_base, PortBase::Custom(_)) { - None - } else { - Some(port_base) - } - }) - .collect(); - - for service in registry { - // Generic services always pass - if service.is_generic() { - continue; - } - - let pattern = service.discovery_pattern(); - let service_name = ServiceDefinition::name(&service); - - check_pattern_specificity(&pattern, &common_ports, service_name, words.clone()); - } - } - - fn check_pattern_specificity( - pattern: &Pattern, - common_ports: &[PortBase], - service_name: &str, - words: HashSet, - ) { - match pattern { - // Port-only patterns on common ports without other criteria = fail - Pattern::Port(port_base) => { - if common_ports.contains(&port_base) { - panic!( - "Service '{}' uses port-only pattern on common port {} without additional criteria. \ - This could cause false positives. Consider using:\n\ - 1. Pattern::Endpoint with a unique path/response\n\ - 2. Pattern::AllOf combining port with other criteria\n\ - 3. Mark service as is_generic = true if it's truly generic (ie it represents the implementation of a protocol, not something provided by a specific vendor)", - service_name, - port_base.discriminant() - ); - } - } - - // AnyOf with only port patterns on common ports = fail - Pattern::AnyOf(patterns) => { - let all_are_common_port_patterns = patterns.iter().all(|p| { - if let Pattern::Port(port_base) = p { - common_ports.contains(&port_base) - } else { - false - } - }); - - if all_are_common_port_patterns && !patterns.is_empty() { - panic!( - "Service '{}' uses AnyOf with only common port patterns. \ - This could cause false positives. 
Use more specific patterns", - service_name - ); - } - - // Check each sub-pattern recursively - for p in patterns { - check_pattern_specificity(p, common_ports, service_name, words.clone()); - } - } - - // Endpoint patterns with common port/path and match strings that could lead to false positive = fail - Pattern::Endpoint(port_base, path, body_match_string, status_range) => { - let match_string_lower = body_match_string.to_lowercase(); - let is_short_match_string = match_string_lower.len() < 5; - - // Another service is likely to be listening on this port on other hosts, so need to be more stringent - let port_is_common = common_ports.contains(&port_base); - - // Path is unique/specific enough, even if match string alone is likely to cause false positives - let path_contains_service_name = path.contains(service_name); - - // Endpoint is probably not unique to service, and other services might respond to it - let is_common_endpoint = !path_contains_service_name - && port_is_common - && (*path == "/" || *path == "/api/" || *path == "/home/"); - - // Potential to false positive with dashboards that display service name - let match_string_is_service_name = - match_string_lower == service_name.to_lowercase(); - - // Non-compound strings have potential to false positive with dashboards that display service name - let match_string_is_singular = !match_string_lower.contains(" ") - && !match_string_lower.contains(".") - && !match_string_lower.contains("_") - && !match_string_lower.contains("-") - && !match_string_lower.contains(",") - && !match_string_lower.contains("/"); - - // Potential to false positive by being found in random strings displayed by other services - let is_substring_of_any_word = if is_short_match_string { - words.iter().any(|w| w.contains(&match_string_lower)) - && !path_contains_service_name - } else { - false - }; - - let expected_range = status_range.as_ref().unwrap_or(&(200..400)); - let range_includes_redirects = - expected_range.start < 400 && 
expected_range.end > 300; - - if is_short_match_string && range_includes_redirects && port_is_common { - panic!( - "Service '{}' uses a match string '{}' that is too short ({} characters) and also accepts redirects. - This could cause false positives. Please disallow redirects by passing Some(200..300) as the allowed status range - or update the match string to be longer.", - service_name, - body_match_string, - match_string_lower.len(), - ); - }; - - if is_common_endpoint && match_string_is_service_name { - panic!( - "Service '{}' uses a match string '{}' that is the same as the name of the service. This could cause false positives, \ - as dashboard services often will contain service names in their own endpoint responses, and as such could get detected as this service - Please provide a match string that contains text that distinguishes it from the service name", - service_name, body_match_string - ); - } - - if is_common_endpoint && match_string_is_singular { - panic!( - "Service '{}' uses a match string '{}' that is a singular word. This could cause false positives, \ - as dashboard services often will contain service names in their own endpoint responses, and as such could get detected as this service - Please provide a compound match string - multiple words separated by one of the following \ - delimiters: \".\", \"_\", \"/\", \",\", or \"-\"", - service_name, body_match_string - ); - } - - if is_common_endpoint && is_substring_of_any_word { - panic!( - "Service '{}' uses endpoint pattern at root path '/' on common port {} \ - with a match string '{}' that is a substring of at least one of a common english word. This could cause false positives. \ - Consider:\n\ - 1. Use a more specific path (e.g., '/api/status' instead of '/')\n\ - 2. Use a longer, more unique match string\n\ - 3. 
Use Pattern::AllOf to combine multiple criteria", - service_name, port_base, body_match_string - ); - } - } - - // Other patterns are generally fine - _ => {} - } - } - - #[test] - fn test_service_definition_serialization() { - let registry = ServiceDefinitionRegistry::all_service_definitions(); - - // Test that we can serialize and deserialize service definitions - for service in registry.iter().take(5) { - // Test first 5 to save time - // Serialize to JSON - let json = serde_json::to_string(&service) - .expect(&format!("Failed to serialize {}", service.name())); - - // Deserialize back - let deserialized: Box = serde_json::from_str(&json) - .expect(&format!("Failed to deserialize {}", service.name())); - - // Verify key fields match - assert_eq!( - service.name(), - deserialized.name(), - "Name mismatch after serialization" - ); - assert_eq!( - service.description(), - deserialized.description(), - "Description mismatch after serialization" - ); - } - } - #[tokio::test] - async fn test_service_definition_logo_urls_resolve() { - let registry = ServiceDefinitionRegistry::all_service_definitions(); - let client = reqwest::Client::builder() - .timeout(std::time::Duration::from_secs(5)) - .build() - .expect("Failed to create HTTP client"); - - const ALLOWED_DOMAINS: &[&str] = - &["cdn.jsdelivr.net", "simpleicons.org", "vectorlogo.zone"]; - - for service in registry { - let logo_url = service.logo_url(); - - // Skip services without logo URLs - if logo_url.is_empty() { - continue; - } - - // Check if it's a local file path or external URL - if logo_url.starts_with('/') { - // Local file path like /logos/netvisor-logo.png - assert!( - logo_url.starts_with("/logos/"), - "Service '{}' has local logo URL '{}' that doesn't start with /logos/", - ServiceDefinition::name(&service), - logo_url - ); - // We can't verify local files exist in tests, so just validate the path format - continue; - } - - // Must be a URL - parse it - let url = match reqwest::Url::parse(logo_url) { 
- Ok(url) => url, - Err(e) => { - panic!( - "Service '{}' has invalid logo URL '{}': {}", - ServiceDefinition::name(&service), - logo_url, - e - ); - } - }; - - // Check domain is in allowed list - let domain = url.domain().unwrap_or(""); - let is_allowed = ALLOWED_DOMAINS - .iter() - .any(|allowed| domain.ends_with(allowed)); - - assert!( - is_allowed, - "Service '{}' has logo URL '{}' from unauthorized domain '{}'. \ - Allowed domains: {}", - ServiceDefinition::name(&service), - logo_url, - domain, - ALLOWED_DOMAINS.join(", ") - ); - - // Attempt to fetch the logo URL - match client.head(logo_url).send().await { - Ok(response) => { - assert!( - response.status().is_success(), - "Service '{}' has logo URL '{}' that returned status {}", - ServiceDefinition::name(&service), - logo_url, - response.status() - ); - - // Verify Content-Type is an image - if let Some(content_type) = response.headers().get("content-type") { - let content_type_str = content_type.to_str().unwrap_or(""); - assert!( - content_type_str.starts_with("image/") - || content_type_str.starts_with("text/plain"), - "Service '{}' has logo URL '{}' with non-image Content-Type: {}", - ServiceDefinition::name(&service), - logo_url, - content_type_str - ); - } - } - Err(e) => { - panic!( - "Service '{}' has logo URL '{}' that failed to resolve: {}", - ServiceDefinition::name(&service), - logo_url, - e - ); - } - } - } - } - - #[test] - fn test_service_definition_description_starts_with_capital() { - let registry = ServiceDefinitionRegistry::all_service_definitions(); - - for service in registry { - let description = ServiceDefinition::description(&service); - - // Skip empty descriptions (already caught by another test) - if description.is_empty() { - continue; - } - - let first_char = description.chars().next().unwrap(); - assert!( - first_char.is_uppercase(), - "Service '{}' has description '{}' that doesn't start with a capital letter", - ServiceDefinition::name(&service), - description - ); - } - } -} 
diff --git a/backend/src/server/services/impl/mod.rs b/backend/src/server/services/impl/mod.rs index f3c93275..8982c443 100644 --- a/backend/src/server/services/impl/mod.rs +++ b/backend/src/server/services/impl/mod.rs @@ -6,4 +6,6 @@ pub mod endpoints; pub mod handlers; pub mod patterns; pub mod storage; +#[cfg(test)] +pub mod tests; pub mod virtualization; diff --git a/backend/src/server/services/impl/tests.rs b/backend/src/server/services/impl/tests.rs new file mode 100644 index 00000000..aefd2588 --- /dev/null +++ b/backend/src/server/services/impl/tests.rs @@ -0,0 +1,873 @@ +use strum::{IntoDiscriminant, IntoEnumIterator}; + +use crate::server::{ + hosts::r#impl::ports::PortBase, + services::{ + definitions::ServiceDefinitionRegistry, + r#impl::{definitions::ServiceDefinition, patterns::Pattern}, + }, +}; +use std::{ + collections::{HashMap, HashSet}, + fs::File, + io::BufReader, + path::PathBuf, +}; + +#[test] +fn test_all_service_definitions_register() { + use std::collections::HashMap; + use std::fs; + + // Get all registered services using inventory + let registry = ServiceDefinitionRegistry::all_service_definitions(); + + // Verify at least some services are registered + assert!( + !registry.is_empty(), + "No service definitions registered! Check inventory setup." + ); + + // Verify no duplicate names + let names: HashSet<_> = registry.iter().map(|s| s.name()).collect(); + assert_eq!( + names.len(), + registry.len(), + "Duplicate service definition names found!" 
+ ); + + // Get all declared modules from mod.rs + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let definitions_dir = manifest_dir.join("src/server/services/definitions"); + let mod_rs_path = definitions_dir.join("mod.rs"); + let mod_rs_content = fs::read_to_string(&mod_rs_path).expect("Failed to read mod.rs"); + + let declared_modules: HashSet = mod_rs_content + .lines() + .filter_map(|line| { + let trimmed = line.trim(); + if trimmed.starts_with("pub mod ") && trimmed.ends_with(';') { + let module_name = trimmed + .trim_start_matches("pub mod ") + .trim_end_matches(';') + .trim() + .to_string(); + Some(module_name) + } else { + None + } + }) + .collect(); + + // Build map of declared module -> extracted service name + let mut declared_services: HashMap = HashMap::new(); + + for module in &declared_modules { + let file_path = definitions_dir.join(format!("{}.rs", module)); + + if let Ok(content) = fs::read_to_string(&file_path) { + if let Some(service_name) = extract_service_name(&content) { + declared_services.insert(module.clone(), service_name); + } + } + } + + // Check that all declared services are registered + let registered_names: HashSet = registry.iter().map(|s| s.name().to_string()).collect(); + + let mut not_registered = Vec::new(); + for (module, service_name) in &declared_services { + if !registered_names.contains(service_name) { + not_registered.push((module.clone(), service_name.clone())); + } + } + + if !not_registered.is_empty() { + panic!( + "Service definitions are declared in mod.rs but NOT registered with inventory::submit!:\n{}\n\n\ + Each service definition file must include:\n\ + inventory::submit!(ServiceDefinitionFactory::new(create_service::));", + not_registered + .iter() + .map(|(module, name)| format!(" - {} (service name: '{}')", module, name)) + .collect::>() + .join("\n") + ); + } +} + +#[test] +fn test_all_service_definition_files_can_be_parsed() { + use std::fs; + + let manifest_dir = 
PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let definitions_dir = manifest_dir.join("src/server/services/definitions"); + + // Get all .rs files (excluding mod.rs and example.rs) + let rs_files: Vec = fs::read_dir(&definitions_dir) + .expect("Failed to read definitions directory") + .filter_map(|entry| { + let entry = entry.ok()?; + let path = entry.path(); + let stem = path.file_stem()?.to_string_lossy().to_string(); + + if path.extension()? == "rs" && stem != "mod" && stem != "example" { + Some(stem) + } else { + None + } + }) + .collect(); + + // Try to extract service name from each file + let mut parse_failures = Vec::new(); + let mut empty_files = Vec::new(); + + for filename in &rs_files { + let file_path = definitions_dir.join(format!("{}.rs", filename)); + + match fs::read_to_string(&file_path) { + Ok(content) => { + if content.trim().is_empty() { + empty_files.push(filename.clone()); + } else if extract_service_name(&content).is_none() { + parse_failures.push(filename.clone()); + } + } + Err(e) => { + panic!("Failed to read {}.rs: {}", filename, e); + } + } + } + + let mut errors = Vec::new(); + + if !empty_files.is_empty() { + errors.push(format!( + "Empty service definition files found:\n{}", + empty_files + .iter() + .map(|f| format!(" - {}.rs", f)) + .collect::>() + .join("\n") + )); + } + + if !parse_failures.is_empty() { + errors.push(format!( + "Service definition files without parseable fn name(&self) method:\n{}\n\n\ + Each service definition must implement:\n\ + fn name(&self) -> &'static str {{\n\ + \"Service Name\"\n\ + }}", + parse_failures + .iter() + .map(|f| format!(" - {}.rs", f)) + .collect::>() + .join("\n") + )); + } + + if !errors.is_empty() { + panic!("{}", errors.join("\n\n")); + } +} + +#[test] +fn test_service_definition_has_required_fields() { + let registry = ServiceDefinitionRegistry::all_service_definitions(); + + for service in registry { + // Every service must have non-empty name + assert!( + 
!ServiceDefinition::name(&service).is_empty(), + "Service has empty name" + ); + + // Name should be reasonable length (< 40 chars) + assert!( + service.name().len() < 40, + "Service name '{}' is too long; must be < 40 characters", + service.name() + ); + + // Every service must have description + assert!( + !service.description().is_empty(), + "Service '{}' has empty description", + service.name() + ); + + // Description should be reasonable length + assert!( + service.description().len() < 100, + "Service '{}' description is too long; must be < 100 characters", + service.name() + ); + } +} + +#[test] +fn test_service_patterns_use_appropriate_port_types() { + let registry = ServiceDefinitionRegistry::all_service_definitions(); + + // Build map of port numbers to their PortBase names by iterating + let well_known_ports: std::collections::HashMap = PortBase::iter() + .filter_map(|port_base| { + // Skip Custom variants + if matches!(port_base, PortBase::Custom(_)) { + None + } else { + Some((port_base, format!("PortBase::{}", port_base.discriminant()))) + } + }) + .collect(); + + for service in registry { + let pattern = service.discovery_pattern(); + let service_name = ServiceDefinition::name(&service); + + check_port_usage(&pattern, &well_known_ports, service_name); + } +} + +fn check_port_usage( + pattern: &Pattern, + well_known_ports: &std::collections::HashMap, + service_name: &str, +) { + match pattern { + Pattern::Port(port_base) | Pattern::Endpoint(port_base, .., None) => { + if let PortBase::Custom(_) = port_base { + if let Some(named_constant) = well_known_ports.get(&port_base) { + panic!( + "Service '{}' uses custom port {} but should use {} instead", + service_name, port_base, named_constant + ); + } + } + } + Pattern::AnyOf(patterns) | Pattern::AllOf(patterns) => { + for p in patterns { + check_port_usage(p, well_known_ports, service_name); + } + } + Pattern::Not(p) => { + check_port_usage(p, well_known_ports, service_name); + } + _ => {} + } +} + 
+#[tokio::test] +async fn test_service_patterns_are_specific_enough() { + let registry = ServiceDefinitionRegistry::all_service_definitions(); + let words_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")) + .join("src") + .join("tests") + .join("words.json"); + + // Ensure the words file exists, download if necessary + if !words_path.exists() { + eprintln!("Words dictionary not found, downloading..."); + let url = + "https://raw.githubusercontent.com/dwyl/english-words/master/words_dictionary.json"; + + // Create the directory if it doesn't exist + if let Some(parent) = words_path.parent() { + std::fs::create_dir_all(parent).unwrap(); + } + + // Download and save the file - use async client instead + let response = reqwest::get(url) + .await + .expect("Failed to download words dictionary"); + let content = response.text().await.expect("Failed to read response body"); + std::fs::write(&words_path, content).expect("Failed to write words.json"); + eprintln!("Downloaded words dictionary to {:?}", words_path); + } + + let words_file = File::open(&words_path).unwrap(); + let reader = BufReader::new(words_file); + let words_map: HashMap = serde_json::from_reader(reader).unwrap(); + let words: HashSet = words_map.into_keys().collect(); + + // Get all non-custom PortBase variants by iterating + let common_ports: Vec = PortBase::iter() + .filter_map(|port_base| { + // Skip Custom variants + if matches!(port_base, PortBase::Custom(_)) { + None + } else { + Some(port_base) + } + }) + .collect(); + + for service in registry { + // Generic services always pass + if service.is_generic() { + continue; + } + + let pattern = service.discovery_pattern(); + let service_name = ServiceDefinition::name(&service); + + check_pattern_specificity(&pattern, &common_ports, service_name, words.clone()); + } +} + +fn check_pattern_specificity( + pattern: &Pattern, + common_ports: &[PortBase], + service_name: &str, + words: HashSet, +) { + match pattern { + // Port-only patterns on common ports 
without other criteria = fail + Pattern::Port(port_base) => { + if common_ports.contains(&port_base) { + panic!( + "Service '{}' uses port-only pattern on common port {} without additional criteria. \ + This could cause false positives. Consider using:\n\ + 1. Pattern::Endpoint with a unique path/response\n\ + 2. Pattern::AllOf combining port with other criteria\n\ + 3. Mark service as is_generic = true if it's truly generic (ie it represents the implementation of a protocol, not something provided by a specific vendor)", + service_name, + port_base.discriminant() + ); + } + } + + // AnyOf with only port patterns on common ports = fail + Pattern::AnyOf(patterns) => { + let all_are_common_port_patterns = patterns.iter().all(|p| { + if let Pattern::Port(port_base) = p { + common_ports.contains(&port_base) + } else { + false + } + }); + + if all_are_common_port_patterns && !patterns.is_empty() { + panic!( + "Service '{}' uses AnyOf with only common port patterns. \ + This could cause false positives. 
Use more specific patterns", + service_name + ); + } + + // Check each sub-pattern recursively + for p in patterns { + check_pattern_specificity(p, common_ports, service_name, words.clone()); + } + } + + // Endpoint patterns with common port/path and match strings that could lead to false positive = fail + Pattern::Endpoint(port_base, path, body_match_string, status_range) => { + let match_string_lower = body_match_string.to_lowercase(); + let is_short_match_string = match_string_lower.len() < 5; + + // Another service is likely to be listening on this port on other hosts, so need to be more stringent + let port_is_common = common_ports.contains(&port_base); + + // Path is unique/specific enough, even if match string alone is likely to cause false positives + let path_contains_service_name = path.contains(service_name); + + // Endpoint is probably not unique to service, and other services might respond to it + let is_common_endpoint = !path_contains_service_name + && port_is_common + && (*path == "/" || *path == "/api/" || *path == "/home/"); + + // Potential to false positive with dashboards that display service name + let match_string_is_service_name = match_string_lower == service_name.to_lowercase(); + + // Non-compound strings have potential to false positive with dashboards that display service name + let match_string_is_singular = !match_string_lower.contains(" ") + && !match_string_lower.contains(".") + && !match_string_lower.contains("_") + && !match_string_lower.contains("-") + && !match_string_lower.contains(",") + && !match_string_lower.contains("/"); + + // Potential to false positive by being found in random strings displayed by other services + let is_substring_of_any_word = if is_short_match_string { + words.iter().any(|w| w.contains(&match_string_lower)) && !path_contains_service_name + } else { + false + }; + + let expected_range = status_range.as_ref().unwrap_or(&(200..400)); + let range_includes_redirects = expected_range.start < 400 && 
expected_range.end > 300; + + if is_short_match_string && range_includes_redirects && port_is_common { + panic!( + "Service '{}' uses a match string '{}' that is too short ({} characters) and also accepts redirects. + This could cause false positives. Please disallow redirects by passing Some(200..300) as the allowed status range + or update the match string to be longer.", + service_name, + body_match_string, + match_string_lower.len(), + ); + }; + + if is_common_endpoint && match_string_is_service_name { + panic!( + "Service '{}' uses a match string '{}' that is the same as the name of the service. This could cause false positives, \ + as dashboard services often will contain service names in their own endpoint responses, and as such could get detected as this service + Please provide a match string that contains text that distinguishes it from the service name", + service_name, body_match_string + ); + } + + if is_common_endpoint && match_string_is_singular { + panic!( + "Service '{}' uses a match string '{}' that is a singular word. This could cause false positives, \ + as dashboard services often will contain service names in their own endpoint responses, and as such could get detected as this service + Please provide a compound match string - multiple words separated by one of the following \ + delimiters: \".\", \"_\", \"/\", \",\", or \"-\"", + service_name, body_match_string + ); + } + + if is_common_endpoint && is_substring_of_any_word { + panic!( + "Service '{}' uses endpoint pattern at root path '/' on common port {} \ + with a match string '{}' that is a substring of at least one of a common english word. This could cause false positives. \ + Consider:\n\ + 1. Use a more specific path (e.g., '/api/status' instead of '/')\n\ + 2. Use a longer, more unique match string\n\ + 3. 
Use Pattern::AllOf to combine multiple criteria", + service_name, port_base, body_match_string + ); + } + } + + // Other patterns are generally fine + _ => {} + } +} + +#[test] +fn test_service_definition_serialization() { + let registry = ServiceDefinitionRegistry::all_service_definitions(); + + // Test that we can serialize and deserialize service definitions + for service in registry.iter().take(5) { + // Test first 5 to save time + // Serialize to JSON + let json = serde_json::to_string(&service) + .expect(&format!("Failed to serialize {}", service.name())); + + // Deserialize back + let deserialized: Box = serde_json::from_str(&json) + .expect(&format!("Failed to deserialize {}", service.name())); + + // Verify key fields match + assert_eq!( + service.name(), + deserialized.name(), + "Name mismatch after serialization" + ); + assert_eq!( + service.description(), + deserialized.description(), + "Description mismatch after serialization" + ); + } +} +#[tokio::test] +async fn test_service_definition_logo_urls_resolve() { + let registry = ServiceDefinitionRegistry::all_service_definitions(); + let client = reqwest::Client::builder() + .timeout(std::time::Duration::from_secs(5)) + .build() + .expect("Failed to create HTTP client"); + + const ALLOWED_DOMAINS: &[&str] = &["cdn.jsdelivr.net", "simpleicons.org", "vectorlogo.zone"]; + + for service in registry { + let logo_url = service.logo_url(); + + // Skip services without logo URLs + if logo_url.is_empty() { + continue; + } + + // Check if it's a local file path or external URL + if logo_url.starts_with('/') { + // Local file path like /logos/netvisor-logo.png + assert!( + logo_url.starts_with("/logos/"), + "Service '{}' has local logo URL '{}' that doesn't start with /logos/", + ServiceDefinition::name(&service), + logo_url + ); + // We can't verify local files exist in tests, so just validate the path format + continue; + } + + // Must be a URL - parse it + let url = match reqwest::Url::parse(logo_url) { + 
Ok(url) => url, + Err(e) => { + panic!( + "Service '{}' has invalid logo URL '{}': {}", + ServiceDefinition::name(&service), + logo_url, + e + ); + } + }; + + // Check domain is in allowed list + let domain = url.domain().unwrap_or(""); + let is_allowed = ALLOWED_DOMAINS + .iter() + .any(|allowed| domain.ends_with(allowed)); + + assert!( + is_allowed, + "Service '{}' has logo URL '{}' from unauthorized domain '{}'. \ + Allowed domains: {}", + ServiceDefinition::name(&service), + logo_url, + domain, + ALLOWED_DOMAINS.join(", ") + ); + + // Attempt to fetch the logo URL + match client.head(logo_url).send().await { + Ok(response) => { + assert!( + response.status().is_success(), + "Service '{}' has logo URL '{}' that returned status {}", + ServiceDefinition::name(&service), + logo_url, + response.status() + ); + + // Verify Content-Type is an image + if let Some(content_type) = response.headers().get("content-type") { + let content_type_str = content_type.to_str().unwrap_or(""); + assert!( + content_type_str.starts_with("image/") + || content_type_str.starts_with("text/plain"), + "Service '{}' has logo URL '{}' with non-image Content-Type: {}", + ServiceDefinition::name(&service), + logo_url, + content_type_str + ); + } + } + Err(e) => { + panic!( + "Service '{}' has logo URL '{}' that failed to resolve: {}", + ServiceDefinition::name(&service), + logo_url, + e + ); + } + } + } +} + +#[test] +fn test_service_definition_description_starts_with_capital() { + let registry = ServiceDefinitionRegistry::all_service_definitions(); + + for service in registry { + let description = ServiceDefinition::description(&service); + + // Skip empty descriptions (already caught by another test) + if description.is_empty() { + continue; + } + + let first_char = description.chars().next().unwrap(); + assert!( + first_char.is_uppercase(), + "Service '{}' has description '{}' that doesn't start with a capital letter", + ServiceDefinition::name(&service), + description + ); + } +} + 
+#[test] +fn test_all_service_definition_files_have_mod_declaration() { + use std::collections::HashSet; + use std::fs; + use std::path::PathBuf; + + // Get the definitions directory path + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let definitions_dir = manifest_dir.join("src/server/services/definitions"); + + // Read all .rs files in the definitions directory (excluding mod.rs) + let rs_files: HashSet = fs::read_dir(&definitions_dir) + .expect("Failed to read definitions directory") + .filter_map(|entry| { + let entry = entry.ok()?; + let path = entry.path(); + + // Only process .rs files that aren't mod.rs + if path.extension()? == "rs" + && path.file_stem()? != "mod" + && path.file_stem()? != "example" + { + Some(path.file_stem()?.to_string_lossy().to_string()) + } else { + None + } + }) + .collect(); + + // Read mod.rs and extract all module declarations + let mod_rs_path = definitions_dir.join("mod.rs"); + let mod_rs_content = fs::read_to_string(&mod_rs_path).expect("Failed to read mod.rs"); + + let declared_modules: HashSet = mod_rs_content + .lines() + .filter_map(|line| { + let trimmed = line.trim(); + // Match lines like "pub mod some_module;" + if trimmed.starts_with("pub mod ") && trimmed.ends_with(';') { + let module_name = trimmed + .trim_start_matches("pub mod ") + .trim_end_matches(';') + .trim(); + Some(module_name.to_string()) + } else { + None + } + }) + .collect(); + + // Find files without declarations + let undeclared: Vec<_> = rs_files.difference(&declared_modules).collect(); + + // Find declarations without files (shouldn't happen, but check anyway) + let missing_files: Vec<_> = declared_modules.difference(&rs_files).collect(); + + if !undeclared.is_empty() { + panic!( + "Service definition files exist but are not declared in mod.rs:\n{}\n\ + Add these lines to mod.rs:\n{}", + undeclared + .iter() + .map(|s| format!(" - {}.rs", s)) + .collect::>() + .join("\n"), + undeclared + .iter() + .map(|s| format!("pub mod {};", 
s)) + .collect::>() + .join("\n") + ); + } + + if !missing_files.is_empty() { + panic!( + "Module declarations in mod.rs have no corresponding file:\n{}", + missing_files + .iter() + .map(|s| format!(" - {}", s)) + .collect::>() + .join("\n") + ); + } +} + +#[test] +fn test_service_definition_ids_are_stable() { + use std::collections::HashMap; + use std::path::PathBuf; + use std::process::Command; + + let manifest_dir = PathBuf::from(env!("CARGO_MANIFEST_DIR")); + let definitions_dir = manifest_dir.join("src/server/services/definitions"); + + // Check if we're in a git repository + let git_check = Command::new("git") + .args(["rev-parse", "--git-dir"]) + .current_dir(&manifest_dir) + .output(); + + if git_check.is_err() || !git_check.unwrap().status.success() { + println!("⚠️ Not in a git repository, skipping ID stability test"); + return; + } + + // Check if origin/main exists + let remote_check = Command::new("git") + .args(["rev-parse", "--verify", "origin/main"]) + .current_dir(&manifest_dir) + .output(); + + if remote_check.is_err() || !remote_check.unwrap().status.success() { + println!("⚠️ origin/main not found, skipping ID stability test"); + return; + } + + // Get list of service definition files that existed in origin/main + let git_files = Command::new("git") + .args([ + "ls-tree", + "-r", + "--name-only", + "origin/main", + "backend/src/server/services/definitions/", + ]) + .current_dir(manifest_dir.parent().expect("No parent directory")) + .output() + .expect("Failed to list files from origin/main"); + + if !git_files.status.success() { + println!("⚠️ Failed to read origin/main, skipping ID stability test"); + return; + } + + let committed_files: Vec = String::from_utf8_lossy(&git_files.stdout) + .lines() + .filter(|line| { + line.ends_with(".rs") && !line.ends_with("/mod.rs") && !line.ends_with("/example.rs") + }) + .map(|line| { + line.split('/') + .last() + .unwrap() + .trim_end_matches(".rs") + .to_string() + }) + .collect(); + + if 
committed_files.is_empty() { + println!("⚠️ No committed service definitions found, skipping ID stability test"); + return; + } + + // For each committed file, extract the name() value from origin/main + let mut committed_service_ids: HashMap = HashMap::new(); + + for filename in &committed_files { + let file_path = format!("backend/src/server/services/definitions/{}.rs", filename); + + let file_content = Command::new("git") + .args(["show", &format!("origin/main:{}", file_path)]) + .current_dir(manifest_dir.parent().expect("No parent directory")) + .output() + .expect(&format!("Failed to read {} from origin/main", file_path)); + + if !file_content.status.success() { + continue; + } + + let content = String::from_utf8_lossy(&file_content.stdout); + + // Extract the name() return value + if let Some(name) = extract_service_name(&content) { + committed_service_ids.insert(filename.clone(), name); + } + } + + // Get current service definitions from registry + let registry = ServiceDefinitionRegistry::all_service_definitions(); + + // Now compare: for each file that existed in origin/main, check if the ID changed + let mut changed_ids = Vec::new(); + let mut removed_services = Vec::new(); + + for (filename, committed_id) in &committed_service_ids { + // Check if this service still exists in current registry + let current_has_id = registry.iter().any(|s| s.id() == committed_id); + + if !current_has_id { + // The ID from origin/main is no longer in the registry + // Check if the file still exists + let file_still_exists = definitions_dir.join(format!("{}.rs", filename)).exists(); + + if file_still_exists { + // File exists but ID changed - extract current ID + let current_file_content = + std::fs::read_to_string(definitions_dir.join(format!("{}.rs", filename))).ok(); + + if let Some(content) = current_file_content { + if let Some(new_id) = extract_service_name(&content) { + if new_id != *committed_id { + changed_ids.push(format!( + " - File '{}.rs': ID changed from '{}' 
to '{}'", + filename, committed_id, new_id + )); + } + } + } + } else { + removed_services.push(format!(" - '{}.rs' (ID was '{}')", filename, committed_id)); + } + } + } + + if !changed_ids.is_empty() { + panic!( + "Service definition IDs have changed (this breaks database compatibility):\n{}\n\n\ + Service IDs (derived from name() method) must remain stable once committed to origin/main.\n\ + Changing a service name breaks existing databases that reference the old ID.\n\n\ + If you must rename a service, you need to:\n\ + 1. Provide a database migration script\n\ + 2. Update all service records in the database to use the new ID\n\ + 3. Document this as a breaking change", + changed_ids.join("\n") + ); + } + + if !removed_services.is_empty() { + println!( + "⚠️ Service definition files were removed:\n{}\n\ + If this is intentional, ensure a migration handles orphaned records.", + removed_services.join("\n") + ); + } +} + +// Helper function to extract service name from Rust source code +fn extract_service_name(content: &str) -> Option { + // Use a simple regex-like approach to find: fn name(&self) ... { ... "name" ... 
} + // We need to handle whitespace and newlines between tokens + + // Find "fn name(&self)" + let fn_pos = content.find("fn name(&self)")?; + + // From there, find the opening brace + let after_fn = &content[fn_pos..]; + let brace_pos = after_fn.find('{')?; + + // Now find the first string literal after the brace + let after_brace = &after_fn[brace_pos + 1..]; + + // Find first quote + let first_quote = after_brace.find('"')?; + let after_first_quote = &after_brace[first_quote + 1..]; + + // Find closing quote (need to handle escaped quotes) + let mut end_pos = 0; + let chars: Vec = after_first_quote.chars().collect(); + + for i in 0..chars.len() { + if chars[i] == '"' { + // Check if it's escaped + if i > 0 && chars[i - 1] == '\\' { + continue; + } + end_pos = i; + break; + } + } + + if end_pos == 0 { + return None; + } + + let name: String = chars[..end_pos].iter().collect(); + Some(name) +} diff --git a/backend/src/server/services/impl/virtualization.rs b/backend/src/server/services/impl/virtualization.rs index f1b19a0f..692addb2 100644 --- a/backend/src/server/services/impl/virtualization.rs +++ b/backend/src/server/services/impl/virtualization.rs @@ -5,7 +5,7 @@ use uuid::Uuid; use validator::Validate; use crate::server::shared::{ - entities::Entity, + concepts::Concept, types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, }; @@ -32,10 +32,10 @@ impl HasId for ServiceVirtualization { impl EntityMetadataProvider for ServiceVirtualization { fn color(&self) -> &'static str { - Entity::Virtualization.color() + Concept::Virtualization.color() } fn icon(&self) -> &'static str { - Entity::Virtualization.icon() + Concept::Virtualization.icon() } } diff --git a/backend/src/server/services/service.rs b/backend/src/server/services/service.rs index ae465a6a..b50e8ca0 100644 --- a/backend/src/server/services/service.rs +++ b/backend/src/server/services/service.rs @@ -1,4 +1,5 @@ use crate::server::{ + auth::middleware::AuthenticatedEntity, groups::{ 
r#impl::{base::Group, types::GroupType}, service::GroupService, @@ -9,14 +10,24 @@ use crate::server::{ }, services::r#impl::{base::Service, bindings::Binding, patterns::MatchDetails}, shared::{ - services::traits::CrudService, - storage::{filter::EntityFilter, generic::GenericPostgresStorage, traits::Storage}, + entities::ChangeTriggersTopologyStaleness, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + services::traits::{CrudService, EventBusService}, + storage::{ + filter::EntityFilter, + generic::GenericPostgresStorage, + traits::{StorableEntity, Storage}, + }, types::entities::{EntitySource, EntitySourceDiscriminants}, }, }; use anyhow::anyhow; use anyhow::{Error, Result}; use async_trait::async_trait; +use chrono::Utc; use futures::lock::Mutex; use std::{ collections::HashMap, @@ -31,42 +42,39 @@ pub struct ServiceService { group_service: Arc, group_update_lock: Arc>, service_locks: Arc>>>>, + event_bus: Arc, } -#[async_trait] -impl CrudService for ServiceService { - fn storage(&self) -> &Arc> { - &self.storage +impl EventBusService for ServiceService { + fn event_bus(&self) -> &Arc { + &self.event_bus } -} -impl ServiceService { - pub fn new( - storage: Arc>, - group_service: Arc, - ) -> Self { - Self { - storage, - group_service, - host_service: OnceLock::new(), - group_update_lock: Arc::new(Mutex::new(())), - service_locks: Arc::new(Mutex::new(HashMap::new())), - } + fn get_network_id(&self, entity: &Service) -> Option { + Some(entity.base.network_id) } - - async fn get_service_lock(&self, service_id: &Uuid) -> Arc> { - let mut locks = self.service_locks.lock().await; - locks - .entry(*service_id) - .or_insert_with(|| Arc::new(Mutex::new(()))) - .clone() + fn get_organization_id(&self, _entity: &Service) -> Option { + None } +} - pub fn set_host_service(&self, host_service: Arc) -> Result<(), Arc> { - self.host_service.set(host_service) +#[async_trait] +impl CrudService for ServiceService { + fn storage(&self) -> &Arc> { + 
&self.storage } - pub async fn create_service(&self, service: Service) -> Result { + async fn create( + &self, + service: Service, + authentication: AuthenticatedEntity, + ) -> Result { + let service = if service.id == Uuid::nil() { + Service::new(service.base) + } else { + service + }; + let lock = self.get_service_lock(&service.id).await; let _guard = lock.lock().await; @@ -90,18 +98,30 @@ impl ServiceService { service, existing_service, ); - self.upsert_service(existing_service, service).await? + self.upsert_service(existing_service, service, authentication) + .await? } _ => { - self.storage.create(&service).await?; - tracing::info!( - service_id = %service.id, - service_name = %service.base.name, - host_id = %service.base.host_id, - binding_count = %service.base.bindings.len(), - "Service created" - ); - tracing::trace!("Result: {:?}", service); + let created = self.storage.create(&service).await?; + + let trigger_stale = created.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: created.id, + network_id: self.get_network_id(&created), + organization_id: self.get_organization_id(&created), + entity_type: created.into(), + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + service } }; @@ -109,13 +129,119 @@ impl ServiceService { Ok(service_from_storage) } + async fn update( + &self, + service: &mut Service, + authentication: AuthenticatedEntity, + ) -> Result { + let lock = self.get_service_lock(&service.id).await; + let _guard = lock.lock().await; + + tracing::trace!("Updating service: {:?}", service); + + let current_service = self + .get_by_id(&service.id) + .await? 
+ .ok_or_else(|| anyhow!("Could not find service"))?; + + self.update_group_service_bindings(¤t_service, Some(service), authentication.clone()) + .await?; + + let updated = self.storage.update(service).await?; + let trigger_stale = updated.triggers_staleness(Some(current_service)); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: updated.id, + network_id: self.get_network_id(&updated), + organization_id: self.get_organization_id(&updated), + entity_type: updated.clone().into(), + operation: EntityOperation::Updated, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + + Ok(updated) + } + + async fn delete(&self, id: &Uuid, authentication: AuthenticatedEntity) -> Result<()> { + let lock = self.get_service_lock(id).await; + let _guard = lock.lock().await; + + let service = self + .get_by_id(id) + .await? + .ok_or_else(|| anyhow::anyhow!("Service {} not found", id))?; + + self.update_group_service_bindings(&service, None, authentication.clone()) + .await?; + + self.storage.delete(id).await?; + + let trigger_stale = service.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: service.id, + network_id: self.get_network_id(&service), + organization_id: self.get_organization_id(&service), + entity_type: service.into(), + operation: EntityOperation::Deleted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + Ok(()) + } +} + +impl ServiceService { + pub fn new( + storage: Arc>, + group_service: Arc, + event_bus: Arc, + ) -> Self { + Self { + storage, + group_service, + host_service: OnceLock::new(), + group_update_lock: Arc::new(Mutex::new(())), + service_locks: Arc::new(Mutex::new(HashMap::new())), + event_bus, + } + } + + async fn get_service_lock(&self, service_id: &Uuid) -> Arc> { + let mut locks = 
self.service_locks.lock().await; + locks + .entry(*service_id) + .or_insert_with(|| Arc::new(Mutex::new(()))) + .clone() + } + + pub fn set_host_service(&self, host_service: Arc) -> Result<(), Arc> { + self.host_service.set(host_service) + } + pub async fn upsert_service( &self, mut existing_service: Service, new_service_data: Service, + authentication: AuthenticatedEntity, ) -> Result { let mut binding_updates = 0; + let service_before_updates = existing_service.clone(); + let lock = self.get_service_lock(&existing_service.id).await; let _guard = lock.lock().await; @@ -200,13 +326,23 @@ impl ServiceService { }; if !data.is_empty() { - tracing::info!( - service_id = %existing_service.id, - service_name = %existing_service.base.name, - updates = %data.join(", "), - "Upserted service with new data" - ); - tracing::debug!("Result {:?}", existing_service); + let trigger_stale = existing_service.triggers_staleness(Some(service_before_updates)); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: existing_service.id, + network_id: self.get_network_id(&existing_service), + organization_id: self.get_organization_id(&existing_service), + entity_type: existing_service.clone().into(), + operation: EntityOperation::Updated, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; } else { tracing::debug!( "Service upsert - no changes needed for {}", @@ -217,35 +353,11 @@ impl ServiceService { Ok(existing_service) } - pub async fn update_service(&self, mut service: Service) -> Result { - let lock = self.get_service_lock(&service.id).await; - let _guard = lock.lock().await; - - tracing::trace!("Updating service: {:?}", service); - - let current_service = self - .get_by_id(&service.id) - .await? 
- .ok_or_else(|| anyhow!("Could not find service"))?; - - self.update_group_service_bindings(¤t_service, Some(&service)) - .await?; - - self.storage.update(&mut service).await?; - tracing::info!( - service_id = %service.id, - service_name = %service.base.name, - host_id = %service.base.host_id, - "Service updated" - ); - tracing::trace!("Result: {:?}", service); - Ok(service) - } - async fn update_group_service_bindings( &self, current_service: &Service, updates: Option<&Service>, + authenticated: AuthenticatedEntity, ) -> Result<(), Error> { tracing::trace!( "Updating group bindings referencing {:?}, with changes {:?}", @@ -297,13 +409,19 @@ impl ServiceService { }) .collect(); - // Execute updates sequentially - for mut group in groups_to_update { - self.group_service.update(&mut group).await?; + if !groups_to_update.is_empty() { + // Execute updates sequentially + for mut group in groups_to_update { + self.group_service + .update(&mut group, authenticated.clone()) + .await?; + } + tracing::info!( + service = %current_service, + "Updated group bindings" + ); } - tracing::info!("Updated group bindings referencing {}", current_service); - Ok(()) } @@ -397,10 +515,10 @@ impl ServiceService { mutable_service.base.network_id = updated_host.base.network_id; tracing::info!( - "Reassigned service {} bindings for from host {} to host {}", - mutable_service, - original_host, - updated_host + service = %mutable_service, + origin_host = %original_host, + destination_host = %updated_host, + "Reassigned service bindings", ); tracing::trace!( @@ -412,25 +530,4 @@ impl ServiceService { mutable_service } - - pub async fn delete_service(&self, id: &Uuid) -> Result<()> { - let lock = self.get_service_lock(id).await; - let _guard = lock.lock().await; - - let service = self - .get_by_id(id) - .await? 
- .ok_or_else(|| anyhow::anyhow!("Service {} not found", id))?; - - self.update_group_service_bindings(&service, None).await?; - - self.storage.delete(id).await?; - tracing::info!( - "Deleted service {}: {} for host {}", - service.base.name, - service.id, - service.base.host_id - ); - Ok(()) - } } diff --git a/backend/src/server/services/tests.rs b/backend/src/server/services/tests.rs index 4628a647..0d67baad 100644 --- a/backend/src/server/services/tests.rs +++ b/backend/src/server/services/tests.rs @@ -2,6 +2,7 @@ use serial_test::serial; use crate::{ server::{ + auth::middleware::AuthenticatedEntity, groups::r#impl::types::GroupType, services::r#impl::{bindings::Binding, patterns::MatchDetails}, shared::{ @@ -19,19 +20,19 @@ async fn test_service_deduplication_on_create() { let organization = services .organization_service - .create(organization()) + .create(organization(), AuthenticatedEntity::System) .await .unwrap(); let network = services .network_service - .create(network(&organization.id)) + .create(network(&organization.id), AuthenticatedEntity::System) .await .unwrap(); let subnet_obj = subnet(&network.id); services .subnet_service - .create(subnet_obj.clone()) + .create(subnet_obj.clone(), AuthenticatedEntity::System) .await .unwrap(); @@ -53,7 +54,11 @@ async fn test_service_deduplication_on_create() { let (created_host, created1) = services .host_service - .create_host_with_services(host_obj.clone(), vec![svc1.clone()]) + .create_host_with_services( + host_obj.clone(), + vec![svc1.clone()], + AuthenticatedEntity::System, + ) .await .unwrap(); @@ -72,7 +77,7 @@ async fn test_service_deduplication_on_create() { let created2 = services .service_service - .create_service(svc2.clone()) + .create(svc2.clone(), AuthenticatedEntity::System) .await .unwrap(); @@ -92,19 +97,19 @@ async fn test_service_deletion_cleans_up_relationships() { let organization = services .organization_service - .create(organization()) + .create(organization(), 
AuthenticatedEntity::System) .await .unwrap(); let network = services .network_service - .create(network(&organization.id)) + .create(network(&organization.id), AuthenticatedEntity::System) .await .unwrap(); let subnet_obj = subnet(&network.id); let created_subnet = services .subnet_service - .create(subnet_obj.clone()) + .create(subnet_obj.clone(), AuthenticatedEntity::System) .await .unwrap(); @@ -121,7 +126,11 @@ async fn test_service_deletion_cleans_up_relationships() { services .host_service - .create_host_with_services(host_obj.clone(), vec![svc.clone()]) + .create_host_with_services( + host_obj.clone(), + vec![svc.clone()], + AuthenticatedEntity::System, + ) .await .unwrap(); @@ -136,12 +145,16 @@ async fn test_service_deletion_cleans_up_relationships() { group_obj.base.group_type = GroupType::RequestPath { service_bindings: vec![created_svc.base.bindings[0].id()], }; - let created_group = services.group_service.create(group_obj).await.unwrap(); + let created_group = services + .group_service + .create(group_obj, AuthenticatedEntity::System) + .await + .unwrap(); // Delete service services .service_service - .delete_service(&created_svc.id) + .delete(&created_svc.id, AuthenticatedEntity::System) .await .unwrap(); diff --git a/backend/src/server/shared/concepts.rs b/backend/src/server/shared/concepts.rs new file mode 100644 index 00000000..910f793a --- /dev/null +++ b/backend/src/server/shared/concepts.rs @@ -0,0 +1,60 @@ +use serde::{Deserialize, Serialize}; +use strum_macros::{Display, EnumDiscriminants, EnumIter, IntoStaticStr}; + +use crate::server::shared::types::metadata::{EntityMetadataProvider, HasId}; + +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Hash, + EnumDiscriminants, + EnumIter, + IntoStaticStr, + Serialize, + Deserialize, + Display, +)] +#[strum_discriminants(derive(Display, Hash, EnumIter, IntoStaticStr))] +pub enum Concept { + Dns, + Vpn, + Gateway, + ReverseProxy, + IoT, + Storage, + Virtualization, +} + +impl HasId for Concept { + fn 
id(&self) -> &'static str { + self.into() + } +} + +impl EntityMetadataProvider for Concept { + fn color(&self) -> &'static str { + match self { + Concept::Dns => "emerald", + Concept::Vpn => "green", + Concept::Gateway => "teal", + Concept::ReverseProxy => "cyan", + Concept::IoT => "yellow", + Concept::Storage => "green", + Concept::Virtualization => "indigo", + } + } + + fn icon(&self) -> &'static str { + match self { + Concept::Dns => "Search", + Concept::Vpn => "VenetianMask", + Concept::Gateway => "Router", + Concept::ReverseProxy => "Split", + Concept::IoT => "Cpu", + Concept::Storage => "HardDrive", + Concept::Virtualization => "MonitorCog", + } + } +} diff --git a/backend/src/server/shared/entities.rs b/backend/src/server/shared/entities.rs index ee24550b..4eb4ff45 100644 --- a/backend/src/server/shared/entities.rs +++ b/backend/src/server/shared/entities.rs @@ -1,90 +1,189 @@ +use crate::server::groups::r#impl::base::Group; +use crate::server::hosts::r#impl::interfaces::Interface; +use crate::server::hosts::r#impl::ports::Port; +use crate::server::organizations::r#impl::invites::Invite; +use crate::server::services::r#impl::base::Service; +use crate::server::subnets::r#impl::base::Subnet; +use crate::server::topology::types::base::Topology; +use serde::{Deserialize, Serialize}; use strum_macros::{Display, EnumDiscriminants, EnumIter, IntoStaticStr}; -use crate::server::shared::types::metadata::{EntityMetadataProvider, HasId}; +use crate::server::{ + api_keys::r#impl::base::ApiKey, + daemons::r#impl::base::Daemon, + discovery::r#impl::base::Discovery, + hosts::r#impl::base::Host, + networks::r#impl::Network, + organizations::r#impl::base::Organization, + shared::types::metadata::{EntityMetadataProvider, HasId}, + users::r#impl::base::User, +}; -#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, EnumDiscriminants, EnumIter, IntoStaticStr)] -#[strum_discriminants(derive(Display))] +// Trait use to determine whether a given property change on an entity should 
trigger a rebuild of topology +pub trait ChangeTriggersTopologyStaleness { + fn triggers_staleness(&self, _other: Option) -> bool; +} + +#[derive( + Debug, + Clone, + PartialEq, + Eq, + Hash, + EnumDiscriminants, + IntoStaticStr, + Serialize, + Deserialize, + Display, +)] +#[strum_discriminants(derive(Display, Hash, EnumIter, IntoStaticStr))] pub enum Entity { - Network, - ApiKey, - User, - Discovery, - Daemon, - - Host, - Service, - Port, - Interface, - - Subnet, - Group, - Topology, - - Dns, - Vpn, - Gateway, - ReverseProxy, - IoT, - Storage, - Virtualization, -} - -impl HasId for Entity { + Organization(Organization), + Invite(Invite), + Network(Network), + ApiKey(ApiKey), + User(User), + Discovery(Discovery), + Daemon(Daemon), + + Host(Host), + Service(Service), + Port(Port), + Interface(Interface), + + Subnet(Subnet), + Group(Group), + Topology(Topology), +} + +impl HasId for EntityDiscriminants { fn id(&self) -> &'static str { self.into() } } -impl EntityMetadataProvider for Entity { +impl EntityMetadataProvider for EntityDiscriminants { fn color(&self) -> &'static str { match self { - Entity::Network => "gray", - Entity::Daemon => "green", - Entity::Discovery => "green", - Entity::ApiKey => "yellow", - Entity::User => "blue", - - Entity::Host => "blue", - Entity::Service => "purple", - Entity::Interface => "cyan", - Entity::Port => "cyan", - - Entity::Dns => "emerald", - Entity::Vpn => "green", - Entity::Gateway => "teal", - Entity::ReverseProxy => "cyan", - - Entity::Subnet => "orange", - Entity::Group => "rose", - Entity::Topology => "pink", - - Entity::IoT => "yellow", - Entity::Storage => "green", - Entity::Virtualization => "indigo", + EntityDiscriminants::Organization => "blue", + EntityDiscriminants::Network => "gray", + EntityDiscriminants::Daemon => "green", + EntityDiscriminants::Discovery => "green", + EntityDiscriminants::ApiKey => "yellow", + EntityDiscriminants::User => "blue", + EntityDiscriminants::Invite => "green", + + 
EntityDiscriminants::Host => "blue", + EntityDiscriminants::Service => "purple", + EntityDiscriminants::Interface => "cyan", + EntityDiscriminants::Port => "cyan", + + EntityDiscriminants::Subnet => "orange", + EntityDiscriminants::Group => "rose", + EntityDiscriminants::Topology => "pink", } } fn icon(&self) -> &'static str { match self { - Entity::Network => "Globe", - Entity::User => "User", - Entity::ApiKey => "Key", - Entity::Daemon => "SatelliteDish", - Entity::Discovery => "Radar", - Entity::Host => "Server", - Entity::Service => "Layers", - Entity::Interface => "Binary", - Entity::Dns => "Search", - Entity::Vpn => "VenetianMask", - Entity::Port => "EthernetPort", - Entity::Gateway => "Router", - Entity::ReverseProxy => "Split", - Entity::Subnet => "Network", - Entity::Group => "Group", - Entity::Topology => "ChartNetwork", - Entity::IoT => "Cpu", - Entity::Storage => "HardDrive", - Entity::Virtualization => "MonitorCog", + EntityDiscriminants::Organization => "Building", + EntityDiscriminants::Network => "Globe", + EntityDiscriminants::User => "User", + EntityDiscriminants::Invite => "UserPlus", + EntityDiscriminants::ApiKey => "Key", + EntityDiscriminants::Daemon => "SatelliteDish", + EntityDiscriminants::Discovery => "Radar", + EntityDiscriminants::Host => "Server", + EntityDiscriminants::Service => "Layers", + EntityDiscriminants::Interface => "Binary", + EntityDiscriminants::Port => "EthernetPort", + EntityDiscriminants::Subnet => "Network", + EntityDiscriminants::Group => "Group", + EntityDiscriminants::Topology => "ChartNetwork", } } } + +impl From for Entity { + fn from(value: Organization) -> Self { + Self::Organization(value) + } +} + +impl From for Entity { + fn from(value: Invite) -> Self { + Self::Invite(value) + } +} + +impl From for Entity { + fn from(value: Network) -> Self { + Self::Network(value) + } +} + +impl From for Entity { + fn from(value: ApiKey) -> Self { + Self::ApiKey(value) + } +} + +impl From for Entity { + fn from(value: User) 
-> Self { + Self::User(value) + } +} + +impl From for Entity { + fn from(value: Discovery) -> Self { + Self::Discovery(value) + } +} + +impl From for Entity { + fn from(value: Daemon) -> Self { + Self::Daemon(value) + } +} + +impl From for Entity { + fn from(value: Host) -> Self { + Self::Host(value) + } +} + +impl From for Entity { + fn from(value: Service) -> Self { + Self::Service(value) + } +} + +impl From for Entity { + fn from(value: Port) -> Self { + Self::Port(value) + } +} + +impl From for Entity { + fn from(value: Interface) -> Self { + Self::Interface(value) + } +} + +impl From for Entity { + fn from(value: Subnet) -> Self { + Self::Subnet(value) + } +} + +impl From for Entity { + fn from(value: Group) -> Self { + Self::Group(value) + } +} + +impl From for Entity { + fn from(value: Topology) -> Self { + Self::Topology(value) + } +} diff --git a/backend/src/server/shared/events/bus.rs b/backend/src/server/shared/events/bus.rs new file mode 100644 index 00000000..52bc6429 --- /dev/null +++ b/backend/src/server/shared/events/bus.rs @@ -0,0 +1,291 @@ +use std::{collections::HashMap, sync::Arc, time::Duration}; + +use strum::IntoDiscriminant; +use tokio::sync::RwLock; + +use anyhow::Result; +use async_trait::async_trait; +use tokio::sync::broadcast; +use uuid::Uuid; + +use crate::server::shared::{ + entities::EntityDiscriminants, + events::types::{AuthEvent, AuthOperation, EntityEvent, EntityOperation, Event}, +}; + +// Trait for event subscribers +#[async_trait] +pub trait EventSubscriber: Send + Sync { + /// Return the types of events this subscriber cares about + fn event_filter(&self) -> EventFilter; + + /// Handle a batch of events (vec will have 1 element if debounce_window_ms = 0) + async fn handle_events(&self, events: Vec) -> Result<()>; + + /// Optional: debounce window in milliseconds (default: 0 = no batching) + fn debounce_window_ms(&self) -> u64 { + 0 + } + + /// Optional: subscriber name for debugging + fn name(&self) -> &str; +} + 
+#[derive(Debug, Clone)] +pub struct EventFilter { + // None = match all values (ignore as a filter) + pub entity_operations: Option>>>, + pub auth_operations: Option>, + pub network_ids: Option>, +} + +impl EventFilter { + pub fn all() -> Self { + Self { + entity_operations: None, + auth_operations: None, + network_ids: None, + } + } + + pub fn entity_only( + entity_operations: HashMap>>, + ) -> Self { + Self { + entity_operations: Some(entity_operations), + auth_operations: None, + network_ids: None, + } + } + + pub fn auth_only(auth_operations: Vec) -> Self { + Self { + entity_operations: None, + auth_operations: Some(auth_operations), + network_ids: None, + } + } + + pub fn matches(&self, event: &Event) -> bool { + match event { + Event::Entity(entity_event) => self.matches_entity(entity_event), + Event::Auth(auth_event) => self.matches_auth(auth_event), + } + } + + fn matches_entity(&self, event: &EntityEvent) -> bool { + // Check network filter + if let Some(networks) = &self.network_ids + && let Some(network_id) = event.network_id + && !networks.contains(&network_id) + { + return false; + } + + // Check entity operation filter + if let Some(entity_operations) = &self.entity_operations { + if let Some(operations) = entity_operations.get(&event.entity_type.discriminant()) { + if operations.is_none() { + return true; + } else if let Some(operations) = operations + && operations.contains(&event.operation) + { + return true; + } + } + return false; + } + + true + } + + fn matches_auth(&self, event: &AuthEvent) -> bool { + // Check network filter (using organization_id for auth events) + if let Some(networks) = &self.network_ids + && let Some(org_id) = event.organization_id + && !networks.contains(&org_id) + { + return false; + } + + // Check auth operation filter + if let Some(auth_operations) = &self.auth_operations { + return auth_operations.contains(&event.operation); + } + + true + } +} + +/// Internal: Manages batching state for a subscriber +struct 
SubscriberState { + subscriber: Arc, + pending_events: Arc>>, +} + +impl SubscriberState { + fn new(subscriber: Arc) -> Self { + let debounce_ms = subscriber.debounce_window_ms(); + let pending = Arc::new(RwLock::new(Vec::new())); + + if debounce_ms > 0 { + // Spawn background flush task for subscribers with batching + let pending_clone = pending.clone(); + let subscriber_clone = subscriber.clone(); + let debounce_window = Duration::from_millis(debounce_ms); + + tokio::spawn(async move { + let mut interval = tokio::time::interval(debounce_window); + loop { + interval.tick().await; + Self::flush_batch(&subscriber_clone, &pending_clone).await; + } + }); + } + + Self { + subscriber, + pending_events: pending, + } + } + + async fn flush_batch(subscriber: &Arc, pending: &Arc>>) { + let events: Vec = { + let mut p = pending.write().await; + if p.is_empty() { + return; + } + + // Deduplicate events (requires PartialEq on Event) + let mut unique_events = Vec::new(); + for event in p.drain(..) { + if !unique_events.contains(&event) { + unique_events.push(event); + } + } + unique_events + }; + + if events.is_empty() { + return; + } + + tracing::debug!( + subscriber = %subscriber.name(), + event_count = events.len(), + "Subscriber processing event batch" + ); + + if let Err(e) = subscriber.handle_events(events).await { + tracing::error!( + subscriber = %subscriber.name(), + error = %e, + "Subscriber failed to handle batched events", + ); + } + } + + async fn add_event(&self, event: Event) { + let debounce_window = self.subscriber.debounce_window_ms(); + + if debounce_window == 0 { + // No batching - handle immediately + if let Err(e) = self.subscriber.handle_events(vec![event]).await { + tracing::error!( + subscriber = %self.subscriber.name(), + error = %e, + "Subscriber failed to handle event", + ); + } + } else { + // Add to batch + let mut pending = self.pending_events.write().await; + pending.push(event); + } + } +} + +pub struct EventBus { + sender: broadcast::Sender, + 
subscribers: Arc>>, +} + +impl Default for EventBus { + fn default() -> Self { + Self::new() + } +} + +impl EventBus { + pub fn new() -> Self { + let (sender, _) = broadcast::channel(1000); + + Self { + sender, + subscribers: Arc::new(RwLock::new(Vec::new())), + } + } + + /// Register a subscriber + pub async fn register_subscriber(&self, subscriber: Arc) { + let state = SubscriberState::new(subscriber.clone()); + let mut subscribers = self.subscribers.write().await; + subscribers.push(state); + + tracing::info!( + subscriber = %subscriber.name(), + debounce_ms = subscriber.debounce_window_ms(), + "Registered event subscriber", + ); + } + + /// Publish an entity event + pub async fn publish_entity(&self, event: EntityEvent) -> Result<()> { + self.publish(Event::Entity(Box::new(event))).await + } + + /// Publish an auth event + pub async fn publish_auth(&self, event: AuthEvent) -> Result<()> { + self.publish(Event::Auth(event)).await + } + + /// Publish an event to all subscribers + async fn publish(&self, event: Event) -> Result<()> { + match &event { + Event::Entity(e) => { + tracing::debug!( + operation = %e.operation, + entity_type = %e.entity_type, + entity_id = %e.entity_id, + "Publishing entity event", + ); + } + Event::Auth(e) => { + tracing::debug!( + operation = ?e.operation, + user_id = ?e.user_id, + "Publishing auth event", + ); + } + } + + // Send to broadcast channel (non-blocking) + let _ = self.sender.send(event.clone()); + + // Notify subscribers + let subscribers = self.subscribers.read().await; + + for state in subscribers.iter() { + if state.subscriber.event_filter().matches(&event) { + state.add_event(event.clone()).await; + } + } + + Ok(()) + } + + /// Get a receiver for raw event stream (useful for SSE) + pub fn subscribe_channel(&self) -> broadcast::Receiver { + self.sender.subscribe() + } +} diff --git a/backend/src/server/shared/events/mod.rs b/backend/src/server/shared/events/mod.rs new file mode 100644 index 00000000..2fe40530 --- 
/dev/null +++ b/backend/src/server/shared/events/mod.rs @@ -0,0 +1,2 @@ +pub mod bus; +pub mod types; diff --git a/backend/src/server/shared/events/types.rs b/backend/src/server/shared/events/types.rs new file mode 100644 index 00000000..723fb691 --- /dev/null +++ b/backend/src/server/shared/events/types.rs @@ -0,0 +1,206 @@ +use crate::server::{auth::middleware::AuthenticatedEntity, shared::entities::Entity}; +use chrono::{DateTime, Utc}; +use serde::Serialize; +use std::{fmt::Display, net::IpAddr}; +use strum::IntoDiscriminant; +use uuid::Uuid; + +#[derive(Debug, Clone, Serialize)] +pub enum Event { + Entity(Box), + Auth(AuthEvent), +} + +impl Event { + pub fn id(&self) -> Uuid { + match self { + Event::Auth(a) => a.id, + Event::Entity(e) => e.id, + } + } + + pub fn log(&self) { + match self { + Event::Entity(event) => { + let network_id_str = event + .network_id + .map(|n| n.to_string()) + .unwrap_or("N/A".to_string()); + let org_id_str = event + .organization_id + .map(|n| n.to_string()) + .unwrap_or("N/A".to_string()); + + tracing::info!( + entity_type = %event.entity_type, + entity_id = %event.entity_id, + network_id = %network_id_str, + organization_id = %org_id_str, + operation = %event.operation, + "Entity Event Logged" + ); + } + Event::Auth(event) => { + let user_id_str = event + .user_id + .map(|n| n.to_string()) + .unwrap_or("N/A".to_string()); + let user_agent_str = event + .user_agent + .as_ref() + .map(|u| u.to_owned()) + .unwrap_or("unknown".to_string()); + let org_id_str = event + .organization_id + .map(|n| n.to_string()) + .unwrap_or("N/A".to_string()); + + tracing::info!( + ip = %event.ip_address, + organization_id = %org_id_str, + user_id = %user_id_str, + user_agent = %user_agent_str, + operation = %event.operation, + "Auth Event Logged" + ); + } + } + } +} + +impl Display for Event { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Event::Auth(a) => write!( + f, + "{{ id: {}, user_id: {}, 
organization_id: {}, operation: {}, timestamp: {}, ip_address: {}, user_agent: {}, metadata: {}, authentication: {} }}", + a.id, + a.user_id + .map(|u| u.to_string()) + .unwrap_or("None".to_string()), + a.organization_id + .map(|u| u.to_string()) + .unwrap_or("None".to_string()), + a.operation, + a.timestamp, + a.ip_address, + a.user_agent.clone().unwrap_or("Unknown".to_string()), + a.metadata, + a.authentication + ), + Event::Entity(e) => write!( + f, + "{{ id: {}, entity_type: {}, entity_id: {}, network_id: {}, organization_id: {}, operation: {}, timestamp: {}, metadata: {}, authentication: {} }}", + e.id, + e.entity_type.discriminant(), + e.entity_id, + e.network_id + .map(|u| u.to_string()) + .unwrap_or("None".to_string()), + e.organization_id + .map(|u| u.to_string()) + .unwrap_or("None".to_string()), + e.operation, + e.timestamp, + e.metadata, + e.authentication + ), + } + } +} + +impl PartialEq for Event { + fn eq(&self, other: &Self) -> bool { + match (self, other) { + (Event::Auth(a1), Event::Auth(a2)) => a1 == a2, + (Event::Entity(e1), Event::Entity(e2)) => e1 == e2, + _ => false, + } + } +} + +#[derive(Debug, Clone, Serialize, PartialEq, Eq, strum::Display)] +pub enum AuthOperation { + Register, + LoginSuccess, + LoginFailed, + PasswordResetRequested, + PasswordResetCompleted, + PasswordChanged, + EmailVerified, + SessionExpired, + OidcLinked, + OidcUnlinked, + LoggedOut, +} + +#[derive(Debug, Clone, Serialize)] +pub struct AuthEvent { + pub id: Uuid, + pub user_id: Option, // None for failed login with unknown user + pub organization_id: Option, + pub operation: AuthOperation, + pub timestamp: DateTime, + pub ip_address: IpAddr, + pub user_agent: Option, + pub metadata: serde_json::Value, + pub authentication: AuthenticatedEntity, +} + +impl PartialEq for AuthEvent { + fn eq(&self, other: &Self) -> bool { + self.user_id == other.user_id + && self.organization_id == other.organization_id + && self.operation == other.operation + && self.ip_address == 
other.ip_address + && self.user_agent == other.user_agent + && self.metadata == other.metadata + && self.authentication == other.authentication + } +} + +#[derive(Debug, Clone, Serialize, PartialEq, Eq, strum::Display)] +pub enum EntityOperation { + Get, + GetAll, + Created, + Updated, + Deleted, + DiscoveryStarted, + DiscoveryCancelled, + Custom(&'static str), +} + +#[derive(Debug, Clone, Serialize, Eq)] +pub struct EntityEvent { + pub id: Uuid, + pub entity_type: Entity, + pub entity_id: Uuid, + pub network_id: Option, // Some entities might belong to an org, not a network + pub organization_id: Option, // Some entities might belong to a network, not an org + pub operation: EntityOperation, + pub timestamp: DateTime, + pub authentication: AuthenticatedEntity, + pub metadata: serde_json::Value, +} + +impl PartialEq for EntityEvent { + fn eq(&self, other: &Self) -> bool { + self.entity_id == other.entity_id + && self.network_id == other.network_id + && self.organization_id == other.organization_id + && self.operation == other.operation + && self.authentication == other.authentication + && self.metadata == other.metadata + } +} + +impl Display for EntityEvent { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Event: {{ id: {}, entity_type: {}, entity_id: {} }}", + self.id, self.entity_type, self.entity_id + ) + } +} diff --git a/backend/src/server/shared/handlers/factory.rs b/backend/src/server/shared/handlers/factory.rs index 0315bc13..a1a9a66a 100644 --- a/backend/src/server/shared/handlers/factory.rs +++ b/backend/src/server/shared/handlers/factory.rs @@ -1,5 +1,5 @@ use crate::server::api_keys::r#impl::base::{ApiKey, ApiKeyBase}; -use crate::server::auth::middleware::{AuthenticatedUser, RequireOwner}; +use crate::server::auth::middleware::{AuthenticatedEntity, AuthenticatedUser, RequireOwner}; use crate::server::billing::types::base::BillingPlan; use crate::server::billing::types::features::Feature; use 
crate::server::config::PublicConfigResponse; @@ -10,12 +10,14 @@ use crate::server::hosts::r#impl::ports::PortBase; use crate::server::networks::r#impl::{Network, NetworkBase}; use crate::server::organizations::r#impl::base::Organization; use crate::server::services::definitions::ServiceDefinitionRegistry; -use crate::server::shared::entities::Entity; +use crate::server::shared::concepts::Concept; +use crate::server::shared::entities::EntityDiscriminants; use crate::server::shared::services::traits::CrudService; use crate::server::shared::storage::traits::StorableEntity; use crate::server::shared::types::api::{ApiError, ApiResult}; use crate::server::shared::types::metadata::{MetadataProvider, MetadataRegistry}; use crate::server::subnets::r#impl::types::SubnetType; +use crate::server::topology::types::base::{Topology, TopologyBase}; use crate::server::topology::types::edges::EdgeType; use crate::server::users::r#impl::permissions::UserOrgPermissions; use crate::server::{ @@ -78,7 +80,10 @@ async fn get_metadata_registry(_user: AuthenticatedUser) -> Json Deserialize<'de> where - Self: Display, + Self: Display + ChangeTriggersTopologyStaleness, + Entity: From, { /// Get the service from AppState (must implement CrudService) type Service: CrudService + Send + Sync; @@ -42,7 +44,8 @@ where /// Create a standard CRUD router pub fn create_crud_router() -> Router> where - T: CrudHandlers + 'static, + T: CrudHandlers + 'static + ChangeTriggersTopologyStaleness, + Entity: From, { Router::new() .route("/", post(create_handler::)) @@ -50,6 +53,7 @@ where .route("/{id}", put(update_handler::)) .route("/{id}", delete(delete_handler::)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) } pub async fn create_handler( @@ -58,7 +62,8 @@ pub async fn create_handler( Json(request): Json, ) -> ApiResult>> where - T: CrudHandlers + 'static, + T: CrudHandlers + 'static + ChangeTriggersTopologyStaleness, + Entity: From, { if let Err(err) = 
request.validate() { tracing::warn!( @@ -81,22 +86,18 @@ where ); let service = T::get_service(&state); - let created = service.create(request).await.map_err(|e| { - tracing::error!( - entity_type = T::table_name(), - user_id = %user.user_id, - error = %e, - "Failed to create entity" - ); - ApiError::internal_error(&e.to_string()) - })?; - - tracing::info!( - entity_type = T::table_name(), - entity_id = %created.id(), - user_id = %user.user_id, - "Entity created via API" - ); + let created = service + .create(request, user.clone().into()) + .await + .map_err(|e| { + tracing::error!( + entity_type = T::table_name(), + user_id = %user.user_id, + error = %e, + "Failed to create entity" + ); + ApiError::internal_error(&e.to_string()) + })?; Ok(Json(ApiResponse::success(created))) } @@ -106,7 +107,8 @@ pub async fn get_all_handler( user: AuthenticatedUser, ) -> ApiResult>>> where - T: CrudHandlers + 'static, + T: CrudHandlers + 'static + ChangeTriggersTopologyStaleness, + Entity: From, { tracing::debug!( entity_type = T::table_name(), @@ -128,13 +130,6 @@ where ApiError::internal_error(&e.to_string()) })?; - tracing::debug!( - entity_type = T::table_name(), - user_id = %user.user_id, - count = %entities.len(), - "Entities fetched successfully" - ); - Ok(Json(ApiResponse::success(entities))) } @@ -144,7 +139,8 @@ pub async fn get_by_id_handler( Path(id): Path, ) -> ApiResult>> where - T: CrudHandlers + 'static, + T: CrudHandlers + 'static + ChangeTriggersTopologyStaleness, + Entity: From, { tracing::debug!( entity_type = T::table_name(), @@ -177,13 +173,6 @@ where ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id)) })?; - tracing::debug!( - entity_type = T::table_name(), - entity_id = %id, - user_id = %user.user_id, - "Entity fetched successfully" - ); - Ok(Json(ApiResponse::success(entity))) } @@ -194,7 +183,8 @@ pub async fn update_handler( Json(mut request): Json, ) -> ApiResult>> where - T: CrudHandlers + 'static, + T: CrudHandlers + 'static + 
ChangeTriggersTopologyStaleness, + Entity: From, { tracing::debug!( entity_type = T::table_name(), @@ -229,34 +219,31 @@ where ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id)) })?; - let updated = service.update(&mut request).await.map_err(|e| { - tracing::error!( - entity_type = T::table_name(), - entity_id = %id, - user_id = %user.user_id, - error = %e, - "Failed to update entity" - ); - ApiError::internal_error(&e.to_string()) - })?; - - tracing::info!( - entity_type = T::table_name(), - entity_id = %id, - user_id = %user.user_id, - "Entity updated via API" - ); + let updated = service + .update(&mut request, user.clone().into()) + .await + .map_err(|e| { + tracing::error!( + entity_type = T::table_name(), + entity_id = %id, + user_id = %user.user_id, + error = %e, + "Failed to update entity" + ); + ApiError::internal_error(&e.to_string()) + })?; Ok(Json(ApiResponse::success(updated))) } pub async fn delete_handler( State(state): State>, - RequireMember(_user): RequireMember, + RequireMember(user): RequireMember, Path(id): Path, ) -> ApiResult>> where - T: CrudHandlers + 'static, + T: CrudHandlers + 'static + ChangeTriggersTopologyStaleness, + Entity: From, { let service = T::get_service(&state); @@ -282,14 +269,14 @@ where ApiError::not_found(format!("{} '{}' not found", T::entity_name(), id)) })?; - tracing::info!( + tracing::debug!( entity_type = T::table_name(), entity_id = %id, entity_name = %entity, "Delete request received" ); - service.delete(&id).await.map_err(|e| { + service.delete(&id, user.into()).await.map_err(|e| { tracing::error!( entity_type = T::table_name(), entity_id = %id, @@ -301,3 +288,49 @@ where Ok(Json(ApiResponse::success(()))) } + +pub async fn bulk_delete_handler( + State(state): State>, + RequireMember(user): RequireMember, + Json(ids): Json>, +) -> ApiResult>> +where + T: CrudHandlers + 'static, + Entity: From, +{ + if ids.is_empty() { + return Err(ApiError::bad_request("No IDs provided for bulk delete")); + } 
+ + tracing::debug!( + entity_type = T::table_name(), + user_id = %user.user_id, + count = ids.len(), + "Bulk delete request received" + ); + + let service = T::get_service(&state); + let deleted_count = service + .delete_many(&ids, user.clone().into()) + .await + .map_err(|e| { + tracing::error!( + entity_type = T::table_name(), + user_id = %user.user_id, + error = %e, + "Failed to bulk delete entities" + ); + ApiError::internal_error(&e.to_string()) + })?; + + Ok(Json(ApiResponse::success(BulkDeleteResponse { + deleted_count, + requested_count: ids.len(), + }))) +} + +#[derive(Serialize)] +pub struct BulkDeleteResponse { + pub deleted_count: usize, + pub requested_count: usize, +} diff --git a/backend/src/server/shared/mod.rs b/backend/src/server/shared/mod.rs index c2637fc0..7c2f76ae 100644 --- a/backend/src/server/shared/mod.rs +++ b/backend/src/server/shared/mod.rs @@ -1,4 +1,6 @@ +pub mod concepts; pub mod entities; +pub mod events; pub mod handlers; pub mod services; pub mod storage; diff --git a/backend/src/server/shared/services/factory.rs b/backend/src/server/shared/services/factory.rs index 6dd7f23c..842d7b9f 100644 --- a/backend/src/server/shared/services/factory.rs +++ b/backend/src/server/shared/services/factory.rs @@ -8,10 +8,11 @@ use crate::server::{ email::service::EmailService, groups::service::GroupService, hosts::service::HostService, + logging::service::LoggingService, networks::service::NetworkService, organizations::service::OrganizationService, services::service::ServiceService, - shared::storage::factory::StorageFactory, + shared::{events::bus::EventBus, storage::factory::StorageFactory}, subnets::service::SubnetService, topology::service::main::TopologyService, users::service::UserService, @@ -35,34 +36,54 @@ pub struct ServiceFactory { pub oidc_service: Option>, pub billing_service: Option>, pub email_service: Option>, + pub event_bus: Arc, + pub logging_service: Arc, } impl ServiceFactory { pub async fn new(storage: &StorageFactory, 
config: Option) -> Result { - let api_key_service = Arc::new(ApiKeyService::new(storage.api_keys.clone())); - let daemon_service = Arc::new(DaemonService::new(storage.daemons.clone())); - let group_service = Arc::new(GroupService::new(storage.groups.clone())); - let organization_service = - Arc::new(OrganizationService::new(storage.organizations.clone())); + let event_bus = Arc::new(EventBus::new()); + + let logging_service = Arc::new(LoggingService::new()); + + let api_key_service = Arc::new(ApiKeyService::new( + storage.api_keys.clone(), + event_bus.clone(), + )); + let daemon_service = Arc::new(DaemonService::new( + storage.daemons.clone(), + event_bus.clone(), + )); + let group_service = Arc::new(GroupService::new(storage.groups.clone(), event_bus.clone())); + let organization_service = Arc::new(OrganizationService::new( + storage.organizations.clone(), + event_bus.clone(), + )); // Already implements Arc internally due to scheduler + sessions - let discovery_service = - DiscoveryService::new(storage.discovery.clone(), daemon_service.clone()).await?; + let discovery_service = DiscoveryService::new( + storage.discovery.clone(), + daemon_service.clone(), + event_bus.clone(), + ) + .await?; let service_service = Arc::new(ServiceService::new( storage.services.clone(), group_service.clone(), + event_bus.clone(), )); let host_service = Arc::new(HostService::new( storage.hosts.clone(), service_service.clone(), daemon_service.clone(), + event_bus.clone(), )); let subnet_service = Arc::new(SubnetService::new( storage.subnets.clone(), - host_service.clone(), + event_bus.clone(), )); let _ = service_service.set_host_service(host_service.clone()); @@ -72,14 +93,17 @@ impl ServiceFactory { subnet_service.clone(), group_service.clone(), service_service.clone(), + storage.topologies.clone(), + event_bus.clone(), )); let network_service = Arc::new(NetworkService::new( storage.networks.clone(), host_service.clone(), subnet_service.clone(), + event_bus.clone(), )); - let 
user_service = Arc::new(UserService::new(storage.users.clone())); + let user_service = Arc::new(UserService::new(storage.users.clone(), event_bus.clone())); let billing_service = config.clone().and_then(|c| { if let Some(strip_secret) = c.stripe_secret @@ -112,6 +136,7 @@ impl ServiceFactory { user_service.clone(), organization_service.clone(), email_service.clone(), + event_bus.clone(), )); let oidc_service = config.and_then(|c| { @@ -128,18 +153,28 @@ impl ServiceFactory { &c.oidc_client_secret, &c.oidc_provider_name, ) { - return Some(Arc::new(OidcService::new( - issuer_url.to_owned(), - client_id.to_owned(), - client_secret.to_owned(), - redirect_url.to_owned(), - provider_name.to_owned(), - auth_service.clone(), - ))); + return Some(Arc::new(OidcService::new(OidcService { + issuer_url: issuer_url.to_owned(), + client_id: client_id.to_owned(), + client_secret: client_secret.to_owned(), + redirect_url: redirect_url.to_owned(), + provider_name: provider_name.to_owned(), + auth_service: auth_service.clone(), + user_service: user_service.clone(), + event_bus: event_bus.clone(), + }))); } None }); + event_bus + .register_subscriber(topology_service.clone()) + .await; + + event_bus.register_subscriber(logging_service.clone()).await; + + event_bus.register_subscriber(host_service.clone()).await; + Ok(Self { user_service, auth_service, @@ -156,6 +191,8 @@ impl ServiceFactory { oidc_service, billing_service, email_service, + event_bus, + logging_service, }) } } diff --git a/backend/src/server/shared/services/traits.rs b/backend/src/server/shared/services/traits.rs index 2690070f..30e61760 100644 --- a/backend/src/server/shared/services/traits.rs +++ b/backend/src/server/shared/services/traits.rs @@ -1,19 +1,39 @@ +use anyhow::anyhow; use async_trait::async_trait; +use chrono::Utc; use std::{fmt::Display, sync::Arc}; use uuid::Uuid; -use crate::server::shared::storage::{ - filter::EntityFilter, - generic::GenericPostgresStorage, - traits::{StorableEntity, Storage}, +use 
crate::server::{ + auth::middleware::AuthenticatedEntity, + shared::{ + entities::{ChangeTriggersTopologyStaleness, Entity}, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + storage::{ + filter::EntityFilter, + generic::GenericPostgresStorage, + traits::{StorableEntity, Storage}, + }, + }, }; +pub trait EventBusService> { + /// Event bus and helpers + fn event_bus(&self) -> &Arc; + + fn get_network_id(&self, entity: &T) -> Option; + fn get_organization_id(&self, entity: &T) -> Option; +} + /// Helper trait for services that use generic storage /// Provides default implementations for common CRUD operations #[async_trait] -pub trait CrudService +pub trait CrudService>: EventBusService where - T: Display, + T: Display + ChangeTriggersTopologyStaleness, { /// Get reference to the storage fn storage(&self) -> &Arc>; @@ -34,21 +54,32 @@ where } /// Delete entity by ID - async fn delete(&self, id: &Uuid) -> Result<(), anyhow::Error> { - // ADD logging before deletion + async fn delete( + &self, + id: &Uuid, + authentication: AuthenticatedEntity, + ) -> Result<(), anyhow::Error> { if let Some(entity) = self.get_by_id(id).await? 
{ - tracing::info!( - entity_type = T::table_name(), - entity_id = %id, - entity_name = %entity, - "Deleting entity" - ); self.storage().delete(id).await?; - tracing::debug!( - entity_type = T::table_name(), - entity_id = %id, - "Entity deleted successfully" - ); + + let trigger_stale = entity.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: *id, + network_id: self.get_network_id(&entity), + organization_id: self.get_organization_id(&entity), + entity_type: entity.into(), + operation: EntityOperation::Deleted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; + Ok(()) } else { Err(anyhow::anyhow!( @@ -60,51 +91,106 @@ where } /// Create entity - async fn create(&self, entity: T) -> Result { + async fn create( + &self, + entity: T, + authentication: AuthenticatedEntity, + ) -> Result { let entity = if entity.id() == Uuid::nil() { T::new(entity.get_base()) } else { entity }; - // ADD logging before creation - tracing::debug!( - entity_type = T::table_name(), - entity_id = %entity.id(), - entity_name = %entity, - "Creating entity" - ); - let created = self.storage().create(&entity).await?; + let trigger_stale = created.triggers_staleness(None); - tracing::info!( - entity_type = T::table_name(), - entity_id = %created.id(), - entity_name = %created, - "Entity created" - ); + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: created.id(), + network_id: self.get_network_id(&created), + organization_id: self.get_organization_id(&created), + entity_type: created.clone().into(), + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; Ok(created) } /// Update entity - async fn update(&self, entity: &mut T) -> Result { - tracing::debug!( - entity_type = T::table_name(), - entity_id = 
%entity.id(), - entity_name = %entity, - "Updating entity" - ); - + async fn update( + &self, + entity: &mut T, + authentication: AuthenticatedEntity, + ) -> Result { + let current = self + .get_by_id(&entity.id()) + .await? + .ok_or_else(|| anyhow!("Could not find {}", entity))?; let updated = self.storage().update(entity).await?; - tracing::info!( - entity_type = T::table_name(), - entity_id = %updated.id(), - entity_name = %updated, - "Entity updated" - ); + let trigger_stale = updated.triggers_staleness(Some(current)); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: updated.id(), + network_id: self.get_network_id(&updated), + organization_id: self.get_organization_id(&updated), + entity_type: updated.clone().into(), + operation: EntityOperation::Updated, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; Ok(updated) } + + async fn delete_many( + &self, + ids: &[Uuid], + authentication: AuthenticatedEntity, + ) -> Result { + if ids.is_empty() { + return Ok(0); + } + + // Log which entities are being deleted + for id in ids { + if let Some(entity) = self.get_by_id(id).await? 
{ + let trigger_stale = entity.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: *id, + network_id: self.get_network_id(&entity), + organization_id: self.get_organization_id(&entity), + entity_type: entity.into(), + operation: EntityOperation::Deleted, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication: authentication.clone(), + }) + .await?; + } + } + + let deleted_count = self.storage().delete_many(ids).await?; + + Ok(deleted_count) + } } diff --git a/backend/src/server/shared/storage/factory.rs b/backend/src/server/shared/storage/factory.rs index 5af2d39e..1571c8a0 100644 --- a/backend/src/server/shared/storage/factory.rs +++ b/backend/src/server/shared/storage/factory.rs @@ -9,7 +9,7 @@ use crate::server::{ discovery::r#impl::base::Discovery, groups::r#impl::base::Group, hosts::r#impl::base::Host, networks::r#impl::Network, organizations::r#impl::base::Organization, services::r#impl::base::Service, shared::storage::generic::GenericPostgresStorage, - subnets::r#impl::base::Subnet, users::r#impl::base::User, + subnets::r#impl::base::Subnet, topology::types::base::Topology, users::r#impl::base::User, }; pub struct StorageFactory { @@ -24,6 +24,7 @@ pub struct StorageFactory { pub services: Arc>, pub organizations: Arc>, pub discovery: Arc>, + pub topologies: Arc>, } pub async fn create_session_store( @@ -62,6 +63,7 @@ impl StorageFactory { daemons: Arc::new(GenericPostgresStorage::new(pool.clone())), subnets: Arc::new(GenericPostgresStorage::new(pool.clone())), services: Arc::new(GenericPostgresStorage::new(pool.clone())), + topologies: Arc::new(GenericPostgresStorage::new(pool.clone())), }) } } diff --git a/backend/src/server/shared/storage/generic.rs b/backend/src/server/shared/storage/generic.rs index 8968579b..e842a612 100644 --- a/backend/src/server/shared/storage/generic.rs +++ b/backend/src/server/shared/storage/generic.rs @@ -64,10 
+64,9 @@ where SqlValue::U16(v) => query.bind(Into::::into(*v)), SqlValue::I32(v) => query.bind(v), SqlValue::Bool(v) => query.bind(v), - SqlValue::Json(v) => query.bind(v), SqlValue::Timestamp(v) => query.bind(v), SqlValue::OptionTimestamp(v) => query.bind(v), - SqlValue::UuidArray(v) => query.bind(serde_json::to_value(v)?), + SqlValue::UuidArray(v) => query.bind(v.clone()), SqlValue::OptionalString(v) => query.bind(v), SqlValue::EntitySource(v) => query.bind(serde_json::to_value(v)?), SqlValue::IpCidr(v) => query.bind(serde_json::to_string(v)?), @@ -90,6 +89,13 @@ where SqlValue::OptionBillingPlan(v) => query.bind(serde_json::to_value(v)?), SqlValue::OptionBillingPlanStatus(v) => query.bind(serde_json::to_string(v)?), SqlValue::EdgeStyle(v) => query.bind(v.to_string()), + SqlValue::Nodes(v) => query.bind(serde_json::to_value(v)?), + SqlValue::Edges(v) => query.bind(serde_json::to_value(v)?), + SqlValue::TopologyOptions(v) => query.bind(serde_json::to_value(v)?), + SqlValue::Hosts(v) => query.bind(serde_json::to_value(v)?), + SqlValue::Subnets(v) => query.bind(serde_json::to_value(v)?), + SqlValue::Services(v) => query.bind(serde_json::to_value(v)?), + SqlValue::Groups(v) => query.bind(serde_json::to_value(v)?), }; Ok(value) @@ -111,7 +117,7 @@ where } query.execute(&self.pool).await?; - tracing::info!("Created {}: {}", T::table_name(), entity); + tracing::debug!("Created {}: {}", T::table_name(), entity); Ok(entity.clone()) } @@ -167,7 +173,7 @@ where query = Self::bind_value(query, value)?; } - tracing::info!("Updated {}", entity); + tracing::debug!("Updated {}", entity); query.execute(&self.pool).await?; Ok(entity.clone()) @@ -178,8 +184,33 @@ where sqlx::query(&query_str).bind(id).execute(&self.pool).await?; - tracing::info!("Deleted {} with id: {}", T::table_name(), id); + tracing::debug!("Deleted {} with id: {}", T::table_name(), id); Ok(()) } + + async fn delete_many(&self, ids: &[Uuid]) -> Result { + if ids.is_empty() { + return Ok(0); + } + + let 
query_str = format!("DELETE FROM {} WHERE id = ANY($1)", T::table_name()); + + let result = sqlx::query(&query_str) + .bind(ids) + .execute(&self.pool) + .await?; + + let deleted_count = result.rows_affected() as usize; + + tracing::debug!( + "Bulk deleted {} {}s (requested: {}, deleted: {})", + deleted_count, + T::table_name(), + ids.len(), + deleted_count + ); + + Ok(deleted_count) + } } diff --git a/backend/src/server/shared/storage/tests.rs b/backend/src/server/shared/storage/tests.rs index 3b140c7e..b7a517ef 100644 --- a/backend/src/server/shared/storage/tests.rs +++ b/backend/src/server/shared/storage/tests.rs @@ -1,3 +1,144 @@ +use crate::server::{ + api_keys::r#impl::base::ApiKey, daemons::r#impl::base::Daemon, + discovery::r#impl::base::Discovery, groups::r#impl::base::Group, hosts::r#impl::base::Host, + networks::r#impl::Network, organizations::r#impl::base::Organization, + services::r#impl::base::Service, shared::storage::traits::StorableEntity, + subnets::r#impl::base::Subnet, users::r#impl::base::User, +}; +use sqlx::postgres::PgRow; +use std::collections::HashMap; + +// Type alias for the deserialization function +#[allow(dead_code)] +type DeserializeFn = Box Result<(), anyhow::Error> + Send + Sync>; + +// Mapping from table name to deserialization function +#[allow(dead_code)] +fn get_entity_deserializers() -> HashMap<&'static str, DeserializeFn> { + let mut map: HashMap<&'static str, DeserializeFn> = HashMap::new(); + + map.insert( + ApiKey::table_name(), + Box::new(|row| { + ApiKey::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + Daemon::table_name(), + Box::new(|row| { + Daemon::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + Discovery::table_name(), + Box::new(|row| { + Discovery::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + Group::table_name(), + Box::new(|row| { + Group::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + Host::table_name(), + Box::new(|row| { + Host::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + 
Network::table_name(), + Box::new(|row| { + Network::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + Organization::table_name(), + Box::new(|row| { + Organization::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + Service::table_name(), + Box::new(|row| { + Service::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + Subnet::table_name(), + Box::new(|row| { + Subnet::from_row(row)?; + Ok(()) + }), + ); + + map.insert( + User::table_name(), + Box::new(|row| { + User::from_row(row)?; + Ok(()) + }), + ); + + map +} + +#[tokio::test] +pub async fn test_all_tables_have_entity_mapping() { + use crate::tests::setup_test_db; + + let (pool, _database_url, _container) = setup_test_db().await; + + // Get all tables from information_schema + let tables: Vec = sqlx::query_scalar( + "SELECT table_name FROM information_schema.tables + WHERE table_schema = 'public' + AND table_type = 'BASE TABLE' + AND table_name != '_sqlx_migrations'", + ) + .fetch_all(&pool) + .await + .expect("Failed to fetch table names"); + + let deserializers = get_entity_deserializers(); + + println!("Verifying entity mappings for all tables..."); + + let mut missing_mappings = Vec::new(); + for table in &tables { + if !deserializers.contains_key(table.as_str()) { + missing_mappings.push(table.clone()); + } + } + + if !missing_mappings.is_empty() { + panic!( + "The following tables are missing entity mappings in get_entity_deserializers():\n - {}\n\ + Please add them to the registry.", + missing_mappings.join("\n - ") + ); + } + + println!("✓ All {} tables have entity mappings", tables.len()); +} + #[tokio::test] pub async fn test_database_schema_backward_compatibility() { use crate::tests::SERVER_DB_FIXTURE; @@ -13,7 +154,6 @@ pub async fn test_database_schema_backward_compatibility() { let (pool, database_url, _container) = setup_test_db().await; - // Parse connection details let url = url::Url::parse(&database_url).unwrap(); let host = url.host_str().unwrap(); let port = url.port().unwrap(); 
@@ -21,7 +161,6 @@ pub async fn test_database_schema_backward_compatibility() { pool.close().await; - // Use psql which understands all pg_dump output including meta-commands let output = Command::new("psql") .arg("-h") .arg(host) @@ -47,49 +186,18 @@ pub async fn test_database_schema_backward_compatibility() { let pool = sqlx::PgPool::connect(&database_url).await.unwrap(); - // Verify tables - assert!( - sqlx::query("SELECT * FROM hosts") - .fetch_all(&pool) - .await - .is_ok() - ); - assert!( - sqlx::query("SELECT * FROM services") - .fetch_all(&pool) - .await - .is_ok() - ); - assert!( - sqlx::query("SELECT * FROM subnets") - .fetch_all(&pool) - .await - .is_ok() - ); - assert!( - sqlx::query("SELECT * FROM groups") - .fetch_all(&pool) - .await - .is_ok() - ); - assert!( - sqlx::query("SELECT * FROM daemons") - .fetch_all(&pool) - .await - .is_ok() - ); - assert!( - sqlx::query("SELECT * FROM networks") - .fetch_all(&pool) - .await - .is_ok() - ); - assert!( - sqlx::query("SELECT * FROM users") - .fetch_all(&pool) - .await - .is_ok() - ); + // Verify tables exist using the deserializers map + let deserializers = get_entity_deserializers(); + for table_name in deserializers.keys() { + assert!( + sqlx::query(&format!("SELECT * FROM {}", table_name)) + .fetch_all(&pool) + .await + .is_ok(), + "Failed to read table: {}", + table_name + ); + } println!("Successfully read all tables from latest release database"); @@ -103,3 +211,83 @@ pub async fn test_database_schema_backward_compatibility() { panic!("No database fixture found at {}", SERVER_DB_FIXTURE); } } + +#[tokio::test] +pub async fn test_struct_deserialization_backward_compatibility() { + use crate::tests::SERVER_DB_FIXTURE; + use crate::tests::setup_test_db; + use std::path::Path; + + let db_path = Path::new(SERVER_DB_FIXTURE); + + if db_path.exists() { + use std::process::Command; + + println!("Testing struct deserialization from migrated old schema"); + + let (pool, database_url, _container) = 
setup_test_db().await; + + let url = url::Url::parse(&database_url).unwrap(); + let host = url.host_str().unwrap(); + let port = url.port().unwrap(); + let database = url.path().trim_start_matches('/'); + + pool.close().await; + + // Restore old database + let output = Command::new("psql") + .arg("-h") + .arg(host) + .arg("-p") + .arg(port.to_string()) + .arg("-U") + .arg("postgres") + .arg("-d") + .arg(database) + .arg("-f") + .arg(db_path) + .env("PGPASSWORD", "password") + .output() + .expect("Failed to execute psql"); + + assert!( + output.status.success(), + "Failed to restore database:\n{}", + String::from_utf8_lossy(&output.stderr) + ); + + let pool = sqlx::PgPool::connect(&database_url).await.unwrap(); + + // Apply current migrations + sqlx::migrate!("./migrations") + .run(&pool) + .await + .expect("Failed to apply current schema"); + + println!("Testing deserialization of all entity types..."); + + let deserializers = get_entity_deserializers(); + + for (table_name, deserialize_fn) in deserializers.iter() { + let rows = sqlx::query(&format!("SELECT * FROM {}", table_name)) + .fetch_all(&pool) + .await + .expect(&format!("Failed to fetch {}", table_name)); + + for row in rows.iter() { + deserialize_fn(row) + .expect(&format!("Failed to deserialize row from {}", table_name)); + } + + println!( + "✓ Successfully deserialized {} rows from {}", + rows.len(), + table_name + ); + } + + println!("All entity types deserialized successfully from migrated schema"); + } else { + panic!("No database fixture found at {}", SERVER_DB_FIXTURE); + } +} diff --git a/backend/src/server/shared/storage/traits.rs b/backend/src/server/shared/storage/traits.rs index 2ee617e8..9cac8d01 100644 --- a/backend/src/server/shared/storage/traits.rs +++ b/backend/src/server/shared/storage/traits.rs @@ -1,29 +1,36 @@ use std::net::IpAddr; -use async_trait::async_trait; -use chrono::{DateTime, Utc}; -use cidr::IpCidr; -use email_address::EmailAddress; -use sqlx::postgres::PgRow; -use 
stripe_billing::SubscriptionStatus; -use uuid::Uuid; - +use crate::server::groups::r#impl::base::Group; +use crate::server::services::r#impl::base::Service; +use crate::server::subnets::r#impl::base::Subnet; use crate::server::{ billing::types::base::BillingPlan, daemons::r#impl::{api::DaemonCapabilities, base::DaemonMode}, discovery::r#impl::types::{DiscoveryType, RunType}, groups::r#impl::types::GroupType, hosts::r#impl::{ - interfaces::Interface, ports::Port, targets::HostTarget, virtualization::HostVirtualization, + base::Host, interfaces::Interface, ports::Port, targets::HostTarget, + virtualization::HostVirtualization, }, services::r#impl::{ bindings::Binding, definitions::ServiceDefinition, virtualization::ServiceVirtualization, }, shared::{storage::filter::EntityFilter, types::entities::EntitySource}, subnets::r#impl::types::SubnetType, - topology::types::edges::EdgeStyle, + topology::types::{ + base::TopologyOptions, + edges::{Edge, EdgeStyle}, + nodes::Node, + }, users::r#impl::permissions::UserOrgPermissions, }; +use async_trait::async_trait; +use chrono::{DateTime, Utc}; +use cidr::IpCidr; +use email_address::EmailAddress; +use sqlx::postgres::PgRow; +use stripe_billing::SubscriptionStatus; +use uuid::Uuid; #[async_trait] pub trait Storage: Send + Sync { @@ -33,6 +40,7 @@ pub trait Storage: Send + Sync { async fn get_one(&self, filter: EntityFilter) -> Result, anyhow::Error>; async fn update(&self, entity: &mut T) -> Result; async fn delete(&self, id: &Uuid) -> Result<(), anyhow::Error>; + async fn delete_many(&self, ids: &[Uuid]) -> Result; } pub trait StorableEntity: Sized + Clone + Send + Sync + 'static { @@ -69,7 +77,6 @@ pub enum SqlValue { I32(i32), U16(u16), Bool(bool), - Json(serde_json::Value), Email(EmailAddress), Timestamp(DateTime), OptionTimestamp(Option>), @@ -94,4 +101,11 @@ pub enum SqlValue { OptionBillingPlanStatus(Option), EdgeStyle(EdgeStyle), DaemonMode(DaemonMode), + Nodes(Vec), + Edges(Vec), + TopologyOptions(TopologyOptions), + 
Hosts(Vec), + Subnets(Vec), + Services(Vec), + Groups(Vec), } diff --git a/backend/src/server/shared/types/metadata.rs b/backend/src/server/shared/types/metadata.rs index 01c50247..ba2ba464 100644 --- a/backend/src/server/shared/types/metadata.rs +++ b/backend/src/server/shared/types/metadata.rs @@ -12,6 +12,7 @@ pub struct MetadataRegistry { pub billing_plans: Vec, pub features: Vec, pub permissions: Vec, + pub concepts: Vec, } #[derive(Serialize, Debug, Clone)] diff --git a/backend/src/server/subnets/handlers.rs b/backend/src/server/subnets/handlers.rs index c21aa380..ee10ff99 100644 --- a/backend/src/server/subnets/handlers.rs +++ b/backend/src/server/subnets/handlers.rs @@ -1,6 +1,6 @@ use crate::server::auth::middleware::{AuthenticatedEntity, MemberOrDaemon}; use crate::server::shared::handlers::traits::{ - CrudHandlers, delete_handler, get_by_id_handler, update_handler, + CrudHandlers, bulk_delete_handler, delete_handler, get_by_id_handler, update_handler, }; use crate::server::shared::types::api::ApiError; use crate::server::{ @@ -23,6 +23,7 @@ pub fn create_router() -> Router> { .route("/{id}", put(update_handler::)) .route("/{id}", delete(delete_handler::)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) } pub async fn create_handler( @@ -53,7 +54,7 @@ pub async fn create_handler( ); let service = Subnet::get_service(&state); - let created = service.create(request).await.map_err(|e| { + let created = service.create(request, entity.clone()).await.map_err(|e| { tracing::error!( error = %e, entity_id = %entity.entity_id(), @@ -76,11 +77,27 @@ async fn get_all_subnets( State(state): State>, entity: AuthenticatedEntity, ) -> ApiResult>>> { - tracing::debug!( - entity_id = %entity.entity_id(), - network_count = %entity.network_ids().len(), - "Get all subnets request received" - ); + match &entity { + AuthenticatedEntity::User { user_id, .. 
} => { + tracing::debug!( + entity_type = "subnet", + user_id = %user_id, + "Get all request received" + ); + } + AuthenticatedEntity::Daemon { .. } => { + tracing::debug!( + entity_type = "subnet", + daemon_id = %entity.entity_id(), + "Get all request received" + ); + } + _ => { + return Err(ApiError::internal_error( + "Invalid authentication for request to /subnets/", + )); + } + } let service = &state.services.subnet_service; let filter = EntityFilter::unfiltered().network_ids(&entity.network_ids()); @@ -94,11 +111,29 @@ async fn get_all_subnets( ApiError::internal_error(&e.to_string()) })?; - tracing::debug!( - entity_id = %entity.entity_id(), - subnet_count = %subnets.len(), - "Subnets fetched successfully" - ); + match &entity { + AuthenticatedEntity::User { user_id, .. } => { + tracing::debug!( + user_id = %user_id, + entity_type = "subnet", + subnet_count = %subnets.len(), + "Entities fetched successfully" + ); + } + AuthenticatedEntity::Daemon { .. } => { + tracing::debug!( + entity_type = "subnet", + daemon_id = %entity.entity_id(), + subnet_count = %subnets.len(), + "Entities fetched successfully" + ); + } + _ => { + return Err(ApiError::internal_error( + "Invalid authentication for request to /subnets/", + )); + } + } Ok(Json(ApiResponse::success(subnets))) } diff --git a/backend/src/server/subnets/impl/base.rs b/backend/src/server/subnets/impl/base.rs index 9908b011..c0a33a2f 100644 --- a/backend/src/server/subnets/impl/base.rs +++ b/backend/src/server/subnets/impl/base.rs @@ -2,6 +2,7 @@ use std::fmt::Display; use std::net::Ipv4Addr; use crate::server::discovery::r#impl::types::DiscoveryType; +use crate::server::shared::entities::ChangeTriggersTopologyStaleness; use crate::server::shared::storage::traits::StorableEntity; use crate::server::shared::types::api::deserialize_empty_string_as_none; use crate::server::shared::types::entities::{DiscoveryMetadata, EntitySource}; @@ -42,7 +43,7 @@ impl Default for SubnetBase { } } -#[derive(Debug, Clone, 
Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, Eq)] pub struct Subnet { pub id: Uuid, pub created_at: DateTime, @@ -137,3 +138,9 @@ impl Display for Subnet { write!(f, "Subnet {}: {}", self.base.name, self.id) } } + +impl ChangeTriggersTopologyStaleness for Subnet { + fn triggers_staleness(&self, _other: Option) -> bool { + false + } +} diff --git a/backend/src/server/subnets/impl/types.rs b/backend/src/server/subnets/impl/types.rs index f9a6afab..bb26e390 100644 --- a/backend/src/server/subnets/impl/types.rs +++ b/backend/src/server/subnets/impl/types.rs @@ -2,7 +2,8 @@ use serde::{Deserialize, Serialize}; use strum::{Display, EnumDiscriminants, EnumIter, IntoStaticStr}; use crate::server::shared::{ - entities::Entity, + concepts::Concept, + entities::EntityDiscriminants, types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, }; @@ -131,20 +132,20 @@ impl EntityMetadataProvider for SubnetType { fn color(&self) -> &'static str { match self { SubnetType::Internet => "blue", - SubnetType::Remote => Entity::Subnet.color(), + SubnetType::Remote => EntityDiscriminants::Subnet.color(), - SubnetType::Gateway => Entity::Gateway.color(), - SubnetType::VpnTunnel => Entity::Vpn.color(), + SubnetType::Gateway => Concept::Gateway.color(), + SubnetType::VpnTunnel => Concept::Vpn.color(), SubnetType::Dmz => "rose", - SubnetType::Lan => Entity::Subnet.color(), - SubnetType::IoT => Entity::IoT.color(), + SubnetType::Lan => EntityDiscriminants::Subnet.color(), + SubnetType::IoT => Concept::IoT.color(), SubnetType::Guest => "green", SubnetType::WiFi => "teal", SubnetType::Management => "gray", - SubnetType::DockerBridge => Entity::Virtualization.color(), - SubnetType::Storage => Entity::Storage.color(), + SubnetType::DockerBridge => Concept::Virtualization.color(), + SubnetType::Storage => Concept::Storage.color(), SubnetType::Unknown => "gray", SubnetType::None => "gray", @@ -153,23 +154,23 @@ impl EntityMetadataProvider for SubnetType { fn 
icon(&self) -> &'static str { match self { SubnetType::Internet => "Globe", - SubnetType::Remote => Entity::Subnet.icon(), + SubnetType::Remote => EntityDiscriminants::Subnet.icon(), - SubnetType::Gateway => Entity::Gateway.icon(), - SubnetType::VpnTunnel => Entity::Vpn.icon(), - SubnetType::Dmz => Entity::Subnet.icon(), + SubnetType::Gateway => Concept::Gateway.icon(), + SubnetType::VpnTunnel => Concept::Vpn.icon(), + SubnetType::Dmz => EntityDiscriminants::Subnet.icon(), - SubnetType::Lan => Entity::Subnet.icon(), - SubnetType::IoT => Entity::IoT.icon(), + SubnetType::Lan => EntityDiscriminants::Subnet.icon(), + SubnetType::IoT => Concept::IoT.icon(), SubnetType::Guest => "User", SubnetType::WiFi => "WiFi", SubnetType::Management => "ServerCog", SubnetType::DockerBridge => "Box", - SubnetType::Storage => Entity::Storage.icon(), + SubnetType::Storage => Concept::Storage.icon(), - SubnetType::Unknown => Entity::Subnet.icon(), - SubnetType::None => Entity::Subnet.icon(), + SubnetType::Unknown => EntityDiscriminants::Subnet.icon(), + SubnetType::None => EntityDiscriminants::Subnet.icon(), } } } diff --git a/backend/src/server/subnets/service.rs b/backend/src/server/subnets/service.rs index 9d8d9223..6275b43c 100644 --- a/backend/src/server/subnets/service.rs +++ b/backend/src/server/subnets/service.rs @@ -1,8 +1,13 @@ use crate::server::{ + auth::middleware::AuthenticatedEntity, discovery::r#impl::types::DiscoveryType, - hosts::service::HostService, shared::{ - services::traits::CrudService, + entities::ChangeTriggersTopologyStaleness, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + services::traits::{CrudService, EventBusService}, storage::{ filter::EntityFilter, generic::GenericPostgresStorage, @@ -14,13 +19,26 @@ use crate::server::{ }; use anyhow::Result; use async_trait::async_trait; -use futures::future::try_join_all; +use chrono::Utc; use std::sync::Arc; use uuid::Uuid; pub struct SubnetService { storage: Arc>, - host_service: Arc, 
+ event_bus: Arc, +} + +impl EventBusService for SubnetService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn get_network_id(&self, entity: &Subnet) -> Option { + Some(entity.base.network_id) + } + fn get_organization_id(&self, _entity: &Subnet) -> Option { + None + } } #[async_trait] @@ -29,7 +47,11 @@ impl CrudService for SubnetService { &self.storage } - async fn create(&self, subnet: Subnet) -> Result { + async fn create( + &self, + subnet: Subnet, + authentication: AuthenticatedEntity, + ) -> Result { let filter = EntityFilter::unfiltered().network_ids(&[subnet.base.network_id]); let all_subnets = self.storage.get_all(filter).await?; @@ -102,78 +124,35 @@ impl CrudService for SubnetService { } // If there's no existing subnet, create a new one _ => { - self.storage.create(&subnet).await?; - tracing::info!( - subnet_id = %subnet.id, - subnet_name = %subnet.base.name, - subnet_cidr = %subnet.base.cidr, - network_id = %subnet.base.network_id, - "Subnet created" - ); - subnet - } - }; - Ok(subnet_from_storage) - } + let created = self.storage.create(&subnet).await?; - async fn delete(&self, id: &Uuid) -> Result<()> { - let subnet = self - .get_by_id(id) - .await? 
- .ok_or_else(|| anyhow::anyhow!("Subnet not found"))?; - - tracing::info!( - subnet_id = %subnet.id, - subnet_name = %subnet.base.name, - subnet_cidr = %subnet.base.cidr, - "Deleting subnet" - ); + let trigger_stale = created.triggers_staleness(None); - let filter = EntityFilter::unfiltered().network_ids(&[subnet.base.network_id]); + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: created.id, + network_id: self.get_network_id(&created), + organization_id: self.get_organization_id(&created), + entity_type: created.into(), + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; - let hosts = self.host_service.get_all(filter).await?; - let update_futures = hosts.into_iter().filter_map(|mut host| { - let has_subnet = host.base.interfaces.iter().any(|i| &i.base.subnet_id == id); - if has_subnet { - host.base.interfaces = host - .base - .interfaces - .iter() - .filter(|i| &i.base.subnet_id != id) - .cloned() - .collect(); - return Some(self.host_service.update_host(host)); + subnet } - None - }); - - let updated_hosts = try_join_all(update_futures).await?; - - tracing::debug!( - subnet_id = %subnet.id, - affected_hosts = %updated_hosts.len(), - "Cleaned up host interfaces referencing subnet" - ); - - self.storage.delete(id).await?; - tracing::info!( - subnet_id = %subnet.id, - subnet_name = %subnet.base.name, - affected_hosts = %updated_hosts.len(), - "Subnet deleted" - ); - Ok(()) + }; + Ok(subnet_from_storage) } } impl SubnetService { - pub fn new( - storage: Arc>, - host_service: Arc, - ) -> Self { - Self { - storage, - host_service, - } + pub fn new(storage: Arc>, event_bus: Arc) -> Self { + Self { storage, event_bus } } } diff --git a/backend/src/server/topology/handlers.rs b/backend/src/server/topology/handlers.rs index e5cb9979..af9f210c 100644 --- a/backend/src/server/topology/handlers.rs +++ 
b/backend/src/server/topology/handlers.rs @@ -1,25 +1,227 @@ use crate::server::{ - auth::middleware::AuthenticatedUser, + auth::middleware::{AuthenticatedUser, RequireMember}, config::AppState, - shared::types::api::{ApiResponse, ApiResult}, - topology::types::api::TopologyOptions, + shared::{ + handlers::traits::{ + CrudHandlers, delete_handler, get_all_handler, get_by_id_handler, update_handler, + }, + services::traits::CrudService, + storage::traits::StorableEntity, + types::api::{ApiError, ApiResponse, ApiResult}, + }, + topology::{service::main::BuildGraphParams, types::base::Topology}, }; -use axum::{Router, extract::State, response::Json, routing::post}; -use std::sync::Arc; +use axum::{ + Router, + extract::State, + response::{ + Json, Sse, + sse::{Event, KeepAlive}, + }, + routing::{delete, get, post, put}, +}; +use futures::{Stream, stream}; +use std::{convert::Infallible, sync::Arc}; pub fn create_router() -> Router> { - Router::new().route("/", post(get_topology)) + Router::new() + .route("/", post(create_handler)) + .route("/", get(get_all_handler::)) + .route("/{id}", put(update_handler::)) + .route("/{id}", delete(delete_handler::)) + .route("/{id}", get(get_by_id_handler::)) + .route("/{id}/refresh", post(refresh)) + .route("/{id}/rebuild", post(rebuild)) + .route("/{id}/lock", post(lock)) + .route("/{id}/unlock", post(unlock)) + .route("/stream", get(staleness_stream)) +} + +pub async fn create_handler( + State(state): State>, + RequireMember(user): RequireMember, + Json(mut topology): Json, +) -> ApiResult>> { + if let Err(err) = topology.validate() { + tracing::warn!( + entity_type = Topology::table_name(), + user_id = %user.user_id, + error = %err, + "Entity validation failed" + ); + return Err(ApiError::bad_request(&format!( + "{} validation failed: {}", + Topology::entity_name(), + err + ))); + } + + tracing::debug!( + entity_type = Topology::table_name(), + user_id = %user.user_id, + "Create request received" + ); + + let service = 
Topology::get_service(&state); + + let (hosts, subnets, groups) = service.get_entity_data(topology.base.network_id).await?; + + let services = service + .get_service_data(topology.base.network_id, &topology.base.options) + .await?; + + let (nodes, edges) = service.build_graph(BuildGraphParams { + options: &topology.base.options, + hosts: &hosts, + subnets: &subnets, + services: &services, + groups: &groups, + old_edges: &[], + old_nodes: &[], + }); + + topology.base.hosts = hosts; + topology.base.services = services; + topology.base.subnets = subnets; + topology.base.groups = groups; + topology.base.edges = edges; + topology.base.nodes = nodes; + topology.clear_stale(); + + let created = service + .create(topology, user.clone().into()) + .await + .map_err(|e| { + tracing::error!( + entity_type = Topology::table_name(), + user_id = %user.user_id, + error = %e, + "Failed to create entity" + ); + ApiError::internal_error(&e.to_string()) + })?; + + tracing::info!( + entity_type = Topology::table_name(), + entity_id = %created.id(), + user_id = %user.user_id, + "Entity created via API" + ); + + Ok(Json(ApiResponse::success(created))) +} + +/// Refresh entity data. 
Only used when cosmetic properties (ie group color/line routing, entity names) are changed +async fn refresh( + State(state): State>, + RequireMember(user): RequireMember, + Json(mut topology): Json, +) -> ApiResult>> { + let service = Topology::get_service(&state); + + let (hosts, subnets, groups) = service.get_entity_data(topology.base.network_id).await?; + + let services = service + .get_service_data(topology.base.network_id, &topology.base.options) + .await?; + + topology.base.hosts = hosts; + topology.base.services = services; + topology.base.subnets = subnets; + topology.base.groups = groups; + + service.update(&mut topology, user.into()).await?; + + // Return will be handled through event subscriber which triggers SSE + + Ok(Json(ApiResponse::success(()))) +} + +/// Recalculate node and edges and refresh entity data +async fn rebuild( + State(state): State>, + RequireMember(user): RequireMember, + Json(mut topology): Json, +) -> ApiResult>> { + let service = Topology::get_service(&state); + + let (hosts, subnets, groups) = service.get_entity_data(topology.base.network_id).await?; + + let services = service + .get_service_data(topology.base.network_id, &topology.base.options) + .await?; + + let (nodes, edges) = service.build_graph(BuildGraphParams { + options: &topology.base.options, + hosts: &hosts, + subnets: &subnets, + services: &services, + groups: &groups, + old_nodes: &topology.base.nodes, + old_edges: &topology.base.edges, + }); + + topology.base.hosts = hosts; + topology.base.services = services; + topology.base.subnets = subnets; + topology.base.groups = groups; + topology.base.edges = edges; + topology.base.nodes = nodes; + topology.clear_stale(); + + service.update(&mut topology, user.into()).await?; + + // Return will be handled through event subscriber which triggers SSE + + Ok(Json(ApiResponse::success(()))) +} + +async fn lock( + State(state): State>, + RequireMember(user): RequireMember, + Json(mut topology): Json, +) -> ApiResult>> { + let 
service = Topology::get_service(&state); + + topology.lock(user.user_id); + + let updated = service.update(&mut topology, user.into()).await?; + + Ok(Json(ApiResponse::success(updated))) +} + +async fn unlock( + State(state): State>, + RequireMember(user): RequireMember, + Json(mut topology): Json, +) -> ApiResult>> { + let service = Topology::get_service(&state); + + topology.unlock(); + + let updated = service.update(&mut topology, user.into()).await?; + + Ok(Json(ApiResponse::success(updated))) } -async fn get_topology( +async fn staleness_stream( State(state): State>, _user: AuthenticatedUser, - Json(request): Json, -) -> ApiResult>> { - let service = &state.services.topology_service; - let graph = service.build_graph(request).await?; +) -> Sse>> { + let rx = state + .services + .topology_service + .subscribe_staleness_changes(); - let json = serde_json::to_value(&graph)?; + let stream = stream::unfold(rx, |mut rx| async move { + match rx.recv().await { + Ok(update) => { + let json = serde_json::to_string(&update).ok()?; + Some((Ok(Event::default().data(json)), rx)) + } + Err(_) => None, + } + }); - Ok(Json(ApiResponse::success(json))) + Sse::new(stream).keep_alive(KeepAlive::default()) } diff --git a/backend/src/server/topology/service/context.rs b/backend/src/server/topology/service/context.rs index 473aa2f9..bd717f38 100644 --- a/backend/src/server/topology/service/context.rs +++ b/backend/src/server/topology/service/context.rs @@ -8,7 +8,7 @@ use crate::server::{ }, subnets::r#impl::base::Subnet, topology::types::{ - api::TopologyOptions, + base::TopologyOptions, edges::Edge, nodes::{Node, NodeType}, }, @@ -140,9 +140,10 @@ impl<'a> TopologyContext<'a> { if let Some(host) = self.hosts.iter().find(|h| h.id == s.base.host_id) { return (self .options + .request .left_zone_service_categories .contains(&s.base.service_definition.category()) - || (self.options.show_gateway_in_left_zone + || (self.options.request.show_gateway_in_left_zone && 
s.base.service_definition.is_gateway())) && subnet.has_interface_with_service(host, s); } diff --git a/backend/src/server/topology/service/edge_builder.rs b/backend/src/server/topology/service/edge_builder.rs index e5e19290..ef97188e 100644 --- a/backend/src/server/topology/service/edge_builder.rs +++ b/backend/src/server/topology/service/edge_builder.rs @@ -140,6 +140,7 @@ impl EdgeBuilder { if ctx.interface_will_have_node(&origin_interface.id) { return vec![Edge { + id: Uuid::new_v4(), source: origin_interface.id, target: *first_subnet_id, edge_type: EdgeType::ServiceVirtualization { @@ -186,6 +187,7 @@ impl EdgeBuilder { && ctx.interface_will_have_node(&container_binding_interface_id) { return Some(Edge { + id: Uuid::new_v4(), source: origin_interface.id, target: container_binding_interface_id, edge_type: EdgeType::ServiceVirtualization { @@ -277,6 +279,7 @@ impl EdgeBuilder { )?; return Some(Edge { + id: Uuid::new_v4(), source: *proxmox_service_interface_id, target: i.id, edge_type: EdgeType::HostVirtualization { @@ -342,6 +345,7 @@ impl EdgeBuilder { )?; Some(Edge { + id: Uuid::new_v4(), source: origin_interface.id, target: interface.id, edge_type: EdgeType::Interface { host_id: host.id }, @@ -453,6 +457,7 @@ impl EdgeBuilder { }; return Some(Edge { + id: Uuid::new_v4(), source: source_interface, target: target_interface, edge_type: match group.base.group_type { diff --git a/backend/src/server/topology/service/main.rs b/backend/src/server/topology/service/main.rs index 7e0764b2..2c0b5eae 100644 --- a/backend/src/server/topology/service/main.rs +++ b/backend/src/server/topology/service/main.rs @@ -1,30 +1,73 @@ use std::{collections::HashMap, sync::Arc}; use anyhow::Error; -use petgraph::{Graph, graph::NodeIndex}; +use async_trait::async_trait; +use petgraph::{Graph, graph::NodeIndex, visit::EdgeRef}; +use tokio::sync::broadcast; use uuid::Uuid; use crate::server::{ - groups::service::GroupService, - hosts::service::HostService, + groups::{r#impl::base::Group, 
service::GroupService}, + hosts::{r#impl::base::Host, service::HostService}, services::{r#impl::base::Service, service::ServiceService}, - shared::{services::traits::CrudService, storage::filter::EntityFilter}, - subnets::service::SubnetService, + shared::{ + events::bus::EventBus, + services::traits::{CrudService, EventBusService}, + storage::{filter::EntityFilter, generic::GenericPostgresStorage}, + }, + subnets::{r#impl::base::Subnet, service::SubnetService}, topology::{ service::{ context::TopologyContext, edge_builder::EdgeBuilder, optimizer::main::TopologyOptimizer, planner::subnet_layout_planner::SubnetLayoutPlanner, }, - types::{api::TopologyOptions, edges::Edge, nodes::Node}, + types::{ + base::{Topology, TopologyOptions}, + edges::{Edge, EdgeHandle}, + nodes::Node, + }, }, }; pub struct TopologyService { + storage: Arc>, host_service: Arc, subnet_service: Arc, group_service: Arc, service_service: Arc, + event_bus: Arc, + pub staleness_tx: broadcast::Sender, +} + +impl EventBusService for TopologyService { + fn event_bus(&self) -> &Arc { + &self.event_bus + } + + fn get_network_id(&self, entity: &Topology) -> Option { + Some(entity.base.network_id) + } + fn get_organization_id(&self, _entity: &Topology) -> Option { + None + } +} + +#[async_trait] +impl CrudService for TopologyService { + fn storage(&self) -> &Arc> { + &self.storage + } +} + +pub struct BuildGraphParams<'a> { + pub options: &'a TopologyOptions, + pub hosts: &'a [Host], + pub subnets: &'a [Subnet], + pub services: &'a [Service], + pub groups: &'a [Group], + pub old_nodes: &'a [Node], + pub old_edges: &'a [Edge], } impl TopologyService { @@ -33,35 +76,73 @@ impl TopologyService { subnet_service: Arc, group_service: Arc, service_service: Arc, + storage: Arc>, + event_bus: Arc, ) -> Self { + let (staleness_tx, _) = broadcast::channel(100); Self { host_service, subnet_service, group_service, service_service, + storage, + event_bus, + staleness_tx, } } - pub async fn build_graph(&self, options: 
TopologyOptions) -> Result, Error> { - let network_filter = EntityFilter::unfiltered().network_ids(&options.network_ids); + pub fn subscribe_staleness_changes(&self) -> broadcast::Receiver { + self.staleness_tx.subscribe() + } + + pub async fn get_entity_data( + &self, + network_id: Uuid, + ) -> Result<(Vec, Vec, Vec), Error> { + let network_filter = EntityFilter::unfiltered().network_ids(&[network_id]); // Fetch all data let hosts = self.host_service.get_all(network_filter.clone()).await?; let subnets = self.subnet_service.get_all(network_filter.clone()).await?; let groups = self.group_service.get_all(network_filter.clone()).await?; - let services: Vec = self + + Ok((hosts, subnets, groups)) + } + + pub async fn get_service_data( + &self, + network_id: Uuid, + options: &TopologyOptions, + ) -> Result, Error> { + let network_filter = EntityFilter::unfiltered().network_ids(&[network_id]); + + Ok(self .service_service .get_all(network_filter.clone()) .await? - .into_iter() + .iter() .filter(|s| { !options + .request .hide_service_categories .contains(&s.base.service_definition.category()) }) - .collect(); + .cloned() + .collect()) + } + + pub fn build_graph(&self, params: BuildGraphParams) -> (Vec, Vec) { + let BuildGraphParams { + hosts, + subnets, + services, + groups, + old_edges, + old_nodes, + options, + } = params; // Create context to avoid parameter passing - let ctx = TopologyContext::new(&hosts, &subnets, &services, &groups, &options); + let ctx = TopologyContext::new(hosts, subnets, services, groups, options); // Create all edges (needed for anchor analysis) let mut all_edges = Vec::new(); @@ -73,7 +154,7 @@ impl TopologyService { let (container_edges, docker_bridge_host_subnet_id_to_group_on) = EdgeBuilder::create_containerized_service_edges( &ctx, - options.group_docker_bridges_by_host, + options.request.group_docker_bridges_by_host, ); all_edges.extend(container_edges); @@ -83,7 +164,7 @@ impl TopologyService { let (subnet_layouts, child_nodes) = 
layout_planner.create_subnet_child_nodes( &ctx, &mut all_edges, - options.group_docker_bridges_by_host, + options.request.group_docker_bridges_by_host, docker_bridge_host_subnet_id_to_group_on, ); @@ -109,6 +190,68 @@ impl TopologyService { // Add edges to graph EdgeBuilder::add_edges_to_graph(&mut graph, &node_indices, optimized_edges); - Ok(graph) + // Build previous graph to compare and determine if user edits should be persisted + // If nodes have changed edges, assume they have moved and user edits are no longer applicable + let mut old_graph: Graph = Graph::new(); + let old_node_indices: HashMap = old_nodes + .iter() + .map(|node| { + let node_id = node.id; + let node_idx = old_graph.add_node(node.clone()); + (node_id, node_idx) + }) + .collect(); + + EdgeBuilder::add_edges_to_graph(&mut old_graph, &old_node_indices, old_edges.to_vec()); + + // Create a map of old edges by their source/target for quick lookup + let mut old_edges_map: HashMap<(Uuid, Uuid), &Edge> = HashMap::new(); + for edge_ref in old_graph.edge_references() { + let edge = edge_ref.weight(); + old_edges_map.insert((edge.source, edge.target), edge); + } + + // Preserve handles for nodes with unchanged edge count + // First, collect all the edges that need updating + let mut edges_to_update: Vec<(petgraph::prelude::EdgeIndex, EdgeHandle, EdgeHandle)> = + Vec::new(); + + for node in graph.node_weights() { + if let Some(old_idx) = old_node_indices.get(&node.id) + && let Some(new_idx) = node_indices.get(&node.id) + { + let old_edge_count = old_graph.edges(*old_idx).count(); + let new_edge_count = graph.edges(*new_idx).count(); + + if old_edge_count == new_edge_count { + // Collect edges that match + for edge_ref in graph.edges(*new_idx) { + let new_edge = edge_ref.weight(); + if let Some(old_edge) = + old_edges_map.get(&(new_edge.source, new_edge.target)) + { + edges_to_update.push(( + edge_ref.id(), + old_edge.source_handle, + old_edge.target_handle, + )); + } + } + } + } + } + + // Now apply the
updates + for (edge_idx, source_handle, target_handle) in edges_to_update { + if let Some(edge) = graph.edge_weight_mut(edge_idx) { + edge.source_handle = source_handle; + edge.target_handle = target_handle; + } + } + + ( + graph.node_weights().cloned().collect(), + graph.edge_weights().cloned().collect(), + ) } } diff --git a/backend/src/server/topology/service/mod.rs b/backend/src/server/topology/service/mod.rs index 6531308a..1ada9dc5 100644 --- a/backend/src/server/topology/service/mod.rs +++ b/backend/src/server/topology/service/mod.rs @@ -3,3 +3,4 @@ pub mod edge_builder; pub mod main; pub mod optimizer; pub mod planner; +pub mod subscriber; diff --git a/backend/src/server/topology/service/planner/subnet_layout_planner.rs b/backend/src/server/topology/service/planner/subnet_layout_planner.rs index c1786c1e..a4172437 100644 --- a/backend/src/server/topology/service/planner/subnet_layout_planner.rs +++ b/backend/src/server/topology/service/planner/subnet_layout_planner.rs @@ -108,7 +108,7 @@ impl SubnetLayoutPlanner { .collect(); let hide_docker_bridge_vm_header = *subnet_type == SubnetType::DockerBridge - && ctx.options.hide_vm_title_on_docker_container; + && ctx.options.request.hide_vm_title_on_docker_container; if !hide_docker_bridge_vm_header { // If they have at least one interface on a common subnet @@ -230,9 +230,9 @@ impl SubnetLayoutPlanner { for interface in &host.base.interfaces { let subnet = ctx.get_subnet_by_id(interface.base.subnet_id); let subnet_type = subnet.map(|s| s.base.subnet_type).unwrap_or_default(); + let services = ctx.services; - let interface_bound_services: Vec<&Service> = ctx - .services + let interface_bound_services: Vec<&Service> = services .iter() .filter(|s| { // Services with a binding to the interface @@ -264,7 +264,7 @@ impl SubnetLayoutPlanner { &interface_bound_services, interface.id, header_text.is_some(), - ctx.options.hide_ports, + ctx.options.request.hide_ports, ), header: header_text, interface_id: 
Some(interface.id), diff --git a/backend/src/server/topology/service/subscriber.rs b/backend/src/server/topology/service/subscriber.rs new file mode 100644 index 00000000..8f19a47c --- /dev/null +++ b/backend/src/server/topology/service/subscriber.rs @@ -0,0 +1,201 @@ +use std::collections::HashMap; + +use anyhow::Error; +use async_trait::async_trait; +use uuid::Uuid; + +use crate::server::{ + auth::middleware::AuthenticatedEntity, + shared::{ + entities::{Entity, EntityDiscriminants}, + events::{ + bus::{EventFilter, EventSubscriber}, + types::{EntityOperation, Event}, + }, + services::traits::CrudService, + storage::filter::EntityFilter as StorageFilter, + }, + topology::service::main::TopologyService, +}; + +#[derive(Default)] +struct TopologyChanges { + updated_hosts: bool, + updated_services: bool, + updated_subnets: bool, + updated_groups: bool, + removed_hosts: std::collections::HashSet, + removed_services: std::collections::HashSet, + removed_subnets: std::collections::HashSet, + removed_groups: std::collections::HashSet, + should_mark_stale: bool, +} + +#[async_trait] +impl EventSubscriber for TopologyService { + fn event_filter(&self) -> EventFilter { + EventFilter::entity_only(HashMap::from([ + (EntityDiscriminants::Host, None), + (EntityDiscriminants::Service, None), + (EntityDiscriminants::Subnet, None), + (EntityDiscriminants::Group, None), + ( + EntityDiscriminants::Topology, + Some(vec![EntityOperation::Updated]), + ), + ])) + } + + async fn handle_events(&self, events: Vec) -> Result<(), Error> { + if events.is_empty() { + return Ok(()); + } + + // Collect all affected network IDs + let mut network_ids = std::collections::HashSet::new(); + + // Group events by network_id -> topology changes + let mut topology_updates: HashMap = HashMap::new(); + + for event in events { + if let Event::Entity(entity_event) = event + && let Some(network_id) = entity_event.network_id + { + // Check if any event triggers staleness + let trigger_stale = entity_event + 
.metadata + .get("trigger_stale") + .and_then(|v| serde_json::from_value::(v.clone()).ok()) + .unwrap_or(false); + + // Topology updates from changes to options should be applied immediately and not processed alongside + // other changes, otherwise another call to topology_service.update will be made which will trigger + // an infinite loop + if let Entity::Topology(mut topology) = entity_event.entity_type { + if trigger_stale { + topology.base.is_stale = true; + } + + topology.base.services = self + .get_service_data(network_id, &topology.base.options) + .await?; + + let _ = self.staleness_tx.send(topology).inspect_err(|e| { + tracing::debug!("Staleness notification skipped (no receivers): {}", e) + }); + continue; + } + + network_ids.insert(network_id); + + let changes = topology_updates.entry(network_id).or_default(); + + // Track removed entities + if entity_event.operation == EntityOperation::Deleted { + match entity_event.entity_type { + Entity::Host(_) => changes.removed_hosts.insert(entity_event.entity_id), + Entity::Service(_) => { + changes.removed_services.insert(entity_event.entity_id) + } + Entity::Subnet(_) => changes.removed_subnets.insert(entity_event.entity_id), + Entity::Group(_) => changes.removed_groups.insert(entity_event.entity_id), + _ => false, + }; + } + + if trigger_stale { + // User will be prompted to update entities + changes.should_mark_stale = true; + } else { + // It's safe to automatically update entities + match entity_event.entity_type { + Entity::Host(_) => changes.updated_hosts = true, + Entity::Service(_) => changes.updated_services = true, + Entity::Subnet(_) => changes.updated_subnets = true, + Entity::Group(_) => changes.updated_groups = true, + _ => (), + }; + } + } + } + + // Apply changes to all topologies in affected networks + for network_id in network_ids { + let network_filter = StorageFilter::unfiltered().network_ids(&[network_id]); + let topologies = self.get_all(network_filter).await?; + + let (hosts, subnets, 
groups) = self.get_entity_data(network_id).await?; + + if let Some(changes) = topology_updates.get(&network_id) { + for mut topology in topologies { + let services = self + .get_service_data(network_id, &topology.base.options) + .await?; + + // Apply removed entities + for host_id in &changes.removed_hosts { + if !topology.base.removed_hosts.contains(host_id) { + topology.base.removed_hosts.push(*host_id); + } + } + for service_id in &changes.removed_services { + if !topology.base.removed_services.contains(service_id) { + topology.base.removed_services.push(*service_id); + } + } + for subnet_id in &changes.removed_subnets { + if !topology.base.removed_subnets.contains(subnet_id) { + topology.base.removed_subnets.push(*subnet_id); + } + } + for group_id in &changes.removed_groups { + if !topology.base.removed_groups.contains(group_id) { + topology.base.removed_groups.push(*group_id); + } + } + + // Mark stale if needed + if changes.should_mark_stale { + topology.base.is_stale = true; + } + + if changes.updated_hosts { + topology.base.hosts = hosts.clone() + } + + if changes.updated_services { + topology.base.services = services + } + + if changes.updated_subnets { + topology.base.subnets = subnets.clone() + } + + if changes.updated_groups { + topology.base.groups = groups.clone(); + } + + // Update topology in database + let updated = self + .update(&mut topology, AuthenticatedEntity::System) + .await?; + + // Send the UPDATED topology to SSE + let _ = self.staleness_tx.send(updated).inspect_err(|e| { + tracing::debug!("Staleness notification skipped (no receivers): {}", e) + }); + } + } + } + + Ok(()) + } + + fn debounce_window_ms(&self) -> u64 { + 200 // Batch events within 200ms window + } + + fn name(&self) -> &str { + "topology_validation" + } +} diff --git a/backend/src/server/topology/types/api.rs b/backend/src/server/topology/types/api.rs index d1d6040e..9223bb68 100644 --- a/backend/src/server/topology/types/api.rs +++ 
b/backend/src/server/topology/types/api.rs @@ -1,15 +1,27 @@ use serde::{Deserialize, Serialize}; use uuid::Uuid; -use crate::server::services::r#impl::categories::ServiceCategory; +use crate::server::topology::types::base::Topology; #[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, Hash)] -pub struct TopologyOptions { - pub network_ids: Vec, - pub group_docker_bridges_by_host: bool, - pub hide_vm_title_on_docker_container: bool, - pub hide_ports: bool, - pub left_zone_service_categories: Vec, - pub hide_service_categories: Vec, - pub show_gateway_in_left_zone: bool, +pub struct TopologyUpdate { + topology_id: Uuid, + is_stale: bool, + removed_hosts: Vec, + removed_services: Vec, + removed_subnets: Vec, + removed_groups: Vec, +} + +impl From for TopologyUpdate { + fn from(value: Topology) -> Self { + Self { + removed_groups: value.base.removed_groups, + removed_hosts: value.base.removed_hosts, + removed_services: value.base.removed_services, + removed_subnets: value.base.removed_subnets, + is_stale: value.base.is_stale, + topology_id: value.id, + } + } } diff --git a/backend/src/server/topology/types/base.rs b/backend/src/server/topology/types/base.rs index a02562d7..0b2b79ff 100644 --- a/backend/src/server/topology/types/base.rs +++ b/backend/src/server/topology/types/base.rs @@ -1,26 +1,164 @@ -use crate::server::topology::types::api::TopologyOptions; +use crate::server::groups::r#impl::base::Group; +use crate::server::hosts::r#impl::base::Host; +use crate::server::services::r#impl::base::Service; +use crate::server::services::r#impl::categories::ServiceCategory; +use crate::server::shared::entities::ChangeTriggersTopologyStaleness; +use crate::server::subnets::r#impl::base::Subnet; use crate::server::topology::types::edges::Edge; +use crate::server::topology::types::edges::EdgeTypeDiscriminants; use crate::server::topology::types::nodes::Node; use chrono::{DateTime, Utc}; use serde::{Deserialize, Serialize}; -use std::hash::Hash; +use 
std::{fmt::Display, hash::Hash}; use uuid::Uuid; use validator::Validate; +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] +pub struct Topology { + pub id: Uuid, + pub created_at: DateTime, + pub updated_at: DateTime, + #[serde(flatten)] + pub base: TopologyBase, +} + #[derive(Debug, Clone, Validate, Serialize, Deserialize, Eq, PartialEq, Hash)] pub struct TopologyBase { #[validate(length(min = 0, max = 100))] - pub name: String, // "Home LAN", "VPN Network", etc. + pub name: String, pub options: TopologyOptions, + pub network_id: Uuid, pub nodes: Vec, pub edges: Vec, + pub hosts: Vec, + pub subnets: Vec, + pub services: Vec, + pub groups: Vec, + pub is_stale: bool, + pub last_refreshed: DateTime, + pub is_locked: bool, + pub locked_at: Option>, + pub locked_by: Option, + pub removed_hosts: Vec, + pub removed_subnets: Vec, + pub removed_services: Vec, + pub removed_groups: Vec, + pub parent_id: Option, } -#[derive(Debug, Clone, Serialize, Deserialize)] -pub struct Topology { - pub id: Uuid, - pub created_at: DateTime, - pub updated_at: DateTime, - #[serde(flatten)] - pub base: TopologyBase, +impl TopologyBase { + pub fn new(name: String, network_id: Uuid) -> Self { + Self { + name, + network_id, + options: TopologyOptions::default(), + nodes: vec![], + edges: vec![], + hosts: vec![], + subnets: vec![], + services: vec![], + groups: vec![], + is_stale: true, + last_refreshed: Utc::now(), + is_locked: false, + locked_at: None, + locked_by: None, + removed_hosts: vec![], + removed_subnets: vec![], + removed_services: vec![], + removed_groups: vec![], + parent_id: None, + } + } +} + +impl ChangeTriggersTopologyStaleness for Topology { + fn triggers_staleness(&self, other: Option) -> bool { + if let Some(other_topology) = other { + self.base.options.request != other_topology.base.options.request + } else { + false + } + } +} + +impl Topology { + pub fn lock(&mut self, locked_by: Uuid) { + self.base.is_locked = true; + self.base.locked_at = 
Some(Utc::now()); + self.base.locked_by = Some(locked_by) + } + + pub fn unlock(&mut self) { + self.base.is_locked = false; + self.base.locked_at = None; + self.base.locked_by = None; + } + + pub fn clear_stale(&mut self) { + self.base.removed_groups = vec![]; + self.base.removed_hosts = vec![]; + self.base.removed_services = vec![]; + self.base.removed_subnets = vec![]; + self.base.is_stale = false; + self.base.last_refreshed = Utc::now() + } +} + +impl Display for Topology { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Topology {{ id: {}, name: {} }}", + self.id, self.base.name + ) + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq, Hash)] +pub struct TopologyOptions { + pub local: TopologyLocalOptions, + pub request: TopologyRequestOptions, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)] +pub struct TopologyLocalOptions { + pub left_zone_title: String, + pub no_fade_edges: bool, + pub hide_resize_handles: bool, + pub hide_edge_types: Vec, +} + +impl Default for TopologyLocalOptions { + fn default() -> Self { + Self { + left_zone_title: "Infrastructure".to_string(), + no_fade_edges: false, + hide_resize_handles: false, + hide_edge_types: Vec::new(), + } + } +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)] +pub struct TopologyRequestOptions { + pub group_docker_bridges_by_host: bool, + pub hide_vm_title_on_docker_container: bool, + pub hide_ports: bool, + pub left_zone_service_categories: Vec, + pub hide_service_categories: Vec, + pub show_gateway_in_left_zone: bool, +} + +impl Default for TopologyRequestOptions { + fn default() -> Self { + Self { + group_docker_bridges_by_host: false, + hide_vm_title_on_docker_container: false, + hide_ports: false, + left_zone_service_categories: vec![ServiceCategory::DNS, ServiceCategory::ReverseProxy], + hide_service_categories: Vec::new(), + show_gateway_in_left_zone: true, + } + } } diff --git 
a/backend/src/server/topology/types/edges.rs b/backend/src/server/topology/types/edges.rs index f05343bb..651c8888 100644 --- a/backend/src/server/topology/types/edges.rs +++ b/backend/src/server/topology/types/edges.rs @@ -1,7 +1,8 @@ use crate::server::{ groups::r#impl::types::GroupTypeDiscriminants, shared::{ - entities::Entity, + concepts::Concept, + entities::EntityDiscriminants, types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, }, subnets::r#impl::base::Subnet, @@ -13,6 +14,7 @@ use uuid::Uuid; #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, Hash)] pub struct Edge { + pub id: Uuid, pub source: Uuid, pub target: Uuid, #[serde(flatten)] @@ -263,11 +265,11 @@ impl HasId for EdgeType { impl EntityMetadataProvider for EdgeType { fn color(&self) -> &'static str { match self { - EdgeType::RequestPath { .. } => Entity::Group.color(), - EdgeType::HubAndSpoke { .. } => Entity::Group.color(), - EdgeType::Interface { .. } => Entity::Host.color(), - EdgeType::HostVirtualization { .. } => Entity::Virtualization.color(), - EdgeType::ServiceVirtualization { .. } => Entity::Virtualization.color(), + EdgeType::RequestPath { .. } => EntityDiscriminants::Group.color(), + EdgeType::HubAndSpoke { .. } => EntityDiscriminants::Group.color(), + EdgeType::Interface { .. } => EntityDiscriminants::Host.color(), + EdgeType::HostVirtualization { .. } => Concept::Virtualization.color(), + EdgeType::ServiceVirtualization { .. } => Concept::Virtualization.color(), } } @@ -275,9 +277,9 @@ impl EntityMetadataProvider for EdgeType { match self { EdgeType::RequestPath { .. } => GroupTypeDiscriminants::RequestPath.icon(), EdgeType::HubAndSpoke { .. } => GroupTypeDiscriminants::HubAndSpoke.icon(), - EdgeType::Interface { .. } => Entity::Host.icon(), - EdgeType::HostVirtualization { .. } => Entity::Virtualization.icon(), - EdgeType::ServiceVirtualization { .. } => Entity::Virtualization.icon(), + EdgeType::Interface { .. 
} => EntityDiscriminants::Host.icon(), + EdgeType::HostVirtualization { .. } => Concept::Virtualization.icon(), + EdgeType::ServiceVirtualization { .. } => Concept::Virtualization.icon(), } } } diff --git a/backend/src/server/topology/types/handlers.rs b/backend/src/server/topology/types/handlers.rs new file mode 100644 index 00000000..43130bb0 --- /dev/null +++ b/backend/src/server/topology/types/handlers.rs @@ -0,0 +1,21 @@ +use crate::server::shared::storage::traits::StorableEntity; +use crate::server::{ + shared::handlers::traits::CrudHandlers, + topology::{service::main::TopologyService, types::base::Topology}, +}; + +impl CrudHandlers for Topology { + type Service = TopologyService; + + fn get_service(state: &crate::server::config::AppState) -> &Self::Service { + &state.services.topology_service + } + + fn entity_name() -> &'static str { + Self::table_name() + } + + fn validate(&self) -> Result<(), String> { + Ok(()) + } +} diff --git a/backend/src/server/topology/types/mod.rs b/backend/src/server/topology/types/mod.rs index cd328ad7..758c56bf 100644 --- a/backend/src/server/topology/types/mod.rs +++ b/backend/src/server/topology/types/mod.rs @@ -1,5 +1,7 @@ pub mod api; pub mod base; pub mod edges; +pub mod handlers; pub mod layout; pub mod nodes; +pub mod storage; diff --git a/backend/src/server/topology/types/storage.rs b/backend/src/server/topology/types/storage.rs new file mode 100644 index 00000000..8fc56a2f --- /dev/null +++ b/backend/src/server/topology/types/storage.rs @@ -0,0 +1,185 @@ +use crate::server::groups::r#impl::base::Group; +use crate::server::services::r#impl::base::Service; +use crate::server::subnets::r#impl::base::Subnet; +use crate::server::{ + hosts::r#impl::base::Host, + shared::storage::traits::{SqlValue, StorableEntity}, + topology::types::{ + base::{Topology, TopologyBase, TopologyOptions}, + edges::Edge, + nodes::Node, + }, +}; +use chrono::{DateTime, Utc}; +use sqlx::Row; +use sqlx::postgres::PgRow; +use uuid::Uuid; + +impl 
StorableEntity for Topology { + type BaseData = TopologyBase; + + fn table_name() -> &'static str { + "topologies" + } + + fn get_base(&self) -> Self::BaseData { + self.base.clone() + } + + fn new(base: Self::BaseData) -> Self { + let now = chrono::Utc::now(); + + Self { + id: Uuid::new_v4(), + created_at: now, + updated_at: now, + base, + } + } + + fn id(&self) -> Uuid { + self.id + } + + fn created_at(&self) -> DateTime { + self.created_at + } + + fn updated_at(&self) -> DateTime { + self.updated_at + } + + fn set_updated_at(&mut self, time: DateTime) { + self.updated_at = time; + } + + fn to_params(&self) -> Result<(Vec<&'static str>, Vec), anyhow::Error> { + let Self { + id, + created_at, + updated_at, + base: + Self::BaseData { + name, + network_id, + nodes, + edges, + options, + hosts, + services, + subnets, + groups, + is_stale, + last_refreshed, + is_locked, + locked_at, + locked_by, + removed_hosts, + removed_services, + removed_subnets, + removed_groups, + parent_id, + }, + } = self.clone(); + + Ok(( + vec![ + "id", + "created_at", + "updated_at", + "name", + "network_id", + "nodes", + "edges", + "options", + "hosts", + "subnets", + "groups", + "services", + "is_stale", + "last_refreshed", + "is_locked", + "locked_at", + "locked_by", + "removed_hosts", + "removed_services", + "removed_subnets", + "removed_groups", + "parent_id", + ], + vec![ + SqlValue::Uuid(id), + SqlValue::Timestamp(created_at), + SqlValue::Timestamp(updated_at), + SqlValue::String(name), + SqlValue::Uuid(network_id), + SqlValue::Nodes(nodes), + SqlValue::Edges(edges), + SqlValue::TopologyOptions(options), + SqlValue::Hosts(hosts), + SqlValue::Subnets(subnets), + SqlValue::Groups(groups), + SqlValue::Services(services), + SqlValue::Bool(is_stale), + SqlValue::Timestamp(last_refreshed), + SqlValue::Bool(is_locked), + SqlValue::OptionTimestamp(locked_at), + SqlValue::OptionalUuid(locked_by), + SqlValue::UuidArray(removed_hosts), + SqlValue::UuidArray(removed_services), + 
SqlValue::UuidArray(removed_subnets), + SqlValue::UuidArray(removed_groups), + SqlValue::OptionalUuid(parent_id), + ], + )) + } + + fn from_row(row: &PgRow) -> Result { + // Parse JSON fields safely + let nodes: Vec = serde_json::from_value(row.get::("nodes")) + .map_err(|e| anyhow::anyhow!("Failed to deserialize nodes: {}", e))?; + let edges: Vec = serde_json::from_value(row.get::("edges")) + .map_err(|e| anyhow::anyhow!("Failed to deserialize edges: {}", e))?; + let options: TopologyOptions = + serde_json::from_value(row.get::("options")) + .map_err(|e| anyhow::anyhow!("Failed to deserialize options: {}", e))?; + + let hosts: Vec = serde_json::from_value(row.get::("hosts")) + .map_err(|e| anyhow::anyhow!("Failed to deserialize hosts: {}", e))?; + let subnets: Vec = + serde_json::from_value(row.get::("subnets")) + .map_err(|e| anyhow::anyhow!("Failed to deserialize subnets: {}", e))?; + let services: Vec = + serde_json::from_value(row.get::("services")) + .map_err(|e| anyhow::anyhow!("Failed to deserialize services: {}", e))?; + let groups: Vec = serde_json::from_value(row.get::("groups")) + .map_err(|e| anyhow::anyhow!("Failed to deserialize groups: {}", e))?; + + Ok(Topology { + id: row.get("id"), + created_at: row.get("created_at"), + updated_at: row.get("updated_at"), + base: TopologyBase { + name: row.get("name"), + network_id: row.get("network_id"), + is_stale: row.get("is_stale"), + last_refreshed: row.get("last_refreshed"), + is_locked: row.get("is_locked"), + locked_at: row.get("locked_at"), + locked_by: row.get("locked_by"), + removed_groups: row.get("removed_groups"), + removed_hosts: row.get("removed_hosts"), + removed_services: row.get("removed_services"), + removed_subnets: row.get("removed_subnets"), + parent_id: row.get("parent_id"), + nodes, + edges, + hosts, + subnets, + services, + groups, + options, + }, + }) + } +} diff --git a/backend/src/server/users/handlers.rs b/backend/src/server/users/handlers.rs index 63291d38..6d6d9824 100644 --- 
a/backend/src/server/users/handlers.rs +++ b/backend/src/server/users/handlers.rs @@ -1,5 +1,7 @@ use crate::server::auth::middleware::{AuthenticatedUser, RequireAdmin, RequireMember}; -use crate::server::shared::handlers::traits::{CrudHandlers, delete_handler, get_by_id_handler}; +use crate::server::shared::handlers::traits::{ + CrudHandlers, bulk_delete_handler, delete_handler, get_by_id_handler, +}; use crate::server::shared::storage::filter::EntityFilter; use crate::server::shared::types::api::ApiError; use crate::server::users::r#impl::base::User; @@ -13,7 +15,7 @@ use crate::server::{ }; use anyhow::anyhow; use axum::extract::Path; -use axum::routing::{delete, get, put}; +use axum::routing::{delete, get, post, put}; use axum::{Router, extract::State, response::Json}; use std::sync::Arc; use uuid::Uuid; @@ -24,6 +26,7 @@ pub fn create_router() -> Router> { .route("/{id}", put(update_user)) .route("/{id}", delete(delete_user)) .route("/{id}", get(get_by_id_handler::)) + .route("/bulk-delete", post(bulk_delete_handler::)) } pub async fn get_all_users( @@ -110,7 +113,7 @@ pub async fn update_user( } let updated = service - .update(&mut request) + .update(&mut request, user.into()) .await .map_err(|e| ApiError::internal_error(&e.to_string()))?; diff --git a/backend/src/server/users/impl/base.rs b/backend/src/server/users/impl/base.rs index 3cff19c0..f2331950 100644 --- a/backend/src/server/users/impl/base.rs +++ b/backend/src/server/users/impl/base.rs @@ -2,7 +2,10 @@ use std::fmt::Display; use std::str::FromStr; use crate::server::{ - shared::storage::traits::{SqlValue, StorableEntity}, + shared::{ + entities::ChangeTriggersTopologyStaleness, + storage::traits::{SqlValue, StorableEntity}, + }, users::r#impl::permissions::UserOrgPermissions, }; use anyhow::{Error, Result}; @@ -14,7 +17,7 @@ use sqlx::postgres::PgRow; use uuid::Uuid; use validator::Validate; -#[derive(Debug, Clone, Serialize, Deserialize, Validate)] +#[derive(Debug, Clone, Serialize, Deserialize, 
Validate, PartialEq, Eq, Hash)] pub struct UserBase { pub email: EmailAddress, pub organization_id: Uuid, @@ -93,7 +96,7 @@ impl UserBase { } } -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Hash)] pub struct User { pub id: Uuid, pub created_at: DateTime, @@ -115,6 +118,12 @@ impl Display for User { } } +impl ChangeTriggersTopologyStaleness for User { + fn triggers_staleness(&self, _other: Option) -> bool { + false + } +} + impl StorableEntity for User { type BaseData = UserBase; diff --git a/backend/src/server/users/impl/permissions.rs b/backend/src/server/users/impl/permissions.rs index 734669dd..2766a6bb 100644 --- a/backend/src/server/users/impl/permissions.rs +++ b/backend/src/server/users/impl/permissions.rs @@ -3,12 +3,22 @@ use std::{cmp::Ordering, str::FromStr}; use strum::{Display, EnumIter, IntoEnumIterator, IntoStaticStr}; use crate::server::shared::{ - entities::Entity, + entities::EntityDiscriminants, types::metadata::{EntityMetadataProvider, HasId, TypeMetadataProvider}, }; #[derive( - Debug, Clone, Copy, Serialize, Deserialize, Display, PartialEq, Eq, EnumIter, IntoStaticStr, + Debug, + Clone, + Copy, + Serialize, + Deserialize, + Display, + PartialEq, + Eq, + EnumIter, + IntoStaticStr, + Hash, )] pub enum UserOrgPermissions { Owner, @@ -75,11 +85,11 @@ impl HasId for UserOrgPermissions { impl EntityMetadataProvider for UserOrgPermissions { fn color(&self) -> &'static str { - Entity::User.color() + EntityDiscriminants::User.color() } fn icon(&self) -> &'static str { - Entity::User.icon() + EntityDiscriminants::User.icon() } } diff --git a/backend/src/server/users/service.rs b/backend/src/server/users/service.rs index 471cdd6f..36ab6f40 100644 --- a/backend/src/server/users/service.rs +++ b/backend/src/server/users/service.rs @@ -1,55 +1,53 @@ use crate::server::{ + auth::middleware::AuthenticatedEntity, shared::{ - services::traits::CrudService, + 
entities::ChangeTriggersTopologyStaleness, + events::{ + bus::EventBus, + types::{EntityEvent, EntityOperation}, + }, + services::traits::{CrudService, EventBusService}, storage::{ filter::EntityFilter, generic::GenericPostgresStorage, traits::{StorableEntity, Storage}, }, }, - users::r#impl::{ - base::{User, UserBase}, - permissions::UserOrgPermissions, - }, + users::r#impl::{base::User, permissions::UserOrgPermissions}, }; use anyhow::Error; use anyhow::Result; use async_trait::async_trait; -use email_address::EmailAddress; +use chrono::Utc; use std::sync::Arc; use uuid::Uuid; pub struct UserService { user_storage: Arc>, + event_bus: Arc, } -#[async_trait] -impl CrudService for UserService { - fn storage(&self) -> &Arc> { - &self.user_storage +impl EventBusService for UserService { + fn event_bus(&self) -> &Arc { + &self.event_bus } -} -impl UserService { - pub fn new(user_storage: Arc>) -> Self { - Self { user_storage } + fn get_network_id(&self, _entity: &User) -> Option { + None } - - pub async fn get_user_by_oidc(&self, oidc_subject: &str) -> Result> { - let oidc_filter = EntityFilter::unfiltered().oidc_subject(oidc_subject.to_string()); - self.user_storage.get_one(oidc_filter).await + fn get_organization_id(&self, entity: &User) -> Option { + Some(entity.base.organization_id) } +} - pub async fn get_organization_owners(&self, organization_id: &Uuid) -> Result> { - let filter: EntityFilter = EntityFilter::unfiltered() - .organization_id(organization_id) - .user_permissions(&UserOrgPermissions::Owner); - - self.user_storage.get_all(filter).await +#[async_trait] +impl CrudService for UserService { + fn storage(&self) -> &Arc> { + &self.user_storage } /// Create a new user - pub async fn create_user(&self, user: User) -> Result { + async fn create(&self, user: User, authentication: AuthenticatedEntity) -> Result { let existing_user = self .user_storage .get_one(EntityFilter::unfiltered().email(&user.base.email)) @@ -61,87 +59,47 @@ impl UserService { )); } - 
self.user_storage.create(&User::new(user.base)).await - } - - /// Create new user with OIDC (no password) - pub async fn create_user_with_oidc( - &self, - email: EmailAddress, - oidc_subject: String, - oidc_provider: Option, - organization_id: Uuid, - permissions: UserOrgPermissions, - ) -> Result { - let user = User::new(UserBase::new_oidc( - email, - oidc_subject, - oidc_provider, - organization_id, - permissions, - )); + let created = self.user_storage.create(&User::new(user.base)).await?; + let trigger_stale = created.triggers_staleness(None); + + self.event_bus() + .publish_entity(EntityEvent { + id: Uuid::new_v4(), + entity_id: created.id, + network_id: self.get_network_id(&created), + organization_id: self.get_organization_id(&created), + entity_type: created.clone().into(), + operation: EntityOperation::Created, + timestamp: Utc::now(), + metadata: serde_json::json!({ + "trigger_stale": trigger_stale + }), + authentication, + }) + .await?; - self.create_user(user).await + Ok(created) } +} - /// Create new user with password (no OIDC) - pub async fn create_user_with_password( - &self, - email: EmailAddress, - password_hash: String, - organization_id: Uuid, - permissions: UserOrgPermissions, - ) -> Result { - let user = User::new(UserBase::new_password( - email, - password_hash, - organization_id, - permissions, - )); - - self.create_user(user).await +impl UserService { + pub fn new(user_storage: Arc>, event_bus: Arc) -> Self { + Self { + user_storage, + event_bus, + } } - /// Link OIDC to existing user - pub async fn link_oidc( - &self, - user_id: &Uuid, - oidc_subject: String, - oidc_provider: String, - ) -> Result { - let mut user = self - .get_by_id(user_id) - .await? 
- .ok_or_else(|| anyhow::anyhow!("User not found"))?; - - user.base.oidc_provider = Some(oidc_provider); - user.base.oidc_subject = Some(oidc_subject); - user.base.oidc_linked_at = Some(chrono::Utc::now()); - - self.user_storage.update(&mut user).await?; - Ok(user) + pub async fn get_user_by_oidc(&self, oidc_subject: &str) -> Result> { + let oidc_filter = EntityFilter::unfiltered().oidc_subject(oidc_subject.to_string()); + self.user_storage.get_one(oidc_filter).await } - /// Unlink OIDC from user (requires password to be set) - pub async fn unlink_oidc(&self, user_id: &Uuid) -> Result { - let mut user = self - .get_by_id(user_id) - .await? - .ok_or_else(|| anyhow::anyhow!("User not found"))?; - - // Require password before unlinking - if user.base.password_hash.is_none() { - return Err(anyhow::anyhow!( - "Cannot unlink OIDC - no password set. Set a password first." - )); - } - - user.base.oidc_provider = None; - user.base.oidc_subject = None; - user.base.oidc_linked_at = None; - user.updated_at = chrono::Utc::now(); + pub async fn get_organization_owners(&self, organization_id: &Uuid) -> Result> { + let filter: EntityFilter = EntityFilter::unfiltered() + .organization_id(organization_id) + .user_permissions(&UserOrgPermissions::Owner); - self.user_storage.update(&mut user).await?; - Ok(user) + self.user_storage.get_all(filter).await } } diff --git a/backend/src/tests/daemon_config.json b/backend/src/tests/daemon_config.json index 56ba7771..5061c2ea 100644 --- a/backend/src/tests/daemon_config.json +++ b/backend/src/tests/daemon_config.json @@ -1,6 +1,6 @@ { "server_url": "http://server:60072", - "network_id": "f94407b3-bad9-4338-bbfc-7ad5cb0c039a", + "network_id": "169c1636-c0f2-4883-950c-6d3a40053110", "server_target": null, "server_port": null, "daemon_port": 60073, @@ -9,10 +9,10 @@ "heartbeat_interval": 30, "bind_address": "0.0.0.0", "concurrent_scans": 15, - "id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", + "id": "1dd07be9-cbe2-446c-877d-6df902e372fb", 
"last_heartbeat": null, - "host_id": "7196b058-3317-4da1-a13e-09e60d5cc77c", - "daemon_api_key": "0e065bb45698437d8f85d3c11cde6626", + "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4", + "daemon_api_key": "ca1f36a8fe5c4958b608375891f41661", "docker_proxy": null, "mode": "Push" } \ No newline at end of file diff --git a/backend/src/tests/netvisor.sql b/backend/src/tests/netvisor.sql index 989135fc..bf1fe9a5 100644 --- a/backend/src/tests/netvisor.sql +++ b/backend/src/tests/netvisor.sql @@ -2,7 +2,7 @@ -- PostgreSQL database dump -- -\restrict lAZKgSAeM5hq62C1gqi2XwedG103NUJLKDgNJYiEQyRVHgLbbzWsShfqwVIjEzP +\restrict iT1L5Xfu7uX8R7DBaYH8wyZJM8XMSgKaqFOpXGfCFJRoXlLxXBHbY4EydMBB7jt -- Dumped from database version 17.7 -- Dumped by pg_dump version 17.7 @@ -20,6 +20,7 @@ SET client_min_messages = warning; SET row_security = off; ALTER TABLE IF EXISTS ONLY public.users DROP CONSTRAINT IF EXISTS users_organization_id_fkey; +ALTER TABLE IF EXISTS ONLY public.topologies DROP CONSTRAINT IF EXISTS topologies_network_id_fkey; ALTER TABLE IF EXISTS ONLY public.subnets DROP CONSTRAINT IF EXISTS subnets_network_id_fkey; ALTER TABLE IF EXISTS ONLY public.services DROP CONSTRAINT IF EXISTS services_network_id_fkey; ALTER TABLE IF EXISTS ONLY public.services DROP CONSTRAINT IF EXISTS services_host_id_fkey; @@ -33,6 +34,7 @@ ALTER TABLE IF EXISTS ONLY public.api_keys DROP CONSTRAINT IF EXISTS api_keys_ne DROP INDEX IF EXISTS public.idx_users_organization; DROP INDEX IF EXISTS public.idx_users_oidc_provider_subject; DROP INDEX IF EXISTS public.idx_users_email_lower; +DROP INDEX IF EXISTS public.idx_topologies_network; DROP INDEX IF EXISTS public.idx_subnets_network; DROP INDEX IF EXISTS public.idx_services_network; DROP INDEX IF EXISTS public.idx_services_host_id; @@ -48,6 +50,7 @@ DROP INDEX IF EXISTS public.idx_api_keys_network; DROP INDEX IF EXISTS public.idx_api_keys_key; ALTER TABLE IF EXISTS ONLY tower_sessions.session DROP CONSTRAINT IF EXISTS session_pkey; ALTER TABLE IF 
EXISTS ONLY public.users DROP CONSTRAINT IF EXISTS users_pkey; +ALTER TABLE IF EXISTS ONLY public.topologies DROP CONSTRAINT IF EXISTS topologies_pkey; ALTER TABLE IF EXISTS ONLY public.subnets DROP CONSTRAINT IF EXISTS subnets_pkey; ALTER TABLE IF EXISTS ONLY public.services DROP CONSTRAINT IF EXISTS services_pkey; ALTER TABLE IF EXISTS ONLY public.organizations DROP CONSTRAINT IF EXISTS organizations_pkey; @@ -61,6 +64,7 @@ ALTER TABLE IF EXISTS ONLY public.api_keys DROP CONSTRAINT IF EXISTS api_keys_ke ALTER TABLE IF EXISTS ONLY public._sqlx_migrations DROP CONSTRAINT IF EXISTS _sqlx_migrations_pkey; DROP TABLE IF EXISTS tower_sessions.session; DROP TABLE IF EXISTS public.users; +DROP TABLE IF EXISTS public.topologies; DROP TABLE IF EXISTS public.subnets; DROP TABLE IF EXISTS public.services; DROP TABLE IF EXISTS public.organizations; @@ -190,7 +194,7 @@ CREATE TABLE public.hosts ( description text, target jsonb NOT NULL, interfaces jsonb, - services jsonb, + services uuid[], ports jsonb, source jsonb NOT NULL, virtualization jsonb, @@ -296,6 +300,38 @@ CREATE TABLE public.subnets ( ALTER TABLE public.subnets OWNER TO postgres; +-- +-- Name: topologies; Type: TABLE; Schema: public; Owner: postgres +-- + +CREATE TABLE public.topologies ( + id uuid DEFAULT gen_random_uuid() NOT NULL, + network_id uuid NOT NULL, + name text NOT NULL, + edges jsonb NOT NULL, + nodes jsonb NOT NULL, + options jsonb NOT NULL, + hosts jsonb NOT NULL, + subnets jsonb NOT NULL, + services jsonb NOT NULL, + groups jsonb NOT NULL, + is_stale boolean, + last_refreshed timestamp with time zone DEFAULT now() NOT NULL, + is_locked boolean, + locked_at timestamp with time zone, + locked_by uuid, + removed_hosts uuid[], + removed_services uuid[], + removed_subnets uuid[], + removed_groups uuid[], + parent_id uuid, + created_at timestamp with time zone DEFAULT now() NOT NULL, + updated_at timestamp with time zone DEFAULT now() NOT NULL +); + + +ALTER TABLE public.topologies OWNER TO postgres; + 
-- -- Name: users; Type: TABLE; Schema: public; Owner: postgres -- @@ -348,24 +384,25 @@ ALTER TABLE tower_sessions.session OWNER TO postgres; -- COPY public._sqlx_migrations (version, description, installed_on, success, checksum, execution_time) FROM stdin; -20251006215000 users 2025-11-19 23:32:54.300542+00 t \\x4f13ce14ff67ef0b7145987c7b22b588745bf9fbb7b673450c26a0f2f9a36ef8ca980e456c8d77cfb1b2d7a4577a64d7 3390316 -20251006215100 networks 2025-11-19 23:32:54.30461+00 t \\xeaa5a07a262709f64f0c59f31e25519580c79e2d1a523ce72736848946a34b17dd9adc7498eaf90551af6b7ec6d4e0e3 3713737 -20251006215151 create hosts 2025-11-19 23:32:54.308648+00 t \\x6ec7487074c0724932d21df4cf1ed66645313cf62c159a7179e39cbc261bcb81a24f7933a0e3cf58504f2a90fc5c1962 3762878 -20251006215155 create subnets 2025-11-19 23:32:54.312737+00 t \\xefb5b25742bd5f4489b67351d9f2494a95f307428c911fd8c5f475bfb03926347bdc269bbd048d2ddb06336945b27926 3564590 -20251006215201 create groups 2025-11-19 23:32:54.316652+00 t \\x0a7032bf4d33a0baf020e905da865cde240e2a09dda2f62aa535b2c5d4b26b20be30a3286f1b5192bd94cd4a5dbb5bcd 3708206 -20251006215204 create daemons 2025-11-19 23:32:54.320694+00 t \\xcfea93403b1f9cf9aac374711d4ac72d8a223e3c38a1d2a06d9edb5f94e8a557debac3668271f8176368eadc5105349f 4278937 -20251006215212 create services 2025-11-19 23:32:54.32532+00 t \\xd5b07f82fc7c9da2782a364d46078d7d16b5c08df70cfbf02edcfe9b1b24ab6024ad159292aeea455f15cfd1f4740c1d 4883509 -20251029193448 user-auth 2025-11-19 23:32:54.330535+00 t \\xfde8161a8db89d51eeade7517d90a41d560f19645620f2298f78f116219a09728b18e91251ae31e46a47f6942d5a9032 3470214 -20251030044828 daemon api 2025-11-19 23:32:54.334316+00 t \\x181eb3541f51ef5b038b2064660370775d1b364547a214a20dde9c9d4bb95a1c273cd4525ef29e61fa65a3eb4fee0400 1676323 -20251030170438 host-hide 2025-11-19 23:32:54.336288+00 t \\x87c6fda7f8456bf610a78e8e98803158caa0e12857c5bab466a5bb0004d41b449004a68e728ca13f17e051f662a15454 1076640 -20251102224919 create discovery 2025-11-19 23:32:54.337666+00 
t \\xb32a04abb891aba48f92a059fae7341442355ca8e4af5d109e28e2a4f79ee8e11b2a8f40453b7f6725c2dd6487f26573 9268893 -20251106235621 normalize-daemon-cols 2025-11-19 23:32:54.347274+00 t \\x5b137118d506e2708097c432358bf909265b3cf3bacd662b02e2c81ba589a9e0100631c7801cffd9c57bb10a6674fb3b 2522124 -20251107034459 api keys 2025-11-19 23:32:54.350094+00 t \\x3133ec043c0c6e25b6e55f7da84cae52b2a72488116938a2c669c8512c2efe72a74029912bcba1f2a2a0a8b59ef01dde 10725899 -20251107222650 oidc-auth 2025-11-19 23:32:54.36118+00 t \\xd349750e0298718cbcd98eaff6e152b3fb45c3d9d62d06eedeb26c75452e9ce1af65c3e52c9f2de4bd532939c2f31096 20024548 -20251110181948 orgs-billing 2025-11-19 23:32:54.381546+00 t \\x5bbea7a2dfc9d00213bd66b473289ddd66694eff8a4f3eaab937c985b64c5f8c3ad2d64e960afbb03f335ac6766687aa 10027372 -20251113223656 group-enhancements 2025-11-19 23:32:54.391931+00 t \\xbe0699486d85df2bd3edc1f0bf4f1f096d5b6c5070361702c4d203ec2bb640811be88bb1979cfe51b40805ad84d1de65 993434 -20251117032720 daemon-mode 2025-11-19 23:32:54.393217+00 t \\xdd0d899c24b73d70e9970e54b2c748d6b6b55c856ca0f8590fe990da49cc46c700b1ce13f57ff65abd6711f4bd8a6481 1093611 -20251118143058 set-default-plan 2025-11-19 23:32:54.394615+00 t \\xd19142607aef84aac7cfb97d60d29bda764d26f513f2c72306734c03cec2651d23eee3ce6cacfd36ca52dbddc462f917 1264388 +20251006215000 users 2025-11-22 05:43:48.457668+00 t \\x4f13ce14ff67ef0b7145987c7b22b588745bf9fbb7b673450c26a0f2f9a36ef8ca980e456c8d77cfb1b2d7a4577a64d7 4725095 +20251006215100 networks 2025-11-22 05:43:48.463753+00 t \\xeaa5a07a262709f64f0c59f31e25519580c79e2d1a523ce72736848946a34b17dd9adc7498eaf90551af6b7ec6d4e0e3 4870373 +20251006215151 create hosts 2025-11-22 05:43:48.468989+00 t \\x6ec7487074c0724932d21df4cf1ed66645313cf62c159a7179e39cbc261bcb81a24f7933a0e3cf58504f2a90fc5c1962 4550465 +20251006215155 create subnets 2025-11-22 05:43:48.47397+00 t \\xefb5b25742bd5f4489b67351d9f2494a95f307428c911fd8c5f475bfb03926347bdc269bbd048d2ddb06336945b27926 4713851 +20251006215201 create 
groups 2025-11-22 05:43:48.479135+00 t \\x0a7032bf4d33a0baf020e905da865cde240e2a09dda2f62aa535b2c5d4b26b20be30a3286f1b5192bd94cd4a5dbb5bcd 6298983 +20251006215204 create daemons 2025-11-22 05:43:48.485844+00 t \\xcfea93403b1f9cf9aac374711d4ac72d8a223e3c38a1d2a06d9edb5f94e8a557debac3668271f8176368eadc5105349f 4807546 +20251006215212 create services 2025-11-22 05:43:48.491035+00 t \\xd5b07f82fc7c9da2782a364d46078d7d16b5c08df70cfbf02edcfe9b1b24ab6024ad159292aeea455f15cfd1f4740c1d 5889097 +20251029193448 user-auth 2025-11-22 05:43:48.497286+00 t \\xfde8161a8db89d51eeade7517d90a41d560f19645620f2298f78f116219a09728b18e91251ae31e46a47f6942d5a9032 7823826 +20251030044828 daemon api 2025-11-22 05:43:48.505447+00 t \\x181eb3541f51ef5b038b2064660370775d1b364547a214a20dde9c9d4bb95a1c273cd4525ef29e61fa65a3eb4fee0400 1668178 +20251030170438 host-hide 2025-11-22 05:43:48.507434+00 t \\x87c6fda7f8456bf610a78e8e98803158caa0e12857c5bab466a5bb0004d41b449004a68e728ca13f17e051f662a15454 1203209 +20251102224919 create discovery 2025-11-22 05:43:48.509024+00 t \\xb32a04abb891aba48f92a059fae7341442355ca8e4af5d109e28e2a4f79ee8e11b2a8f40453b7f6725c2dd6487f26573 11800605 +20251106235621 normalize-daemon-cols 2025-11-22 05:43:48.521173+00 t \\x5b137118d506e2708097c432358bf909265b3cf3bacd662b02e2c81ba589a9e0100631c7801cffd9c57bb10a6674fb3b 1865677 +20251107034459 api keys 2025-11-22 05:43:48.523447+00 t \\x3133ec043c0c6e25b6e55f7da84cae52b2a72488116938a2c669c8512c2efe72a74029912bcba1f2a2a0a8b59ef01dde 8372329 +20251107222650 oidc-auth 2025-11-22 05:43:48.532248+00 t \\xd349750e0298718cbcd98eaff6e152b3fb45c3d9d62d06eedeb26c75452e9ce1af65c3e52c9f2de4bd532939c2f31096 28884420 +20251110181948 orgs-billing 2025-11-22 05:43:48.561771+00 t \\x5bbea7a2dfc9d00213bd66b473289ddd66694eff8a4f3eaab937c985b64c5f8c3ad2d64e960afbb03f335ac6766687aa 11642928 +20251113223656 group-enhancements 2025-11-22 05:43:48.573825+00 t 
\\xbe0699486d85df2bd3edc1f0bf4f1f096d5b6c5070361702c4d203ec2bb640811be88bb1979cfe51b40805ad84d1de65 1138748 +20251117032720 daemon-mode 2025-11-22 05:43:48.575551+00 t \\xdd0d899c24b73d70e9970e54b2c748d6b6b55c856ca0f8590fe990da49cc46c700b1ce13f57ff65abd6711f4bd8a6481 1219840 +20251118143058 set-default-plan 2025-11-22 05:43:48.577211+00 t \\xd19142607aef84aac7cfb97d60d29bda764d26f513f2c72306734c03cec2651d23eee3ce6cacfd36ca52dbddc462f917 1264734 +20251118225043 save-topology 2025-11-22 05:43:48.578761+00 t \\x011a594740c69d8d0f8b0149d49d1b53cfbf948b7866ebd84403394139cb66a44277803462846b06e762577adc3e61a3 9338796 \. @@ -374,7 +411,7 @@ COPY public._sqlx_migrations (version, description, installed_on, success, check -- COPY public.api_keys (id, key, network_id, name, created_at, updated_at, last_used, expires_at, is_enabled) FROM stdin; -d0e5e6b5-e66b-48bd-8c0c-26b5247ac930 0e065bb45698437d8f85d3c11cde6626 f94407b3-bad9-4338-bbfc-7ad5cb0c039a Integrated Daemon API Key 2025-11-19 23:32:57.127527+00 2025-11-19 23:34:04.51201+00 2025-11-19 23:34:04.511636+00 \N t +a5f847c2-c702-4407-87ef-b6518527fffe ca1f36a8fe5c4958b608375891f41661 169c1636-c0f2-4883-950c-6d3a40053110 Integrated Daemon API Key 2025-11-22 05:43:51.674319+00 2025-11-22 05:44:45.056692+00 2025-11-22 05:44:45.055948+00 \N t \. 
@@ -383,7 +420,7 @@ d0e5e6b5-e66b-48bd-8c0c-26b5247ac930 0e065bb45698437d8f85d3c11cde6626 f94407b3-b -- COPY public.daemons (id, network_id, host_id, ip, port, created_at, last_seen, capabilities, updated_at, mode) FROM stdin; -62c6aac2-3f4c-41e7-a741-d0be2d8c0db0 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 7196b058-3317-4da1-a13e-09e60d5cc77c "172.25.0.4" 60073 2025-11-19 23:32:57.179357+00 2025-11-19 23:32:57.179355+00 {"has_docker_socket": false, "interfaced_subnet_ids": ["f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933"]} 2025-11-19 23:32:57.197292+00 "Push" +1dd07be9-cbe2-446c-877d-6df902e372fb 169c1636-c0f2-4883-950c-6d3a40053110 d69fac63-ee47-40a2-a09a-773d45109cc4 "172.25.0.4" 60073 2025-11-22 05:43:51.726152+00 2025-11-22 05:43:51.726151+00 {"has_docker_socket": false, "interfaced_subnet_ids": ["a54ff32d-28fe-49c8-b699-225be914a185"]} 2025-11-22 05:43:51.770662+00 "Push" \. @@ -392,10 +429,10 @@ COPY public.daemons (id, network_id, host_id, ip, port, created_at, last_seen, c -- COPY public.discovery (id, network_id, daemon_id, run_type, discovery_type, name, created_at, updated_at) FROM stdin; -d8d9b387-25f8-4d1f-8b7e-406ffefbbc83 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 62c6aac2-3f4c-41e7-a741-d0be2d8c0db0 {"type": "Scheduled", "enabled": true, "last_run": null, "cron_schedule": "0 0 0 * * *"} {"type": "SelfReport", "host_id": "7196b058-3317-4da1-a13e-09e60d5cc77c"} Self Report @ 172.25.0.4 2025-11-19 23:32:57.180718+00 2025-11-19 23:32:57.180718+00 -aa979531-b8a5-4328-9eb1-1f89eabee4bf f94407b3-bad9-4338-bbfc-7ad5cb0c039a 62c6aac2-3f4c-41e7-a741-d0be2d8c0db0 {"type": "Scheduled", "enabled": true, "last_run": null, "cron_schedule": "0 0 0 * * *"} {"type": "Network", "subnet_ids": null, "host_naming_fallback": "BestService"} Network Scan @ 172.25.0.4 2025-11-19 23:32:57.187198+00 2025-11-19 23:32:57.187198+00 -dfb94381-7591-43b8-80a9-f955d78adffc f94407b3-bad9-4338-bbfc-7ad5cb0c039a 62c6aac2-3f4c-41e7-a741-d0be2d8c0db0 {"type": "Historical", "results": {"error": null, "phase": 
"Complete", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "processed": 1, "network_id": "f94407b3-bad9-4338-bbfc-7ad5cb0c039a", "session_id": "e64dc3d6-7b23-4440-9b9d-1255129bb1d4", "started_at": "2025-11-19T23:32:57.186849674Z", "finished_at": "2025-11-19T23:32:57.243744495Z", "discovery_type": {"type": "SelfReport", "host_id": "7196b058-3317-4da1-a13e-09e60d5cc77c"}, "total_to_process": 1}} {"type": "SelfReport", "host_id": "7196b058-3317-4da1-a13e-09e60d5cc77c"} Discovery Run 2025-11-19 23:32:57.186849+00 2025-11-19 23:32:57.245121+00 -12a9c028-f621-49cb-9502-ac54d1469ade f94407b3-bad9-4338-bbfc-7ad5cb0c039a 62c6aac2-3f4c-41e7-a741-d0be2d8c0db0 {"type": "Historical", "results": {"error": null, "phase": "Complete", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "processed": 11, "network_id": "f94407b3-bad9-4338-bbfc-7ad5cb0c039a", "session_id": "807e651e-0783-421d-9f47-359d5e1db5f0", "started_at": "2025-11-19T23:32:57.252659275Z", "finished_at": "2025-11-19T23:34:04.510764904Z", "discovery_type": {"type": "Network", "subnet_ids": null, "host_naming_fallback": "BestService"}, "total_to_process": 16}} {"type": "Network", "subnet_ids": null, "host_naming_fallback": "BestService"} Discovery Run 2025-11-19 23:32:57.252659+00 2025-11-19 23:34:04.511941+00 +10f87675-c882-4b8d-b8eb-8f2f3ed03351 169c1636-c0f2-4883-950c-6d3a40053110 1dd07be9-cbe2-446c-877d-6df902e372fb {"type": "Scheduled", "enabled": true, "last_run": null, "cron_schedule": "0 0 0 * * *"} {"type": "SelfReport", "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4"} Self Report @ 172.25.0.4 2025-11-22 05:43:51.727754+00 2025-11-22 05:43:51.727754+00 +8df26956-2c78-4300-9638-e5fded47dcde 169c1636-c0f2-4883-950c-6d3a40053110 1dd07be9-cbe2-446c-877d-6df902e372fb {"type": "Scheduled", "enabled": true, "last_run": null, "cron_schedule": "0 0 0 * * *"} {"type": "Network", "subnet_ids": null, "host_naming_fallback": "BestService"} Network Scan @ 172.25.0.4 2025-11-22 05:43:51.73562+00 2025-11-22 
05:43:51.73562+00 +773ad504-5482-43f5-8b52-e30b303f0bbd 169c1636-c0f2-4883-950c-6d3a40053110 1dd07be9-cbe2-446c-877d-6df902e372fb {"type": "Historical", "results": {"error": null, "phase": "Complete", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "processed": 1, "network_id": "169c1636-c0f2-4883-950c-6d3a40053110", "session_id": "9cfbdcf5-cb3d-48a5-8dbb-d639953c06d2", "started_at": "2025-11-22T05:43:51.735152452Z", "finished_at": "2025-11-22T05:43:51.784747423Z", "discovery_type": {"type": "SelfReport", "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4"}, "total_to_process": 1}} {"type": "SelfReport", "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4"} Discovery Run 2025-11-22 05:43:51.735152+00 2025-11-22 05:43:51.786976+00 +8f1b33fa-d696-4b1a-a02e-28a604b2542d 169c1636-c0f2-4883-950c-6d3a40053110 1dd07be9-cbe2-446c-877d-6df902e372fb {"type": "Historical", "results": {"error": null, "phase": "Complete", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "processed": 13, "network_id": "169c1636-c0f2-4883-950c-6d3a40053110", "session_id": "53554442-a32e-4750-9f1c-289fbb9a8d9d", "started_at": "2025-11-22T05:43:51.795758416Z", "finished_at": "2025-11-22T05:44:45.055039140Z", "discovery_type": {"type": "Network", "subnet_ids": null, "host_naming_fallback": "BestService"}, "total_to_process": 16}} {"type": "Network", "subnet_ids": null, "host_naming_fallback": "BestService"} Discovery Run 2025-11-22 05:43:51.795758+00 2025-11-22 05:44:45.056201+00 \. 
@@ -412,14 +449,13 @@ COPY public.groups (id, network_id, name, description, group_type, created_at, u -- COPY public.hosts (id, network_id, name, hostname, description, target, interfaces, services, ports, source, virtualization, created_at, updated_at, hidden) FROM stdin; -c1584db9-398b-4eaf-99f1-601481685fcf f94407b3-bad9-4338-bbfc-7ad5cb0c039a Cloudflare DNS \N \N {"type": "ServiceBinding", "config": "7f6f10ee-a546-4b63-885b-dc4ba3852ac2"} [{"id": "0da9276e-38f0-4cd2-9ec2-d829d3a47fc7", "name": "Internet", "subnet_id": "af6fe772-b27b-4aa9-bea2-9cdff1d452f4", "ip_address": "1.1.1.1", "mac_address": null}] ["beefc1ab-f7d3-475c-bacb-55c1e2497548"] [{"id": "3dbee162-0264-4bdd-870b-c55537e2fd4d", "type": "DnsUdp", "number": 53, "protocol": "Udp"}] {"type": "System"} null 2025-11-19 23:32:57.109555+00 2025-11-19 23:32:57.118236+00 f -1ed19ab2-8571-4866-8294-e878d2b72490 f94407b3-bad9-4338-bbfc-7ad5cb0c039a Google.com \N \N {"type": "ServiceBinding", "config": "e7f3eb32-56a8-4e89-8a8f-be208de56fe3"} [{"id": "f58135c7-8cd8-483d-9598-19a21aa387af", "name": "Internet", "subnet_id": "af6fe772-b27b-4aa9-bea2-9cdff1d452f4", "ip_address": "203.0.113.221", "mac_address": null}] ["0b16f609-0a4b-4c34-ba35-7361238d04c2"] [{"id": "0fdf3e3e-9427-4f9c-bc51-e72d497d8b21", "type": "Https", "number": 443, "protocol": "Tcp"}] {"type": "System"} null 2025-11-19 23:32:57.109561+00 2025-11-19 23:32:57.123063+00 f -c966e55e-1d55-45ba-9834-4913a01d97c3 f94407b3-bad9-4338-bbfc-7ad5cb0c039a Mobile Device \N A mobile device connecting from a remote network {"type": "ServiceBinding", "config": "fe66aebf-7c08-4412-a17e-5343518e3b47"} [{"id": "58c8bff4-cff6-4fa9-9c69-c2152a8237f8", "name": "Remote Network", "subnet_id": "97a944fb-691b-4109-bcc0-c559d282133c", "ip_address": "203.0.113.8", "mac_address": null}] ["8dd44f7b-c5e9-4b5b-bda0-2ec8a24a5d51"] [{"id": "bb49dd89-3d8f-4f06-bcd1-7f58eacb5dcc", "type": "Custom", "number": 0, "protocol": "Tcp"}] {"type": "System"} null 2025-11-19 
23:32:57.109566+00 2025-11-19 23:32:57.126768+00 f -37f494eb-eec7-48f6-a466-1ddf7b965300 f94407b3-bad9-4338-bbfc-7ad5cb0c039a homeassistant-discovery.netvisor_netvisor-dev homeassistant-discovery.netvisor_netvisor-dev \N {"type": "Hostname"} [{"id": "c2c0c2b9-b6ef-4324-860e-eb4023dcd0c2", "name": null, "subnet_id": "f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933", "ip_address": "172.25.0.5", "mac_address": "32:5B:68:7A:33:86"}] ["ab847e05-4db2-43b5-bc80-c8c4b2e280bd"] [{"id": "5d713bf4-24d7-4c5e-a33b-f2825d427dd0", "type": "Custom", "number": 8123, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-19T23:33:14.173425530Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} null 2025-11-19 23:33:14.173428+00 2025-11-19 23:33:28.843367+00 f -9b3dcca7-c0b2-4b50-9bdc-5050ccf00d0d f94407b3-bad9-4338-bbfc-7ad5cb0c039a netvisor-postgres-dev-1.netvisor_netvisor-dev netvisor-postgres-dev-1.netvisor_netvisor-dev \N {"type": "Hostname"} [{"id": "1b4c4739-c0e6-4a0b-a1fd-c43cbde68610", "name": null, "subnet_id": "f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933", "ip_address": "172.25.0.6", "mac_address": "C6:67:1C:A0:F1:40"}] ["2f06a2aa-5ff7-4724-9c12-afe70799ddc4"] [{"id": "6464217a-c686-402d-9a7d-782e5373e1e5", "type": "PostgreSQL", "number": 5432, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-19T23:33:28.964846546Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} null 2025-11-19 23:33:28.964848+00 2025-11-19 23:33:43.605204+00 f -452b8825-df24-4cff-a34a-9a665be361da f94407b3-bad9-4338-bbfc-7ad5cb0c039a netvisor-server-1.netvisor_netvisor-dev netvisor-server-1.netvisor_netvisor-dev \N {"type": "Hostname"} [{"id": "20473b35-d764-4bf7-b04d-f5c150cf0809", "name": null, "subnet_id": "f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933", "ip_address": "172.25.0.3", "mac_address": "3E:1C:F4:AD:D6:76"}] 
["be06478c-8e68-486e-b3d9-4556f302cbf2"] [{"id": "7dc1169e-8843-4967-8adb-0acfb2097542", "type": "Custom", "number": 60072, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-19T23:32:59.483076353Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} null 2025-11-19 23:32:59.483079+00 2025-11-19 23:33:28.836141+00 f -7196b058-3317-4da1-a13e-09e60d5cc77c f94407b3-bad9-4338-bbfc-7ad5cb0c039a 172.25.0.4 51afe52ff83c NetVisor daemon {"type": "None"} [{"id": "d64dd9a2-5754-4155-8a4d-b8e92d2b05ea", "name": "eth0", "subnet_id": "f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933", "ip_address": "172.25.0.4", "mac_address": "AA:D9:C9:5D:EA:7C"}] ["0a899c93-e5bb-4946-a7d3-2f6515fa6a84", "f9349ce3-029a-4bb6-80be-11a655edebd9"] [{"id": "6f00174f-9d7b-425b-a485-97ef386b13e4", "type": "Custom", "number": 60073, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-19T23:33:28.828039811Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}, {"date": "2025-11-19T23:32:57.198827308Z", "type": "SelfReport", "host_id": "7196b058-3317-4da1-a13e-09e60d5cc77c", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0"}]} null 2025-11-19 23:32:57.134954+00 2025-11-19 23:33:28.964742+00 f -f42af7f7-70df-4d6b-bffa-50036f9ca2f6 f94407b3-bad9-4338-bbfc-7ad5cb0c039a runnervmg1sw1 runnervmg1sw1 \N {"type": "Hostname"} [{"id": "b88b0983-8764-4cba-8f05-7beca2a6c8f3", "name": null, "subnet_id": "f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933", "ip_address": "172.25.0.1", "mac_address": "5E:BC:E8:72:DC:DF"}] ["51dd2367-719d-487f-b8f4-13e080201e06", "36e28abf-480f-4bef-979e-f0f4787a3ea7"] [{"id": "baec9eb9-9de4-42f3-afcd-90394a238f66", "type": "Custom", "number": 60072, "protocol": "Tcp"}, {"id": "175145cf-b572-48e2-bb7f-dbb6794a41e7", "type": "Custom", "number": 8123, "protocol": "Tcp"}, {"id": 
"18a94eec-8ffb-48ce-b1df-91823c9416b5", "type": "Ssh", "number": 22, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-19T23:33:49.758874400Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} null 2025-11-19 23:33:49.758877+00 2025-11-19 23:34:04.508326+00 f +cc22b055-17cc-4b32-b485-0297aefbe854 169c1636-c0f2-4883-950c-6d3a40053110 Cloudflare DNS \N \N {"type": "ServiceBinding", "config": "637ed9c5-6538-4b29-8c02-17c20c6c4eea"} [{"id": "9cbf8a2a-3789-4af3-896b-3796556ad10f", "name": "Internet", "subnet_id": "eab7b1b7-69ac-41c3-bf72-2a3dfcd899a5", "ip_address": "1.1.1.1", "mac_address": null}] {9819289a-fb18-4d18-bd8a-14f5e5c41223} [{"id": "8884a744-70f2-49a1-bf64-f82a5144469e", "type": "DnsUdp", "number": 53, "protocol": "Udp"}] {"type": "System"} null 2025-11-22 05:43:51.654082+00 2025-11-22 05:43:51.66396+00 f +fb413bb0-3f0b-4995-8021-fb71331c650d 169c1636-c0f2-4883-950c-6d3a40053110 Google.com \N \N {"type": "ServiceBinding", "config": "84a39a44-f283-47a0-b197-14bc9c4e0b21"} [{"id": "abf254c9-2111-4718-b14a-1ff5060d518c", "name": "Internet", "subnet_id": "eab7b1b7-69ac-41c3-bf72-2a3dfcd899a5", "ip_address": "203.0.113.66", "mac_address": null}] {a15873fd-ab98-4f5f-8714-741b74d8edf1} [{"id": "dbc87c22-f23f-43ce-a43a-db8acd111c68", "type": "Https", "number": 443, "protocol": "Tcp"}] {"type": "System"} null 2025-11-22 05:43:51.654089+00 2025-11-22 05:43:51.669393+00 f +80578e78-ccd8-460a-a681-71e9fdfbd729 169c1636-c0f2-4883-950c-6d3a40053110 Mobile Device \N A mobile device connecting from a remote network {"type": "ServiceBinding", "config": "61700bf0-a593-4d73-90f8-ef795c9f9b0b"} [{"id": "aa9e7988-50b4-445a-85b8-4ebb21e912d8", "name": "Remote Network", "subnet_id": "7fd63fdb-9b7e-4711-9fa7-7acfbcc1453f", "ip_address": "203.0.113.20", "mac_address": null}] {41cc388e-6eac-45b5-a492-360fcca36c53} [{"id": "12388c3a-670f-4385-ab63-6e2d6c48d968", "type": 
"Custom", "number": 0, "protocol": "Tcp"}] {"type": "System"} null 2025-11-22 05:43:51.654094+00 2025-11-22 05:43:51.67345+00 f +d38f346e-d180-423e-aa51-08d1ecf79c6c 169c1636-c0f2-4883-950c-6d3a40053110 netvisor-postgres-dev-1.netvisor_netvisor-dev netvisor-postgres-dev-1.netvisor_netvisor-dev \N {"type": "Hostname"} [{"id": "459ef547-3027-4b2c-9fd0-4bf5aba88ec2", "name": null, "subnet_id": "a54ff32d-28fe-49c8-b699-225be914a185", "ip_address": "172.25.0.6", "mac_address": "1E:99:71:B6:32:8E"}] {9684901c-4ccf-4ea8-9617-59e6ceccba40} [{"id": "67c96777-50f7-4444-9e9b-e2a399d0bc51", "type": "PostgreSQL", "number": 5432, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-22T05:44:09.072864461Z", "type": "Network", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "subnet_ids": null, "host_naming_fallback": "BestService"}]} null 2025-11-22 05:44:09.072867+00 2025-11-22 05:44:24.066996+00 f +d69fac63-ee47-40a2-a09a-773d45109cc4 169c1636-c0f2-4883-950c-6d3a40053110 172.25.0.4 9c153bc0e8ec NetVisor daemon {"type": "None"} [{"id": "4acc6696-ac08-46d4-81e8-d8e57df434f1", "name": "eth0", "subnet_id": "a54ff32d-28fe-49c8-b699-225be914a185", "ip_address": "172.25.0.4", "mac_address": "E6:6B:9A:05:11:B8"}] {9a18439a-8621-4039-ad8a-4795e29ee17f} [{"id": "60af8fbd-f774-4aba-ac11-52ec112c1fa8", "type": "Custom", "number": 60073, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-22T05:43:51.772709379Z", "type": "SelfReport", "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb"}]} null 2025-11-22 05:43:51.681919+00 2025-11-22 05:43:51.782439+00 f +581da0e8-37c8-41a8-8e18-065fe49d4e5c 169c1636-c0f2-4883-950c-6d3a40053110 netvisor-server-1.netvisor_netvisor-dev netvisor-server-1.netvisor_netvisor-dev \N {"type": "Hostname"} [{"id": "d0928414-0bfe-42b7-9986-8739466663a6", "name": null, "subnet_id": "a54ff32d-28fe-49c8-b699-225be914a185", "ip_address": "172.25.0.3", "mac_address": 
"A2:F2:C9:5A:E2:5A"}] {38c64825-56d2-4de4-a88b-4c0e9f0afb6a} [{"id": "25675680-50a4-4671-9527-0465f3ec0a5a", "type": "Custom", "number": 60072, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-22T05:43:54.028085368Z", "type": "Network", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "subnet_ids": null, "host_naming_fallback": "BestService"}]} null 2025-11-22 05:43:54.028088+00 2025-11-22 05:44:08.96515+00 f +71a8ccad-cec6-4772-87ec-ed2a3c4a696c 169c1636-c0f2-4883-950c-6d3a40053110 runnervmg1sw1 runnervmg1sw1 \N {"type": "Hostname"} [{"id": "3b8d6a32-3795-4966-b370-d5142fe1d905", "name": null, "subnet_id": "a54ff32d-28fe-49c8-b699-225be914a185", "ip_address": "172.25.0.1", "mac_address": "02:61:5D:F5:91:4C"}] {afbd2a53-af6c-43ec-be62-1d0dba17baad,3149002a-62d1-4990-81f3-db22dc820d8a} [{"id": "3b0c68d3-e04e-43bb-8769-7aac77cfbfc4", "type": "Custom", "number": 8123, "protocol": "Tcp"}, {"id": "796485cb-db71-40ec-972d-3de1fd1383a9", "type": "Custom", "number": 60072, "protocol": "Tcp"}, {"id": "8409d0b4-4a4e-4247-90b9-2183a9cebebc", "type": "Ssh", "number": 22, "protocol": "Tcp"}] {"type": "Discovery", "metadata": [{"date": "2025-11-22T05:44:30.205810658Z", "type": "Network", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "subnet_ids": null, "host_naming_fallback": "BestService"}]} null 2025-11-22 05:44:30.205813+00 2025-11-22 05:44:45.0524+00 f \. @@ -428,7 +464,7 @@ f42af7f7-70df-4d6b-bffa-50036f9ca2f6 f94407b3-bad9-4338-bbfc-7ad5cb0c039a runner -- COPY public.networks (id, name, created_at, updated_at, is_default, organization_id) FROM stdin; -f94407b3-bad9-4338-bbfc-7ad5cb0c039a My Network 2025-11-19 23:32:57.108153+00 2025-11-19 23:32:57.108153+00 f 3ad46102-4f4d-416b-a29b-02929af141f9 +169c1636-c0f2-4883-950c-6d3a40053110 My Network 2025-11-22 05:43:51.650161+00 2025-11-22 05:43:51.650161+00 f 8148c683-bf5c-49be-83f3-63511dfa27aa \. 
@@ -437,7 +473,7 @@ f94407b3-bad9-4338-bbfc-7ad5cb0c039a My Network 2025-11-19 23:32:57.108153+00 20 -- COPY public.organizations (id, name, stripe_customer_id, plan, plan_status, created_at, updated_at, is_onboarded) FROM stdin; -3ad46102-4f4d-416b-a29b-02929af141f9 My Organization \N {"type": "Community", "price": {"rate": "Month", "cents": 0}, "trial_days": 0} null 2025-11-19 23:32:54.45019+00 2025-11-19 23:32:57.106667+00 t +8148c683-bf5c-49be-83f3-63511dfa27aa My Organization \N {"type": "Community", "price": {"rate": "Month", "cents": 0}, "trial_days": 0} \N 2025-11-22 05:43:48.645519+00 2025-11-22 05:43:51.648848+00 t \. @@ -446,15 +482,14 @@ COPY public.organizations (id, name, stripe_customer_id, plan, plan_status, crea -- COPY public.services (id, network_id, created_at, updated_at, name, host_id, bindings, service_definition, virtualization, source) FROM stdin; -beefc1ab-f7d3-475c-bacb-55c1e2497548 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:32:57.109557+00 2025-11-19 23:32:57.109557+00 Cloudflare DNS c1584db9-398b-4eaf-99f1-601481685fcf [{"id": "7f6f10ee-a546-4b63-885b-dc4ba3852ac2", "type": "Port", "port_id": "3dbee162-0264-4bdd-870b-c55537e2fd4d", "interface_id": "0da9276e-38f0-4cd2-9ec2-d829d3a47fc7"}] "Dns Server" null {"type": "System"} -0b16f609-0a4b-4c34-ba35-7361238d04c2 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:32:57.109562+00 2025-11-19 23:32:57.109562+00 Google.com 1ed19ab2-8571-4866-8294-e878d2b72490 [{"id": "e7f3eb32-56a8-4e89-8a8f-be208de56fe3", "type": "Port", "port_id": "0fdf3e3e-9427-4f9c-bc51-e72d497d8b21", "interface_id": "f58135c7-8cd8-483d-9598-19a21aa387af"}] "Web Service" null {"type": "System"} -8dd44f7b-c5e9-4b5b-bda0-2ec8a24a5d51 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:32:57.109568+00 2025-11-19 23:32:57.109568+00 Mobile Device c966e55e-1d55-45ba-9834-4913a01d97c3 [{"id": "fe66aebf-7c08-4412-a17e-5343518e3b47", "type": "Port", "port_id": "bb49dd89-3d8f-4f06-bcd1-7f58eacb5dcc", "interface_id": 
"58c8bff4-cff6-4fa9-9c69-c2152a8237f8"}] "Client" null {"type": "System"} -be06478c-8e68-486e-b3d9-4556f302cbf2 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:33:03.967735+00 2025-11-19 23:33:03.967735+00 NetVisor Server API 452b8825-df24-4cff-a34a-9a665be361da [{"id": "a8855bb6-c323-4257-aec8-98f0ac1d624c", "type": "Port", "port_id": "7dc1169e-8843-4967-8adb-0acfb2097542", "interface_id": "20473b35-d764-4bf7-b04d-f5c150cf0809"}] "NetVisor Server API" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "Response for 172.25.0.3:60072/api/health contained \\"netvisor\\" in body", "type": "reason"}, "confidence": "High"}, "metadata": [{"date": "2025-11-19T23:33:03.967725548Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} -ab847e05-4db2-43b5-bc80-c8c4b2e280bd f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:33:28.82684+00 2025-11-19 23:33:28.82684+00 Home Assistant 37f494eb-eec7-48f6-a466-1ddf7b965300 [{"id": "2ccdbf0a-afc0-4b9d-a003-72073427374e", "type": "Port", "port_id": "5d713bf4-24d7-4c5e-a33b-f2825d427dd0", "interface_id": "c2c0c2b9-b6ef-4324-860e-eb4023dcd0c2"}] "Home Assistant" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "Response for 172.25.0.5:8123/ contained \\"home assistant\\" in body", "type": "reason"}, "confidence": "High"}, "metadata": [{"date": "2025-11-19T23:33:28.826830967Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} -0a899c93-e5bb-4946-a7d3-2f6515fa6a84 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:32:57.198842+00 2025-11-19 23:33:28.963604+00 NetVisor Daemon API 7196b058-3317-4da1-a13e-09e60d5cc77c [{"id": "41807ed9-b8c3-4913-b87f-ae278b41796a", "type": "Port", "port_id": "6f00174f-9d7b-425b-a485-97ef386b13e4", "interface_id": "d64dd9a2-5754-4155-8a4d-b8e92d2b05ea"}] "NetVisor Daemon API" null {"type": 
"DiscoveryWithMatch", "details": {"reason": {"data": "NetVisor Daemon self-report", "type": "reason"}, "confidence": "Certain"}, "metadata": [{"date": "2025-11-19T23:33:28.828505906Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}, {"date": "2025-11-19T23:32:57.198841074Z", "type": "SelfReport", "host_id": "7196b058-3317-4da1-a13e-09e60d5cc77c", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0"}]} -2f06a2aa-5ff7-4724-9c12-afe70799ddc4 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:33:43.596871+00 2025-11-19 23:33:43.596871+00 PostgreSQL 9b3dcca7-c0b2-4b50-9bdc-5050ccf00d0d [{"id": "60d12f22-c4fc-4a87-8bf1-d182ea0d0c75", "type": "Port", "port_id": "6464217a-c686-402d-9a7d-782e5373e1e5", "interface_id": "1b4c4739-c0e6-4a0b-a1fd-c43cbde68610"}] "PostgreSQL" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": ["Generic service", [{"data": "Port 5432/tcp is open but is used in other service match patterns", "type": "reason"}]], "type": "container"}, "confidence": "NotApplicable"}, "metadata": [{"date": "2025-11-19T23:33:43.596861453Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} -36e28abf-480f-4bef-979e-f0f4787a3ea7 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:34:04.500524+00 2025-11-19 23:34:04.500524+00 Home Assistant f42af7f7-70df-4d6b-bffa-50036f9ca2f6 [{"id": "aea0def9-a8cd-4cdd-a76e-a7c16958ed4b", "type": "Port", "port_id": "175145cf-b572-48e2-bb7f-dbb6794a41e7", "interface_id": "b88b0983-8764-4cba-8f05-7beca2a6c8f3"}] "Home Assistant" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "Response for 172.25.0.1:8123/ contained \\"home assistant\\" in body", "type": "reason"}, "confidence": "High"}, "metadata": [{"date": "2025-11-19T23:34:04.500514477Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, 
"host_naming_fallback": "BestService"}]} -51dd2367-719d-487f-b8f4-13e080201e06 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:33:54.154381+00 2025-11-19 23:33:54.154381+00 NetVisor Server API f42af7f7-70df-4d6b-bffa-50036f9ca2f6 [{"id": "a04f4c13-f58b-4052-9b23-ca717a445a70", "type": "Port", "port_id": "baec9eb9-9de4-42f3-afcd-90394a238f66", "interface_id": "b88b0983-8764-4cba-8f05-7beca2a6c8f3"}] "NetVisor Server API" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "Response for 172.25.0.1:60072/api/health contained \\"netvisor\\" in body", "type": "reason"}, "confidence": "High"}, "metadata": [{"date": "2025-11-19T23:33:54.154371621Z", "type": "Network", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0", "subnet_ids": null, "host_naming_fallback": "BestService"}]} +9819289a-fb18-4d18-bd8a-14f5e5c41223 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:43:51.654084+00 2025-11-22 05:43:51.654084+00 Cloudflare DNS cc22b055-17cc-4b32-b485-0297aefbe854 [{"id": "637ed9c5-6538-4b29-8c02-17c20c6c4eea", "type": "Port", "port_id": "8884a744-70f2-49a1-bf64-f82a5144469e", "interface_id": "9cbf8a2a-3789-4af3-896b-3796556ad10f"}] "Dns Server" null {"type": "System"} +a15873fd-ab98-4f5f-8714-741b74d8edf1 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:43:51.65409+00 2025-11-22 05:43:51.65409+00 Google.com fb413bb0-3f0b-4995-8021-fb71331c650d [{"id": "84a39a44-f283-47a0-b197-14bc9c4e0b21", "type": "Port", "port_id": "dbc87c22-f23f-43ce-a43a-db8acd111c68", "interface_id": "abf254c9-2111-4718-b14a-1ff5060d518c"}] "Web Service" null {"type": "System"} +41cc388e-6eac-45b5-a492-360fcca36c53 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:43:51.654095+00 2025-11-22 05:43:51.654095+00 Mobile Device 80578e78-ccd8-460a-a681-71e9fdfbd729 [{"id": "61700bf0-a593-4d73-90f8-ef795c9f9b0b", "type": "Port", "port_id": "12388c3a-670f-4385-ab63-6e2d6c48d968", "interface_id": "aa9e7988-50b4-445a-85b8-4ebb21e912d8"}] "Client" null {"type": "System"} 
+9a18439a-8621-4039-ad8a-4795e29ee17f 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:43:51.772735+00 2025-11-22 05:43:51.772735+00 NetVisor Daemon API d69fac63-ee47-40a2-a09a-773d45109cc4 [{"id": "151fb392-edd0-4c0b-9a00-a25e9f92d6bc", "type": "Port", "port_id": "60af8fbd-f774-4aba-ac11-52ec112c1fa8", "interface_id": "4acc6696-ac08-46d4-81e8-d8e57df434f1"}] "NetVisor Daemon API" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "NetVisor Daemon self-report", "type": "reason"}, "confidence": "Certain"}, "metadata": [{"date": "2025-11-22T05:43:51.772734477Z", "type": "SelfReport", "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb"}]} +38c64825-56d2-4de4-a88b-4c0e9f0afb6a 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:44:08.945444+00 2025-11-22 05:44:08.945444+00 NetVisor Server API 581da0e8-37c8-41a8-8e18-065fe49d4e5c [{"id": "562962ec-dc4e-47f6-9ba7-8b84f14190dc", "type": "Port", "port_id": "25675680-50a4-4671-9527-0465f3ec0a5a", "interface_id": "d0928414-0bfe-42b7-9986-8739466663a6"}] "NetVisor Server API" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "Response for 172.25.0.3:60072/api/health contained \\"netvisor\\" in body", "type": "reason"}, "confidence": "High"}, "metadata": [{"date": "2025-11-22T05:44:08.945437540Z", "type": "Network", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "subnet_ids": null, "host_naming_fallback": "BestService"}]} +9684901c-4ccf-4ea8-9617-59e6ceccba40 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:44:24.056871+00 2025-11-22 05:44:24.056871+00 PostgreSQL d38f346e-d180-423e-aa51-08d1ecf79c6c [{"id": "1aa0f9f8-f0b7-40c6-84d4-e6580e9ec166", "type": "Port", "port_id": "67c96777-50f7-4444-9e9b-e2a399d0bc51", "interface_id": "459ef547-3027-4b2c-9fd0-4bf5aba88ec2"}] "PostgreSQL" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": ["Generic service", [{"data": "Port 5432/tcp is open but is used in other service match 
patterns", "type": "reason"}]], "type": "container"}, "confidence": "NotApplicable"}, "metadata": [{"date": "2025-11-22T05:44:24.056862405Z", "type": "Network", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "subnet_ids": null, "host_naming_fallback": "BestService"}]} +afbd2a53-af6c-43ec-be62-1d0dba17baad 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:44:33.206717+00 2025-11-22 05:44:33.206717+00 Home Assistant 71a8ccad-cec6-4772-87ec-ed2a3c4a696c [{"id": "a9832b55-53b1-4b11-90b8-698370a00a39", "type": "Port", "port_id": "3b0c68d3-e04e-43bb-8769-7aac77cfbfc4", "interface_id": "3b8d6a32-3795-4966-b370-d5142fe1d905"}] "Home Assistant" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "Response for 172.25.0.1:8123/ contained \\"home assistant\\" in body", "type": "reason"}, "confidence": "High"}, "metadata": [{"date": "2025-11-22T05:44:33.206707395Z", "type": "Network", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "subnet_ids": null, "host_naming_fallback": "BestService"}]} +3149002a-62d1-4990-81f3-db22dc820d8a 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:44:45.043523+00 2025-11-22 05:44:45.043523+00 NetVisor Server API 71a8ccad-cec6-4772-87ec-ed2a3c4a696c [{"id": "0e159ad3-d0ba-453a-b6f5-c7d2ab1b7f06", "type": "Port", "port_id": "796485cb-db71-40ec-972d-3de1fd1383a9", "interface_id": "3b8d6a32-3795-4966-b370-d5142fe1d905"}] "NetVisor Server API" null {"type": "DiscoveryWithMatch", "details": {"reason": {"data": "Response for 172.25.0.1:60072/api/health contained \\"netvisor\\" in body", "type": "reason"}, "confidence": "High"}, "metadata": [{"date": "2025-11-22T05:44:45.043513742Z", "type": "Network", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb", "subnet_ids": null, "host_naming_fallback": "BestService"}]} \. 
@@ -463,9 +498,18 @@ ab847e05-4db2-43b5-bc80-c8c4b2e280bd f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-1 -- COPY public.subnets (id, network_id, created_at, updated_at, cidr, name, description, subnet_type, source) FROM stdin; -af6fe772-b27b-4aa9-bea2-9cdff1d452f4 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:32:57.109487+00 2025-11-19 23:32:57.109487+00 "0.0.0.0/0" Internet This subnet uses the 0.0.0.0/0 CIDR as an organizational container for services running on the internet (e.g., public DNS servers, cloud services, etc.). "Internet" {"type": "System"} -97a944fb-691b-4109-bcc0-c559d282133c f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:32:57.109492+00 2025-11-19 23:32:57.109492+00 "0.0.0.0/0" Remote Network This subnet uses the 0.0.0.0/0 CIDR as an organizational container for hosts on remote networks (e.g., mobile connections, friend's networks, public WiFi, etc.). "Remote" {"type": "System"} -f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-11-19 23:32:57.187036+00 2025-11-19 23:32:57.187036+00 "172.25.0.0/28" 172.25.0.0/28 \N "Lan" {"type": "Discovery", "metadata": [{"date": "2025-11-19T23:32:57.187034588Z", "type": "SelfReport", "host_id": "7196b058-3317-4da1-a13e-09e60d5cc77c", "daemon_id": "62c6aac2-3f4c-41e7-a741-d0be2d8c0db0"}]} +eab7b1b7-69ac-41c3-bf72-2a3dfcd899a5 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:43:51.654028+00 2025-11-22 05:43:51.654028+00 "0.0.0.0/0" Internet This subnet uses the 0.0.0.0/0 CIDR as an organizational container for services running on the internet (e.g., public DNS servers, cloud services, etc.). "Internet" {"type": "System"} +7fd63fdb-9b7e-4711-9fa7-7acfbcc1453f 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:43:51.654032+00 2025-11-22 05:43:51.654032+00 "0.0.0.0/0" Remote Network This subnet uses the 0.0.0.0/0 CIDR as an organizational container for hosts on remote networks (e.g., mobile connections, friend's networks, public WiFi, etc.). 
"Remote" {"type": "System"} +a54ff32d-28fe-49c8-b699-225be914a185 169c1636-c0f2-4883-950c-6d3a40053110 2025-11-22 05:43:51.735331+00 2025-11-22 05:43:51.735331+00 "172.25.0.0/28" 172.25.0.0/28 \N "Lan" {"type": "Discovery", "metadata": [{"date": "2025-11-22T05:43:51.735330925Z", "type": "SelfReport", "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb"}]} +\. + + +-- +-- Data for Name: topologies; Type: TABLE DATA; Schema: public; Owner: postgres +-- + +COPY public.topologies (id, network_id, name, edges, nodes, options, hosts, subnets, services, groups, is_stale, last_refreshed, is_locked, locked_at, locked_by, removed_hosts, removed_services, removed_subnets, removed_groups, parent_id, created_at, updated_at) FROM stdin; +37e49910-6ad8-4590-9989-5f19882aabd0 169c1636-c0f2-4883-950c-6d3a40053110 My Topology [] [] {"local": {"no_fade_edges": false, "hide_edge_types": [], "left_zone_title": "Infrastructure", "hide_resize_handles": false}, "request": {"hide_ports": false, "hide_service_categories": [], "show_gateway_in_left_zone": true, "group_docker_bridges_by_host": false, "left_zone_service_categories": ["DNS", "ReverseProxy"], "hide_vm_title_on_docker_container": false}} [] [{"id": "eab7b1b7-69ac-41c3-bf72-2a3dfcd899a5", "cidr": "0.0.0.0/0", "name": "Internet", "source": {"type": "System"}, "created_at": "2025-11-22T05:43:51.654028Z", "network_id": "169c1636-c0f2-4883-950c-6d3a40053110", "updated_at": "2025-11-22T05:43:51.654028Z", "description": "This subnet uses the 0.0.0.0/0 CIDR as an organizational container for services running on the internet (e.g., public DNS servers, cloud services, etc.).", "subnet_type": "Internet"}, {"id": "7fd63fdb-9b7e-4711-9fa7-7acfbcc1453f", "cidr": "0.0.0.0/0", "name": "Remote Network", "source": {"type": "System"}, "created_at": "2025-11-22T05:43:51.654032Z", "network_id": "169c1636-c0f2-4883-950c-6d3a40053110", "updated_at": "2025-11-22T05:43:51.654032Z", "description": "This 
subnet uses the 0.0.0.0/0 CIDR as an organizational container for hosts on remote networks (e.g., mobile connections, friend's networks, public WiFi, etc.).", "subnet_type": "Remote"}, {"id": "a54ff32d-28fe-49c8-b699-225be914a185", "cidr": "172.25.0.0/28", "name": "172.25.0.0/28", "source": {"type": "Discovery", "metadata": [{"date": "2025-11-22T05:43:51.735330925Z", "type": "SelfReport", "host_id": "d69fac63-ee47-40a2-a09a-773d45109cc4", "daemon_id": "1dd07be9-cbe2-446c-877d-6df902e372fb"}]}, "created_at": "2025-11-22T05:43:51.735331Z", "network_id": "169c1636-c0f2-4883-950c-6d3a40053110", "updated_at": "2025-11-22T05:43:51.735331Z", "description": null, "subnet_type": "Lan"}] [] [] t 2025-11-22 05:43:51.651516+00 f \N \N {} {} {} {} \N 2025-11-22 05:43:51.651517+00 2025-11-22 05:44:24.245248+00 \. @@ -474,7 +518,7 @@ f9f59dd7-2e67-4e82-aa8b-3d9d5d2e5933 f94407b3-bad9-4338-bbfc-7ad5cb0c039a 2025-1 -- COPY public.users (id, created_at, updated_at, password_hash, oidc_provider, oidc_subject, oidc_linked_at, email, organization_id, permissions) FROM stdin; -239a134c-0972-4dfc-a10f-3444b123274d 2025-11-19 23:32:54.452081+00 2025-11-19 23:32:57.09543+00 $argon2id$v=19$m=19456,t=2,p=1$dshwmj4/NJa9HtNMU5+97g$DvuKqyxJjID2O6Va7x75k1/Zm2j2sT89f3mI4xYhzBM \N \N \N user@example.com 3ad46102-4f4d-416b-a29b-02929af141f9 Owner +c0dfd3a6-71a4-40be-9694-4bd8b12eeaf9 2025-11-22 05:43:48.647634+00 2025-11-22 05:43:51.636455+00 $argon2id$v=19$m=19456,t=2,p=1$ULnH/etDLj/4aaRNUn+fOg$Ajdiy/fFuyioHESksEL/8kw2qntnq7ZunASWZa/w+BE \N \N \N user@example.com 8148c683-bf5c-49be-83f3-63511dfa27aa Owner \. 
@@ -483,7 +527,7 @@ COPY public.users (id, created_at, updated_at, password_hash, oidc_provider, oid -- COPY tower_sessions.session (id, data, expiry_date) FROM stdin; -WzPPyVr6bc0V9Jym1f3GSA \\x93c41048c6fdd5a69cf415cd6dfa5ac9cf335b81a7757365725f6964d92432333961313334632d303937322d346466632d613130662d33343434623132333237346499cd07e9cd0161172039ce05c6c1d3000000 2025-12-19 23:32:57.096911+00 +ZxIARZNWIDiyDqEB6JRfwg \\x93c410c25f94e801a10eb2382056934500126781a7757365725f6964d92463306466643361362d373161342d343062652d393639342d34626438623132656561663999cd07e9cd0164052b33ce260943cb000000 2025-12-22 05:43:51.638141+00 \. @@ -575,6 +619,14 @@ ALTER TABLE ONLY public.subnets ADD CONSTRAINT subnets_pkey PRIMARY KEY (id); +-- +-- Name: topologies topologies_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.topologies + ADD CONSTRAINT topologies_pkey PRIMARY KEY (id); + + -- -- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: postgres -- @@ -682,6 +734,13 @@ CREATE INDEX idx_services_network ON public.services USING btree (network_id); CREATE INDEX idx_subnets_network ON public.subnets USING btree (network_id); +-- +-- Name: idx_topologies_network; Type: INDEX; Schema: public; Owner: postgres +-- + +CREATE INDEX idx_topologies_network ON public.topologies USING btree (network_id); + + -- -- Name: idx_users_email_lower; Type: INDEX; Schema: public; Owner: postgres -- @@ -783,6 +842,14 @@ ALTER TABLE ONLY public.subnets ADD CONSTRAINT subnets_network_id_fkey FOREIGN KEY (network_id) REFERENCES public.networks(id) ON DELETE CASCADE; +-- +-- Name: topologies topologies_network_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres +-- + +ALTER TABLE ONLY public.topologies + ADD CONSTRAINT topologies_network_id_fkey FOREIGN KEY (network_id) REFERENCES public.networks(id) ON DELETE CASCADE; + + -- -- Name: users users_organization_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: postgres -- @@ -795,5 +862,5 @@ ALTER 
TABLE ONLY public.users -- PostgreSQL database dump complete -- -\unrestrict lAZKgSAeM5hq62C1gqi2XwedG103NUJLKDgNJYiEQyRVHgLbbzWsShfqwVIjEzP +\unrestrict iT1L5Xfu7uX8R7DBaYH8wyZJM8XMSgKaqFOpXGfCFJRoXlLxXBHbY4EydMBB7jt diff --git a/ui/src/app.css b/ui/src/app.css index f6996931..c1a93608 100644 --- a/ui/src/app.css +++ b/ui/src/app.css @@ -58,6 +58,26 @@ @apply disabled:cursor-not-allowed disabled:opacity-50 disabled:hover:border-green-800/50 disabled:hover:bg-green-950/40 disabled:hover:text-green-400; } + /* Warning action button - mirrors btn-danger pattern for yellow/warning states */ + .btn-warning { + @apply bg-yellow-950/40 hover:bg-yellow-950/60; + @apply border border-yellow-800/50 hover:border-yellow-700; + @apply rounded-md px-4 py-2 font-medium text-yellow-400 hover:text-yellow-300; + @apply transition-colors duration-150; + @apply inline-flex items-center justify-center gap-2; + @apply disabled:cursor-not-allowed disabled:opacity-50 disabled:hover:border-yellow-800/50 disabled:hover:bg-yellow-950/40 disabled:hover:text-yellow-400; + } + + /* Info action button - mirrors btn-primary pattern for blue/info states */ + .btn-info { + @apply bg-blue-950/40 hover:bg-blue-950/60; + @apply border border-blue-800/50 hover:border-blue-700; + @apply rounded-md px-4 py-2 font-medium text-blue-400 hover:text-blue-300; + @apply transition-colors duration-150; + @apply inline-flex items-center justify-center gap-2; + @apply disabled:cursor-not-allowed disabled:opacity-50 disabled:hover:border-blue-800/50 disabled:hover:bg-blue-950/40 disabled:hover:text-blue-400; + } + /* Icon buttons - minimal style for icon-only actions */ .btn-icon { @apply rounded p-2 text-gray-500 hover:bg-gray-800 hover:text-gray-200; @@ -84,7 +104,7 @@ /* Icon button - primary variant (for emphasized icon actions) */ .btn-icon-primary { - @apply rounded p-2 text-gray-500 hover:bg-purple-950/30 hover:text-purple-400; + @apply rounded p-2 text-gray-500 hover:bg-blue-950/30 hover:text-blue-400; 
@apply transition-colors duration-150; @apply inline-flex items-center justify-center; @apply disabled:cursor-not-allowed disabled:opacity-40 disabled:hover:bg-transparent disabled:hover:text-gray-500; diff --git a/ui/src/lib/features/api_keys/components/ApiKeyCard.svelte b/ui/src/lib/features/api_keys/components/ApiKeyCard.svelte index 99b126f4..8fc5404a 100644 --- a/ui/src/lib/features/api_keys/components/ApiKeyCard.svelte +++ b/ui/src/lib/features/api_keys/components/ApiKeyCard.svelte @@ -10,6 +10,8 @@ export let onDelete: (apiKey: ApiKey) => void = () => {}; export let onEdit: (apiKey: ApiKey) => void = () => {}; export let viewMode: 'card' | 'list'; + export let selected: boolean; + export let onSelectionChange: (selected: boolean) => void = () => {}; // Build card data $: cardData = { @@ -44,19 +46,19 @@ ], actions: [ { - label: 'Delete Api Key', + label: 'Delete', icon: Trash2, class: 'btn-icon-danger', onClick: () => onDelete(apiKey) }, { - label: 'Edit Api Key', + label: 'Edit', icon: Edit, - class: 'btn-icon-danger', + class: 'btn-icon', onClick: () => onEdit(apiKey) } ] }; - + diff --git a/ui/src/lib/features/api_keys/components/ApiKeyTab.svelte b/ui/src/lib/features/api_keys/components/ApiKeyTab.svelte index d0408959..7b564a8c 100644 --- a/ui/src/lib/features/api_keys/components/ApiKeyTab.svelte +++ b/ui/src/lib/features/api_keys/components/ApiKeyTab.svelte @@ -9,8 +9,9 @@ import DataControls from '$lib/shared/components/data/DataControls.svelte'; import CreateApiKeyModal from './ApiKeyModal.svelte'; import type { ApiKey } from '../types/base'; - import { apiKeys, deleteApiKey, getApiKeys, updateApiKey } from '../store'; + import { apiKeys, bulkDeleteApiKeys, deleteApiKey, getApiKeys, updateApiKey } from '../store'; import ApiKeyCard from './ApiKeyCard.svelte'; + import { Plus } from 'lucide-svelte'; const loading = loadData([getApiKeys, getDaemons]); @@ -44,6 +45,12 @@ editingApiKey = apiKey; } + async function handleBulkDelete(ids: string[]) { + if 
(confirm(`Are you sure you want to delete ${ids.length} Api Keys?`)) { + await bulkDeleteApiKeys(ids); + } + } + const apiKeyFields: FieldConfig[] = [ { key: 'name', @@ -69,17 +76,13 @@
- - + + + + + {#if $loading} @@ -92,11 +95,24 @@ cta="Create your first API Key" /> {:else} - - {#snippet children(item: ApiKey, viewMode: 'card' | 'list')} + item.id} + > + {#snippet children( + item: ApiKey, + viewMode: 'card' | 'list', + isSelected: boolean, + onSelectionChange: (selected: boolean) => void + )} diff --git a/ui/src/lib/features/api_keys/store.ts b/ui/src/lib/features/api_keys/store.ts index 1e5da681..8f406be5 100644 --- a/ui/src/lib/features/api_keys/store.ts +++ b/ui/src/lib/features/api_keys/store.ts @@ -2,7 +2,7 @@ import { derived, get, writable, type Readable } from 'svelte/store'; import { api } from '../../shared/utils/api'; import type { ApiKey } from './types/base'; import { utcTimeZoneSentinel, uuidv4Sentinel } from '$lib/shared/utils/formatting'; -import { currentNetwork } from '../networks/store'; +import { networks } from '../networks/store'; export const apiKeys = writable([]); @@ -23,6 +23,17 @@ export async function deleteApiKey(id: string) { return result; } +export async function bulkDeleteApiKeys(ids: string[]) { + const result = await api.request( + `/auth/keys/bulk-delete`, + apiKeys, + (_, current) => current.filter((k) => !ids.includes(k.id)), + { method: 'POST', body: JSON.stringify(ids) } + ); + + return result; +} + export async function updateApiKey(apiKey: ApiKey) { const result = await api.request( `/auth/keys/${apiKey.id}`, @@ -70,7 +81,7 @@ export function createEmptyApiKeyFormData(): ApiKey { updated_at: utcTimeZoneSentinel, expires_at: null, last_used: null, - network_id: get(currentNetwork).id, + network_id: get(networks)[0].id || '', key: '', is_enabled: true }; diff --git a/ui/src/lib/features/daemons/components/DaemonCard.svelte b/ui/src/lib/features/daemons/components/DaemonCard.svelte index d0fdb967..36c89e31 100644 --- a/ui/src/lib/features/daemons/components/DaemonCard.svelte +++ b/ui/src/lib/features/daemons/components/DaemonCard.svelte @@ -2,8 +2,8 @@ import GenericCard from 
'$lib/shared/components/data/GenericCard.svelte'; import type { Daemon } from '$lib/features/daemons/types/base'; import { getDaemonIsRunningDiscovery } from '$lib/features/daemons/store'; - import { sessions } from '$lib/features/discovery/SSEStore'; - import { entities } from '$lib/shared/stores/metadata'; + import { sessions } from '$lib/features/discovery/sse'; + import { concepts, entities } from '$lib/shared/stores/metadata'; import { networks } from '$lib/features/networks/store'; import { formatTimestamp } from '$lib/shared/utils/formatting'; import { getHostFromId } from '$lib/features/hosts/store'; @@ -13,6 +13,8 @@ export let daemon: Daemon; export let onDelete: (daemon: Daemon) => void = () => {}; export let viewMode: 'card' | 'list'; + export let selected: boolean; + export let onSelectionChange: (selected: boolean) => void = () => {}; $: hostStore = getHostFromId(daemon.host_id); $: host = $hostStore; @@ -52,7 +54,7 @@ ? { id: daemon.id, label: 'True', - color: entities.getColorHelper('Virtualization').string + color: concepts.getColorHelper('Virtualization').string } : { id: daemon.id, @@ -86,7 +88,7 @@ ], actions: [ { - label: 'Delete Daemon', + label: 'Delete', icon: Trash2, class: 'btn-icon-danger', onClick: () => onDelete(daemon), @@ -96,4 +98,4 @@ }; - + diff --git a/ui/src/lib/features/daemons/components/DaemonTab.svelte b/ui/src/lib/features/daemons/components/DaemonTab.svelte index 9fe731d2..1050f071 100644 --- a/ui/src/lib/features/daemons/components/DaemonTab.svelte +++ b/ui/src/lib/features/daemons/components/DaemonTab.svelte @@ -2,7 +2,12 @@ import TabHeader from '$lib/shared/components/layout/TabHeader.svelte'; import Loading from '$lib/shared/components/feedback/Loading.svelte'; import EmptyState from '$lib/shared/components/layout/EmptyState.svelte'; - import { daemons, deleteDaemon, getDaemons } from '$lib/features/daemons/store'; + import { + bulkDeleteDaemons, + daemons, + deleteDaemon, + getDaemons + } from 
'$lib/features/daemons/store'; import type { Daemon } from '$lib/features/daemons/types/base'; import { loadData } from '$lib/shared/utils/dataLoader'; import { getNetworks, networks } from '$lib/features/networks/store'; @@ -11,6 +16,7 @@ import { getHosts } from '$lib/features/hosts/store'; import type { FieldConfig } from '$lib/shared/components/data/types'; import DataControls from '$lib/shared/components/data/DataControls.svelte'; + import { Plus } from 'lucide-svelte'; const loading = loadData([getNetworks, getDaemons, getHosts]); @@ -33,6 +39,12 @@ daemon = null; } + async function handleBulkDelete(ids: string[]) { + if (confirm(`Are you sure you want to delete ${ids.length} Daemons?`)) { + await bulkDeleteDaemons(ids); + } + } + const daemonFields: FieldConfig[] = [ { key: 'name', @@ -58,16 +70,13 @@
- + + + + + {#if $loading} @@ -81,9 +90,26 @@ cta="Create your first daemon" /> {:else} - - {#snippet children(item: Daemon, viewMode: 'card' | 'list')} - + item.id} + > + {#snippet children( + item: Daemon, + viewMode: 'card' | 'list', + isSelected: boolean, + onSelectionChange: (selected: boolean) => void + )} + {/snippet} {/if} diff --git a/ui/src/lib/features/daemons/store.ts b/ui/src/lib/features/daemons/store.ts index 596393cd..83a5ba45 100644 --- a/ui/src/lib/features/daemons/store.ts +++ b/ui/src/lib/features/daemons/store.ts @@ -18,6 +18,17 @@ export async function deleteDaemon(id: string) { ); } +export async function bulkDeleteDaemons(ids: string[]) { + const result = await api.request( + `/daemons/bulk-delete`, + daemons, + (_, current) => current.filter((k) => !ids.includes(k.id)), + { method: 'POST', body: JSON.stringify(ids) } + ); + + return result; +} + export function getDaemonIsRunningDiscovery( daemon_id: string | null, sessions: DiscoveryUpdatePayload[] diff --git a/ui/src/lib/features/discovery/components/cards/DiscoveryHistoryCard.svelte b/ui/src/lib/features/discovery/components/cards/DiscoveryHistoryCard.svelte index f04b4289..a714ad78 100644 --- a/ui/src/lib/features/discovery/components/cards/DiscoveryHistoryCard.svelte +++ b/ui/src/lib/features/discovery/components/cards/DiscoveryHistoryCard.svelte @@ -9,6 +9,8 @@ export let viewMode: 'card' | 'list'; export let discovery: Discovery; export let onView: (discovery: Discovery) => void = () => {}; + export let selected: boolean; + export let onSelectionChange: (selected: boolean) => void = () => {}; $: results = discovery.run_type.type == 'Historical' ? 
discovery.run_type.results : null; @@ -57,4 +59,4 @@ }; - + diff --git a/ui/src/lib/features/discovery/components/cards/DiscoveryScheduledCard.svelte b/ui/src/lib/features/discovery/components/cards/DiscoveryScheduledCard.svelte index 98b46ff0..a0d9fc93 100644 --- a/ui/src/lib/features/discovery/components/cards/DiscoveryScheduledCard.svelte +++ b/ui/src/lib/features/discovery/components/cards/DiscoveryScheduledCard.svelte @@ -12,6 +12,8 @@ export let onEdit: (discovery: Discovery) => void = () => {}; export let onDelete: (discovery: Discovery) => void = () => {}; export let onRun: (discovery: Discovery) => void = () => {}; + export let selected: boolean; + export let onSelectionChange: (selected: boolean) => void = () => {}; $: cardData = { title: discovery.name, @@ -65,4 +67,4 @@ }; - + diff --git a/ui/src/lib/features/discovery/components/cards/DiscoverySessionCard.svelte b/ui/src/lib/features/discovery/components/cards/DiscoverySessionCard.svelte index 4cf70f83..1931672b 100644 --- a/ui/src/lib/features/discovery/components/cards/DiscoverySessionCard.svelte +++ b/ui/src/lib/features/discovery/components/cards/DiscoverySessionCard.svelte @@ -1,6 +1,6 @@ - + diff --git a/ui/src/lib/features/groups/components/GroupEditModal/EdgeStyleForm.svelte b/ui/src/lib/features/groups/components/GroupEditModal/EdgeStyleForm.svelte index b2f6ee6b..c9ea6fc2 100644 --- a/ui/src/lib/features/groups/components/GroupEditModal/EdgeStyleForm.svelte +++ b/ui/src/lib/features/groups/components/GroupEditModal/EdgeStyleForm.svelte @@ -5,6 +5,7 @@ export let formData: Group; export let collapsed: boolean = false; + export let editable: boolean = true; const edgeStyleOptions: Array<{ value: 'Straight' | 'SmoothStep' | 'Step' | 'Bezier' | 'SimpleBezier'; @@ -51,8 +52,9 @@ + + {#if $loading} @@ -125,10 +136,23 @@ cta="Create your first group" /> {:else} - - {#snippet children(item: Group, viewMode: 'card' | 'list')} + item.id} + > + {#snippet children( + item: Group, + viewMode: 'card' | 
'list', + isSelected: boolean, + onSelectionChange: (selected: boolean) => void + )} handleEditGroup(item)} onDelete={() => handleDeleteGroup(item)} diff --git a/ui/src/lib/features/groups/store.ts b/ui/src/lib/features/groups/store.ts index bee9fae8..0116faa2 100644 --- a/ui/src/lib/features/groups/store.ts +++ b/ui/src/lib/features/groups/store.ts @@ -3,8 +3,8 @@ import { api } from '../../shared/utils/api'; import type { Group } from '$lib/features/groups/types/base'; import { utcTimeZoneSentinel, uuidv4Sentinel } from '$lib/shared/utils/formatting'; import { getServices } from '../services/store'; -import { currentNetwork } from '../networks/store'; import { entities } from '$lib/shared/stores/metadata'; +import { networks } from '../networks/store'; export const groups = writable([]); @@ -57,6 +57,17 @@ export async function deleteGroup(id: string) { return result; } +export async function bulkDeleteGroups(ids: string[]) { + const result = await api.request( + `/groups/bulk-delete`, + groups, + (_, current) => current.filter((k) => !ids.includes(k.id)), + { method: 'POST', body: JSON.stringify(ids) } + ); + + return result; +} + export function createEmptyGroupFormData(): Group { return { id: uuidv4Sentinel, @@ -69,7 +80,7 @@ export function createEmptyGroupFormData(): Group { source: { type: 'Manual' }, - network_id: get(currentNetwork).id, + network_id: get(networks)[0].id || '', color: entities.getColorHelper('Group').string, edge_style: 'Straight' }; diff --git a/ui/src/lib/features/hosts/components/HostCard.svelte b/ui/src/lib/features/hosts/components/HostCard.svelte index f89a66e5..e28a7821 100644 --- a/ui/src/lib/features/hosts/components/HostCard.svelte +++ b/ui/src/lib/features/hosts/components/HostCard.svelte @@ -3,159 +3,183 @@ import { formatInterface, getHostTargetString, hosts } from '../store'; import type { Host } from '../types/base'; import GenericCard from '$lib/shared/components/data/GenericCard.svelte'; - import { entities, 
serviceDefinitions } from '$lib/shared/stores/metadata'; + import { concepts, entities, serviceDefinitions } from '$lib/shared/stores/metadata'; import type { Group } from '$lib/features/groups/types/base'; import { getServiceById, getServicesForHost } from '$lib/features/services/store'; - import { get } from 'svelte/store'; import { daemons } from '$lib/features/daemons/store'; - export let host: Host; - export let hostGroups: Group[] = []; - export let onEdit: (host: Host) => void = () => {}; - export let onDelete: (host: Host) => void = () => {}; - export let onHide: (host: Host) => void = () => {}; - export let onConsolidate: (host: Host) => void = () => {}; - export let viewMode: 'card' | 'list'; + let { + host, + hostGroups = [], + onEdit = () => {}, + onDelete = () => {}, + onHide = () => {}, + onConsolidate = () => {}, + viewMode, + selected, + onSelectionChange = () => {} + }: { + host: Host; + hostGroups?: Group[]; + onEdit?: (host: Host) => void; + onDelete?: (host: Host) => void; + onHide?: (host: Host) => void; + onConsolidate?: (host: Host) => void; + viewMode: 'card' | 'list'; + selected: boolean; + onSelectionChange?: (selected: boolean) => void; + } = $props(); - export let hasDaemon = $daemons.some((d) => d.host_id == host.id); + let hasDaemon = $derived($daemons.some((d) => d.host_id == host.id)); - $: hostServicesStore = getServicesForHost(host.id); - $: hostServices = $hostServicesStore; - $: servicesThatManageVmsIds = hostServices - .filter( - (sv) => - serviceDefinitions.getItem(sv.service_definition)?.metadata.manages_virtualization == 'vms' - ) - .map((sv) => sv.id); - $: servicesThatManageContainersIds = hostServices - .filter( - (sv) => - serviceDefinitions.getItem(sv.service_definition)?.metadata.manages_virtualization == - 'containers' - ) - .map((sv) => sv.id); - - $: vms = $hosts.filter( - (h) => - h.virtualization && - h.virtualization?.type == 'Proxmox' && - servicesThatManageVmsIds.includes(h.virtualization.details.service_id) - 
); - $: containers = hostServices.filter( - (s) => - s.virtualization && - s.virtualization?.type == 'Docker' && - servicesThatManageContainersIds.includes(s.virtualization.details.service_id) + // Get stores at top level + let hostServicesStore = $derived(getServicesForHost(host.id)); + let hostTargetStringStore = $derived(getHostTargetString(host)); + let virtualizationServiceStore = $derived( + host.virtualization !== null ? getServiceById(host.virtualization.details.service_id) : null ); - $: containerIds = containers.map((c) => c.id); - // Build card data - $: cardData = { - title: host.name, - ...(host.virtualization !== null - ? { - subtitle: - 'VM Managed By ' + get(getServiceById(host.virtualization.details.service_id))?.name || - 'Unknown Service' - } - : {}), - link: host.target.type != 'None' ? `http://${get(getHostTargetString(host))}` : undefined, - iconColor: entities.getColorHelper('Host').icon, - Icon: - serviceDefinitions.getIconComponent(hostServices[0]?.service_definition) || - entities.getIconComponent('Host'), - fields: [ - { - label: 'Description', - value: host.description - }, - { - label: 'Groups', - value: hostGroups.map((group: Group) => ({ - id: group.id, - label: group.name, - color: entities.getColorHelper('Group').string - })), - emptyText: 'No groups assigned' - }, - { - label: 'VMs', - value: vms.map((h) => { - return { - id: h.id, - label: h.name, - color: entities.getColorHelper('Virtualization').string - }; - }), - emptyText: 'No VMs assigned' - }, - { - label: 'Services', - value: hostServices - .filter((sv) => !containerIds.includes(sv.id)) - .map((sv) => { + // Consolidate all reactive computations into a single derived to prevent cascading updates + let cardData = $derived.by(() => { + const hostServices = $hostServicesStore; + const hostTargetString = $hostTargetStringStore; + const virtualizationService = virtualizationServiceStore ? 
$virtualizationServiceStore : null; + + const servicesThatManageVmsIds = hostServices + .filter( + (sv) => + serviceDefinitions.getItem(sv.service_definition)?.metadata.manages_virtualization == + 'vms' + ) + .map((sv) => sv.id); + + const servicesThatManageContainersIds = hostServices + .filter( + (sv) => + serviceDefinitions.getItem(sv.service_definition)?.metadata.manages_virtualization == + 'containers' + ) + .map((sv) => sv.id); + + const vms = $hosts.filter( + (h) => + h.virtualization && + h.virtualization?.type == 'Proxmox' && + servicesThatManageVmsIds.includes(h.virtualization.details.service_id) + ); + + const containers = hostServices.filter( + (s) => + s.virtualization && + s.virtualization?.type == 'Docker' && + servicesThatManageContainersIds.includes(s.virtualization.details.service_id) + ); + + const containerIds = containers.map((c) => c.id); + + return { + title: host.name, + ...(host.virtualization !== null && virtualizationService + ? { + subtitle: 'VM Managed By ' + virtualizationService.name || 'Unknown Service' + } + : {}), + link: host.target.type != 'None' ? `http://${hostTargetString}` : undefined, + iconColor: entities.getColorHelper('Host').icon, + Icon: + serviceDefinitions.getIconComponent(hostServices[0]?.service_definition) || + entities.getIconComponent('Host'), + fields: [ + { + label: 'Description', + value: host.description + }, + { + label: 'Groups', + value: hostGroups.map((group: Group) => ({ + id: group.id, + label: group.name, + color: entities.getColorHelper('Group').string + })), + emptyText: 'No groups assigned' + }, + { + label: 'VMs', + value: vms.map((h) => { return { - id: sv.id, - label: sv.name, - color: entities.getColorHelper('Service').string + id: h.id, + label: h.name, + color: concepts.getColorHelper('Virtualization').string }; - }) - .sort((a) => (containerIds.includes(a.id) ? 
1 : -1)), - emptyText: 'No services assigned' - }, - { - label: 'Containers', - value: containers - .map((c) => { + }), + emptyText: 'No VMs assigned' + }, + { + label: 'Services', + value: hostServices + .filter((sv) => !containerIds.includes(sv.id)) + .map((sv) => { + return { + id: sv.id, + label: sv.name, + color: entities.getColorHelper('Service').string + }; + }) + .sort((a) => (containerIds.includes(a.id) ? 1 : -1)), + emptyText: 'No services assigned' + }, + { + label: 'Containers', + value: containers + .map((c) => { + return { + id: c.id, + label: c.name, + color: concepts.getColorHelper('Virtualization').string + }; + }) + .sort((a) => (containerIds.includes(a.id) ? 1 : -1)), + emptyText: 'No containers' + }, + { + label: 'Interfaces', + value: host.interfaces.map((i) => { return { - id: c.id, - label: c.name, - color: entities.getColorHelper('Virtualization').string + id: i.id, + label: formatInterface(i), + color: entities.getColorHelper('Interface').string }; - }) - .sort((a) => (containerIds.includes(a.id) ? 1 : -1)), - emptyText: 'No containers' - }, - { - label: 'Interfaces', - value: host.interfaces.map((i) => { - return { - id: i.id, - label: formatInterface(i), - color: entities.getColorHelper('Interface').string - }; - }), - emptyText: 'No interfaces' - } - ], - actions: [ - { - label: hasDaemon - ? "Can't delete a host with an associated daemon. Delete the daemon first." - : 'Delete Host', - icon: Trash2, - class: 'btn-icon-danger', - onClick: () => onDelete(host), - disabled: hasDaemon - }, - { - label: 'Consolidate', - icon: Replace, - onClick: () => onConsolidate(host) - }, - { - label: 'Hide Host', - icon: Eye, - class: host.hidden ? 
'text-blue-400' : '', - onClick: () => onHide(host) - }, - { - label: 'Edit Host', - icon: Edit, - onClick: () => onEdit(host) - } - ] - }; + }), + emptyText: 'No interfaces' + } + ], + actions: [ + { + label: 'Delete', + icon: Trash2, + class: 'btn-icon-danger', + onClick: () => onDelete(host), + disabled: hasDaemon + }, + { + label: 'Consolidate', + icon: Replace, + onClick: () => onConsolidate(host) + }, + { + label: 'Hide', + icon: Eye, + class: host.hidden ? 'text-blue-400' : '', + onClick: () => onHide(host) + }, + { + label: 'Edit', + icon: Edit, + onClick: () => onEdit(host) + } + ] + }; + }); - + diff --git a/ui/src/lib/features/hosts/components/HostConsolidationModal.svelte b/ui/src/lib/features/hosts/components/HostConsolidationModal.svelte index 0579dabb..1553e374 100644 --- a/ui/src/lib/features/hosts/components/HostConsolidationModal.svelte +++ b/ui/src/lib/features/hosts/components/HostConsolidationModal.svelte @@ -1,5 +1,4 @@ - + diff --git a/ui/src/lib/features/networks/components/NetworksTab.svelte b/ui/src/lib/features/networks/components/NetworksTab.svelte index 91892ee7..5209a278 100644 --- a/ui/src/lib/features/networks/components/NetworksTab.svelte +++ b/ui/src/lib/features/networks/components/NetworksTab.svelte @@ -4,6 +4,7 @@ import EmptyState from '$lib/shared/components/layout/EmptyState.svelte'; import { loadData } from '$lib/shared/utils/dataLoader'; import { + bulkDeleteNetworks, createNetwork, deleteNetwork, getNetworks, @@ -19,6 +20,7 @@ import NetworkEditModal from './NetworkEditModal.svelte'; import DataControls from '$lib/shared/components/data/DataControls.svelte'; import type { FieldConfig } from '$lib/shared/components/data/types'; + import { Plus } from 'lucide-svelte'; const loading = loadData([getNetworks, getHosts, getDaemons, getSubnets, getGroups]); @@ -45,6 +47,12 @@ showCreateNetworkModal = true; } + async function handleBulkDelete(ids: string[]) { + if (confirm(`Are you sure you want to delete ${ids.length} 
Networks?`)) { + await bulkDeleteNetworks(ids); + } + } + async function handleNetworkCreate(data: Network) { const result = await createNetwork(data); if (result?.success) { @@ -81,16 +89,13 @@
- + + + + + {#if $loading} @@ -107,12 +112,21 @@ item.id} > - {#snippet children(item: Network, viewMode: 'card' | 'list')} + {#snippet children( + item: Network, + viewMode: 'card' | 'list', + isSelected: boolean, + onSelectionChange: (selected: boolean) => void + )} diff --git a/ui/src/lib/features/networks/store.ts b/ui/src/lib/features/networks/store.ts index 448e596b..b4eb9cfb 100644 --- a/ui/src/lib/features/networks/store.ts +++ b/ui/src/lib/features/networks/store.ts @@ -5,20 +5,14 @@ import { currentUser } from '../auth/store'; import { utcTimeZoneSentinel, uuidv4Sentinel } from '$lib/shared/utils/formatting'; export const networks = writable([]); -export const currentNetwork = writable(); export async function getNetworks() { const user = get(currentUser); if (user) { - const result = await api.request(`/networks`, networks, (networks) => networks, { + await api.request(`/networks`, networks, (networks) => networks, { method: 'GET' }); - - if (result && result.success && result.data) { - const current = get(networks).find((n) => n.is_default) || get(networks)[0]; - currentNetwork.set(current); - } } } @@ -55,6 +49,17 @@ export async function deleteNetwork(id: string) { return result; } +export async function bulkDeleteNetworks(ids: string[]) { + const result = await api.request( + `/networks/bulk-delete`, + networks, + (_, current) => current.filter((k) => !ids.includes(k.id)), + { method: 'POST', body: JSON.stringify(ids) } + ); + + return result; +} + export function createEmptyNetworkFormData(): Network { return { id: uuidv4Sentinel, diff --git a/ui/src/lib/features/organizations/types.ts b/ui/src/lib/features/organizations/types.ts index f814b404..8df7757b 100644 --- a/ui/src/lib/features/organizations/types.ts +++ b/ui/src/lib/features/organizations/types.ts @@ -63,6 +63,7 @@ export interface CreateInviteRequest { } export interface OrganizationInvite { + id: string; token: string; permissions: UserOrgPermissions; url: string; diff --git 
a/ui/src/lib/features/services/components/ServiceCard.svelte b/ui/src/lib/features/services/components/ServiceCard.svelte index a18480f9..3f87eea9 100644 --- a/ui/src/lib/features/services/components/ServiceCard.svelte +++ b/ui/src/lib/features/services/components/ServiceCard.svelte @@ -15,6 +15,8 @@ export let onDelete: (service: Service) => void = () => {}; export let onEdit: (service: Service) => void = () => {}; export let viewMode: 'card' | 'list'; + export let selected: boolean; + export let onSelectionChange: (selected: boolean) => void = () => {}; $: ports = host.ports.filter((p) => service.bindings @@ -86,19 +88,19 @@ ], actions: [ { - label: 'Edit Service', - icon: Edit, - class: 'btn-icon', - onClick: () => onEdit(service) - }, - { - label: 'Delete Service', + label: 'Delete', icon: Trash2, class: 'btn-icon-danger', onClick: () => onDelete(service) + }, + { + label: 'Edit', + icon: Edit, + class: 'btn-icon', + onClick: () => onEdit(service) } ] }; - + diff --git a/ui/src/lib/features/services/components/ServiceTab.svelte b/ui/src/lib/features/services/components/ServiceTab.svelte index 75347922..165ddfa1 100644 --- a/ui/src/lib/features/services/components/ServiceTab.svelte +++ b/ui/src/lib/features/services/components/ServiceTab.svelte @@ -4,6 +4,7 @@ import EmptyState from '$lib/shared/components/layout/EmptyState.svelte'; import { loadData } from '$lib/shared/utils/dataLoader'; import { + bulkDeleteServices, deleteService, getServices, services, @@ -56,6 +57,12 @@ } } + async function handleBulkDelete(ids: string[]) { + if (confirm(`Are you sure you want to delete ${ids.length} Services?`)) { + await bulkDeleteServices(ids); + } + } + // Define field configuration for the DataTableControls const serviceFields: FieldConfig[] = [ { @@ -139,13 +146,22 @@ items={$services} fields={serviceFields} storageKey="netvisor-services-table-state" + onBulkDelete={handleBulkDelete} + getItemId={(item) => item.id} > - {#snippet children(item: Service, viewMode: 
'card' | 'list')} + {#snippet children( + item: Service, + viewMode: 'card' | 'list', + isSelected: boolean, + onSelectionChange: (selected: boolean) => void + )} {@const host = serviceHosts.get(item.id)} {#if host} ([]); @@ -27,6 +27,17 @@ export async function deleteService(id: string) { ); } +export async function bulkDeleteServices(ids: string[]) { + const result = await api.request( + `/services/bulk-delete`, + services, + (_, current) => current.filter((k) => !ids.includes(k.id)), + { method: 'POST', body: JSON.stringify(ids) } + ); + + return result; +} + // Update a service export async function updateService(data: Service) { console.log(1); @@ -48,7 +59,7 @@ export function createDefaultService( id: uuidv4Sentinel, created_at: utcTimeZoneSentinel, updated_at: utcTimeZoneSentinel, - network_id: get(currentNetwork).id, + network_id: get(networks)[0].id || '', host_id, is_gateway: false, service_definition: serviceType, diff --git a/ui/src/lib/features/subnets/components/SubnetCard.svelte b/ui/src/lib/features/subnets/components/SubnetCard.svelte index d0b87cea..106ac8d4 100644 --- a/ui/src/lib/features/subnets/components/SubnetCard.svelte +++ b/ui/src/lib/features/subnets/components/SubnetCard.svelte @@ -11,6 +11,8 @@ export let onEdit: (subnet: Subnet) => void = () => {}; export let onDelete: (subnet: Subnet) => void = () => {}; export let viewMode: 'card' | 'list'; + export let selected: boolean; + export let onSelectionChange: (selected: boolean) => void = () => {}; $: allServices = getServicesForSubnet(subnet); $: serviceLabelsStore = formatServiceLabels($allServices.map((s) => s.id)); @@ -78,13 +80,13 @@ actions: [ { - label: 'Delete Subnet', + label: 'Delete', icon: Trash2, class: 'btn-icon-danger', onClick: () => onDelete(subnet) }, { - label: 'Edit Subnet', + label: 'Edit', icon: Edit, onClick: () => onEdit(subnet) } @@ -92,4 +94,4 @@ }; - + diff --git a/ui/src/lib/features/subnets/components/SubnetTab.svelte 
b/ui/src/lib/features/subnets/components/SubnetTab.svelte index e9d94375..a7c72bf8 100644 --- a/ui/src/lib/features/subnets/components/SubnetTab.svelte +++ b/ui/src/lib/features/subnets/components/SubnetTab.svelte @@ -1,5 +1,12 @@ - diff --git a/ui/src/lib/features/topology/components/RefreshConflictsModal.svelte b/ui/src/lib/features/topology/components/RefreshConflictsModal.svelte new file mode 100644 index 00000000..6401a3c0 --- /dev/null +++ b/ui/src/lib/features/topology/components/RefreshConflictsModal.svelte @@ -0,0 +1,108 @@ + + + + + + + +
+ + + + + + + + +
+ + +
+ +
+ + +
+
+
+
diff --git a/ui/src/lib/features/topology/components/StateBadge.svelte b/ui/src/lib/features/topology/components/StateBadge.svelte new file mode 100644 index 00000000..5da21e7f --- /dev/null +++ b/ui/src/lib/features/topology/components/StateBadge.svelte @@ -0,0 +1,20 @@ + + + diff --git a/ui/src/lib/features/topology/components/TopologyDetailsForm.svelte b/ui/src/lib/features/topology/components/TopologyDetailsForm.svelte new file mode 100644 index 00000000..14f30084 --- /dev/null +++ b/ui/src/lib/features/topology/components/TopologyDetailsForm.svelte @@ -0,0 +1,62 @@ + + +
+ + +
+ +
+ + +
diff --git a/ui/src/lib/features/topology/components/TopologyModal.svelte b/ui/src/lib/features/topology/components/TopologyModal.svelte new file mode 100644 index 00000000..3456e873 --- /dev/null +++ b/ui/src/lib/features/topology/components/TopologyModal.svelte @@ -0,0 +1,76 @@ + + + + + + + +
+ +
+
diff --git a/ui/src/lib/features/topology/components/TopologyTab.svelte b/ui/src/lib/features/topology/components/TopologyTab.svelte index 5d511b82..1f28e562 100644 --- a/ui/src/lib/features/topology/components/TopologyTab.svelte +++ b/ui/src/lib/features/topology/components/TopologyTab.svelte @@ -1,39 +1,269 @@
- + + +
+ {#if $topology} + + {/if} + + {#if $topology && !$topology.is_locked} +
+
+ +
+
+ {/if} + + + {#if $topology && stateConfig} +
+
+ +
+ {#if $topology.is_locked && $topology.locked_at} +
+ {formatTimestamp($topology.locked_at)} + by {$users.find((u) => u.id == $topology.locked_by)?.email} +
+ {:else} + {formatTimestamp($topology.last_refreshed)} + {/if} +
+ {/if} + + {#if $topologies && $topology} +
+ +
+ {/if} + + {#if $topology} + + {/if} + + + + {#if $topology} + + {/if} +
+
+
+ + + {#if $topology && stateConfig} + {#if stateConfig.type === 'locked'} + + {:else if stateConfig.type === 'stale_conflicts'} + + {:else if stateConfig.type === 'stale_safe'} + + {/if} + {/if} + {#if $loading} - {:else} + {:else if $topology}
+ {:else} +
+ No topology selected. Create one to get started. +
{/if}
+ + + +{#if $topology} + (isRefreshConflictsOpen = false)} + /> +{/if} diff --git a/ui/src/lib/features/topology/components/panel/TopologyOptionsPanel.svelte b/ui/src/lib/features/topology/components/panel/TopologyOptionsPanel.svelte index d8bef326..b5beaf2c 100644 --- a/ui/src/lib/features/topology/components/panel/TopologyOptionsPanel.svelte +++ b/ui/src/lib/features/topology/components/panel/TopologyOptionsPanel.svelte @@ -61,9 +61,13 @@ {:else if activeTab === 'inspector'} {#if $selectedNode} - + {#key $selectedNode.id} + + {/key} {:else if $selectedEdge} - + {#key $selectedEdge.id} + + {/key} {:else}
Click on a node or edge to inspect it diff --git a/ui/src/lib/features/topology/components/panel/inspectors/InspectorNode.svelte b/ui/src/lib/features/topology/components/panel/inspectors/InspectorNode.svelte index e4e9a392..ae241f1d 100644 --- a/ui/src/lib/features/topology/components/panel/inspectors/InspectorNode.svelte +++ b/ui/src/lib/features/topology/components/panel/inspectors/InspectorNode.svelte @@ -1,7 +1,7 @@
- {#if $group && localGroup} + {#if group && localGroup} Group
- +
Edge Style -
- + {#if getTopologyStateInfo($topology).type != 'fresh'} + + {/if} +
+
Services - {#each $group.service_bindings as binding (binding)} + {#each group.service_bindings as binding (binding)} {@const bindingService = get(getServiceForBinding(binding))} {@const bindingHost = bindingService ? getHostFromId(bindingService.id) : null} {#if bindingService && bindingHost} diff --git a/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeHostVirtualization.svelte b/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeHostVirtualization.svelte index 6ed9ca58..37ae2ddb 100644 --- a/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeHostVirtualization.svelte +++ b/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeHostVirtualization.svelte @@ -1,33 +1,34 @@
- {#if $vmService} + {#if vmService} VM Service
{/if} - {#if $hypervisorHost} + {#if hypervisorHost} Hypervisor Host
- +
{/if}
diff --git a/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeInterface.svelte b/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeInterface.svelte index ee80111c..28bef32d 100644 --- a/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeInterface.svelte +++ b/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeInterface.svelte @@ -1,20 +1,16 @@
diff --git a/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeServiceVirtualization.svelte b/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeServiceVirtualization.svelte index 68e36a4d..b9cf5deb 100644 --- a/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeServiceVirtualization.svelte +++ b/ui/src/lib/features/topology/components/panel/inspectors/edges/InspectorEdgeServiceVirtualization.svelte @@ -1,12 +1,11 @@
- {#if $containerizingHost} + {#if containerizingHost} Docker Host
- +
{/if} - {#if $containerizingService} + {#if containerizingService} Docker Service
diff --git a/ui/src/lib/features/topology/components/panel/inspectors/edges/nodes/InspectorInterfaceNode.svelte b/ui/src/lib/features/topology/components/panel/inspectors/nodes/InspectorInterfaceNode.svelte similarity index 80% rename from ui/src/lib/features/topology/components/panel/inspectors/edges/nodes/InspectorInterfaceNode.svelte rename to ui/src/lib/features/topology/components/panel/inspectors/nodes/InspectorInterfaceNode.svelte index da7f0a80..f5a9d982 100644 --- a/ui/src/lib/features/topology/components/panel/inspectors/edges/nodes/InspectorInterfaceNode.svelte +++ b/ui/src/lib/features/topology/components/panel/inspectors/nodes/InspectorInterfaceNode.svelte @@ -1,36 +1,36 @@ diff --git a/ui/src/lib/features/topology/components/panel/inspectors/edges/nodes/InspectorSubnetNode.svelte b/ui/src/lib/features/topology/components/panel/inspectors/nodes/InspectorSubnetNode.svelte similarity index 79% rename from ui/src/lib/features/topology/components/panel/inspectors/edges/nodes/InspectorSubnetNode.svelte rename to ui/src/lib/features/topology/components/panel/inspectors/nodes/InspectorSubnetNode.svelte index 75771679..034d69e2 100644 --- a/ui/src/lib/features/topology/components/panel/inspectors/edges/nodes/InspectorSubnetNode.svelte +++ b/ui/src/lib/features/topology/components/panel/inspectors/nodes/InspectorSubnetNode.svelte @@ -1,13 +1,12 @@
diff --git a/ui/src/lib/features/topology/components/panel/options/OptionsContent.svelte b/ui/src/lib/features/topology/components/panel/options/OptionsContent.svelte index 0fad857e..2ca26ff3 100644 --- a/ui/src/lib/features/topology/components/panel/options/OptionsContent.svelte +++ b/ui/src/lib/features/topology/components/panel/options/OptionsContent.svelte @@ -1,6 +1,5 @@ -{#if !hideEdge} +{#if isSelected} + + +{/if} + +{#if !hideEdge && !reconnecting} {#if useMultiColorDash} diff --git a/ui/src/lib/features/topology/components/InterfaceNode.svelte b/ui/src/lib/features/topology/components/visualization/InterfaceNode.svelte similarity index 58% rename from ui/src/lib/features/topology/components/InterfaceNode.svelte rename to ui/src/lib/features/topology/components/visualization/InterfaceNode.svelte index 27920b99..56d867ec 100644 --- a/ui/src/lib/features/topology/components/InterfaceNode.svelte +++ b/ui/src/lib/features/topology/components/visualization/InterfaceNode.svelte @@ -1,43 +1,40 @@ -{#if nodeData} +{#if nodeRenderData}
- {#if nodeData.headerText} + {#if nodeRenderData.headerText}
- {nodeData.headerText} + {nodeRenderData.headerText}
{/if} @@ -111,13 +126,13 @@ class="flex flex-col items-center justify-around px-3 py-2" style="flex: 1 1 0; min-height: 0;" > - {#if nodeData.showServices} + {#if nodeRenderData.showServices}
- {#each nodeData.services as service (service.id)} + {#each nodeRenderData.services as service (service.id)} {@const ServiceIcon = serviceDefinitions.getIconComponent(service.service_definition)}
- {#if !$topologyOptions.request_options.hide_ports && service.bindings.filter((b) => b.type == 'Port').length > 0} + {#if !$topologyOptions.request.hide_ports && service.bindings.filter((b) => b.type == 'Port').length > 0} {service.bindings .map((b) => { if ( - (b.interface_id == nodeData.interface_id || b.interface_id == null) && + (b.interface_id == nodeRenderData.interface_id || b.interface_id == null) && b.type == 'Port' ) { const port = get(getPortFromId(b.port_id)); @@ -160,30 +175,30 @@
- {nodeData.bodyText} + {nodeRenderData.bodyText}
{/if}
- {#if nodeData.footerText} + {#if nodeRenderData.footerText}
- {nodeData.footerText} + {nodeRenderData.footerText}
{/if}
{/if} - - - - + + + + - - - - + + + + diff --git a/ui/src/lib/features/topology/components/SubnetNode.svelte b/ui/src/lib/features/topology/components/visualization/SubnetNode.svelte similarity index 83% rename from ui/src/lib/features/topology/components/SubnetNode.svelte rename to ui/src/lib/features/topology/components/visualization/SubnetNode.svelte index 23eee2d5..0630017c 100644 --- a/ui/src/lib/features/topology/components/SubnetNode.svelte +++ b/ui/src/lib/features/topology/components/visualization/SubnetNode.svelte @@ -1,21 +1,28 @@ {#if subnetRenderData} @@ -89,9 +113,10 @@ {/if}
- {#if resizeHandleZoomLevel && !$topologyOptions.hide_resize_handles} + {#if resizeHandleZoomLevel && !$topologyOptions.local.hide_resize_handles} { - await loadTopologyData(); - }); - $effect(() => { - if ($topology?.edges || $topology?.nodes) { + if ($topology && ($topology.edges || $topology.nodes)) { void loadTopologyData(); } }); @@ -57,9 +59,10 @@ void $selectedNode; void $selectedEdge; - if ($topology?.edges) { + if ($topology && ($topology.edges || $topology.nodes)) { const currentEdges = get(edges); - updateConnectedNodes($selectedNode, $selectedEdge, currentEdges); + const currentNodes = get(nodes); + updateConnectedNodes($selectedNode, $selectedEdge, currentEdges, currentNodes); // Update edge animated state based on selection const updatedEdges = currentEdges.map((edge) => { @@ -86,22 +89,24 @@ async function loadTopologyData() { try { - if ($topology?.nodes && $topology?.edges) { + if ($topology && ($topology.edges || $topology.nodes)) { // Create nodes FIRST - const allNodes: Node[] = $topology.nodes.map((node): Node => { - return { - id: node.id, - type: node.node_type, - position: { x: node.position.x, y: node.position.y }, - width: node.size.x, - height: node.size.y, - expandParent: true, - deletable: false, - parentId: node.node_type == 'InterfaceNode' ? node.subnet_id : undefined, - extent: node.node_type == 'InterfaceNode' ? 'parent' : undefined, - data: node - }; - }); + const allNodes: Node[] = $topology.nodes.map((node) => ({ + id: node.id, + type: node.node_type, + position: { x: node.position.x, y: node.position.y }, + width: node.size.x, + height: node.size.y, + expandParent: true, + deletable: false, + parentId: node.node_type == 'InterfaceNode' ? node.subnet_id : undefined, + extent: node.node_type == 'InterfaceNode' ? 
'parent' : undefined, + data: node + })); + + // Save current edge animated states before clearing + const currentEdges = get(edges); + const animatedStates = new Map(currentEdges.map((edge) => [edge.id, edge.animated])); // Clear edges FIRST edges.set([]); @@ -118,10 +123,8 @@ // Create edges with markers const flowEdges: Edge[] = $topology.edges - .filter( - ([, , edge]: [number, number, TopologyEdge]) => edge.edge_type != 'HostVirtualization' - ) - .map(([, , edge]: [number, number, TopologyEdge], index: number): Edge => { + .filter((edge) => edge.edge_type != 'HostVirtualization') + .map((edge: TopologyEdge, index: number) => { const edgeType = edge.edge_type as string; const edgeMetadata = edgeTypes.getMetadata(edgeType); const edgeColorHelper = edgeTypes.getColorHelper(edgeType); @@ -139,6 +142,8 @@ color: edgeColorHelper.rgb } as EdgeMarkerType); + const edgeId = `edge-${index}`; + return { id: `edge-${index}`, source: edge.source, @@ -150,7 +155,7 @@ type: 'custom', label: edge.label, data: { ...edge, edgeIndex: index }, - animated: false, + animated: animatedStates.get(edgeId) ?? 
false, interactionWidth: 50 }; }); @@ -162,6 +167,44 @@ } } + async function onNodeDragEnd({ + targetNode + }: { + targetNode: Node | null; + nodes: Node[]; + event: MouseEvent | TouchEvent; + }) { + let movedNode = $topology.nodes.find((node) => node.id == targetNode?.id); + if (movedNode && targetNode && targetNode.position) { + movedNode.position.x = targetNode?.position.x; + movedNode.position.y = targetNode?.position.y; + await updateTopology($topology); + } + } + + async function onReconnect(edge: Edge, newConnection: Connection) { + const edgeData = edge.data as TopologyEdge; + + if ($selectedEdge && edge.id === $selectedEdge.id) { + let topologyEdge = $topology.edges.find((e) => e.id == edgeData.id); + if ( + topologyEdge && + newConnection.source == topologyEdge.source && + newConnection.target == topologyEdge.target && + newConnection.sourceHandle && + newConnection.targetHandle + ) { + topologyEdge.source_handle = newConnection.sourceHandle as EdgeHandle; + topologyEdge.target_handle = newConnection.targetHandle as EdgeHandle; + $topology = { + ...$topology, + edges: [...$topology.edges] + }; + await updateTopology($topology); + } + } + } + function onNodeClick({ node }: { node: Node; event: MouseEvent | TouchEvent }) { selectedNode.set(node); selectedEdge.set(null); @@ -209,12 +252,14 @@ onnodeclick={onNodeClick} onedgepointerenter={hoveredEdge} onedgepointerleave={hoveredEdge} - fitView + onnodedragstop={onNodeDragEnd} + onreconnect={onReconnect} + fitView={true} minZoom={0.1} noPanClass="nopan" snapGrid={[25, 25]} nodesDraggable={true} - nodesConnectable={false} + nodesConnectable={true} elementsSelectable={true} > @@ -222,6 +267,7 @@ diff --git a/ui/src/lib/features/topology/interactions.ts b/ui/src/lib/features/topology/interactions.ts index b8b26e9c..b92a6345 100644 --- a/ui/src/lib/features/topology/interactions.ts +++ b/ui/src/lib/features/topology/interactions.ts @@ -2,7 +2,7 @@ import { writable, get } from 'svelte/store'; import type { Edge } 
from '@xyflow/svelte'; import type { Node } from '@xyflow/svelte'; import { edgeTypes, subnetTypes } from '$lib/shared/stores/metadata'; -import type { TopologyEdge } from './types/base'; +import type { TopologyEdge, TopologyNode } from './types/base'; import { getInterfacesOnSubnet, getSubnetFromId } from '../subnets/store'; import { getHostFromInterfaceId } from '../hosts/store'; @@ -43,13 +43,24 @@ function getVirtualizedContainerNodes(dockerHostInterfaceId: string): Set(); // If a node is selected if (selectedNode) { connected.add(selectedNode.id); + const nodeData = selectedNode.data as TopologyNode; + + if (nodeData.node_type == 'SubnetNode') { + allNodes.forEach((n) => { + const nd = n.data as TopologyNode; + if (nd.node_type == 'InterfaceNode' && nd.subnet_id == nodeData.id) { + connected.add(nd.id); + } + }); + } for (const edge of allEdges) { const edgeData = edge.data as TopologyEdge; diff --git a/ui/src/lib/features/topology/sse.ts b/ui/src/lib/features/topology/sse.ts new file mode 100644 index 00000000..25e0c0dd --- /dev/null +++ b/ui/src/lib/features/topology/sse.ts @@ -0,0 +1,90 @@ +import { BaseSSEManager, type SSEConfig } from '$lib/shared/utils/sse'; +import { topologies, topology } from './store'; +import { get } from 'svelte/store'; +import type { Topology } from './types/base'; + +class TopologySSEManager extends BaseSSEManager { + private stalenessTimers: Map> = new Map(); + private readonly DEBOUNCE_MS = 300; + + protected createConfig(): SSEConfig { + return { + url: '/api/topology/stream', + onMessage: (update) => { + // If the update says it's NOT stale, apply immediately (it's a full refresh) + if (!update.is_stale) { + this.applyFullUpdate(update); + return; + } + + // For staleness updates, debounce them + const existingTimer = this.stalenessTimers.get(update.id); + if (existingTimer) { + clearTimeout(existingTimer); + } + + const timer = setTimeout(() => { + this.applyPartialUpdate(update.id, { + removed_groups: update.removed_groups, 
+ removed_hosts: update.removed_hosts, + removed_services: update.removed_services, + removed_subnets: update.removed_subnets, + is_stale: update.is_stale, + options: update.options + }); + this.stalenessTimers.delete(update.id); + }, this.DEBOUNCE_MS); + + this.stalenessTimers.set(update.id, timer); + }, + onError: (error) => { + console.error('Topology SSE error:', error); + }, + onOpen: () => { + console.log('Topology SSE connected'); + } + }; + } + + private applyFullUpdate(update: Topology) { + topologies.update((topos) => { + return topos.map((topo) => { + if (topo.id === update.id) { + return update; + } + return topo; + }); + }); + + const currentTopology = get(topology); + if (currentTopology && currentTopology.id === update.id) { + topology.set(update); + } + } + + private applyPartialUpdate(topologyId: string, updates: Partial) { + topologies.update((topos) => { + return topos.map((topo) => { + if (topo.id === topologyId) { + return { + ...topo, + ...updates + }; + } + return topo; + }); + }); + + const currentTopology = get(topology); + if (currentTopology && currentTopology.id === topologyId) { + topology.update((topo) => { + return { + ...topo, + ...updates + }; + }); + } + } +} + +export const topologySSEManager = new TopologySSEManager(); diff --git a/ui/src/lib/features/topology/state.ts b/ui/src/lib/features/topology/state.ts new file mode 100644 index 00000000..00282852 --- /dev/null +++ b/ui/src/lib/features/topology/state.ts @@ -0,0 +1,108 @@ +import { Lock, RefreshCcw } from 'lucide-svelte'; +import type { Topology } from './types/base'; +import type { IconComponent } from '$lib/shared/utils/types'; + +export type TopologyStateType = 'locked' | 'fresh' | 'stale_safe' | 'stale_conflicts'; + +export interface TopologyStateInfo { + type: TopologyStateType; + icon: IconComponent; + hoverIcon?: IconComponent; + color: 'blue' | 'green' | 'yellow' | 'red'; + class: string; + label: string; + buttonText: string; + hoverLabel?: string; +} + +export 
interface TopologyStateConfig extends TopologyStateInfo { + action: (() => void) | null; +} + +/** + * Determine the state info for a topology (without actions) + * This can be used in displays, lists, etc. + */ +export function getTopologyStateInfo(topology: Topology): TopologyStateInfo { + // Locked state + if (topology.is_locked) { + return { + type: 'locked', + icon: Lock, + color: 'blue', + class: 'btn-info', + buttonText: 'Locked', + label: 'Locked' + }; + } + + // Fresh state + if (!topology.is_stale) { + return { + type: 'fresh', + icon: RefreshCcw, + class: 'btn-secondary', + color: 'green', + buttonText: 'Rebuild', + label: 'Up to date' + }; + } + + // Check for conflicts + const hasConflicts = + topology.removed_hosts.length > 0 || + topology.removed_services.length > 0 || + topology.removed_subnets.length > 0 || + topology.removed_groups.length > 0; + + // Stale with conflicts + if (hasConflicts) { + return { + type: 'stale_conflicts', + icon: RefreshCcw, + color: 'red', + class: 'btn-danger', + buttonText: 'Rebuild', + label: 'Conflicts' + }; + } + + // Stale without conflicts + return { + type: 'stale_safe', + icon: RefreshCcw, + color: 'yellow', + class: 'btn-warning', + buttonText: 'Rebuild', + label: 'Stale' + }; +} + +/** + * Get full topology state config with actions + * This is used in the main topology page where actions are needed + */ +export function getTopologyState( + topology: Topology, + handlers: { + onRefresh: () => void; + onUnlock: () => void; + onReset: () => void; + onLock: () => void; + } +): TopologyStateConfig { + const stateInfo = getTopologyStateInfo(topology); + + // Map state types to actions + const actionMap: Record void) | null> = { + locked: handlers.onUnlock, + fresh: handlers.onReset, + stale_safe: handlers.onRefresh, + stale_conflicts: handlers.onRefresh + }; + + return { + ...stateInfo, + action: actionMap[stateInfo.type] + }; +} diff --git a/ui/src/lib/features/topology/store.ts 
b/ui/src/lib/features/topology/store.ts index 29538b57..13058966 100644 --- a/ui/src/lib/features/topology/store.ts +++ b/ui/src/lib/features/topology/store.ts @@ -1,10 +1,19 @@ import { get, writable } from 'svelte/store'; import { api } from '../../shared/utils/api'; import { type Edge, type Node } from '@xyflow/svelte'; -import { EdgeHandle, type TopologyResponse, type TopologyOptions } from './types/base'; +import { type Topology, type TopologyOptions } from './types/base'; import { networks } from '../networks/store'; import deepmerge from 'deepmerge'; import { browser } from '$app/environment'; +import { utcTimeZoneSentinel, uuidv4Sentinel } from '$lib/shared/utils/formatting'; + +let initialized = false; +let topologyInitialized = false; +let lastTopologyId = ''; + +export const topologies = writable([]); +export const topology = writable(); +export const selectedNetwork = writable(''); export const selectedNode = writable(null); export const selectedEdge = writable(null); @@ -14,66 +23,86 @@ const EXPANDED_STORAGE_KEY = 'netvisor_topology_options_expanded_state'; // Default options const defaultOptions: TopologyOptions = { - left_zone_title: 'Infrastructure', - hide_edge_types: [], - no_fade_edges: false, - hide_resize_handles: false, - request_options: { + local: { + left_zone_title: 'Infrastructure', + hide_edge_types: [], + no_fade_edges: false, + hide_resize_handles: false + }, + request: { group_docker_bridges_by_host: true, hide_ports: false, hide_vm_title_on_docker_container: false, show_gateway_in_left_zone: true, left_zone_service_categories: ['DNS', 'ReverseProxy'], - hide_service_categories: [], - network_ids: [] + hide_service_categories: [] } }; -export const topology = writable(); export const topologyOptions = writable(loadOptionsFromStorage()); export const optionsPanelExpanded = writable(loadExpandedFromStorage()); -// Initialize network_ids with the first network when networks are loaded -let networksInitialized = false; - -if (browser) { 
- networks.subscribe(($networks) => { - if (!networksInitialized && $networks.length > 0) { - networksInitialized = true; - topologyOptions.update((opts) => { - // Only set default if network_ids is empty - if (opts.request_options.network_ids.length === 0 && $networks[0]) { - opts.request_options.network_ids = [$networks[0].id]; - } - return opts; - }); - } - }); +function initializeSubscriptions() { + if (initialized) { + return; + } - let lastRequestOptions = JSON.stringify(get(topologyOptions).request_options); + initialized = true; - // Subscribe to options changes and save to localStorage - if (typeof window !== 'undefined') { - topologyOptions.subscribe((options) => { - saveOptionsToStorage(options); + if (browser) { + topologies.subscribe(($topologies) => { + if (!topologyInitialized && $topologies.length > 0) { + const currentTopology = $topologies[0]; + topology.set(currentTopology); + topologyOptions.set(currentTopology.options); + lastTopologyId = currentTopology.id; + topologyInitialized = true; + } }); - optionsPanelExpanded.subscribe((expanded) => { - saveExpandedToStorage(expanded); - }); + if (typeof window !== 'undefined') { + let optionsUpdateTimeout: ReturnType | null = null; - topologyOptions.subscribe(($options) => { - const current = JSON.stringify($options.request_options); - if (current !== lastRequestOptions) { - lastRequestOptions = current; - if (networksInitialized) getTopology(); - } - }); + topologyOptions.subscribe(async (options) => { + saveOptionsToStorage(options); + + // Clear any pending timeout + if (optionsUpdateTimeout) { + clearTimeout(optionsUpdateTimeout); + } + + // Debounce the API call + optionsUpdateTimeout = setTimeout(async () => { + const currentTopology = get(topology); + if (currentTopology) { + const updatedTopology = { + ...currentTopology, + options: options + }; + await updateTopology(updatedTopology); + } + }, 500); + }); + + topology.subscribe((topology) => { + if (topology && lastTopologyId != topology.id) 
{ + lastTopologyId = topology.id; + topologyOptions.set(topology.options); + } + }); + + optionsPanelExpanded.subscribe((expanded) => { + saveExpandedToStorage(expanded); + }); + } } } +// Initialize immediately +initializeSubscriptions(); + export function resetTopologyOptions(): void { - networksInitialized = false; + // networksInitialized = false; topologyOptions.set(structuredClone(defaultOptions)); if (browser) { localStorage.removeItem(OPTIONS_STORAGE_KEY); @@ -139,18 +168,128 @@ function saveExpandedToStorage(expanded: boolean): void { } } -export async function getTopology() { - const options = get(topologyOptions); - return await api.request('/topology', topology, (topology) => topology, { +export async function refreshTopology(data: Topology) { + // Updated topology returns through SSE + await api.request( + `/topology/${data.id}/refresh`, + topologies, + (updated, current) => current.map((t) => (t.id == updated.id ? updated : t)), + { + method: 'POST', + body: JSON.stringify(data) + } + ); +} + +export async function lockTopology(data: Topology) { + const result = await api.request( + `/topology/${data.id}/lock`, + topologies, + (updated, current) => current.map((t) => (t.id == updated.id ? updated : t)), + { + method: 'POST', + body: JSON.stringify(data) + } + ); + + if (result && result.success && result.data && get(topology)?.id === data.id) { + topology.set(result.data); + } + + return result; +} + +export async function unlockTopology(data: Topology) { + const result = await api.request( + `/topology/${data.id}/unlock`, + topologies, + (updated, current) => current.map((t) => (t.id == updated.id ? 
updated : t)), + { + method: 'POST', + body: JSON.stringify(data) + } + ); + + if (result && result.success && result.data && get(topology)?.id === data.id) { + topology.set(result.data); + } + + return result; +} + +export async function getTopologies() { + await api.request('/topology', topologies, (topologies) => topologies, { + method: 'GET' + }); +} + +export async function rebuildTopology(data: Topology) { + // Updated topology returns through SSE + await api.request(`/topology/${data.id}/rebuild`, null, null, { method: 'POST', - body: JSON.stringify(options.request_options) + body: JSON.stringify(data) + }); +} + +export async function updateTopology(data: Topology) { + // Updated topology returns through SSE + await api.request(`/topology/${data.id}`, null, null, { + method: 'PUT', + body: JSON.stringify(data) }); } -// Cycle through anchor positions in logical order -export function getNextHandle(currentHandle: EdgeHandle): EdgeHandle { - const cycle = [EdgeHandle.Top, EdgeHandle.Right, EdgeHandle.Bottom, EdgeHandle.Left]; - const currentIndex = cycle.indexOf(currentHandle); - const nextIndex = (currentIndex + 1) % cycle.length; - return cycle[nextIndex]; +export async function createTopology(data: Topology) { + const result = await api.request( + `/topology`, + topologies, + (newTopology, current) => [...current, newTopology], + { method: 'POST', body: JSON.stringify(data) } + ); + + if (result && result.data && result.success) { + topology.set(result.data); + } + + return result; +} + +export async function deleteTopology(id: string) { + const result = await api.request( + `/topology/${id}`, + topologies, + (_, current) => current.filter((t) => t.id != id), + { method: 'DELETE' } + ); + + if (result && result.data && result.success && get(topologies).length > 0) { + topology.set(get(topologies)[0]); + } +} + +export function createEmptyTopologyFormData(): Topology { + return { + id: uuidv4Sentinel, + created_at: utcTimeZoneSentinel, + updated_at: 
utcTimeZoneSentinel, + name: '', + network_id: get(networks)[0]?.id || '', + edges: [], + nodes: [], + options: structuredClone(defaultOptions), + hosts: [], + services: [], + subnets: [], + groups: [], + is_stale: false, + last_refreshed: utcTimeZoneSentinel, + is_locked: false, + removed_groups: [], + removed_hosts: [], + removed_services: [], + removed_subnets: [], + locked_at: null, + locked_by: null, + parent_id: null + }; } diff --git a/ui/src/lib/features/topology/types/base.ts b/ui/src/lib/features/topology/types/base.ts index 0a7e6f1e..61cfa1fe 100644 --- a/ui/src/lib/features/topology/types/base.ts +++ b/ui/src/lib/features/topology/types/base.ts @@ -1,7 +1,35 @@ +import type { Group } from '$lib/features/groups/types/base'; +import type { Host } from '$lib/features/hosts/types/base'; import type { Service } from '$lib/features/services/types/base'; +import type { Subnet } from '$lib/features/subnets/types/base'; import type { ColorStyle } from '$lib/shared/utils/styling'; import type { IconComponent } from '$lib/shared/utils/types'; +export interface Topology { + edges: TopologyEdge[]; + nodes: TopologyNode[]; + options: TopologyOptions; + name: string; + id: string; + created_at: string; + updated_at: string; + network_id: string; + hosts: Host[]; + subnets: Subnet[]; + groups: Group[]; + services: Service[]; + is_stale: boolean; + last_refreshed: string; + is_locked: boolean; + locked_at: string | null; + locked_by: string | null; + removed_hosts: string[]; + removed_services: string[]; + removed_subnets: string[]; + removed_groups: string[]; + parent_id: string | null; +} + export interface NodeBase { id: string; node_type: string; @@ -10,17 +38,22 @@ export interface NodeBase { header: string | null; } -type NodeType = - | { - node_type: 'InterfaceNode'; - subnet_id: string; - host_id: string; - interface_id: string; - is_infra: boolean; - } - | { node_type: 'SubnetNode'; infra_width: number }; +type NodeType = InterfaceNode | SubnetNode; + +export 
interface InterfaceNode extends Record { + node_type: 'InterfaceNode'; + subnet_id: string; + host_id: string; + interface_id: string; + is_infra: boolean; +} + +export interface SubnetNode extends Record { + node_type: 'SubnetNode'; + infra_width: number; +} -type TopologyNode = NodeBase & NodeType & Record; +export type TopologyNode = NodeBase & NodeType & Record; export interface NodeRenderData { headerText: string | null; @@ -39,7 +72,8 @@ export interface SubnetRenderData { colorHelper: ColorStyle; } -interface TopologyEdgeBase extends Record { +interface EdgeBase extends Record { + id: string; source: string; label: string; target: string; @@ -49,11 +83,11 @@ interface TopologyEdgeBase extends Record { } export type TopologyEdge = - | (TopologyEdgeBase & RequestPathEdge) - | (TopologyEdgeBase & HubAndSpokeEdge) - | (TopologyEdgeBase & InterfaceEdge) - | (TopologyEdgeBase & ServiceVirtualizationEdge) - | (TopologyEdgeBase & HostVirtualizationEdge); + | (EdgeBase & RequestPathEdge) + | (EdgeBase & HubAndSpokeEdge) + | (EdgeBase & InterfaceEdge) + | (EdgeBase & ServiceVirtualizationEdge) + | (EdgeBase & HostVirtualizationEdge); export interface RequestPathEdge { edge_type: 'RequestPath'; @@ -85,13 +119,6 @@ export interface HostVirtualizationEdge { vm_service_id: string; } -export interface TopologyResponse { - edge_property: string; - edges: Array<[number, number, TopologyEdge]>; - node_holes: unknown[]; - nodes: TopologyNode[]; -} - export enum EdgeHandle { Top = 'Top', Right = 'Right', @@ -99,20 +126,23 @@ export enum EdgeHandle { Left = 'Left' } +export interface TopologyOptions { + local: TopologyLocalOptions; + request: TopologyRequestOptions; +} + +export interface TopologyLocalOptions { + left_zone_title: string; + no_fade_edges: boolean; + hide_resize_handles: boolean; + hide_edge_types: string[]; +} + export interface TopologyRequestOptions { group_docker_bridges_by_host: boolean; hide_vm_title_on_docker_container: boolean; hide_ports: boolean; - 
network_ids: string[]; show_gateway_in_left_zone: boolean; left_zone_service_categories: string[]; hide_service_categories: string[]; } - -export interface TopologyOptions { - left_zone_title: string; - no_fade_edges: boolean; - hide_resize_handles: boolean; - hide_edge_types: string[]; - request_options: TopologyRequestOptions; -} diff --git a/ui/src/lib/features/users/components/InviteCard.svelte b/ui/src/lib/features/users/components/InviteCard.svelte index e326dd31..057afd7f 100644 --- a/ui/src/lib/features/users/components/InviteCard.svelte +++ b/ui/src/lib/features/users/components/InviteCard.svelte @@ -48,7 +48,7 @@ ...(canManage ? [ { - label: 'Revoke Invite', + label: 'Revoke', icon: UserX, class: 'btn-icon-danger', onClick: () => handleRevokeInvite() @@ -59,4 +59,4 @@ }; - + diff --git a/ui/src/lib/features/users/components/UserCard.svelte b/ui/src/lib/features/users/components/UserCard.svelte index 3b0b41a6..d704595b 100644 --- a/ui/src/lib/features/users/components/UserCard.svelte +++ b/ui/src/lib/features/users/components/UserCard.svelte @@ -7,7 +7,17 @@ import { currentUser } from '$lib/features/auth/store'; import { deleteUser } from '../store'; - let { user, viewMode }: { user: User; viewMode: 'card' | 'list' } = $props(); + let { + user, + viewMode, + selected, + onSelectionChange + }: { + user: User; + viewMode: 'card' | 'list'; + selected: boolean; + onSelectionChange: (selected: boolean) => void; + } = $props(); // Force Svelte to track metadata reactivity $effect(() => { @@ -72,4 +82,4 @@ }); - + diff --git a/ui/src/lib/features/users/components/UserTab.svelte b/ui/src/lib/features/users/components/UserTab.svelte index 9fb9fd6d..c3dfb0fa 100644 --- a/ui/src/lib/features/users/components/UserTab.svelte +++ b/ui/src/lib/features/users/components/UserTab.svelte @@ -1,5 +1,5 @@
- + + + {#if canInviteUsers} + + {/if} + + {#if $loading} @@ -110,10 +115,21 @@ {:else} - - {#snippet children(item: UserOrInvite, viewMode: 'card' | 'list')} + item.id} + > + {#snippet children( + item: UserOrInvite, + viewMode: 'card' | 'list', + isSelected: boolean, + onSelectionChange: (selected: boolean) => void + )} {#if isUser(item)} - + {:else} {/if} diff --git a/ui/src/lib/features/users/store.ts b/ui/src/lib/features/users/store.ts index 2a9354b5..b66956ea 100644 --- a/ui/src/lib/features/users/store.ts +++ b/ui/src/lib/features/users/store.ts @@ -19,3 +19,14 @@ export async function deleteUser(id: string) { { method: 'DELETE' } ); } + +export async function bulkDeleteUsers(ids: string[]) { + const result = await api.request( + `/users/bulk-delete`, + users, + (_, current) => current.filter((k) => !ids.includes(k.id)), + { method: 'POST', body: JSON.stringify(ids) } + ); + + return result; +} diff --git a/ui/src/lib/features/users/types.ts b/ui/src/lib/features/users/types.ts index b2b6bbeb..e70b71d9 100644 --- a/ui/src/lib/features/users/types.ts +++ b/ui/src/lib/features/users/types.ts @@ -15,13 +15,15 @@ export interface User { export type UserOrgPermissions = 'Owner' | 'Admin' | 'Member' | 'Visualizer' | 'None'; export type UserOrInvite = - | { type: 'user'; data: User } - | { type: 'invite'; data: OrganizationInvite }; + | { type: 'user'; data: User; id: string } + | { type: 'invite'; data: OrganizationInvite; id: string }; -export function isUser(item: UserOrInvite): item is { type: 'user'; data: User } { +export function isUser(item: UserOrInvite): item is { type: 'user'; data: User; id: string } { return item.type === 'user'; } -export function isInvite(item: UserOrInvite): item is { type: 'invite'; data: OrganizationInvite } { +export function isInvite( + item: UserOrInvite +): item is { type: 'invite'; data: OrganizationInvite; id: string } { return item.type === 'invite'; } diff --git a/ui/src/lib/shared/components/data/DataControls.svelte 
b/ui/src/lib/shared/components/data/DataControls.svelte index 47ec2468..56eae9d2 100644 --- a/ui/src/lib/shared/components/data/DataControls.svelte +++ b/ui/src/lib/shared/components/data/DataControls.svelte @@ -6,7 +6,10 @@ ChevronDown, ChevronUp, LayoutGrid, - List + List, + Trash2, + CheckSquare, + Square } from 'lucide-svelte'; import type { FieldConfig } from './types'; import { onMount, type Snippet } from 'svelte'; @@ -17,12 +20,16 @@ items = $bindable([]), fields = $bindable([]), storageKey = null, - children + onBulkDelete = null, + children, + getItemId }: { items: T[]; fields: FieldConfig[]; storageKey?: string | null; - children: Snippet<[T, 'card' | 'list']>; // Snippet that takes two arguments (the item and viewMode) + onBulkDelete?: ((ids: string[]) => Promise) | null; + children: Snippet<[T, 'card' | 'list', boolean, (selected: boolean) => void]>; + getItemId: (item: T) => string; } = $props(); // Search state @@ -58,13 +65,16 @@ // View mode state let viewMode = $state<'card' | 'list'>('card'); + // Bulk selection state (always enabled when onBulkDelete is provided) + let selectedIds = new SvelteSet(); + // Serializable version of state for localStorage interface SerializableState { searchQuery: string; filterState: { [key: string]: { type: 'string' | 'boolean'; - values: string[]; // Convert Set to Array for JSON + values: string[]; showTrue?: boolean; showFalse?: boolean; }; @@ -95,7 +105,7 @@ const saved = state.filterState[key]; restoredFilterState[key] = { ...saved, - values: new SvelteSet(saved.values) // Convert Array back to Set + values: new SvelteSet(saved.values) }; }); filterState = restoredFilterState; @@ -135,7 +145,7 @@ const filter = filterState[key]; serializableFilterState[key] = { ...filter, - values: Array.from(filter.values) // Convert Set to Array for JSON + values: Array.from(filter.values) }; }); @@ -185,7 +195,7 @@ const unsubscribe = $effect.root(() => { $effect(() => { if (storageKey) { - // Track all state that should 
trigger saves - void() prevents lint errors + // Track all state that should trigger saves void searchQuery; void filterState; void sortState; @@ -211,7 +221,6 @@ if (field.getValue) { return field.getValue(item); } - // Default: try to access the key directly // eslint-disable-next-line @typescript-eslint/no-explicit-any return (item as any)[field.key] ?? null; } @@ -423,6 +432,36 @@ selectedGroupField = null; } + // Select all visible items + function selectAll() { + processedItems.forEach((item) => { + const itemId = getItemId(item); + if (itemId) selectedIds.add(itemId); + }); + } + + // Deselect all items + function selectNone() { + selectedIds.clear(); + } + + // Handle bulk delete + async function handleBulkDelete() { + if (!onBulkDelete || selectedIds.size === 0) return; + + try { + await onBulkDelete(Array.from(selectedIds)); + selectedIds.clear(); + } catch (error) { + console.error('Bulk delete failed:', error); + } + } + + // Derived states + let allSelected = $derived( + processedItems.length > 0 && selectedIds.size === processedItems.length + ); + // Check if any filters are active let hasActiveFilters = $derived( fields.some((field) => { @@ -478,6 +517,22 @@ {/if} + + {#if onBulkDelete} + + {/if} + @@ -541,6 +596,28 @@ {/if}
+ + {#if onBulkDelete && selectedIds.size > 0} +
+
+ + {selectedIds.size} + {selectedIds.size === 1 ? 'item' : 'items'} selected + + +
+ +
+ {/if} + {#if showFilters}
@@ -645,7 +722,16 @@ : 'grid grid-cols-1 gap-4 md:grid-cols-2 lg:grid-cols-3'} > {#each groupItems as item (item)} - {@render children(item, viewMode)} + + {@const itemId = getItemId(item)} + {@const isSelected = selectedIds.has(itemId)} + {@render children(item, viewMode, isSelected, (selected) => { + if (selected) { + selectedIds.add(itemId); + } else { + selectedIds.delete(itemId); + } + })} {/each}
@@ -659,7 +745,15 @@ : 'grid grid-cols-1 gap-4 md:grid-cols-2 lg:grid-cols-3'} > {#each processedItems as item (item)} - {@render children(item, viewMode)} + {@const itemId = getItemId(item)} + {@const isSelected = selectedIds.has(itemId)} + {@render children(item, viewMode, isSelected, (selected) => { + if (selected) { + selectedIds.add(itemId); + } else { + selectedIds.delete(itemId); + } + })} {/each}
{/if} diff --git a/ui/src/lib/shared/components/data/EntityList.svelte b/ui/src/lib/shared/components/data/EntityList.svelte new file mode 100644 index 00000000..944cbf9c --- /dev/null +++ b/ui/src/lib/shared/components/data/EntityList.svelte @@ -0,0 +1,21 @@ + + +{#if items.length > 0} +
+ {#if title.length > 0} +

+ {title} ({items.length}) +

+ {/if} +
    + {#each items as item (item.id)} +
  • + • {item.name} +
  • + {/each} +
+
+{/if} diff --git a/ui/src/lib/shared/components/data/GenericCard.svelte b/ui/src/lib/shared/components/data/GenericCard.svelte index 4d3acaaa..d69c174b 100644 --- a/ui/src/lib/shared/components/data/GenericCard.svelte +++ b/ui/src/lib/shared/components/data/GenericCard.svelte @@ -13,8 +13,11 @@ iconColor?: string; actions?: CardAction[]; fields?: CardField[]; - children?: Snippet; // Optional additional content + children?: Snippet; viewMode?: 'card' | 'list'; + selected?: boolean; + onSelectionChange?: (selected: boolean) => void; + selectable?: boolean; } let { @@ -27,7 +30,10 @@ actions = [], fields = [], children, - viewMode = 'card' + viewMode = 'card', + selected = false, + selectable = true, + onSelectionChange = () => {} }: Props = $props(); // Configuration for list view @@ -38,52 +44,77 @@ function isArrayValue(value: string | any[]): value is any[] { return Array.isArray(value); } + + function handleCheckboxChange(e: Event) { + const target = e.target as HTMLInputElement; + onSelectionChange(target.checked); + }
+ + {#if selectable} +
+ e.stopPropagation()} + class="h-5 w-5 cursor-pointer rounded border-gray-600 bg-gray-700 text-blue-600 focus:ring-2 focus:ring-blue-500" + /> +
+ {/if} +
{#if Icon} {/if} -
- {#if link} - - {title} - - {:else} -

- {title} -

- {/if} +
+
+
+ {#if link} + + {title} + + {:else} +

+ {title} +

+ {/if} +
+ {#if status} +
+ +
+ {/if} +
{#if subtitle}

{subtitle}

{/if} - {#if status && viewMode == 'list'} -
- -
- {/if}
- {#if status && viewMode === 'card'} - - {/if}
@@ -185,6 +216,8 @@
{/if} + + {#if actions.length > 0}
- {#each actions as action (action.label)} + {#each actions as action, index (action.label)} + {@const isLeftEdge = index === 0} + {@const isRightEdge = index === actions.length - 1} {/each}
diff --git a/ui/src/lib/shared/components/data/GithubStars.svelte b/ui/src/lib/shared/components/data/GithubStars.svelte index e119df97..d34d5dff 100644 --- a/ui/src/lib/shared/components/data/GithubStars.svelte +++ b/ui/src/lib/shared/components/data/GithubStars.svelte @@ -20,6 +20,8 @@ method: 'GET' }); + console.log(response); + if (response) { error = false; } else { diff --git a/ui/src/lib/shared/components/feedback/BaseInlineFeedback.svelte b/ui/src/lib/shared/components/feedback/BaseInlineFeedback.svelte new file mode 100644 index 00000000..78ab4d76 --- /dev/null +++ b/ui/src/lib/shared/components/feedback/BaseInlineFeedback.svelte @@ -0,0 +1,61 @@ + + +{#if !dismissed} +
+
+ +
+

{title}

+ {#if body} +

{body}

+ {/if} +
+ {#if dismissableKey} + + {/if} +
+
+{/if} diff --git a/ui/src/lib/shared/components/feedback/InlineDanger.svelte b/ui/src/lib/shared/components/feedback/InlineDanger.svelte new file mode 100644 index 00000000..1f67f211 --- /dev/null +++ b/ui/src/lib/shared/components/feedback/InlineDanger.svelte @@ -0,0 +1,24 @@ + + + diff --git a/ui/src/lib/shared/components/feedback/InlineInfo.svelte b/ui/src/lib/shared/components/feedback/InlineInfo.svelte index 81084526..dfd7a279 100644 --- a/ui/src/lib/shared/components/feedback/InlineInfo.svelte +++ b/ui/src/lib/shared/components/feedback/InlineInfo.svelte @@ -1,18 +1,24 @@ -
-
- -
-

{title}

- {#if body} -

{body}

- {/if} -
-
-
+ diff --git a/ui/src/lib/shared/components/feedback/InlineWarning.svelte b/ui/src/lib/shared/components/feedback/InlineWarning.svelte index be23e5fb..eb2397f1 100644 --- a/ui/src/lib/shared/components/feedback/InlineWarning.svelte +++ b/ui/src/lib/shared/components/feedback/InlineWarning.svelte @@ -1,18 +1,24 @@ -
-
- -
-

{title}

- {#if body} -

{body}

- {/if} -
-
-
+ diff --git a/ui/src/lib/shared/components/forms/selection/ListSelectItem.svelte b/ui/src/lib/shared/components/forms/selection/ListSelectItem.svelte index ea44e677..66deef79 100644 --- a/ui/src/lib/shared/components/forms/selection/ListSelectItem.svelte +++ b/ui/src/lib/shared/components/forms/selection/ListSelectItem.svelte @@ -26,7 +26,7 @@
-
+
{displayComponent.getLabel(item, context)} diff --git a/ui/src/lib/shared/components/forms/selection/display/ServiceDisplay.svelte b/ui/src/lib/shared/components/forms/selection/display/ServiceDisplay.svelte index 85f6f12e..1e7e19e5 100644 --- a/ui/src/lib/shared/components/forms/selection/display/ServiceDisplay.svelte +++ b/ui/src/lib/shared/components/forms/selection/display/ServiceDisplay.svelte @@ -1,5 +1,5 @@ + + + + diff --git a/ui/src/lib/shared/components/forms/selection/display/VirtualizationManagerServiceDisplay.svelte b/ui/src/lib/shared/components/forms/selection/display/VirtualizationManagerServiceDisplay.svelte index 8cb2329e..db43a941 100644 --- a/ui/src/lib/shared/components/forms/selection/display/VirtualizationManagerServiceDisplay.svelte +++ b/ui/src/lib/shared/components/forms/selection/display/VirtualizationManagerServiceDisplay.svelte @@ -1,5 +1,5 @@
@@ -19,15 +9,6 @@

{subtitle}

- {#each buttons as button (button)} - {#if button.ButtonComponent} - - {:else} - - {/if} - {/each} +
diff --git a/ui/src/lib/shared/stores/metadata.ts b/ui/src/lib/shared/stores/metadata.ts index 3d402e11..e11b4b67 100644 --- a/ui/src/lib/shared/stores/metadata.ts +++ b/ui/src/lib/shared/stores/metadata.ts @@ -30,6 +30,7 @@ export interface MetadataRegistry { edge_types: TypeMetadata[]; group_types: TypeMetadata[]; entities: EntityMetadata[]; + concepts: EntityMetadata[]; ports: TypeMetadata[]; discovery_types: TypeMetadata[]; billing_plans: TypeMetadata[]; @@ -266,6 +267,7 @@ export const discoveryTypes = createTypeMetadataHelpers('discovery_types'); export const billingPlans = createTypeMetadataHelpers('billing_plans'); export const features = createTypeMetadataHelpers('features'); export const permissions = createTypeMetadataHelpers('permissions'); +export const concepts = createEntityMetadataHelpers('concepts'); export async function getMetadata() { await api.request('/metadata', metadata, (metadata) => metadata, { diff --git a/ui/src/lib/shared/utils/dataLoader.ts b/ui/src/lib/shared/utils/dataLoader.ts index 74529f19..0873f43b 100644 --- a/ui/src/lib/shared/utils/dataLoader.ts +++ b/ui/src/lib/shared/utils/dataLoader.ts @@ -8,7 +8,7 @@ interface LoadDataOptions { // eslint-disable-next-line @typescript-eslint/no-explicit-any export function loadData(loaders: (() => Promise)[], options: LoadDataOptions = {}) { - const { loadingDelay = 500 } = options; + const { loadingDelay = 750 } = options; // If loadingDelay is 0, start with loading = true immediately const loading = writable(loadingDelay === 0); diff --git a/ui/src/lib/shared/utils/formatting.ts b/ui/src/lib/shared/utils/formatting.ts index 1b63b678..6812079a 100644 --- a/ui/src/lib/shared/utils/formatting.ts +++ b/ui/src/lib/shared/utils/formatting.ts @@ -33,7 +33,6 @@ export function formatTimestamp(timestamp: string): string { day: 'numeric', hour: '2-digit', minute: '2-digit', - second: '2-digit', hour12: false }); } catch { diff --git a/ui/src/lib/shared/utils/sse.ts b/ui/src/lib/shared/utils/sse.ts 
index 71fb3748..89465b7a 100644 --- a/ui/src/lib/shared/utils/sse.ts +++ b/ui/src/lib/shared/utils/sse.ts @@ -70,3 +70,39 @@ export class SSEClient { return this.eventSource?.readyState === EventSource.OPEN; } } + +/** + * Base SSE manager class that handles connection lifecycle + * Extend this for specific SSE use cases + */ +export abstract class BaseSSEManager { + protected client: SSEClient | null = null; + + /** + * Create the SSE configuration for this manager + * Must be implemented by subclasses + */ + protected abstract createConfig(): SSEConfig; + + connect() { + // Don't create a new client if already connected + if (this.isConnected()) { + return; + } + + const config = this.createConfig(); + this.client = new SSEClient(config); + this.client.connect(); + } + + disconnect() { + if (this.client) { + this.client.disconnect(); + this.client = null; + } + } + + isConnected(): boolean { + return this.client?.isConnected() ?? false; + } +} diff --git a/ui/src/routes/+page.svelte b/ui/src/routes/+page.svelte index 3d946356..af66a352 100644 --- a/ui/src/routes/+page.svelte +++ b/ui/src/routes/+page.svelte @@ -9,19 +9,20 @@ import { getServices, services } from '$lib/features/services/store'; import { watchStores } from '$lib/shared/utils/storeWatcher'; import { getNetworks } from '$lib/features/networks/store'; - import { startDiscoverySSE } from '$lib/features/discovery/SSEStore'; + import { discoverySSEManager } from '$lib/features/discovery/sse'; import { isAuthenticated, isCheckingAuth } from '$lib/features/auth/store'; - import type { Component } from 'svelte'; import { getMetadata } from '$lib/shared/stores/metadata'; + import { topologySSEManager } from '$lib/features/topology/sse'; // Read hash immediately during script initialization, before onMount const initialHash = typeof window !== 'undefined' ? 
window.location.hash.substring(1) : ''; let activeTab = $state(initialHash || 'topology'); - let activeComponent = $state(null); let appInitialized = $state(false); let sidebarCollapsed = $state(false); let dataLoadingStarted = $state(false); + // eslint-disable-next-line @typescript-eslint/no-explicit-any + let allTabs = $state>([]); // Update URL hash when activeTab changes $effect(() => { @@ -62,7 +63,8 @@ }) ].flatMap((w) => w); - startDiscoverySSE(); + topologySSEManager.connect(); + discoverySSEManager.connect(); appInitialized = true; } @@ -87,6 +89,9 @@ unsub(); }); + topologySSEManager.disconnect(); + discoverySSEManager.disconnect(); + if (typeof window !== 'undefined') { window.removeEventListener('hashchange', handleHashChange); } @@ -97,7 +102,7 @@
- +
@@ -107,10 +112,12 @@ class:ml-64={!sidebarCollapsed} >
- {#if activeComponent} - {@const ActiveTab = activeComponent} - - {/if} + + {#each allTabs as tab (tab.id)} +
+ +
+ {/each}
diff --git a/ui/static/services.json b/ui/static/services.json index 841a6f7f..088cd288 100644 --- a/ui/static/services.json +++ b/ui/static/services.json @@ -5,12 +5,6 @@ "logo_url": "/logos/netvisor-logo.png", "name": "NetVisor Server API" }, - { - "description": "NetVisor Daemon API for network scanning", - "discovery_pattern": "Endpoint response body from :60073/api/health contains netvisor", - "logo_url": "/logos/netvisor-logo.png", - "name": "NetVisor Daemon API" - }, { "description": "User invitation and management system for Jellyfin, Plex, Emby etc", "discovery_pattern": "Endpoint response body from :5690/static/manifest.json contains Wizarr", @@ -55,7 +49,7 @@ }, { "description": "Community-supported document management system", - "discovery_pattern": "Endpoint response body from :8000/ contains Paperless-ngx project", + "discovery_pattern": "Endpoint response body from :8000/static/frontend/en-US/manifest.webmanifest contains Paperless-ngx", "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/paperless-ngx.svg", "name": "Paperless-NGX" }, @@ -407,6 +401,12 @@ "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/cooler-control.svg", "name": "CoolerControl" }, + { + "description": "APC Network-Connected UPS", + "discovery_pattern": "Endpoint response body from :80/ contains Schneider Electric", + "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/apc.svg", + "name": "APC" + }, { "description": "A single pane of glass for managing clustered & non-clustered Proxmox nodes", "discovery_pattern": "Endpoint response body from :8443/ contains pdm-ui_bundle.js", @@ -689,6 +689,12 @@ "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/mariadb.svg", "name": "MariaDB" }, + { + "description": "Time series database", + "discovery_pattern": "8086/tcp is open", + "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/influxdb.svg", + "name": "InfluxDB" + }, { 
"description": "NoSQL document database", "discovery_pattern": "Endpoint response body from :5984/ contains couchdb", @@ -743,6 +749,12 @@ "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/docker.svg", "name": "Docker Swarm" }, + { + "description": "Docker", + "discovery_pattern": "No match pattern provided", + "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/docker.svg", + "name": "Docker" + }, { "description": "A generic docker container", "discovery_pattern": "All of: (Service is running in a docker container, A custom match pattern evaluated at runtime)", @@ -803,6 +815,12 @@ "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/slskd.svg", "name": "Slskd" }, + { + "description": "A NZB Files Downloader.", + "discovery_pattern": "Endpoint response body from :8080/Content/manifest.json contains SABnzbd", + "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/sabnzbd.svg", + "name": "SABnzbd" + }, { "description": "Media server for streaming personal content", "discovery_pattern": "Any of: (Endpoint response body from :32400/web/index.html contains Plex, Endpoint response status is between 401 and 401, and response from :32400 has header X-Plex-Protocol with value 1.0)", @@ -851,6 +869,12 @@ "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/emby.svg", "name": "Emby" }, + { + "description": "A companion application to Sonarr and Radarr that manages and downloads subtitles", + "discovery_pattern": "6767/tcp is open", + "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/bazarr.svg", + "name": "Bazarr" + }, { "description": "Self-hosted audiobook and podcast server.", "discovery_pattern": "13378/tcp is open", @@ -943,7 +967,7 @@ }, { "description": "Self-hosted cloud storage and collaboration platform", - "discovery_pattern": "Any of: (Endpoint response body from :80/core/css/server.css contains Nextcloud GmbH, Endpoint response body from 
:443/core/css/server.css contains Nextcloud GmbH)", + "discovery_pattern": "Endpoint response body from :80/core/css/server.css contains Nextcloud GmbH", "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/nextcloud.svg", "name": "NextCloud" }, @@ -1097,6 +1121,12 @@ "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/crowdsec.svg", "name": "CrowdSec" }, + { + "description": "Ubiquiti UniFi network controller", + "discovery_pattern": "Endpoint response body from :8443/manage contains UniFi", + "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/unifi.svg", + "name": "UniFi Controller" + }, { "description": "Ubiquiti UniFi wireless access point", "discovery_pattern": "All of: (MAC Address belongs to Ubiquiti Networks Inc, Endpoint response body from :80/ contains Unifi)", @@ -1170,9 +1200,9 @@ "name": "Dhcp Server" }, { - "description": "Docker", - "discovery_pattern": "No match pattern provided", - "logo_url": "https://cdn.jsdelivr.net/gh/homarr-labs/dashboard-icons/svg/docker.svg", - "name": "Docker" + "description": "NetVisor Daemon API for network scanning", + "discovery_pattern": "Endpoint response body from :60073/api/health contains netvisor", + "logo_url": "/logos/netvisor-logo.png", + "name": "NetVisor Daemon API" } ] \ No newline at end of file