diff --git a/.devcontainer/Dockerfile b/.devcontainer/Dockerfile index 44c88064..3ce0f921 100644 --- a/.devcontainer/Dockerfile +++ b/.devcontainer/Dockerfile @@ -1,18 +1,18 @@ -# Use the official ArchLinux image with a AUR helper installed -FROM ghcr.io/greyltc-org/archlinux-aur:paru +FROM debian:unstable ARG REMOTE_USER ARG REMOTE_UID ARG REMOTE_GID -RUN pacman-key --init -RUN pacman -Syu --noconfirm +# Update and upgrade the system +RUN apt update && apt upgrade -y -# Install required packages -RUN pacman -S base-devel git less nano openssh nano protobuf rustup jdk21-openjdk gradle mkdocs-material --noconfirm - -# Install gRPC UI -RUN aur-install grpcui-bin +# Install necessary packages +RUN apt install -y \ + build-essential git gh \ + protobuf-compiler \ + rustup \ + openjdk-21-jdk gradle RUN < Result<(), Box> { +fn main() -> Result<(), Box> { generate_build_info(); generate_grpc_code()?; Ok(()) @@ -14,10 +14,10 @@ fn main() -> Result<(), Box> { fn generate_build_info() { let out_dir = env::var("OUT_DIR").unwrap(); - let mut file = File::create(format!("{}/build_info.rs", out_dir)).unwrap(); + let mut file = File::create(format!("{out_dir}/build_info.rs")).unwrap(); - let commit = env::var("CURRENT_COMMIT").unwrap_or_else(|_| "unknown".to_string()); - let build = env::var("CURRENT_BUILD").unwrap_or_else(|_| "0".to_string()); + let commit = env::var("CURRENT_COMMIT").unwrap_or_else(|_| "unknown".to_owned()); + let build = env::var("CURRENT_BUILD").unwrap_or_else(|_| "0".to_owned()); let version = get_version_info().expect("Unable to get version information"); let protocol_version = @@ -28,14 +28,14 @@ fn generate_build_info() { writeln!(file, " major: {},", version.0).unwrap(); writeln!(file, " minor: {},", version.1).unwrap(); writeln!(file, " patch: {},", version.2).unwrap(); - writeln!(file, " build: {},", build).unwrap(); - writeln!(file, " commit: \"{}\",", commit).unwrap(); + writeln!(file, " build: {build},").unwrap(); + writeln!(file, " commit: 
\"{commit}\",").unwrap(); writeln!(file, " stage: Stage::{},", version.3).unwrap(); - writeln!(file, " protocol: {},", protocol_version).unwrap(); + writeln!(file, " protocol: {protocol_version},").unwrap(); writeln!(file, "}};").unwrap(); } -fn get_version_info() -> Result<(u16, u16, u16, String), Box> { +fn get_version_info() -> Result<(u16, u16, u16, String), Box> { let cargo_toml_content = fs::read_to_string("Cargo.toml")?; let cargo_toml: toml::Value = toml::from_str(&cargo_toml_content)?; @@ -53,7 +53,7 @@ fn get_version_info() -> Result<(u16, u16, u16, String), Box 1 { version_parts[1][0..1].to_uppercase() + &version_parts[1][1..] } else { - "Stable".to_string() + "Stable".to_owned() }; Ok(( version_numbers[0], @@ -66,7 +66,7 @@ fn get_version_info() -> Result<(u16, u16, u16, String), Box Result> { +fn get_protocol_version_info() -> Result> { let cargo_toml_content = fs::read_to_string("../Cargo.toml")?; let cargo_toml: toml::Value = toml::from_str(&cargo_toml_content)?; @@ -76,11 +76,11 @@ fn get_protocol_version_info() -> Result> { value.ok_or("Unable to get protocol version from Cargo.toml".into()) } -fn generate_grpc_code() -> Result<(), Box> { +fn generate_grpc_code() -> Result<(), Box> { tonic_build::configure() .build_server(false) .compile_protos( - &[format!("{}/admin/admin.proto", PROTO_PATH)], + &[format!("{PROTO_PATH}/manage/service.proto")], &[PROTO_PATH], )?; Ok(()) diff --git a/cli/src/application.rs b/cli/src/application.rs index a422317e..b82229d1 100644 --- a/cli/src/application.rs +++ b/cli/src/application.rs @@ -1,3 +1,5 @@ +use anyhow::Result; +use common::error::FancyError; use menu::{start::StartMenu, MenuResult}; use profile::Profiles; use simplelog::info; @@ -11,18 +13,22 @@ pub struct Cli { } impl Cli { - pub async fn new() -> Cli { - Cli { - profiles: Profiles::load_all(), - } + pub async fn new() -> Result { + Ok(Cli { + profiles: Profiles::init().await?, + }) } - pub async fn start(&mut self) { + pub async fn start(&mut self) -> 
Result<()> { loop { - if StartMenu::show(&mut self.profiles).await == MenuResult::Exit { - break; + match StartMenu::show(&mut self.profiles).await { + MenuResult::Exit => break, + MenuResult::Failed(error) => FancyError::print_fancy(&error, false), + _ => {} } } info!("ℹ Goodbye!"); + + Ok(()) } } diff --git a/cli/src/application/menu.rs b/cli/src/application/menu.rs index 8cfd19af..c0ffd645 100644 --- a/cli/src/application/menu.rs +++ b/cli/src/application/menu.rs @@ -1,7 +1,9 @@ use std::{fmt::Display, str::FromStr}; -use anyhow::Result; -use inquire::{validator::ValueRequiredValidator, Confirm, CustomType, MultiSelect, Select, Text}; +use anyhow::{Error, Result}; +use inquire::{ + validator::ValueRequiredValidator, Confirm, CustomType, InquireError, MultiSelect, Select, Text, +}; mod connection; mod create_profile; @@ -9,60 +11,67 @@ mod delete_profile; mod load_profile; pub mod start; -#[derive(PartialEq)] pub enum MenuResult { Success, - Aborted, - Failed, Exit, + Aborted, + Failed(Error), } pub struct MenuUtils; impl MenuUtils { - pub fn text(message: &str, help: &str) -> Result { + pub fn handle_error(error: InquireError) -> MenuResult { + match error { + InquireError::OperationCanceled | InquireError::OperationInterrupted => { + MenuResult::Exit + } + _ => MenuResult::Failed(error.into()), + } + } + + pub fn text(message: &str, help: &str) -> Result { Text::new(message) .with_validator(ValueRequiredValidator::default()) .with_help_message(help) .prompt() - .map_err(|error| error.into()) } pub fn parsed_value( message: &str, help: &str, error: &str, - ) -> Result { + ) -> Result { CustomType::::new(message) .with_error_message(error) .with_help_message(help) .prompt() - .map_err(|error| error.into()) } - pub fn confirm(message: &str) -> Result { + pub fn confirm(message: &str) -> Result { Confirm::new(message) .with_help_message("Type y or n") .prompt() - .map_err(|error| error.into()) } - pub fn select(message: &str, help: &str, options: Vec) -> Result { + 
pub fn select( + message: &str, + help: &str, + options: Vec, + ) -> Result { Select::new(message, options) .with_help_message(help) .prompt() - .map_err(|error| error.into()) } - pub fn select_no_help(message: &str, options: Vec) -> Result { - Select::new(message, options) - .prompt() - .map_err(|error| error.into()) + pub fn select_no_help(message: &str, options: Vec) -> Result { + Select::new(message, options).prompt() } - pub fn multi_select_no_help(message: &str, options: Vec) -> Result> { - MultiSelect::new(message, options) - .prompt() - .map_err(|error| error.into()) + pub fn multi_select_no_help( + message: &str, + options: Vec, + ) -> Result, InquireError> { + MultiSelect::new(message, options).prompt() } } diff --git a/cli/src/application/menu/connection.rs b/cli/src/application/menu/connection.rs index 23d88075..51feacd2 100644 --- a/cli/src/application/menu/connection.rs +++ b/cli/src/application/menu/connection.rs @@ -8,12 +8,12 @@ use crate::{ use super::MenuResult; -mod cloudlet; -mod deployment; mod general; +mod group; +mod node; mod resource; +mod server; mod start; -mod unit; mod user; pub struct ConnectionMenu; @@ -35,9 +35,9 @@ impl ConnectionMenu { ConnectionStartMenu::show(&mut profile, &mut connection, profiles).await } Err(error) => { - progress.fail(format!("Failed to connect to the controller: {}", error)); + progress.fail(format!("Failed to connect to the controller: {error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } diff --git a/cli/src/application/menu/connection/cloudlet/create_cloudlet.rs b/cli/src/application/menu/connection/cloudlet/create_cloudlet.rs deleted file mode 100644 index 4db5349a..00000000 --- a/cli/src/application/menu/connection/cloudlet/create_cloudlet.rs +++ /dev/null @@ -1,151 +0,0 @@ -use anyhow::Result; -use inquire::{ - validator::{Validation, ValueRequiredValidator}, - Text, -}; -use loading::Loading; -use simplelog::debug; - -use crate::application::{ - menu::{MenuResult, 
MenuUtils}, - network::{proto::cloudlet_management::CloudletValue, EstablishedConnection}, - profile::{Profile, Profiles}, -}; - -pub struct CreateCloudletMenu; - -struct Data { - cloudlets: Vec, - drivers: Vec, -} - -impl CreateCloudletMenu { - pub async fn show( - profile: &mut Profile, - connection: &mut EstablishedConnection, - _profiles: &mut Profiles, - ) -> MenuResult { - let progress = Loading::default(); - progress.text(format!( - "Retrieving all existing cloudlets from the controller \"{}\"...", - profile.name - )); - - match Self::get_required_data(connection).await { - Ok(data) => { - progress.success("Data retrieved successfully 👍"); - progress.end(); - - match Self::collect_cloudlet(&data) { - Ok(cloudlet) => { - let progress = Loading::default(); - progress.text(format!( - "Creating cloudlet \"{}\" on the controller \"{}\"...", - cloudlet.name, profile.name - )); - - match connection.client.create_cloudlet(cloudlet).await { - Ok(_) => { - progress.success("Cloudlet created successfully 👍. 
Remember to set the cloudlet to active, or the controller won't start units."); - progress.end(); - MenuResult::Success - } - Err(error) => { - progress.fail(format!("{}", error)); - progress.end(); - MenuResult::Failed - } - } - } - Err(error) => { - debug!("{}", error); - MenuResult::Failed - } - } - } - Err(error) => { - progress.fail(format!("{}", error)); - progress.end(); - MenuResult::Failed - } - } - } - - async fn get_required_data(connection: &mut EstablishedConnection) -> Result { - let cloudlets = connection.client.get_cloudlets().await?; - let drivers = connection.client.get_drivers().await?; - Ok(Data { cloudlets, drivers }) - } - - fn collect_cloudlet(data: &Data) -> Result { - let name = Self::get_cloudlet_name(data.cloudlets.clone())?; - let driver = MenuUtils::select("Which driver should the controller use to communicate with the backend of this cloudlet?", "This is essential for the controller to know how to communicate with the backend of this cloudlet. For example, is it a Pterodactyl node or a simple Docker host?", data.drivers.to_vec())?; - let child = Self::get_child_node()?; - let memory = Self::get_memory_limit()?; - let max_allocations = Self::get_allocations_limit()?; - let controller_address = MenuUtils::parsed_value( - "What is the hostname or address where the unit can reach the controller once started?", - "Example: https://cloud.your-network.net", - "Please enter a valid URL", - )?; - - Ok(CloudletValue { - name, - driver, - memory, - max_allocations, - child, - controller_address, - }) - } - - fn get_cloudlet_name(used_names: Vec) -> Result { - Text::new("What would you like to name this cloudlet?") - .with_help_message("Examples: hetzner-01, home-01, local-01") - .with_validator(ValueRequiredValidator::default()) - .with_validator(move |name: &str| { - if used_names.contains(&name.to_string()) { - Ok(Validation::Invalid( - "A cloudlet with this name already exists".into(), - )) - } else { - Ok(Validation::Valid) - } - }) - 
.prompt() - .map_err(|error| error.into()) - } - - fn get_memory_limit() -> Result> { - match MenuUtils::confirm( - "Would you like to limit the amount of memory the controller can use on this cloudlet?", - )? { - false => Ok(None), - true => Ok(Some(MenuUtils::parsed_value( - "How much memory should the controller be allowed to use on this cloudlet?", - "Example: 1024", - "Please enter a valid number", - )?)), - } - } - - fn get_allocations_limit() -> Result> { - match MenuUtils::confirm("Would you like to limit the number of units the controller can start on this cloudlet?")? - { - false => Ok(None), - true => Ok(Some(MenuUtils::parsed_value("How many units should the controller be allowed to start on this cloudlet?", "Example: 15", "Please enter a valid number")?)) - } - } - - fn get_child_node() -> Result> { - match MenuUtils::confirm("Does the specified driver need additional information to determine which node it should use in the backend? This is required when a driver manages multiple nodes.")? 
{ - false => Ok(None), - true => { - Ok(Some(Text::new("What is the name of the child node the controller should use?") - .with_help_message("Example: node0.gameservers.my-pterodactyl.net") - .with_validator(ValueRequiredValidator::default()) - .prompt()?)) - } - } - } -} diff --git a/cli/src/application/menu/connection/cloudlet/get_cloudlet.rs b/cli/src/application/menu/connection/cloudlet/get_cloudlet.rs deleted file mode 100644 index d4fdd497..00000000 --- a/cli/src/application/menu/connection/cloudlet/get_cloudlet.rs +++ /dev/null @@ -1,85 +0,0 @@ -use loading::Loading; -use simplelog::info; - -use crate::application::{ - menu::{MenuResult, MenuUtils}, - network::{proto::cloudlet_management::CloudletValue, EstablishedConnection}, - profile::{Profile, Profiles}, -}; - -pub struct GetCloudletMenu; - -impl GetCloudletMenu { - pub async fn show( - profile: &mut Profile, - connection: &mut EstablishedConnection, - _profiles: &mut Profiles, - ) -> MenuResult { - let progress = Loading::default(); - progress.text(format!( - "Retrieving available cloudlets from controller \"{}\"...", - profile.name - )); - - match connection.client.get_cloudlets().await { - Ok(cloudlets) => { - progress.success("Cloudlet data retrieved successfully 👍"); - progress.end(); - match MenuUtils::select_no_help( - "Select a cloudlet to view more details:", - cloudlets, - ) { - Ok(cloudlet) => { - let progress = Loading::default(); - progress.text(format!( - "Fetching details for cloudlet \"{}\" from controller \"{}\"...", - cloudlet, profile.name - )); - - match connection.client.get_cloudlet(&cloudlet).await { - Ok(details) => { - progress.success("Cloudlet details retrieved successfully 👍"); - progress.end(); - Self::display_details(&details); - MenuResult::Success - } - Err(err) => { - progress.fail(format!("{}", err)); - progress.end(); - MenuResult::Failed - } - } - } - Err(_) => MenuResult::Aborted, - } - } - Err(err) => { - progress.fail(format!("{}", err)); - progress.end(); - 
MenuResult::Failed - } - } - } - - fn display_details(cloudlet: &CloudletValue) { - info!(" 🖥 Cloudlet Information"); - info!(" Name: {}", cloudlet.name); - info!(" Driver: {}", cloudlet.driver); - if let Some(memory) = &cloudlet.memory { - info!(" Memory: {} MiB", memory); - } - if let Some(max_allocations) = &cloudlet.max_allocations { - info!( - " Max Allocations: {} Units", - max_allocations - ); - } - if let Some(child) = &cloudlet.child { - info!(" Child Node: {}", child); - } - info!( - " Controller Address: {}", - cloudlet.controller_address - ); - } -} diff --git a/cli/src/application/menu/connection/cloudlet/get_cloudlets.rs b/cli/src/application/menu/connection/cloudlet/get_cloudlets.rs deleted file mode 100644 index 15ee0986..00000000 --- a/cli/src/application/menu/connection/cloudlet/get_cloudlets.rs +++ /dev/null @@ -1,49 +0,0 @@ -use loading::Loading; -use simplelog::info; - -use crate::application::{ - menu::MenuResult, - network::EstablishedConnection, - profile::{Profile, Profiles}, -}; - -pub struct GetCloudletsMenu; - -impl GetCloudletsMenu { - pub async fn show( - profile: &mut Profile, - connection: &mut EstablishedConnection, - _profiles: &mut Profiles, - ) -> MenuResult { - let progress = Loading::default(); - progress.text(format!( - "Requesting cloudlet list from controller \"{}\"...", - profile.name - )); - - match connection.client.get_cloudlets().await { - Ok(cloudlets) => { - progress.success("Cloudlet data retrieved successfully 👍"); - progress.end(); - Self::display_cloudlets(&cloudlets); - MenuResult::Success - } - Err(error) => { - progress.fail(format!("{}", error)); - progress.end(); - MenuResult::Failed - } - } - } - - fn display_cloudlets(cloudlets: &[String]) { - info!(" 🖥 Available Cloudlets"); - if cloudlets.is_empty() { - info!(" No cloudlets found."); - } else { - for cloudlet in cloudlets { - info!(" - {}", cloudlet); - } - } - } -} diff --git a/cli/src/application/menu/connection/cloudlet/mod.rs 
b/cli/src/application/menu/connection/cloudlet/mod.rs deleted file mode 100644 index 0267c577..00000000 --- a/cli/src/application/menu/connection/cloudlet/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod create_cloudlet; -pub mod get_cloudlet; -pub mod get_cloudlets; diff --git a/cli/src/application/menu/connection/deployment/get_deployments.rs b/cli/src/application/menu/connection/deployment/get_deployments.rs deleted file mode 100644 index a840dc16..00000000 --- a/cli/src/application/menu/connection/deployment/get_deployments.rs +++ /dev/null @@ -1,49 +0,0 @@ -use loading::Loading; -use simplelog::info; - -use crate::application::{ - menu::MenuResult, - network::EstablishedConnection, - profile::{Profile, Profiles}, -}; - -pub struct GetDeploymentsMenu; - -impl GetDeploymentsMenu { - pub async fn show( - profile: &mut Profile, - connection: &mut EstablishedConnection, - _profiles: &mut Profiles, - ) -> MenuResult { - let progress = Loading::default(); - progress.text(format!( - "Requesting deployment list from controller \"{}\"...", - profile.name - )); - - match connection.client.get_deployments().await { - Ok(deployments) => { - progress.success("Deployment data retrieved successfully 👍"); - progress.end(); - Self::display_deployments(&deployments); - MenuResult::Success - } - Err(error) => { - progress.fail(format!("{}", error)); - progress.end(); - MenuResult::Failed - } - } - } - - fn display_deployments(deployments: &[String]) { - info!(" 🖥 Available Deployments"); - if deployments.is_empty() { - info!(" No deployments found."); - } else { - for deployment in deployments { - info!(" - {}", deployment); - } - } - } -} diff --git a/cli/src/application/menu/connection/deployment/mod.rs b/cli/src/application/menu/connection/deployment/mod.rs deleted file mode 100644 index 2363d306..00000000 --- a/cli/src/application/menu/connection/deployment/mod.rs +++ /dev/null @@ -1,3 +0,0 @@ -pub mod create_deployment; -pub mod get_deployment; -pub mod get_deployments; diff 
--git a/cli/src/application/menu/connection/general/get_versions.rs b/cli/src/application/menu/connection/general/get_versions.rs index a66da704..5c0a540c 100644 --- a/cli/src/application/menu/connection/general/get_versions.rs +++ b/cli/src/application/menu/connection/general/get_versions.rs @@ -27,7 +27,7 @@ impl GetVersionsMenu { match Self::get_required_data(connection).await { Ok((version, protocol)) => { - progress.success("Version data retrieved successfully 👍"); + progress.success("Data retrieved successfully 👍"); progress.end(); info!(" 🖥 Controller Info"); info!(" Version: {}", version); @@ -38,16 +38,16 @@ impl GetVersionsMenu { MenuResult::Success } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } async fn get_required_data(connection: &mut EstablishedConnection) -> Result<(String, u32)> { - let version = connection.client.get_controller_version().await?; - let protocol = connection.client.get_protocol_version().await?; + let version = connection.client.get_ctrl_ver().await?; + let protocol = connection.client.get_proto_ver().await?; Ok((version, protocol)) } } diff --git a/cli/src/application/menu/connection/general/request_stop.rs b/cli/src/application/menu/connection/general/request_stop.rs index 0497e1ed..ef6d5ee1 100644 --- a/cli/src/application/menu/connection/general/request_stop.rs +++ b/cli/src/application/menu/connection/general/request_stop.rs @@ -22,7 +22,7 @@ impl RequestStopMenu { let progress = Loading::default(); progress.text(format!("Stopping controller \"{}\"", profile.name)); match connection.client.request_stop().await { - Ok(_) => { + Ok(()) => { thread::sleep(Duration::from_secs(3)); progress.success("Controller stopped successfully 👍"); progress.end(); @@ -30,15 +30,15 @@ impl RequestStopMenu { } Err(error) => { progress.fail(format!( - "{}", - error + "{error}" )); progress.end(); - MenuResult::Failed + 
MenuResult::Failed(error) } } } - Ok(false) | Err(_) => MenuResult::Aborted, - } + Ok(false) => MenuResult::Aborted, + Err(error) => MenuUtils::handle_error(error), + } } } diff --git a/cli/src/application/menu/connection/deployment/create_deployment.rs b/cli/src/application/menu/connection/group/create_group.rs similarity index 52% rename from cli/src/application/menu/connection/deployment/create_deployment.rs rename to cli/src/application/menu/connection/group/create_group.rs index b98793fb..ba9881a5 100644 --- a/cli/src/application/menu/connection/deployment/create_deployment.rs +++ b/cli/src/application/menu/connection/group/create_group.rs @@ -3,23 +3,18 @@ use std::{fmt::Display, str::FromStr, vec}; use anyhow::{anyhow, Result}; use inquire::{ validator::{Validation, ValueRequiredValidator}, - MultiSelect, Text, + InquireError, MultiSelect, Text, }; use loading::Loading; -use simplelog::debug; use crate::application::{ menu::{MenuResult, MenuUtils}, network::{ proto::{ common::KeyValue, - deployment_management::{ - deployment_value::{Constraints, Scaling}, - DeploymentValue, - }, - unit_management::{ - unit_spec::{Fallback, Retention}, - UnitResources, UnitSpec, + manage::{ + group::{self, Constraints, Scaling}, + server::{DiskRetention, Fallback, Resources, Spec}, }, }, EstablishedConnection, @@ -27,14 +22,16 @@ use crate::application::{ profile::{Profile, Profiles}, }; -pub struct CreateDeploymentMenu; +use std::fmt::Write as _; + +pub struct CreateGroupMenu; struct Data { - deployments: Vec, - cloudlets: Vec, + groups: Vec, + nodes: Vec, } -impl CreateDeploymentMenu { +impl CreateGroupMenu { pub async fn show( profile: &mut Profile, connection: &mut EstablishedConnection, @@ -42,7 +39,7 @@ impl CreateDeploymentMenu { ) -> MenuResult { let progress = Loading::default(); progress.text(format!( - "Retrieving all existing deployments from the controller \"{}\"...", + "Retrieving all existing groups from the controller \"{}\"...", profile.name )); @@ -51,61 
+48,55 @@ impl CreateDeploymentMenu { progress.success("Data retrieved successfully 👍"); progress.end(); - match Self::collect_deployment(&data) { - Ok(deployment) => { + match Self::collect_group(&data) { + Ok(group) => { let progress = Loading::default(); progress.text(format!( - "Creating deployment \"{}\" on the controller \"{}\"...", - deployment.name, profile.name + "Creating group \"{}\" on the controller \"{}\"...", + group.name, profile.name )); - match connection.client.create_deployment(deployment).await { - Ok(_) => { - progress.success("Deployment created successfully 👍. Remember to set the deployment to active, or the controller won't start units."); + match connection.client.create_group(group).await { + Ok(()) => { + progress.success("Group created successfully 👍. Remember to set the group to active, or the controller won't start servers."); progress.end(); MenuResult::Success } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - Err(error) => { - debug!("{}", error); - MenuResult::Failed - } + Err(error) => MenuUtils::handle_error(error), } } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } async fn get_required_data(connection: &mut EstablishedConnection) -> Result { - let deployments = connection.client.get_deployments().await?; - let cloudlets = connection.client.get_cloudlets().await?; - Ok(Data { - deployments, - cloudlets, - }) + let groups = connection.client.get_groups().await?; + let nodes = connection.client.get_nodes().await?; + Ok(Data { groups, nodes }) } - fn collect_deployment(data: &Data) -> Result { - let name = Self::get_deployment_name(data.deployments.clone())?; - let cloudlets = Self::get_cloudlets(data.cloudlets.clone())?; + fn collect_group(data: &Data) -> Result { + let name = 
Self::get_group_name(data.groups.clone())?; + let nodes = Self::get_nodes(data.nodes.clone())?; let constraints = Self::collect_constraints()?; let scaling = Self::collect_scaling()?; let resources = Self::collect_resources()?; let spec = Self::collect_specification()?; - Ok(DeploymentValue { + Ok(group::Item { name, - cloudlets, + nodes, constraints: Some(constraints), scaling: Some(scaling), resources: Some(resources), @@ -113,147 +104,147 @@ impl CreateDeploymentMenu { }) } - fn get_deployment_name(used_names: Vec) -> Result { - Text::new("What would you like to name this deployment?") + fn get_group_name(used_names: Vec) -> Result { + Text::new("What would you like to name this group?") .with_help_message("Examples: lobby, mode-xyz") .with_validator(ValueRequiredValidator::default()) .with_validator(move |name: &str| { if used_names.contains(&name.to_string()) { Ok(Validation::Invalid( - "A deployment with this name already exists".into(), + "A group with this name already exists".into(), )) } else { Ok(Validation::Valid) } }) .prompt() - .map_err(|error| error.into()) } - fn get_cloudlets(cloudlets: Vec) -> Result> { - MultiSelect::new("What cloudlets should this deployment use?", cloudlets) - .prompt() - .map_err(|error| error.into()) + fn get_nodes(nodes: Vec) -> Result, InquireError> { + MultiSelect::new("What nodes should this group use?", nodes).prompt() } - fn collect_constraints() -> Result { - let minimum = MenuUtils::parsed_value( - "What is the minimum number of units that should always be online?", + fn collect_constraints() -> Result { + let min = MenuUtils::parsed_value( + "What is the minimum number of servers that should always be online?", "Example: 1", "Please enter a valid number", )?; - let maximum = MenuUtils::parsed_value( - "What is the maximum number of units that should always be online?", + let max = MenuUtils::parsed_value( + "What is the maximum number of servers that should always be online?", "Example: 10", "Please enter a valid 
number", )?; - let priority = MenuUtils::parsed_value("How important is this deployment compared to others? (This refers to one tick of the controller)", "Example: 0", "Please enter a valid number")?; + let prio = MenuUtils::parsed_value("How important is this group compared to others? (This refers to one tick of the controller)", "Example: 0", "Please enter a valid number")?; - Ok(Constraints { - minimum, - maximum, - priority, - }) + Ok(Constraints { min, max, prio }) } - fn collect_scaling() -> Result { - let start_threshold = MenuUtils::parsed_value::("At what percentage (0-100) of the max player count should the controller start a new unit?", "Example: 50", "Please enter a valid number")? / 100.0; - let stop_empty_units = - MenuUtils::confirm("Should the controller stop units that are empty for too long?")?; + fn collect_scaling() -> Result { + let start_threshold = MenuUtils::parsed_value::("At what percentage (0-100) of the max player count should the controller start a new server?", "Example: 50", "Please enter a valid number")? / 100.0; + let stop_empty = + MenuUtils::confirm("Should the controller stop servers that are empty for too long?")?; Ok(Scaling { start_threshold, - stop_empty_units, + stop_empty, }) } - fn collect_resources() -> Result { + fn collect_resources() -> Result { let memory = MenuUtils::parsed_value( - "How much memory should each unit have?", + "How much memory should each server have?", "Example: 2048", "Please enter a valid number", )?; let swap = MenuUtils::parsed_value( - "How much swap space should each unit have?", + "How much swap space should each server have?", "Example: 256", "Please enter a valid number", )?; let cpu = MenuUtils::parsed_value( - "How much CPU power should each unit have? (100 = one core)", + "How much CPU power should each server have? 
(100 = one core)", "Example: 500", "Please enter a valid number", )?; let io = MenuUtils::parsed_value( - "How many I/O operations should each unit be allowed to perform?", + "How many I/O operations should each server be allowed to perform?", "Example: 500", "Please enter a valid number", )?; let disk = MenuUtils::parsed_value( - "How much disk space should each unit use?", + "How much disk space should each server use?", "Example: 2048", "Please enter a valid number", )?; - let addresses = MenuUtils::parsed_value( - "How many addresses/ports should each unit have?", + let ports = MenuUtils::parsed_value( + "How many addresses/ports should each server have?", "Example: 5", "Please enter a valid number", )?; - Ok(UnitResources { + Ok(Resources { memory, swap, cpu, io, disk, - addresses, + ports, }) } - fn collect_specification() -> Result { - let image = MenuUtils::text("Which image should the unit use?", "Example: ubuntu:latest")?; + fn collect_specification() -> Result { + let img = MenuUtils::text( + "Which image should the server use?", + "Example: ubuntu:latest", + )?; let max_players = MenuUtils::parsed_value( - "What is the maximum number of players per unit?", + "What is the maximum number of players per server?", "Example: 20", "Please enter a valid number", )?; let settings = MenuUtils::parsed_value::( - "What settings should the controller pass to the driver when starting a unit?", + "What settings should the controller pass to the plugin when starting a server?", "Format: key=value,key=value,key=value,...", "Please check your syntax. Something seems wrong.", )? .key_values; - let environment = MenuUtils::parsed_value::( - "What environment variables should the controller pass to the driver when starting a unit?", + let env = MenuUtils::parsed_value::( + "What environment variables should the controller pass to the plugin when starting a server?", "Format: key=value,key=value,key=value,...", "Please check your syntax something is wrong", )? 
.key_values; - let disk_retention = MenuUtils::select_no_help( - "Should the unit's disk be retained after the unit stops?", - vec![Retention::Temporary, Retention::Permanent], + let retention = MenuUtils::select_no_help( + "Should the server's disk be retained after the server stops?", + vec![DiskRetention::Temporary, DiskRetention::Permanent], )?; let fallback = Self::collect_fallback()?; - Ok(UnitSpec { - image, + Ok(Spec { + img, max_players, settings, - environment, - disk_retention: Some(disk_retention as i32), - fallback: Some(fallback), + env, + retention: Some(retention as i32), + fallback, }) } - fn collect_fallback() -> Result { + fn collect_fallback() -> Result, InquireError> { let enabled = - MenuUtils::confirm("Should the controller treat these units as fallback units?")?; - let priority = MenuUtils::parsed_value( - "What is the priority of this fallback deployment?", + MenuUtils::confirm("Should the controller treat these servers as fallback servers?")?; + let prio = MenuUtils::parsed_value( + "What is the priority of this fallback group?", "Example: 0", "Please enter a valid number", )?; - Ok(Fallback { enabled, priority }) + if enabled { + Ok(Some(Fallback { prio })) + } else { + Ok(None) + } } } @@ -288,8 +279,8 @@ impl Display for KeyValueList { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { let mut result = String::new(); for pair in &self.key_values { - result.push_str(&format!("{}={},", pair.key, pair.value)); + write!(&mut result, "{}={},", pair.key, pair.value).expect("Failed to write to string"); } - write!(f, "{}", result) + write!(f, "{result}") } } diff --git a/cli/src/application/menu/connection/deployment/get_deployment.rs b/cli/src/application/menu/connection/group/get_group.rs similarity index 51% rename from cli/src/application/menu/connection/deployment/get_deployment.rs rename to cli/src/application/menu/connection/group/get_group.rs index 45566048..f0366bdd 100644 --- 
a/cli/src/application/menu/connection/deployment/get_deployment.rs +++ b/cli/src/application/menu/connection/group/get_group.rs @@ -3,13 +3,13 @@ use simplelog::{info, warn}; use crate::application::{ menu::{MenuResult, MenuUtils}, - network::{proto::deployment_management::DeploymentValue, EstablishedConnection}, + network::{proto::manage::group, EstablishedConnection}, profile::{Profile, Profiles}, }; -pub struct GetDeploymentMenu; +pub struct GetGroupMenu; -impl GetDeploymentMenu { +impl GetGroupMenu { pub async fn show( profile: &mut Profile, connection: &mut EstablishedConnection, @@ -17,106 +17,97 @@ impl GetDeploymentMenu { ) -> MenuResult { let progress = Loading::default(); progress.text(format!( - "Fetching all available deployments from the controller \"{}\"...", + "Fetching all available groups from the controller \"{}\"...", profile.name )); - match connection.client.get_deployments().await { - Ok(deployments) => { - progress.success("Deployment data retrieved successfully 👍"); + match connection.client.get_groups().await { + Ok(groups) => { + progress.success("Data retrieved successfully 👍"); progress.end(); - match MenuUtils::select_no_help( - "Select a deployment to view more details:", - deployments, - ) { - Ok(deployment) => { + match MenuUtils::select_no_help("Select a group to view more details:", groups) { + Ok(group) => { let progress = Loading::default(); progress.text(format!( - "Fetching details for deployment \"{}\" from controller \"{}\"...", - deployment, profile.name + "Fetching details for group \"{}\" from controller \"{}\"...", + group, profile.name )); - match connection.client.get_deployment(&deployment).await { - Ok(deployment_details) => { - progress.success("Deployment details retrieved successfully 👍"); + match connection.client.get_group(&group).await { + Ok(group_details) => { + progress.success("Group details retrieved successfully 👍"); progress.end(); - Self::display_details(&deployment_details); + 
Self::display_details(&group_details); MenuResult::Success } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - Err(_) => MenuResult::Aborted, + Err(error) => MenuUtils::handle_error(error), } } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - fn display_details(deployment_details: &DeploymentValue) { - info!(" 🖥 Deployment Details"); - info!(" Name: {}", deployment_details.name); + fn display_details(group: &group::Item) { + info!(" 🖥 Group Details"); + info!(" Name: {}", group.name); - if !deployment_details.cloudlets.is_empty() { - info!(" Cloudlets:"); - for cloudlet in &deployment_details.cloudlets { - info!(" - {}", cloudlet); - } + if group.nodes.is_empty() { + warn!(" Nodes: None"); } else { - warn!(" Cloudlets: None"); + info!(" Nodes:"); + for node in &group.nodes { + info!(" - {}", node); + } } - if let Some(constraints) = &deployment_details.constraints { + if let Some(constraints) = &group.constraints { info!(" Constraints:"); - info!(" Minimum: {}", constraints.minimum); - info!(" Maximum: {}", constraints.maximum); - info!(" Priority: {}", constraints.priority); + info!(" Minimum: {}", constraints.min); + info!(" Maximum: {}", constraints.max); + info!(" Priority: {}", constraints.prio); } else { warn!(" Constraints: None"); } - if let Some(scaling) = &deployment_details.scaling { + if let Some(scaling) = &group.scaling { info!(" Scaling:"); info!( " Start Threshold: {}%", scaling.start_threshold * 100.0 ); - info!( - " Stop Empty Units: {}", - scaling.stop_empty_units - ); + info!(" Stop Empty: {}", scaling.stop_empty); } else { warn!(" Scaling: None"); } - if let Some(resources) = &deployment_details.resources { + if let Some(resources) = &group.resources { info!(" Resources per Unit:"); info!(" Memory: {} MiB", 
resources.memory); info!(" Swap: {} MiB", resources.swap); info!(" CPU Cores: {}", resources.cpu / 100); info!(" IO: {}", resources.io); info!(" Disk Space: {} MiB", resources.disk); - info!( - " Addresses/Ports: {}", - resources.addresses - ); + info!(" Addresses/Ports: {}", resources.ports); } else { warn!(" Resources per Unit: None"); } - if let Some(spec) = &deployment_details.spec { + if let Some(spec) = &group.spec { info!(" Specification:"); - info!(" Image: {}", spec.image); + info!(" Image: {}", spec.img); info!( " Max Players per Unit: {}", spec.max_players @@ -126,18 +117,20 @@ impl GetDeploymentMenu { info!(" - {}: {}", setting.key, setting.value); } info!(" Environment Variables:"); - for env in &spec.environment { + for env in &spec.env { info!(" - {}: {}", env.key, env.value); } info!( " Disk Retention: {}", - spec.disk_retention.unwrap_or(0) + spec.retention.unwrap_or(0) ); - if let Some(fallback) = &spec.fallback { - info!(" Fallback:"); - info!(" Enabled: {}", fallback.enabled); - info!(" Priority: {}", fallback.priority); + if let Some(fallback) = spec.fallback { + info!(" Fallback: "); + info!(" Is fallback: Yes"); + info!(" Priority: {}", fallback.prio); + } else { + info!(" Fallback: None"); } } else { warn!(" Specification: None"); diff --git a/cli/src/application/menu/connection/group/get_groups.rs b/cli/src/application/menu/connection/group/get_groups.rs new file mode 100644 index 00000000..6c8604e5 --- /dev/null +++ b/cli/src/application/menu/connection/group/get_groups.rs @@ -0,0 +1,49 @@ +use loading::Loading; +use simplelog::info; + +use crate::application::{ + menu::MenuResult, + network::EstablishedConnection, + profile::{Profile, Profiles}, +}; + +pub struct GetGroupsMenu; + +impl GetGroupsMenu { + pub async fn show( + profile: &mut Profile, + connection: &mut EstablishedConnection, + _profiles: &mut Profiles, + ) -> MenuResult { + let progress = Loading::default(); + progress.text(format!( + "Requesting group list from controller 
\"{}\"...", + profile.name + )); + + match connection.client.get_groups().await { + Ok(groups) => { + progress.success("Data retrieved successfully 👍"); + progress.end(); + Self::display_groups(&groups); + MenuResult::Success + } + Err(error) => { + progress.fail(format!("{error}")); + progress.end(); + MenuResult::Failed(error) + } + } + } + + fn display_groups(groups: &[String]) { + info!(" 🖥 Available Groups"); + if groups.is_empty() { + info!(" No groups found."); + } else { + for group in groups { + info!(" - {}", group); + } + } + } +} diff --git a/cli/src/application/menu/connection/group/mod.rs b/cli/src/application/menu/connection/group/mod.rs new file mode 100644 index 00000000..25c3b52f --- /dev/null +++ b/cli/src/application/menu/connection/group/mod.rs @@ -0,0 +1,3 @@ +pub mod create_group; +pub mod get_group; +pub mod get_groups; diff --git a/cli/src/application/menu/connection/node/create_node.rs b/cli/src/application/menu/connection/node/create_node.rs new file mode 100644 index 00000000..b7395c12 --- /dev/null +++ b/cli/src/application/menu/connection/node/create_node.rs @@ -0,0 +1,150 @@ +use anyhow::Result; +use inquire::{ + validator::{Validation, ValueRequiredValidator}, + InquireError, Text, +}; +use loading::Loading; + +use crate::application::{ + menu::{MenuResult, MenuUtils}, + network::{proto::manage::node, EstablishedConnection}, + profile::{Profile, Profiles}, +}; + +pub struct CreateNodeMenu; + +struct Data { + nodes: Vec, + plugins: Vec, +} + +impl CreateNodeMenu { + pub async fn show( + profile: &mut Profile, + connection: &mut EstablishedConnection, + _profiles: &mut Profiles, + ) -> MenuResult { + let progress = Loading::default(); + progress.text(format!( + "Retrieving all existing nodes from the controller \"{}\"...", + profile.name + )); + + match Self::get_required_data(connection).await { + Ok(data) => { + progress.success("Data retrieved successfully 👍"); + progress.end(); + + match Self::collect_node(&data) { + Ok(node) => { 
+ let progress = Loading::default(); + progress.text(format!( + "Creating node \"{}\" on the controller \"{}\"...", + node.name, profile.name + )); + + match connection.client.create_node(node).await { + Ok(()) => { + progress.success("Node created successfully 👍. Remember to set the node to active, or the controller won't start servers."); + progress.end(); + MenuResult::Success + } + Err(error) => { + progress.fail(format!("{error}")); + progress.end(); + MenuResult::Failed(error) + } + } + } + Err(error) => MenuUtils::handle_error(error), + } + } + Err(error) => { + progress.fail(format!("{error}")); + progress.end(); + MenuResult::Failed(error) + } + } + } + + async fn get_required_data(connection: &mut EstablishedConnection) -> Result { + let nodes = connection.client.get_nodes().await?; + let plugins = connection.client.get_plugins().await?; + Ok(Data { nodes, plugins }) + } + + fn collect_node(data: &Data) -> Result { + let name = Self::get_node_name(data.nodes.clone())?; + let plugin = MenuUtils::select("Which plugin should the controller use to communicate with the backend of this node?", "This is essential for the controller to know how to communicate with the backend of this node. 
For example, is it a Pterodactyl node or a simple Docker host?", data.plugins.clone())?; + let child = Self::get_child_node()?; + let memory = Self::get_memory_limit()?; + let max = Self::get_servers_limit()?; + let ctrl_addr = MenuUtils::parsed_value( + "What is the hostname or address where the server can reach the controller once started?", + "Example: https://cloud.your-network.net", + "Please enter a valid URL", + )?; + + Ok(node::Item { + name, + plugin, + memory, + max, + child, + ctrl_addr, + }) + } + + fn get_node_name(used_names: Vec) -> Result { + Text::new("What would you like to name this node?") + .with_help_message("Examples: hetzner-01, home-01, local-01") + .with_validator(ValueRequiredValidator::default()) + .with_validator(move |name: &str| { + if used_names.contains(&name.to_string()) { + Ok(Validation::Invalid( + "A node with this name already exists".into(), + )) + } else { + Ok(Validation::Valid) + } + }) + .prompt() + } + + fn get_memory_limit() -> Result, InquireError> { + if MenuUtils::confirm( + "Would you like to limit the amount of memory the controller can use on this node?", + )? { + Ok(Some(MenuUtils::parsed_value( + "How much memory should the controller be allowed to use on this node?", + "Example: 1024", + "Please enter a valid number", + )?)) + } else { + Ok(None) + } + } + + fn get_servers_limit() -> Result, InquireError> { + if MenuUtils::confirm( + "Would you like to limit the number of servers the controller can start on this node?", + )? { + Ok(Some(MenuUtils::parsed_value( + "How many servers should the controller be allowed to start on this node?", + "Example: 15", + "Please enter a valid number", + )?)) + } else { + Ok(None) + } + } + + fn get_child_node() -> Result, InquireError> { + if MenuUtils::confirm("Does the specified plugin need additional information to determine which node it should use in the backend? This is required when a plugin manages multiple nodes.")? 
{ + Ok(Some(Text::new("What is the name of the child node the controller should use?") + .with_help_message("Example: node0.gameservers.my-pterodactyl.net") + .with_validator(ValueRequiredValidator::default()) + .prompt()?)) + } else { Ok(None) } + } +} diff --git a/cli/src/application/menu/connection/node/get_node.rs b/cli/src/application/menu/connection/node/get_node.rs new file mode 100644 index 00000000..ae642f6b --- /dev/null +++ b/cli/src/application/menu/connection/node/get_node.rs @@ -0,0 +1,76 @@ +use loading::Loading; +use simplelog::info; + +use crate::application::{ + menu::{MenuResult, MenuUtils}, + network::{proto::manage::node, EstablishedConnection}, + profile::{Profile, Profiles}, +}; + +pub struct GetNodeMenu; + +impl GetNodeMenu { + pub async fn show( + profile: &mut Profile, + connection: &mut EstablishedConnection, + _profiles: &mut Profiles, + ) -> MenuResult { + let progress = Loading::default(); + progress.text(format!( + "Retrieving available nodes from controller \"{}\"...", + profile.name + )); + + match connection.client.get_nodes().await { + Ok(nodes) => { + progress.success("Data retrieved successfully 👍"); + progress.end(); + match MenuUtils::select_no_help("Select a nodes to view more details:", nodes) { + Ok(node) => { + let progress = Loading::default(); + progress.text(format!( + "Fetching details for node \"{}\" from controller \"{}\"...", + node, profile.name + )); + + match connection.client.get_node(&node).await { + Ok(details) => { + progress.success("Node details retrieved successfully 👍"); + progress.end(); + Self::display_details(&details); + MenuResult::Success + } + Err(error) => { + progress.fail(format!("{error}")); + progress.end(); + MenuResult::Failed(error) + } + } + } + Err(error) => MenuUtils::handle_error(error), + } + } + Err(error) => { + progress.fail(format!("{error}")); + progress.end(); + MenuResult::Failed(error) + } + } + } + + fn display_details(node: &node::Item) { + info!(" 🖥 Node Information"); + 
info!(" Name: {}", node.name); + info!(" Plugin: {}", node.plugin); + if let Some(memory) = &node.memory { + info!(" Memory: {} MiB", memory); + } + if let Some(max) = &node.max { + info!(" Max Servers: {} Units", max); + } + if let Some(child) = &node.child { + info!(" Child Node: {}", child); + } + info!(" Controller Address: {}", node.ctrl_addr); + } +} diff --git a/cli/src/application/menu/connection/node/get_nodes.rs b/cli/src/application/menu/connection/node/get_nodes.rs new file mode 100644 index 00000000..d219333a --- /dev/null +++ b/cli/src/application/menu/connection/node/get_nodes.rs @@ -0,0 +1,49 @@ +use loading::Loading; +use simplelog::info; + +use crate::application::{ + menu::MenuResult, + network::EstablishedConnection, + profile::{Profile, Profiles}, +}; + +pub struct GetNodesMenu; + +impl GetNodesMenu { + pub async fn show( + profile: &mut Profile, + connection: &mut EstablishedConnection, + _profiles: &mut Profiles, + ) -> MenuResult { + let progress = Loading::default(); + progress.text(format!( + "Requesting nodes list from controller \"{}\"...", + profile.name + )); + + match connection.client.get_nodes().await { + Ok(nodes) => { + progress.success("Data retrieved successfully 👍"); + progress.end(); + Self::display_details(&nodes); + MenuResult::Success + } + Err(error) => { + progress.fail(format!("{error}")); + progress.end(); + MenuResult::Failed(error) + } + } + } + + fn display_details(nodes: &[String]) { + info!(" 🖥 Available Nodes"); + if nodes.is_empty() { + info!(" No nodes found."); + } else { + for node in nodes { + info!(" - {}", node); + } + } + } +} diff --git a/cli/src/application/menu/connection/node/mod.rs b/cli/src/application/menu/connection/node/mod.rs new file mode 100644 index 00000000..135a1f34 --- /dev/null +++ b/cli/src/application/menu/connection/node/mod.rs @@ -0,0 +1,3 @@ +pub mod create_node; +pub mod get_node; +pub mod get_nodes; diff --git a/cli/src/application/menu/connection/resource/delete_resource.rs 
b/cli/src/application/menu/connection/resource/delete_resource.rs index f28b6682..ad13943c 100644 --- a/cli/src/application/menu/connection/resource/delete_resource.rs +++ b/cli/src/application/menu/connection/resource/delete_resource.rs @@ -1,13 +1,13 @@ use anyhow::Result; +use inquire::InquireError; use loading::Loading; -use simplelog::debug; use crate::application::{ menu::{MenuResult, MenuUtils}, network::{ - proto::{ - resource_management::{DeleteResourceRequest, ResourceCategory}, - unit_management::SimpleUnitValue, + proto::manage::{ + resource::{Category, DelReq}, + server, }, EstablishedConnection, }, @@ -18,9 +18,9 @@ pub struct DeleteResourceMenu; // TODO: Maybe dont request everything at once, but only what is needed struct Data { - cloudlets: Vec, - deployments: Vec, - units: Vec, + nodes: Vec, + groups: Vec, + servers: Vec, } impl DeleteResourceMenu { @@ -46,79 +46,68 @@ impl DeleteResourceMenu { progress.text("Deleting resource..."); match connection.client.delete_resource(request).await { - Ok(_) => { + Ok(()) => { progress.success("Resource deleted successfully 👍."); progress.end(); MenuResult::Success } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - Err(error) => { - debug!("{}", error); - MenuResult::Failed - } + Err(error) => MenuUtils::handle_error(error), } } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } async fn get_required_data(connection: &mut EstablishedConnection) -> Result { - let cloudlets = connection.client.get_cloudlets().await?; - let deployments = connection.client.get_deployments().await?; - let units = connection.client.get_units().await?; + let nodes = connection.client.get_nodes().await?; + let groups = connection.client.get_groups().await?; + let servers = 
connection.client.get_servers().await?; Ok(Data { - cloudlets, - deployments, - units, + nodes, + groups, + servers, }) } - fn collect_delete_resource(data: &Data) -> Result { + fn collect_delete_resource(data: &Data) -> Result { let category = MenuUtils::select_no_help( "What type of resource to you want to delete?", - vec![ - ResourceCategory::Cloudlet, - ResourceCategory::Deployment, - ResourceCategory::Unit, - ], + vec![Category::Node, Category::Group, Category::Server], )?; match category { - ResourceCategory::Cloudlet => { - let cloudlet = MenuUtils::select_no_help( - "Select the cloudlet to delete", - data.cloudlets.clone(), - )?; - Ok(DeleteResourceRequest { + Category::Node => { + let node = + MenuUtils::select_no_help("Select the node to delete", data.nodes.clone())?; + Ok(DelReq { category: category as i32, - id: cloudlet, + id: node, }) } - ResourceCategory::Deployment => { - let deployment = MenuUtils::select_no_help( - "Select the deployment to delete", - data.deployments.clone(), - )?; - Ok(DeleteResourceRequest { + Category::Group => { + let group = + MenuUtils::select_no_help("Select the group to delete", data.groups.clone())?; + Ok(DelReq { category: category as i32, - id: deployment, + id: group, }) } - ResourceCategory::Unit => { - let unit = - MenuUtils::select_no_help("Select the unit to delete", data.units.clone())?; - Ok(DeleteResourceRequest { + Category::Server => { + let server = + MenuUtils::select_no_help("Select the server to delete", data.servers.clone())?; + Ok(DelReq { category: category as i32, - id: unit.uuid, + id: server.id, }) } } diff --git a/cli/src/application/menu/connection/resource/mod.rs b/cli/src/application/menu/connection/resource/mod.rs index 024b03ce..0027950d 100644 --- a/cli/src/application/menu/connection/resource/mod.rs +++ b/cli/src/application/menu/connection/resource/mod.rs @@ -1,2 +1,2 @@ pub mod delete_resource; -pub mod set_resource_status; +pub mod set_resource; diff --git 
a/cli/src/application/menu/connection/resource/set_resource_status.rs b/cli/src/application/menu/connection/resource/set_resource.rs similarity index 50% rename from cli/src/application/menu/connection/resource/set_resource_status.rs rename to cli/src/application/menu/connection/resource/set_resource.rs index 37267708..ee082228 100644 --- a/cli/src/application/menu/connection/resource/set_resource_status.rs +++ b/cli/src/application/menu/connection/resource/set_resource.rs @@ -1,25 +1,25 @@ -use anyhow::{anyhow, Result}; +use anyhow::Result; +use inquire::InquireError; use loading::Loading; -use simplelog::debug; use crate::application::{ menu::{MenuResult, MenuUtils}, network::{ - proto::resource_management::{ResourceCategory, ResourceStatus, SetResourceStatusRequest}, + proto::manage::resource::{Category, SetReq}, EstablishedConnection, }, profile::{Profile, Profiles}, }; -pub struct SetResourceStatusMenu; +pub struct SetResourceMenu; // TODO: Maybe dont request everything at once, but only what is needed struct Data { - cloudlets: Vec, - deployments: Vec, + nodes: Vec, + groups: Vec, } -impl SetResourceStatusMenu { +impl SetResourceMenu { pub async fn show( profile: &mut Profile, connection: &mut EstablishedConnection, @@ -41,79 +41,65 @@ impl SetResourceStatusMenu { let progress = Loading::default(); progress.text("Changing resource..."); - match connection.client.set_resource_status(request).await { - Ok(_) => { + match connection.client.set_resource(request).await { + Ok(()) => { progress.success("Resource changed successfully 👍."); progress.end(); MenuResult::Success } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - Err(error) => { - debug!("{}", error); - MenuResult::Failed - } + Err(error) => MenuUtils::handle_error(error), } } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - 
MenuResult::Failed + MenuResult::Failed(error) } } } async fn get_required_data(connection: &mut EstablishedConnection) -> Result { - let cloudlets = connection.client.get_cloudlets().await?; - let deployments = connection.client.get_deployments().await?; - Ok(Data { - cloudlets, - deployments, - }) + let nodes = connection.client.get_nodes().await?; + let groups = connection.client.get_groups().await?; + Ok(Data { nodes, groups }) } - fn collect_set_resource_status_request(data: &Data) -> Result { - let status = MenuUtils::select_no_help( + fn collect_set_resource_status_request(data: &Data) -> Result { + let active = MenuUtils::select_no_help( "What is the new status of this resource?", - vec![ResourceStatus::Active, ResourceStatus::Inactive], + vec![true, false], )?; let category = MenuUtils::select_no_help( "What type of resource to you want to change?", - vec![ - ResourceCategory::Cloudlet, - ResourceCategory::Deployment, - ResourceCategory::Unit, - ], + vec![Category::Node, Category::Group, Category::Server], )?; match category { - ResourceCategory::Cloudlet => { - let cloudlet = MenuUtils::select_no_help( - "Select the cloudlet to change", - data.cloudlets.clone(), - )?; - Ok(SetResourceStatusRequest { + Category::Node => { + let node = + MenuUtils::select_no_help("Select the node to change", data.nodes.clone())?; + Ok(SetReq { category: category as i32, - id: cloudlet, - status: status as i32, + id: node, + active, }) } - ResourceCategory::Deployment => { - let deployment = MenuUtils::select_no_help( - "Select the deployment to change", - data.deployments.clone(), - )?; - Ok(SetResourceStatusRequest { + Category::Group => { + let group = + MenuUtils::select_no_help("Select the group to change", data.groups.clone())?; + Ok(SetReq { category: category as i32, - id: deployment, - status: status as i32, + id: group, + active, }) } - ResourceCategory::Unit => Err(anyhow!("Not implemented yet")), + Category::Server => Err(InquireError::OperationInterrupted), } } 
} diff --git a/cli/src/application/menu/connection/unit/get_unit.rs b/cli/src/application/menu/connection/server/get_server.rs similarity index 50% rename from cli/src/application/menu/connection/unit/get_unit.rs rename to cli/src/application/menu/connection/server/get_server.rs index b0956a53..2336fe57 100644 --- a/cli/src/application/menu/connection/unit/get_unit.rs +++ b/cli/src/application/menu/connection/server/get_server.rs @@ -1,26 +1,15 @@ -use std::fmt::Display; - use loading::Loading; use simplelog::{info, warn}; use crate::application::{ menu::{MenuResult, MenuUtils}, - network::{ - proto::unit_management::{SimpleUnitValue, UnitValue}, - EstablishedConnection, - }, + network::{proto::manage::server, EstablishedConnection}, profile::{Profile, Profiles}, }; -impl Display for SimpleUnitValue { - fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{}", self.name) - } -} +pub struct GetServerMenu; -pub struct GetUnitMenu; - -impl GetUnitMenu { +impl GetServerMenu { pub async fn show( profile: &mut Profile, connection: &mut EstablishedConnection, @@ -28,68 +17,65 @@ impl GetUnitMenu { ) -> MenuResult { let progress = Loading::default(); progress.text(format!( - "Retrieving available units from controller \"{}\"", + "Retrieving available servers from controller \"{}\"", profile.name )); - match connection.client.get_units().await { - Ok(units) => { - progress.success("Unit data retrieved successfully 👍"); + match connection.client.get_servers().await { + Ok(servers) => { + progress.success("Data retrieved successfully 👍"); progress.end(); - match MenuUtils::select_no_help("Select a unit to view more details:", units) { - Ok(unit) => { + match MenuUtils::select_no_help("Select a server to view more details:", servers) { + Ok(server) => { let progress = Loading::default(); progress.text(format!( - "Fetching details for unit \"{}\" from controller \"{}\"...", - profile.name, unit + "Fetching details for server \"{}\" from controller 
\"{}\"...", + profile.name, server )); - match connection.client.get_unit(&unit.uuid).await { - Ok(unit) => { - progress.success("Unit details retrieved successfully 👍"); + match connection.client.get_server(&server.id).await { + Ok(server) => { + progress.success("Details retrieved successfully 👍"); progress.end(); - Self::display_details(&unit); + Self::display_details(&server); MenuResult::Success } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - Err(_) => MenuResult::Aborted, + Err(error) => MenuUtils::handle_error(error), } } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - fn display_details(unit: &UnitValue) { - info!(" 🖥 Unit Info"); - info!(" Name: {}", unit.name); - info!(" UUID: {}", unit.uuid); - if let Some(deployment) = &unit.deployment { - info!(" Deployment: {}", deployment); + fn display_details(server: &server::Detail) { + info!(" 🖥 Server Info"); + info!(" Name: {}", server.name); + info!(" UUID: {}", server.id); + if let Some(group) = &server.group { + info!(" Group: {}", group); } else { - info!(" Deployment: None"); + info!(" Group: None"); } - info!(" Cloudlet: {}", unit.cloudlet); - if let Some(allocation) = &unit.allocation { + info!(" Node: {}", server.name); + if let Some(allocation) = &server.allocation { info!(" Allocation: "); info!(" Allocations: "); - for address in &allocation.addresses { - info!( - " - {}:{}", - address.host, address.port - ); + for port in &allocation.ports { + info!(" - {}:{}", port.host, port.port); } if let Some(resources) = allocation.resources { - info!(" Resources per unit: "); + info!(" Resources per server: "); info!(" Memory: {} MiB", resources.memory); info!(" Swap: {} MiB", resources.swap); info!( @@ -103,14 +89,14 @@ impl GetUnitMenu { ); info!( " Addresses/Ports: {}", - 
resources.addresses + resources.ports ); } else { - warn!(" Resources per unit: None"); + warn!(" Resources per server: None"); } if let Some(spec) = &allocation.spec { info!(" Specification: "); - info!(" Image: {}", spec.image); + info!(" Image: {}", spec.img); info!(" Settings: "); for setting in &spec.settings { info!( @@ -119,7 +105,7 @@ impl GetUnitMenu { ); } info!(" Environment Variables: "); - for environment in &spec.environment { + for environment in &spec.env { info!( " - {}: {}", environment.key, environment.value @@ -127,18 +113,14 @@ impl GetUnitMenu { } info!( " Disk Retention: {}", - spec.disk_retention.unwrap_or(0) + spec.retention.unwrap_or(0) ); if let Some(fallback) = spec.fallback { info!(" Fallback: "); - info!( - " Is fallback: {}", - fallback.enabled - ); - info!( - " Priority: {}", - fallback.priority - ); + info!(" Is fallback: Yes"); + info!(" Priority: {}", fallback.prio); + } else { + info!(" Fallback: None"); } } else { warn!(" Specification: None"); @@ -146,12 +128,9 @@ impl GetUnitMenu { } else { warn!(" Scaling: None"); } - info!( - " Connected Users: {}", - unit.connected_users - ); - info!(" Auth Token: {}", unit.auth_token); - info!(" State: {}", unit.state); - info!(" Rediness: {}", unit.rediness); + info!(" Connected Users: {}", server.users); + info!(" Auth Token: {}", server.token); + info!(" State: {}", server.state); + info!(" Ready: {}", server.ready); } } diff --git a/cli/src/application/menu/connection/server/get_servers.rs b/cli/src/application/menu/connection/server/get_servers.rs new file mode 100644 index 00000000..58647f00 --- /dev/null +++ b/cli/src/application/menu/connection/server/get_servers.rs @@ -0,0 +1,52 @@ +use loading::Loading; +use simplelog::info; + +use crate::application::{ + menu::MenuResult, + network::{proto::manage::server, EstablishedConnection}, + profile::{Profile, Profiles}, +}; + +pub struct GetServersMenu; + +impl GetServersMenu { + pub async fn show( + profile: &mut Profile, + connection: 
&mut EstablishedConnection, + _profiles: &mut Profiles, + ) -> MenuResult { + let progress = Loading::default(); + progress.text(format!( + "Requesting server list from controller \"{}\"", + profile.name + )); + + match connection.client.get_servers().await { + Ok(servers) => { + progress.success("Data retrieved successfully 👍"); + progress.end(); + Self::display_details(&servers); + MenuResult::Success + } + Err(error) => { + progress.fail(format!("{error}")); + progress.end(); + MenuResult::Failed(error) + } + } + } + + fn display_details(servers: &[server::Short]) { + info!(" 🖥 Servers"); + if servers.is_empty() { + info!(" No server found"); + } else { + for server in servers { + info!( + " - {}@{} ({})", + server.name, server.node, server.id + ); + } + } + } +} diff --git a/cli/src/application/menu/connection/server/mod.rs b/cli/src/application/menu/connection/server/mod.rs new file mode 100644 index 00000000..7aef546a --- /dev/null +++ b/cli/src/application/menu/connection/server/mod.rs @@ -0,0 +1,2 @@ +pub mod get_server; +pub mod get_servers; diff --git a/cli/src/application/menu/connection/start.rs b/cli/src/application/menu/connection/start.rs index 3b62a65e..699fc503 100644 --- a/cli/src/application/menu/connection/start.rs +++ b/cli/src/application/menu/connection/start.rs @@ -1,7 +1,5 @@ use std::fmt::Display; -use simplelog::debug; - use crate::application::{ menu::{MenuResult, MenuUtils}, network::EstablishedConnection, @@ -9,41 +7,35 @@ use crate::application::{ }; use super::{ - cloudlet::{ - create_cloudlet::CreateCloudletMenu, get_cloudlet::GetCloudletMenu, - get_cloudlets::GetCloudletsMenu, - }, - deployment::{ - create_deployment::CreateDeploymentMenu, get_deployment::GetDeploymentMenu, - get_deployments::GetDeploymentsMenu, - }, general::{get_versions::GetVersionsMenu, request_stop::RequestStopMenu}, - resource::{delete_resource::DeleteResourceMenu, set_resource_status::SetResourceStatusMenu}, - unit::{get_unit::GetUnitMenu, 
get_units::GetUnitsMenu}, - user::transfer_user::TransferUserMenu, + group::{create_group::CreateGroupMenu, get_group::GetGroupMenu, get_groups::GetGroupsMenu}, + node::{create_node::CreateNodeMenu, get_node::GetNodeMenu, get_nodes::GetNodesMenu}, + resource::{delete_resource::DeleteResourceMenu, set_resource::SetResourceMenu}, + server::{get_server::GetServerMenu, get_servers::GetServersMenu}, + user::transfer_users::TransferUsersMenu, }; enum Action { - // Resource Management - SetResourceStatus, + // Resource operations + SetResource, DeleteResource, - // Cloudlet Management - CreateCloudlet, - GetCloudlet, - GetCloudlets, + // Node operations + CreateNode, + GetNode, + GetNodes, - // Deployment Management - CreateDeployment, - GetDeployment, - GetDeployments, + // Group operations + CreateGroup, + GetGroup, + GetGroups, - // Unit Management - GetUnit, - GetUnits, + // Server operations + GetServer, + GetServers, - // User Management - TransferUser, + // Transfer operations + TransferUsers, // General RequestStop, @@ -56,21 +48,21 @@ enum Action { impl Display for Action { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Action::SetResourceStatus => write!(f, "Set status of a certain Resource"), + Action::SetResource => write!(f, "Set status of a certain Resource"), Action::DeleteResource => write!(f, "Delete Resource"), - Action::CreateCloudlet => write!(f, "Create Cloudlet"), - Action::GetCloudlet => write!(f, "Get information about a certain Cloudlet"), - Action::GetCloudlets => write!(f, "Get all Cloudlets"), + Action::CreateNode => write!(f, "Create Node"), + Action::GetNode => write!(f, "Get information about a certain Node"), + Action::GetNodes => write!(f, "Get all Nodes"), - Action::CreateDeployment => write!(f, "Create Deployment"), - Action::GetDeployment => write!(f, "Get information about a certain Deployment"), - Action::GetDeployments => write!(f, "Get all Deployments"), + Action::CreateGroup => write!(f, "Create 
Group"), + Action::GetGroup => write!(f, "Get information about a certain Group"), + Action::GetGroups => write!(f, "Get all Groups"), - Action::GetUnit => write!(f, "Get information about a certain Unit"), - Action::GetUnits => write!(f, "Get all Units"), + Action::GetServer => write!(f, "Get information about a certain Server"), + Action::GetServers => write!(f, "Get all Servers"), - Action::TransferUser => write!(f, "Transfer a user to a different Unit"), + Action::TransferUsers => write!(f, "Transfer a users to a different Server"), Action::RequestStop => write!(f, "Request stop of Controller"), Action::GetVersions => write!(f, "Get versions"), @@ -89,8 +81,10 @@ impl ConnectionStartMenu { profiles: &mut Profiles, ) -> MenuResult { loop { - if Self::show_internal(profile, connection, profiles).await == MenuResult::Exit { - return MenuResult::Success; + match Self::show_internal(profile, connection, profiles).await { + MenuResult::Aborted | MenuResult::Exit => return MenuResult::Success, + MenuResult::Failed(error) => return MenuResult::Failed(error), + MenuResult::Success => {} } } } @@ -104,53 +98,42 @@ impl ConnectionStartMenu { "What do you want to do?", vec![ Action::RequestStop, - Action::TransferUser, - Action::SetResourceStatus, + Action::TransferUsers, + Action::SetResource, Action::DeleteResource, - Action::CreateCloudlet, - Action::CreateDeployment, - Action::GetCloudlet, - Action::GetDeployment, - Action::GetUnit, - Action::GetDeployments, - Action::GetCloudlets, - Action::GetUnits, + Action::CreateNode, + Action::CreateGroup, + Action::GetNode, + Action::GetGroup, + Action::GetServer, + Action::GetNodes, + Action::GetGroups, + Action::GetServers, Action::GetVersions, Action::DisconnectFromController, ], ) { Ok(selection) => match selection { - Action::SetResourceStatus => { - SetResourceStatusMenu::show(profile, connection, profiles).await - } + Action::SetResource => SetResourceMenu::show(profile, connection, profiles).await, Action::DeleteResource 
=> { DeleteResourceMenu::show(profile, connection, profiles).await } - Action::CreateCloudlet => { - CreateCloudletMenu::show(profile, connection, profiles).await - } - Action::GetCloudlet => GetCloudletMenu::show(profile, connection, profiles).await, - Action::GetCloudlets => GetCloudletsMenu::show(profile, connection, profiles).await, - Action::CreateDeployment => { - CreateDeploymentMenu::show(profile, connection, profiles).await + Action::CreateNode => CreateNodeMenu::show(profile, connection, profiles).await, + Action::GetNode => GetNodeMenu::show(profile, connection, profiles).await, + Action::GetNodes => GetNodesMenu::show(profile, connection, profiles).await, + Action::CreateGroup => CreateGroupMenu::show(profile, connection, profiles).await, + Action::GetGroup => GetGroupMenu::show(profile, connection, profiles).await, + Action::GetGroups => GetGroupsMenu::show(profile, connection, profiles).await, + Action::GetServer => GetServerMenu::show(profile, connection, profiles).await, + Action::GetServers => GetServersMenu::show(profile, connection, profiles).await, + Action::TransferUsers => { + TransferUsersMenu::show(profile, connection, profiles).await } - Action::GetDeployment => { - GetDeploymentMenu::show(profile, connection, profiles).await - } - Action::GetDeployments => { - GetDeploymentsMenu::show(profile, connection, profiles).await - } - Action::GetUnit => GetUnitMenu::show(profile, connection, profiles).await, - Action::GetUnits => GetUnitsMenu::show(profile, connection, profiles).await, - Action::TransferUser => TransferUserMenu::show(profile, connection, profiles).await, Action::RequestStop => RequestStopMenu::show(profile, connection, profiles).await, Action::GetVersions => GetVersionsMenu::show(profile, connection, profiles).await, Action::DisconnectFromController => MenuResult::Exit, }, - Err(error) => { - debug!("{}", error); - MenuResult::Exit - } + Err(error) => MenuUtils::handle_error(error), } } } diff --git 
a/cli/src/application/menu/connection/unit/get_units.rs b/cli/src/application/menu/connection/unit/get_units.rs deleted file mode 100644 index 5d67d869..00000000 --- a/cli/src/application/menu/connection/unit/get_units.rs +++ /dev/null @@ -1,52 +0,0 @@ -use loading::Loading; -use simplelog::info; - -use crate::application::{ - menu::MenuResult, - network::{proto::unit_management::SimpleUnitValue, EstablishedConnection}, - profile::{Profile, Profiles}, -}; - -pub struct GetUnitsMenu; - -impl GetUnitsMenu { - pub async fn show( - profile: &mut Profile, - connection: &mut EstablishedConnection, - _profiles: &mut Profiles, - ) -> MenuResult { - let progress = Loading::default(); - progress.text(format!( - "Requesting unit list from controller \"{}\"", - profile.name - )); - - match connection.client.get_units().await { - Ok(units) => { - progress.success("Unit data retrieved successfully 👍"); - progress.end(); - Self::display_details(&units); - MenuResult::Success - } - Err(error) => { - progress.fail(format!("{}", error)); - progress.end(); - MenuResult::Failed - } - } - } - - fn display_details(units: &[SimpleUnitValue]) { - info!(" 🖥 Units"); - if units.is_empty() { - info!(" No units found"); - } else { - for unit in units { - info!( - " - {}@{} ({})", - unit.name, unit.cloudlet, unit.uuid - ); - } - } - } -} diff --git a/cli/src/application/menu/connection/unit/mod.rs b/cli/src/application/menu/connection/unit/mod.rs deleted file mode 100644 index 2f5ed64f..00000000 --- a/cli/src/application/menu/connection/unit/mod.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod get_unit; -pub mod get_units; diff --git a/cli/src/application/menu/connection/user/mod.rs b/cli/src/application/menu/connection/user/mod.rs index 098c6e86..a1bd1efb 100644 --- a/cli/src/application/menu/connection/user/mod.rs +++ b/cli/src/application/menu/connection/user/mod.rs @@ -1 +1 @@ -pub mod transfer_user; +pub mod transfer_users; diff --git a/cli/src/application/menu/connection/user/transfer_user.rs 
b/cli/src/application/menu/connection/user/transfer_users.rs similarity index 52% rename from cli/src/application/menu/connection/user/transfer_user.rs rename to cli/src/application/menu/connection/user/transfer_users.rs index 3550041a..d97190aa 100644 --- a/cli/src/application/menu/connection/user/transfer_user.rs +++ b/cli/src/application/menu/connection/user/transfer_users.rs @@ -1,32 +1,30 @@ use anyhow::Result; +use inquire::InquireError; use loading::Loading; -use simplelog::debug; use crate::application::{ menu::{MenuResult, MenuUtils}, network::{ - proto::{ - transfer_management::{ - transfer_target_value::TargetType, TransferTargetValue, TransferUsersRequest, - }, - unit_management::SimpleUnitValue, - user_management::UserValue, + proto::manage::{ + server, + transfer::{self, target, TransferReq}, + user, }, EstablishedConnection, }, profile::{Profile, Profiles}, }; -pub struct TransferUserMenu; +pub struct TransferUsersMenu; // TODO: Maybe dont request everything at once, but only what is needed struct Data { - users: Vec, - units: Vec, - deployments: Vec, + users: Vec, + servers: Vec, + groups: Vec, } -impl TransferUserMenu { +impl TransferUsersMenu { pub async fn show( profile: &mut Profile, connection: &mut EstablishedConnection, @@ -48,90 +46,87 @@ impl TransferUserMenu { let progress = Loading::default(); progress.text(format!( "Transferring {} users to target \"{}\"...", - request.user_uuids.len(), + request.ids.len(), request.target.as_ref().unwrap() )); match connection.client.transfer_users(request).await { - Ok(_) => { + Ok(()) => { progress.success("User transferred successfully 👍."); progress.end(); MenuResult::Success } Err(error) => { - progress.fail(format!("{}", error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - Err(error) => { - debug!("{}", error); - MenuResult::Failed - } + Err(error) => MenuUtils::handle_error(error), } } Err(error) => { - progress.fail(format!("{}", 
error)); + progress.fail(format!("{error}")); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } async fn get_required_data(connection: &mut EstablishedConnection) -> Result { let users = connection.client.get_users().await?; - let units = connection.client.get_units().await?; - let deployments = connection.client.get_deployments().await?; + let servers = connection.client.get_servers().await?; + let groups = connection.client.get_groups().await?; Ok(Data { users, - units, - deployments, + servers, + groups, }) } - fn collect_transfer_request(data: &Data) -> Result { + fn collect_transfer_request(data: &Data) -> Result { let users = MenuUtils::multi_select_no_help("Select the users to transfer", data.users.clone())?; let target = Self::collect_transfer_target(data)?; - Ok(TransferUsersRequest { - user_uuids: users.iter().map(|user| user.uuid.clone()).collect(), + Ok(TransferReq { + ids: users.iter().map(|user| user.id.clone()).collect(), target: Some(target), }) } - fn collect_transfer_target(data: &Data) -> Result { + fn collect_transfer_target(data: &Data) -> Result { match MenuUtils::select_no_help( "Select the target type", vec![ - TargetType::Unit, - TargetType::Deployment, - TargetType::Fallback, + target::Type::Server, + target::Type::Group, + target::Type::Fallback, ], )? 
{ - TargetType::Unit => { - let unit = MenuUtils::select_no_help( - "Select the unit to transfer the user to", - data.units.clone(), + target::Type::Server => { + let server = MenuUtils::select_no_help( + "Select the server to transfer the user to", + data.servers.clone(), )?; - Ok(TransferTargetValue { - target_type: TargetType::Unit as i32, - target: Some(unit.uuid), + Ok(transfer::Target { + r#type: target::Type::Server as i32, + target: Some(server.id), }) } - TargetType::Deployment => { - let deployment = MenuUtils::select_no_help( - "Select the deployment to transfer the user to", - data.deployments.clone(), + target::Type::Group => { + let group = MenuUtils::select_no_help( + "Select the group to transfer the user to", + data.groups.clone(), )?; - Ok(TransferTargetValue { - target_type: TargetType::Deployment as i32, - target: Some(deployment), + Ok(transfer::Target { + r#type: target::Type::Group as i32, + target: Some(group), }) } - TargetType::Fallback => Ok(TransferTargetValue { - target_type: TargetType::Fallback as i32, + target::Type::Fallback => Ok(transfer::Target { + r#type: target::Type::Fallback as i32, target: None, }), } diff --git a/cli/src/application/menu/create_profile.rs b/cli/src/application/menu/create_profile.rs index 1b9bcf96..547d6e02 100644 --- a/cli/src/application/menu/create_profile.rs +++ b/cli/src/application/menu/create_profile.rs @@ -3,7 +3,6 @@ use inquire::{ Password, Text, }; use loading::Loading; -use simplelog::debug; use crate::{ application::profile::{Profile, Profiles}, @@ -33,10 +32,7 @@ impl CreateProfileMenu { } let name = match prompt.prompt() { Ok(name) => name, - Err(error) => { - debug!("{}", error); - return MenuResult::Aborted; - } + Err(error) => return MenuUtils::handle_error(error), }; let authorization = match Password::new("What is the authorization token for this profile?") @@ -47,10 +43,7 @@ impl CreateProfileMenu { .prompt() { Ok(authorization) => authorization, - Err(error) => { - debug!("{}", error); 
- return MenuResult::Aborted; - } + Err(error) => return MenuUtils::handle_error(error), }; let url = match MenuUtils::parsed_value( @@ -59,10 +52,7 @@ impl CreateProfileMenu { "Please enter a valid URL", ) { Ok(url) => url, - Err(error) => { - debug!("{}", error); - return MenuResult::Aborted; - } + Err(error) => return MenuUtils::handle_error(error), }; let progress = Loading::default(); @@ -78,16 +68,16 @@ impl CreateProfileMenu { } } Err(error) => { - progress.fail(format!("Failed to connect to the controller: {}", error)); + progress.fail(format!("Failed to connect to the controller: {error}")); progress.end(); - return MenuResult::Failed; + return MenuResult::Failed(error); } } - progress.text(format!("Saving profile \"{}\"", name)); - if let Err(error) = profiles.create_profile(&profile) { - progress.fail(format!("Failed to create profile: {}", error)); + progress.text(format!("Saving profile \"{name}\"")); + if let Err(error) = profiles.create_profile(&profile).await { + progress.fail(format!("Failed to create profile: {error}")); progress.end(); - return MenuResult::Failed; + return MenuResult::Failed(error); } progress.success("Profile created successfully"); progress.end(); diff --git a/cli/src/application/menu/delete_profile.rs b/cli/src/application/menu/delete_profile.rs index 83311d9e..1624a3b7 100644 --- a/cli/src/application/menu/delete_profile.rs +++ b/cli/src/application/menu/delete_profile.rs @@ -1,5 +1,4 @@ use loading::Loading; -use simplelog::debug; use crate::application::profile::Profiles; @@ -8,7 +7,7 @@ use super::{MenuResult, MenuUtils}; pub struct DeleteProfileMenu; impl DeleteProfileMenu { - pub async fn show(profiles: &mut Profiles) -> MenuResult { + pub fn show(profiles: &mut Profiles) -> MenuResult { let options = profiles.profiles.clone(); match MenuUtils::select_no_help("What profile/controller do you want to delete?", options) { Ok(profile) => match MenuUtils::confirm("Do you really want to delete this profile?") { @@ -16,27 
+15,24 @@ impl DeleteProfileMenu { let progress = Loading::default(); progress.text(format!("Deleting profile \"{}\"", profile.name)); match profiles.delete_profile(&profile) { - Ok(_) => { + Ok(()) => { progress.success("Profile deleted successfully"); progress.end(); MenuResult::Success } - Err(err) => { + Err(error) => { progress.fail(format!( - "Ops. Something went wrong while deleting the profile | {}", - err + "Ops. Something went wrong while deleting the profile | {error}" )); progress.end(); - MenuResult::Failed + MenuResult::Failed(error) } } } - Ok(false) | Err(_) => MenuResult::Aborted, + Ok(false) => MenuResult::Aborted, + Err(error) => MenuUtils::handle_error(error), }, - Err(err) => { - debug!("{}", err); - MenuResult::Aborted - } + Err(error) => MenuUtils::handle_error(error), } } } diff --git a/cli/src/application/menu/load_profile.rs b/cli/src/application/menu/load_profile.rs index 260097c5..28e88997 100644 --- a/cli/src/application/menu/load_profile.rs +++ b/cli/src/application/menu/load_profile.rs @@ -1,5 +1,3 @@ -use simplelog::debug; - use crate::application::profile::Profiles; use super::{connection::ConnectionMenu, MenuResult, MenuUtils}; @@ -14,10 +12,7 @@ impl LoadProfileMenu { options, ) { Ok(profile) => ConnectionMenu::show(profile, profiles).await, - Err(error) => { - debug!("{}", error); - MenuResult::Aborted - } + Err(error) => MenuUtils::handle_error(error), } } } diff --git a/cli/src/application/menu/start.rs b/cli/src/application/menu/start.rs index 125800aa..0f101d63 100644 --- a/cli/src/application/menu/start.rs +++ b/cli/src/application/menu/start.rs @@ -3,8 +3,6 @@ use std::{ vec, }; -use simplelog::debug; - use crate::application::profile::Profiles; use super::{ @@ -22,10 +20,10 @@ enum Selection { impl Display for Selection { fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result { match self { - Selection::LoadProfile => write!(f, "🖧 | Connect to existing controller"), - Selection::CreateProfile => write!(f, "+ | Add new 
controller"), - Selection::DeleteProfile => write!(f, "🗑 | Remove existing controller"), - Selection::Exit => write!(f, "✖ | Close application"), + Selection::LoadProfile => write!(f, "Connect to existing controller"), + Selection::CreateProfile => write!(f, "Add new controller"), + Selection::DeleteProfile => write!(f, "Remove existing controller"), + Selection::Exit => write!(f, "Close application"), } } } @@ -50,13 +48,10 @@ impl StartMenu { Ok(selection) => match selection { Selection::LoadProfile => LoadProfileMenu::show(profiles).await, Selection::CreateProfile => CreateProfileMenu::show(profiles).await, - Selection::DeleteProfile => DeleteProfileMenu::show(profiles).await, + Selection::DeleteProfile => DeleteProfileMenu::show(profiles), Selection::Exit => MenuResult::Exit, }, - Err(error) => { - debug!("{}", error); - MenuResult::Exit - } + Err(error) => MenuUtils::handle_error(error), } } } diff --git a/cli/src/application/network.rs b/cli/src/application/network.rs index db3b21f7..56b70a41 100644 --- a/cli/src/application/network.rs +++ b/cli/src/application/network.rs @@ -1,18 +1,14 @@ use std::fmt::Display; use anyhow::Result; -use proto::{ - admin_service_client::AdminServiceClient, - cloudlet_management::CloudletValue, - deployment_management::DeploymentValue, - resource_management::{ - DeleteResourceRequest, ResourceCategory, ResourceStatus, SetResourceStatusRequest, - }, - transfer_management::{ - transfer_target_value::TargetType, TransferTargetValue, TransferUsersRequest, - }, - unit_management::{unit_spec::Retention, SimpleUnitValue, UnitValue}, - user_management::UserValue, +use proto::manage::{ + group, + manage_service_client::ManageServiceClient, + node, + resource::{self, DelReq, SetReq}, + server::{self, DiskRetention}, + transfer::{self, target, TransferReq}, + user, }; use simplelog::warn; use tonic::{transport::Channel, Request}; @@ -22,11 +18,20 @@ use crate::VERSION; use super::profile::Profile; -#[allow(clippy::all)] pub mod proto { - 
use tonic::include_proto; + pub mod manage { + #![allow(clippy::all, clippy::pedantic)] + use tonic::include_proto; - include_proto!("admin"); + include_proto!("manage"); + } + + pub mod common { + #![allow(clippy::all, clippy::pedantic)] + use tonic::include_proto; + + include_proto!("common"); + } } pub struct EstablishedConnection { @@ -44,7 +49,7 @@ pub struct CloudConnection { //tls_config: Option, /* Client */ - client: Option>, + client: Option>, } impl CloudConnection { @@ -64,7 +69,7 @@ impl CloudConnection { let mut client = Self::from_profile(profile); client.connect().await?; - let protocol = match client.get_protocol_version().await { + let protocol = match client.get_proto_ver().await { Ok(version) => version, Err(error) => { warn!("⚠ Failed to get protocol version: {}", error); @@ -80,7 +85,7 @@ impl CloudConnection { } pub async fn connect(&mut self) -> Result<()> { - self.client = Some(AdminServiceClient::connect(self.address.to_string()).await?); + self.client = Some(ManageServiceClient::connect(self.address.to_string()).await?); Ok(()) } @@ -91,17 +96,13 @@ impl CloudConnection { Ok(()) } - pub async fn set_resource_status(&mut self, request: SetResourceStatusRequest) -> Result<()> { + pub async fn set_resource(&mut self, request: SetReq) -> Result<()> { let request = self.create_request(request); - self.client - .as_mut() - .unwrap() - .set_resource_status(request) - .await?; + self.client.as_mut().unwrap().set_resource(request).await?; Ok(()) } - pub async fn delete_resource(&mut self, request: DeleteResourceRequest) -> Result<()> { + pub async fn delete_resource(&mut self, request: DelReq) -> Result<()> { let request = self.create_request(request); self.client .as_mut() @@ -111,149 +112,141 @@ impl CloudConnection { Ok(()) } - pub async fn get_drivers(&mut self) -> Result> { + pub async fn get_plugins(&mut self) -> Result> { let request = self.create_request(()); Ok(self .client .as_mut() .unwrap() - .get_drivers(request) + 
.get_plugins(request) .await? .into_inner() - .drivers) + .plugins) } - pub async fn create_cloudlet(&mut self, cloudlet: CloudletValue) -> Result<()> { - let request = self.create_request(cloudlet); - self.client - .as_mut() - .unwrap() - .create_cloudlet(request) - .await?; + pub async fn create_node(&mut self, node: node::Item) -> Result<()> { + let request = self.create_request(node); + self.client.as_mut().unwrap().create_node(request).await?; Ok(()) } - pub async fn get_cloudlet(&mut self, name: &str) -> Result { + pub async fn get_node(&mut self, name: &str) -> Result { let request = self.create_request(name.to_string()); Ok(self .client .as_mut() .unwrap() - .get_cloudlet(request) + .get_node(request) .await? .into_inner()) } - pub async fn get_cloudlets(&mut self) -> Result> { + pub async fn get_nodes(&mut self) -> Result> { let request = self.create_request(()); Ok(self .client .as_mut() .unwrap() - .get_cloudlets(request) + .get_nodes(request) .await? .into_inner() - .cloudlets) + .nodes) } - pub async fn create_deployment(&mut self, deployment: DeploymentValue) -> Result<()> { - let request = self.create_request(deployment); - self.client - .as_mut() - .unwrap() - .create_deployment(request) - .await?; + pub async fn create_group(&mut self, group: group::Item) -> Result<()> { + let request = self.create_request(group); + self.client.as_mut().unwrap().create_group(request).await?; Ok(()) } - pub async fn get_deployment(&mut self, name: &str) -> Result { + pub async fn get_group(&mut self, name: &str) -> Result { let request = self.create_request(name.to_string()); Ok(self .client .as_mut() .unwrap() - .get_deployment(request) + .get_group(request) .await? .into_inner()) } - pub async fn get_deployments(&mut self) -> Result> { + pub async fn get_groups(&mut self) -> Result> { let request = self.create_request(()); Ok(self .client .as_mut() .unwrap() - .get_deployments(request) + .get_groups(request) .await? 
.into_inner() - .deployments) + .groups) } - pub async fn get_unit(&mut self, uuid: &str) -> Result { + pub async fn get_server(&mut self, uuid: &str) -> Result { let request = self.create_request(uuid.to_string()); Ok(self .client .as_mut() .unwrap() - .get_unit(request) + .get_server(request) .await? .into_inner()) } - pub async fn get_units(&mut self) -> Result> { + pub async fn get_servers(&mut self) -> Result> { let request = self.create_request(()); Ok(self .client .as_mut() .unwrap() - .get_units(request) + .get_servers(request) .await? .into_inner() - .units) + .servers) } - pub async fn get_protocol_version(&mut self) -> Result { + pub async fn get_users(&mut self) -> Result> { let request = self.create_request(()); Ok(self .client .as_mut() .unwrap() - .get_protocol_version(request) + .get_users(request) .await? - .into_inner()) + .into_inner() + .users) + } + + pub async fn transfer_users(&mut self, request: TransferReq) -> Result<()> { + let request = self.create_request(request); + self.client + .as_mut() + .unwrap() + .transfer_users(request) + .await?; + Ok(()) } - pub async fn get_controller_version(&mut self) -> Result { + pub async fn get_proto_ver(&mut self) -> Result { let request = self.create_request(()); Ok(self .client .as_mut() .unwrap() - .get_controller_version(request) + .get_proto_ver(request) .await? .into_inner()) } - pub async fn get_users(&mut self) -> Result> { + pub async fn get_ctrl_ver(&mut self) -> Result { let request = self.create_request(()); Ok(self .client .as_mut() .unwrap() - .get_users(request) + .get_ctrl_ver(request) .await? 
- .into_inner() - .users) - } - - pub async fn transfer_users(&mut self, request: TransferUsersRequest) -> Result<()> { - let request = self.create_request(request); - self.client - .as_mut() - .unwrap() - .transfer_users(request) - .await?; - Ok(()) + .into_inner()) } fn create_request(&self, data: T) -> Request { @@ -268,58 +261,63 @@ impl CloudConnection { } } -impl Display for Retention { +impl Display for DiskRetention { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - Retention::Temporary => write!(f, "Temporary"), - Retention::Permanent => write!(f, "Permanent"), + DiskRetention::Temporary => write!(f, "Temporary"), + DiskRetention::Permanent => write!(f, "Permanent"), } } } -impl Display for UserValue { +impl Display for user::Item { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(f, "{} ({})", self.name, self.uuid) + write!(f, "{} ({})", self.name, self.id) } } -impl Display for TargetType { +impl Display for server::Short { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - TargetType::Unit => write!(f, "Unit"), - TargetType::Deployment => write!(f, "Deployment"), - TargetType::Fallback => write!(f, "Fallback"), - } + write!(f, "{} ({})", self.name, self.id) } } -impl Display for TransferTargetValue { +impl Display for target::Type { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match TargetType::try_from(self.target_type) - .expect("There is something wrong with the target type") - { - TargetType::Unit => write!(f, "Unit ({})", self.target.as_ref().unwrap()), - TargetType::Deployment => write!(f, "Deployment ({})", self.target.as_ref().unwrap()), - TargetType::Fallback => write!(f, "Fallback"), + match self { + target::Type::Server => write!(f, "Server"), + target::Type::Group => write!(f, "Group"), + target::Type::Fallback => write!(f, "Fallback"), } } } -impl Display for ResourceCategory { +impl Display for transfer::Target { fn 
fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - match self { - ResourceCategory::Cloudlet => write!(f, "Cloudlet"), - ResourceCategory::Deployment => write!(f, "Deployment"), - ResourceCategory::Unit => write!(f, "Unit"), + match target::Type::try_from(self.r#type) + .expect("There is something wrong with the target type") + { + target::Type::Server => write!( + f, + "Server ({})", + self.target.as_ref().unwrap_or(&String::from("None")) + ), + target::Type::Group => write!( + f, + "Group ({})", + self.target.as_ref().unwrap_or(&String::from("None")) + ), + target::Type::Fallback => write!(f, "Fallback"), } } } -impl Display for ResourceStatus { +impl Display for resource::Category { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { - ResourceStatus::Active => write!(f, "Active"), - ResourceStatus::Inactive => write!(f, "Inactive"), + resource::Category::Node => write!(f, "Node"), + resource::Category::Group => write!(f, "Group"), + resource::Category::Server => write!(f, "Server"), } } } diff --git a/cli/src/application/profile.rs b/cli/src/application/profile.rs index 8de8ffb3..b911a62a 100644 --- a/cli/src/application/profile.rs +++ b/cli/src/application/profile.rs @@ -4,12 +4,11 @@ use std::{ }; use anyhow::{anyhow, Result}; -use common::config::{LoadFromTomlFile, SaveToTomlFile}; -use loading::Loading; +use simplelog::debug; use stored::StoredProfile; use url::Url; -use crate::storage::Storage; +use crate::storage::{SaveToTomlFile, Storage}; use super::network::{CloudConnection, EstablishedConnection}; @@ -18,87 +17,37 @@ pub struct Profiles { } impl Profiles { - fn new() -> Self { - Profiles { profiles: vec![] } - } + pub async fn init() -> Result { + debug!("Loading profiles..."); + let mut profiles = vec![]; - pub fn load_all() -> Self { - let progress = Loading::default(); - progress.text("Loading profiles"); - - let mut profiles = Self::new(); - let profiles_directory = Storage::get_profiles_folder(); - 
if !profiles_directory.exists() { - if let Err(error) = fs::create_dir_all(&profiles_directory) { - progress.warn(format!( - "Failed to create deployments directory: {}", - &error - )); - return profiles; - } + let directory = Storage::profiles_folder(); + if !directory.exists() { + fs::create_dir_all(&directory)?; } - let entries = match fs::read_dir(&profiles_directory) { - Ok(entries) => entries, - Err(error) => { - progress.warn(format!("Failed to read deployments directory: {}", &error)); - return profiles; - } - }; - - for entry in entries { - let entry = match entry { - Ok(entry) => entry, - Err(error) => { - progress.warn(format!( - "Failed to read entry in profiles directory: {}", - &error - )); - continue; - } - }; - - let path = entry.path(); - if path.is_dir() { - continue; - } - - let id: String = match path.file_stem() { - Some(name) => name.to_string_lossy().to_string(), - None => continue, - }; - - let profile = match StoredProfile::load_from_file(&path) { - Ok(profile) => profile, - Err(error) => { - progress.warn(format!( - "Failed to load profile from file '{}': {}", - path.display(), - &error - )); - continue; - } - }; - - progress.text(format!("Loading profile '{}'", id)); - let profile = Profile::from(&id, &profile); - - profiles.add_profile(profile); + for (_, _, name, value) in Storage::for_each_content_toml::( + &directory, + "Failed to read profile from file", + ) + .await? 
+ { + debug!("Loaded profile {}", name); + profiles.push(Profile::from(&name, &value)); } - progress.success(format!("Loaded {} profile(s)", profiles.profiles.len())); - progress.end(); - profiles + debug!("Loaded {} profile(s)", profiles.len()); + Ok(Self { profiles }) } - pub fn create_profile(&mut self, profile: &Profile) -> Result<()> { + pub async fn create_profile(&mut self, profile: &Profile) -> Result<()> { // Check if profile already exists if Self::is_id_used(&self.profiles, &profile.id) { return Err(anyhow!("Profile '{}' already exists", profile.name)); } let profile = profile.clone(); - profile.mark_dirty()?; + profile.mark_dirty().await?; self.add_profile(profile); Ok(()) } @@ -159,25 +108,27 @@ impl Profile { CloudConnection::establish_connection(self).await } - pub fn mark_dirty(&self) -> Result<()> { - self.save_to_file() + pub async fn mark_dirty(&self) -> Result<()> { + self.save_to_file().await } fn delete_file(&self) -> Result<()> { - let file_path = Storage::get_profile_file(&self.id); + let file_path = Storage::profile_file(&self.id); if file_path.exists() { fs::remove_file(file_path)?; } Ok(()) } - fn save_to_file(&self) -> Result<()> { + async fn save_to_file(&self) -> Result<()> { let stored_profile = StoredProfile { name: self.name.clone(), authorization: self.authorization.clone(), url: self.url.clone(), }; - stored_profile.save_to_file(&Storage::get_profile_file(&self.id), true) + stored_profile + .save(&Storage::profile_file(&self.id), true) + .await } pub fn compute_id(name: &str) -> String { @@ -200,10 +151,11 @@ impl Display for Profile { } mod stored { - use common::config::{LoadFromTomlFile, SaveToTomlFile}; use serde::{Deserialize, Serialize}; use url::Url; + use crate::storage::{LoadFromTomlFile, SaveToTomlFile}; + #[derive(Serialize, Deserialize)] pub struct StoredProfile { /* Settings */ diff --git a/cli/src/args.rs b/cli/src/args.rs deleted file mode 100644 index b141bad2..00000000 --- a/cli/src/args.rs +++ /dev/null @@ -1,7 
+0,0 @@ -use clap::{ArgAction, Parser}; - -#[derive(Parser)] -pub struct Args { - #[clap(short, long, help = "Enable debug mode", action = ArgAction::SetTrue)] - pub debug: bool, -} diff --git a/cli/src/main.rs b/cli/src/main.rs index e4ef7e2e..8c215237 100644 --- a/cli/src/main.rs +++ b/cli/src/main.rs @@ -1,11 +1,12 @@ +#![warn(clippy::all, clippy::pedantic)] + +use anyhow::Result; use application::Cli; -use args::Args; -use clap::Parser; -use common::init::CloudInit; +use clap::{ArgAction, Parser}; +use common::{error::FancyError, init::CloudInit}; use storage::Storage; mod application; -mod args; mod storage; // Include the build information generated by build.rs @@ -15,10 +16,22 @@ pub const AUTHORS: [&str; 1] = ["HttpRafa"]; #[tokio::main] async fn main() { - let args = Args::parse(); - CloudInit::init_logging(args.debug, true, Storage::get_latest_log_file()); - CloudInit::print_ascii_art("Atomic Cloud CLI", &VERSION, &AUTHORS); + async fn run() -> Result<()> { + let args = Arguments::parse(); + CloudInit::init_logging(args.debug, true, Storage::latest_log_file()); + CloudInit::print_ascii_art("Atomic Cloud CLI", &VERSION, &AUTHORS); + + let mut cli = Cli::new().await?; + cli.start().await + } + + if let Err(error) = run().await { + FancyError::print_fancy(&error, true); + } +} - let mut cli = Cli::new().await; - cli.start().await +#[derive(Parser)] +pub struct Arguments { + #[clap(short, long, help = "Enable debug mode", action = ArgAction::SetTrue)] + pub debug: bool, } diff --git a/cli/src/storage.rs b/cli/src/storage.rs index f0f9d180..40a0958e 100644 --- a/cli/src/storage.rs +++ b/cli/src/storage.rs @@ -3,10 +3,15 @@ All the storage related functions are implemented here. 
This makes it easier to change them in the future */ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; + +use anyhow::Result; +use serde::{de::DeserializeOwned, Serialize}; +use simplelog::warn; +use tokio::fs; /* Cli */ -const CLI_DIRECTORY: &str = "atomic-cli"; +const CLI_DIRECTORY: &str = "cli"; /* LOGS */ const LOGS_DIRECTORY: &str = "logs"; @@ -19,24 +24,77 @@ pub struct Storage; impl Storage { /* Base */ - pub fn get_cli_folder() -> PathBuf { + pub fn cli_folder() -> PathBuf { dirs::config_dir() .expect("Failed to get config directory for current user") .join(CLI_DIRECTORY) } /* Logs */ - pub fn get_latest_log_file() -> PathBuf { - Storage::get_cli_folder() + pub fn latest_log_file() -> PathBuf { + Storage::cli_folder() .join(LOGS_DIRECTORY) .join(LATEST_LOG_FILE) } /* Profiles */ - pub fn get_profiles_folder() -> PathBuf { - Storage::get_cli_folder().join(PROFILES_DIRECTORY) + pub fn profiles_folder() -> PathBuf { + Storage::cli_folder().join(PROFILES_DIRECTORY) + } + pub fn profile_file(name: &str) -> PathBuf { + Storage::profiles_folder().join(format!("{name}.toml")) } - pub fn get_profile_file(name: &str) -> PathBuf { - Storage::get_profiles_folder().join(format!("{}.toml", name)) + + pub async fn for_each_content_toml( + path: &Path, + error_message: &str, + ) -> Result> { + let mut result = Vec::new(); + let mut directory = fs::read_dir(path).await?; + while let Some(entry) = directory.next_entry().await? 
{ + if entry.path().is_dir() { + continue; + } + match T::from_file(&entry.path()).await { + Ok(value) => { + let path = entry.path(); + match (path.file_name(), path.file_stem()) { + (Some(name), Some(stem)) => result.push(( + path.clone(), + name.to_string_lossy().to_string(), + stem.to_string_lossy().to_string(), + value, + )), + _ => { + warn!("Failed to read file names: {:?}", path); + } + } + } + Err(error) => { + warn!("{}@{:?}: {:?}", error_message, entry.path(), error); + } + } + } + Ok(result) + } +} + +pub trait SaveToTomlFile: Serialize { + async fn save(&self, path: &Path, create_parent: bool) -> Result<()> { + if create_parent { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).await?; + } + } + fs::write(path, toml::to_string(self)?).await?; + Ok(()) + } +} + +pub trait LoadFromTomlFile: DeserializeOwned { + async fn from_file(path: &Path) -> Result { + let data = fs::read_to_string(path).await?; + let config = toml::from_str(&data)?; + Ok(config) } } diff --git a/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudDeployment.java b/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudGroup.java similarity index 63% rename from clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudDeployment.java rename to clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudGroup.java index 5de36fc3..90d28a77 100644 --- a/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudDeployment.java +++ b/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudGroup.java @@ -1,6 +1,6 @@ package io.atomic.cloud.api.objects; -public interface CloudDeployment { +public interface CloudGroup { String name(); } diff --git a/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudUnit.java b/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudServer.java similarity index 76% rename from clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudUnit.java rename to 
clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudServer.java index 606922ba..873cd7ae 100644 --- a/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudUnit.java +++ b/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/CloudServer.java @@ -2,7 +2,7 @@ import java.util.UUID; -public interface CloudUnit { +public interface CloudServer { String name(); diff --git a/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/LocalCloudUnit.java b/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/LocalCloudUnit.java index 5c7ec3eb..6880c68f 100644 --- a/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/LocalCloudUnit.java +++ b/clients/jvm/api/src/main/java/io/atomic/cloud/api/objects/LocalCloudUnit.java @@ -5,24 +5,18 @@ public interface LocalCloudUnit { /** - * Shut down this unit instance. - * This will stop the unit and transfer all players to a different unit. - * How the unit is shutdown depends on the disk retention policy. - * If the unit is marked as permanent, it will not be deleted. - * If the unit is not marked as permanent, it will be killed and deleted. - * @return a future to be completed once the unit has been shut down + * Shut down this server instance. + * This will stop the server and transfer all players to a different server. + * How the server is shut down depends on the disk retention policy. + * If the server is marked as permanent, it will not be deleted. + * If the server is not marked as permanent, it will be killed and deleted. 
+ * @return a future to be completed once the server has been shut down */ CompletableFuture shutdown(); /** - * Mark this unit as ready - * @return a future to be completed once the unit has been marked as ready + * Mark this server as ready/not ready + * @return a future to be completed once the server has been marked as ready/not ready */ - CompletableFuture markReady(); - - /** - * Mark this unit as not ready - * @return a future to be completed once the unit has been marked as not ready - */ - CompletableFuture markNotReady(); + CompletableFuture setReady(boolean ready); } diff --git a/clients/jvm/api/src/main/java/io/atomic/cloud/api/transfer/Transfers.java b/clients/jvm/api/src/main/java/io/atomic/cloud/api/transfer/Transfers.java index 97a2e243..555fffe6 100644 --- a/clients/jvm/api/src/main/java/io/atomic/cloud/api/transfer/Transfers.java +++ b/clients/jvm/api/src/main/java/io/atomic/cloud/api/transfer/Transfers.java @@ -1,31 +1,31 @@ package io.atomic.cloud.api.transfer; -import io.atomic.cloud.api.objects.CloudDeployment; -import io.atomic.cloud.api.objects.CloudUnit; +import io.atomic.cloud.api.objects.CloudGroup; +import io.atomic.cloud.api.objects.CloudServer; import java.util.UUID; import java.util.concurrent.CompletableFuture; public interface Transfers { /** - * Sends a request to the controller to transfer the specified users to a new unit. - * @param unit The target unit to which the users should be transferred. - * @param userUUID A list of user UUIDs to transfer. These users must belong to the current unit; otherwise, the controller will return an error. + * Sends a request to the controller to transfer the specified users to a new server. + * @param server The target server to which the users should be transferred. + * @param userUUID A list of user UUIDs to transfer. These users must belong to the current server; otherwise, the controller will return an error. * @return The number of users successfully transferred. 
*/ - CompletableFuture transferUsersToUnit(CloudUnit unit, UUID... userUUID); + CompletableFuture transferUsersToServer(CloudServer server, UUID... userUUID); /** - * Sends a request to the controller to transfer the specified users to a new unit on specific deployment. - * @param deployment The target deployment to which the users should be transferred. - * @param userUUID A list of user UUIDs to transfer. These users must belong to the current unit; otherwise, the controller will return an error. + * Sends a request to the controller to transfer the specified users to a new server on specific group. + * @param group The target group to which the users should be transferred. + * @param userUUID A list of user UUIDs to transfer. These users must belong to the current server; otherwise, the controller will return an error. * @return The number of users successfully transferred. */ - CompletableFuture transferUsersToDeployment(CloudDeployment deployment, UUID... userUUID); + CompletableFuture transferUsersToGroup(CloudGroup group, UUID... userUUID); /** - * Sends a request to the controller to transfer the specified users to a new unit marked as fallback. - * @param userUUID A list of user UUIDs to transfer. These users must belong to the current unit; otherwise, the controller will return an error. + * Sends a request to the controller to transfer the specified users to a new server marked as fallback. + * @param userUUID A list of user UUIDs to transfer. These users must belong to the current server; otherwise, the controller will return an error. * @return The number of users successfully transferred. */ CompletableFuture transferUsersToFallback(UUID... 
userUUID); diff --git a/clients/jvm/build.gradle.kts b/clients/jvm/build.gradle.kts index 021445be..0c6f6775 100644 --- a/clients/jvm/build.gradle.kts +++ b/clients/jvm/build.gradle.kts @@ -25,7 +25,7 @@ allprojects { implementation("com.google.protobuf:protobuf-java:${project.properties["protobuf_version"]}") runtimeOnly("io.grpc:grpc-netty-shaded:${project.properties["grpc_version"]}") - // Jetbrains annotations + // JetBrains annotations compileOnly("org.jetbrains:annotations:${project.properties["jetbrains_annotations_version"]}") // Lombok @@ -80,7 +80,7 @@ allprojects { sourceSets { main { proto { - srcDir("$rootDir/../../protocol/grpc/unit/") + srcDir("$rootDir/../../protocol/grpc/") } } } diff --git a/clients/jvm/common/src/main/java/io/atomic/cloud/common/connection/CloudConnection.java b/clients/jvm/common/src/main/java/io/atomic/cloud/common/connection/CloudConnection.java index ccba7b2c..80120a42 100644 --- a/clients/jvm/common/src/main/java/io/atomic/cloud/common/connection/CloudConnection.java +++ b/clients/jvm/common/src/main/java/io/atomic/cloud/common/connection/CloudConnection.java @@ -1,10 +1,11 @@ package io.atomic.cloud.common.connection; +import com.google.protobuf.BoolValue; import com.google.protobuf.Empty; import com.google.protobuf.StringValue; import com.google.protobuf.UInt32Value; import io.atomic.cloud.common.cache.CachedObject; -import io.atomic.cloud.grpc.unit.*; +import io.atomic.cloud.grpc.client.*; import io.grpc.CallCredentials; import io.grpc.ManagedChannelBuilder; import io.grpc.Metadata; @@ -27,13 +28,13 @@ public class CloudConnection { private final URL address; private final String token; - private UnitServiceGrpc.UnitServiceStub client; + private ClientServiceGrpc.ClientServiceStub client; // Cache values private final CachedObject protocolVersion = new CachedObject<>(); private final CachedObject controllerVersion = new CachedObject<>(); - private final CachedObject unitsInfo = new CachedObject<>(); - private final 
CachedObject deploymentsInfo = new CachedObject<>(); + private final CachedObject serversInfo = new CachedObject<>(); + private final CachedObject groupsInfo = new CachedObject<>(); public void connect() { var channel = ManagedChannelBuilder.forAddress(this.address.getHost(), this.address.getPort()); @@ -43,7 +44,7 @@ public void connect() { channel.usePlaintext(); } - this.client = UnitServiceGrpc.newStub(channel.build()).withCallCredentials(new CallCredentials() { + this.client = ClientServiceGrpc.newStub(channel.build()).withCallCredentials(new CallCredentials() { @Override public void applyRequestMetadata(RequestInfo requestInfo, Executor executor, MetadataApplier applier) { var metadata = new Metadata(); @@ -53,27 +54,21 @@ public void applyRequestMetadata(RequestInfo requestInfo, Executor executor, Met }); } - public CompletableFuture beatHeart() { + public CompletableFuture beat() { var observer = new StreamObserverImpl(); - this.client.beatHeart(Empty.getDefaultInstance(), observer); + this.client.beat(Empty.getDefaultInstance(), observer); return observer.future(); } - public CompletableFuture markReady() { + public CompletableFuture setReady(boolean ready) { var observer = new StreamObserverImpl(); - this.client.markReady(Empty.getDefaultInstance(), observer); + this.client.setReady(BoolValue.of(ready), observer); return observer.future(); } - public CompletableFuture markNotReady() { + public CompletableFuture setRunning() { var observer = new StreamObserverImpl(); - this.client.markNotReady(Empty.getDefaultInstance(), observer); - return observer.future(); - } - - public CompletableFuture markRunning() { - var observer = new StreamObserverImpl(); - this.client.markRunning(Empty.getDefaultInstance(), observer); + this.client.setRunning(Empty.getDefaultInstance(), observer); return observer.future(); } @@ -83,112 +78,100 @@ public CompletableFuture requestStop() { return observer.future(); } - public CompletableFuture 
userConnected(UserManagement.UserConnectedRequest user) { + public CompletableFuture userConnected(User.ConnectedReq user) { var observer = new StreamObserverImpl(); this.client.userConnected(user, observer); return observer.future(); } - public CompletableFuture userDisconnected(UserManagement.UserDisconnectedRequest user) { + public CompletableFuture userDisconnected(User.DisconnectedReq user) { var observer = new StreamObserverImpl(); this.client.userDisconnected(user, observer); return observer.future(); } - public void subscribeToTransfers(StreamObserver observer) { + public void subscribeToTransfers(StreamObserver observer) { this.client.subscribeToTransfers(Empty.getDefaultInstance(), observer); } - public CompletableFuture transferUsers(TransferManagement.TransferUsersRequest transfer) { + public CompletableFuture transferUsers(Transfer.TransferReq transfer) { var observer = new StreamObserverImpl(); this.client.transferUsers(transfer, observer); return observer.future(); } - public CompletableFuture sendMessageToChannel(ChannelManagement.ChannelMessageValue message) { + public CompletableFuture publishMessage(Channel.Msg message) { var observer = new StreamObserverImpl(); - this.client.sendMessageToChannel(message, observer); - return observer.future(); - } - - public CompletableFuture unsubscribeFromChannel(String channel) { - var observer = new StreamObserverImpl(); - this.client.unsubscribeFromChannel(StringValue.of(channel), observer); + this.client.publishMessage(message, observer); return observer.future(); } - public void subscribeToChannel(String channel, StreamObserver observer) { + public void subscribeToChannel(String channel, StreamObserver observer) { this.client.subscribeToChannel(StringValue.of(channel), observer); } - public Optional getUnitsNow() { - var cached = this.unitsInfo.getValue(); + public synchronized Optional getServersNow() { + var cached = this.serversInfo.getValue(); if (cached.isEmpty()) { - this.getUnits(); // Request value 
from controller + this.getServers(); // Request value from controller } return cached; } - public CompletableFuture getUnits() { - var cached = this.unitsInfo.getValue(); + public synchronized CompletableFuture getServers() { + var cached = this.serversInfo.getValue(); if (cached.isPresent()) { return CompletableFuture.completedFuture(cached.get()); } - var observer = new StreamObserverImpl(); - this.client.getUnits(Empty.getDefaultInstance(), observer); + var observer = new StreamObserverImpl(); + this.client.getServers(Empty.getDefaultInstance(), observer); return observer.future().thenApply((value) -> { - this.unitsInfo.setValue(value); + this.serversInfo.setValue(value); return value; }); } - public Optional getDeploymentsNow() { - var cached = this.deploymentsInfo.getValue(); + public synchronized Optional getGroupsNow() { + var cached = this.groupsInfo.getValue(); if (cached.isEmpty()) { - this.getDeployments(); // Request value from controller + this.getGroups(); // Request value from controller } return cached; } - public CompletableFuture getDeployments() { - var cached = this.deploymentsInfo.getValue(); + public synchronized CompletableFuture getGroups() { + var cached = this.groupsInfo.getValue(); if (cached.isPresent()) { return CompletableFuture.completedFuture(cached.get()); } - var observer = new StreamObserverImpl(); - this.client.getDeployments(Empty.getDefaultInstance(), observer); + var observer = new StreamObserverImpl(); + this.client.getGroups(Empty.getDefaultInstance(), observer); return observer.future().thenApply((value) -> { - this.deploymentsInfo.setValue(value); + this.groupsInfo.setValue(value); return value; }); } - public CompletableFuture sendReset() { - var observer = new StreamObserverImpl(); - this.client.reset(Empty.getDefaultInstance(), observer); - return observer.future(); - } - - public synchronized CompletableFuture getProtocolVersion() { + public synchronized CompletableFuture getProtoVer() { var cached = 
this.protocolVersion.getValue(); if (cached.isPresent()) { return CompletableFuture.completedFuture(cached.get()); } var observer = new StreamObserverImpl(); - this.client.getProtocolVersion(Empty.getDefaultInstance(), observer); + this.client.getProtoVer(Empty.getDefaultInstance(), observer); return observer.future().thenApply((value) -> { this.protocolVersion.setValue(value); return value; }); } - public synchronized CompletableFuture getControllerVersion() { + public synchronized CompletableFuture getCtrlVer() { var cached = this.controllerVersion.getValue(); if (cached.isPresent()) { return CompletableFuture.completedFuture(cached.get()); } var observer = new StreamObserverImpl(); - this.client.getControllerVersion(Empty.getDefaultInstance(), observer); + this.client.getCtrlVer(Empty.getDefaultInstance(), observer); return observer.future().thenApply((value) -> { this.controllerVersion.setValue(value); return value; @@ -198,11 +181,11 @@ public synchronized CompletableFuture getControllerVersion() { @Contract(" -> new") public static @NotNull CloudConnection createFromEnv() { var address = System.getenv("CONTROLLER_ADDRESS"); - var token = System.getenv("UNIT_TOKEN"); + var token = System.getenv("SERVER_TOKEN"); if (address == null) { throw new IllegalStateException("CONTROLLER_ADDRESS not set"); } else if (token == null) { - throw new IllegalStateException("UNIT_TOKEN not set"); + throw new IllegalStateException("SERVER_TOKEN not set"); } URL url; diff --git a/clients/jvm/common/src/main/java/io/atomic/cloud/common/health/Heart.java b/clients/jvm/common/src/main/java/io/atomic/cloud/common/health/Heart.java index 657238c8..b87c0072 100644 --- a/clients/jvm/common/src/main/java/io/atomic/cloud/common/health/Heart.java +++ b/clients/jvm/common/src/main/java/io/atomic/cloud/common/health/Heart.java @@ -23,6 +23,6 @@ public void stop() { } public void beat() { - this.connection.beatHeart(); + this.connection.beat(); } } diff --git 
a/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudDeployment.java b/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudGroup.java similarity index 58% rename from clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudDeployment.java rename to clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudGroup.java index 91516a78..ffcccc2d 100644 --- a/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudDeployment.java +++ b/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudGroup.java @@ -1,12 +1,12 @@ package io.atomic.cloud.common.objects; -import io.atomic.cloud.api.objects.CloudDeployment; +import io.atomic.cloud.api.objects.CloudGroup; import lombok.AllArgsConstructor; import lombok.Getter; @AllArgsConstructor @Getter -public class SimpleCloudDeployment implements CloudDeployment { +public class SimpleCloudGroup implements CloudGroup { protected final String name; } diff --git a/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudUnit.java b/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudServer.java similarity index 67% rename from clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudUnit.java rename to clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudServer.java index 77ca14c9..1f369a9d 100644 --- a/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudUnit.java +++ b/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleCloudServer.java @@ -1,13 +1,13 @@ package io.atomic.cloud.common.objects; -import io.atomic.cloud.api.objects.CloudUnit; +import io.atomic.cloud.api.objects.CloudServer; import java.util.UUID; import lombok.AllArgsConstructor; import lombok.Getter; @AllArgsConstructor @Getter -public class SimpleCloudUnit implements CloudUnit { +public class SimpleCloudServer implements 
CloudServer { protected final String name; protected final UUID uuid; diff --git a/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleLocalCloudUnit.java b/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleLocalCloudUnit.java index d3a53f1d..1ad0c28d 100644 --- a/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleLocalCloudUnit.java +++ b/clients/jvm/common/src/main/java/io/atomic/cloud/common/objects/SimpleLocalCloudUnit.java @@ -16,12 +16,7 @@ public CompletableFuture shutdown() { } @Override - public CompletableFuture markReady() { - return this.connection.markReady().thenRun(() -> {}); - } - - @Override - public CompletableFuture markNotReady() { - return this.connection.markNotReady().thenRun(() -> {}); + public CompletableFuture setReady(boolean ready) { + return this.connection.setReady(ready).thenRun(() -> {}); } } diff --git a/clients/jvm/common/src/main/java/io/atomic/cloud/common/transfer/TransferManager.java b/clients/jvm/common/src/main/java/io/atomic/cloud/common/transfer/TransferManager.java index f43dcfbe..fe3463ca 100644 --- a/clients/jvm/common/src/main/java/io/atomic/cloud/common/transfer/TransferManager.java +++ b/clients/jvm/common/src/main/java/io/atomic/cloud/common/transfer/TransferManager.java @@ -1,11 +1,11 @@ package io.atomic.cloud.common.transfer; import com.google.protobuf.UInt32Value; -import io.atomic.cloud.api.objects.CloudDeployment; -import io.atomic.cloud.api.objects.CloudUnit; +import io.atomic.cloud.api.objects.CloudGroup; +import io.atomic.cloud.api.objects.CloudServer; import io.atomic.cloud.api.transfer.Transfers; import io.atomic.cloud.common.connection.CloudConnection; -import io.atomic.cloud.grpc.unit.TransferManagement; +import io.atomic.cloud.grpc.client.Transfer; import java.util.UUID; import java.util.concurrent.CompletableFuture; import lombok.AllArgsConstructor; @@ -17,40 +17,39 @@ public class TransferManager implements Transfers { private final 
CloudConnection connection; @Override - public CompletableFuture transferUsersToUnit(@NotNull CloudUnit unit, UUID @NotNull ... userUUID) { - var builder = TransferManagement.TransferUsersRequest.newBuilder(); - builder.setTarget(TransferManagement.TransferTargetValue.newBuilder() - .setTargetType(TransferManagement.TransferTargetValue.TargetType.UNIT) - .setTarget(unit.uuid().toString()) + public CompletableFuture transferUsersToServer(@NotNull CloudServer server, UUID @NotNull ... userUUID) { + var builder = Transfer.TransferReq.newBuilder(); + builder.setTarget(Transfer.Target.newBuilder() + .setType(Transfer.Target.Type.SERVER) + .setTarget(server.uuid().toString()) .build()); for (UUID uuid : userUUID) { - builder.addUserUuids(uuid.toString()); + builder.addIds(uuid.toString()); } return this.connection.transferUsers(builder.build()).thenApply(UInt32Value::getValue); } @Override - public CompletableFuture transferUsersToDeployment( - @NotNull CloudDeployment deployment, UUID @NotNull ... userUUID) { - var builder = TransferManagement.TransferUsersRequest.newBuilder(); - builder.setTarget(TransferManagement.TransferTargetValue.newBuilder() - .setTargetType(TransferManagement.TransferTargetValue.TargetType.DEPLOYMENT) - .setTarget(deployment.name()) + public CompletableFuture transferUsersToGroup(@NotNull CloudGroup group, UUID @NotNull ... userUUID) { + var builder = Transfer.TransferReq.newBuilder(); + builder.setTarget(Transfer.Target.newBuilder() + .setType(Transfer.Target.Type.GROUP) + .setTarget(group.name()) .build()); for (UUID uuid : userUUID) { - builder.addUserUuids(uuid.toString()); + builder.addIds(uuid.toString()); } return this.connection.transferUsers(builder.build()).thenApply(UInt32Value::getValue); } @Override public CompletableFuture transferUsersToFallback(UUID @NotNull ... 
userUUID) { - var builder = TransferManagement.TransferUsersRequest.newBuilder(); - builder.setTarget(TransferManagement.TransferTargetValue.newBuilder() - .setTargetType(TransferManagement.TransferTargetValue.TargetType.FALLBACK) + var builder = Transfer.TransferReq.newBuilder(); + builder.setTarget(Transfer.Target.newBuilder() + .setType(Transfer.Target.Type.FALLBACK) .build()); for (UUID uuid : userUUID) { - builder.addUserUuids(uuid.toString()); + builder.addIds(uuid.toString()); } return this.connection.transferUsers(builder.build()).thenApply(UInt32Value::getValue); } diff --git a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/CloudPlugin.java b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/CloudPlugin.java index 3a102c0a..d13dcfd7 100644 --- a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/CloudPlugin.java +++ b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/CloudPlugin.java @@ -48,7 +48,6 @@ public void onLoad() { LOGGER.info("Connecting to controller..."); this.connection.connect(); - this.connection.sendReset(); this.heart.start(); } @@ -58,9 +57,9 @@ public void onEnable() { registerListeners(); // Mark server as running - this.connection.markRunning(); + this.connection.setRunning(); if (this.settings.autoReady()) { - this.connection.markReady(); + this.connection.setReady(true); } // Enable transfer system diff --git a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/CloudCommand.java b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/CloudCommand.java index e618f9aa..a0d00008 100644 --- a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/CloudCommand.java +++ b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/CloudCommand.java @@ -18,24 +18,20 @@ public static void register(@NotNull Commands commands) { var sender = context.getSource().getSender(); var connection = CloudPlugin.INSTANCE.connection(); - connection - .getControllerVersion() - 
.thenAcceptBoth(connection.getProtocolVersion(), (version, protocol) -> { - sender.sendMessage(Component.text("╔════════════════════", NamedTextColor.GRAY)); - sender.sendRichMessage("AtomicCloud"); - sender.sendRichMessage( - "Client Version | " - + CloudPlugin.INSTANCE - .getPluginMeta() - .getVersion() + ""); - sender.sendRichMessage( - "Controller Version | " - + version.getValue() + ""); - sender.sendRichMessage( - "Controller Protocol Version | " - + protocol.getValue() + ""); - sender.sendMessage(Component.text("╚════════════════════", NamedTextColor.GRAY)); - }); + connection.getCtrlVer().thenAcceptBoth(connection.getProtoVer(), (version, protocol) -> { + sender.sendMessage(Component.text("╔════════════════════", NamedTextColor.GRAY)); + sender.sendRichMessage("AtomicCloud"); + sender.sendRichMessage( + "Client Version | " + + CloudPlugin.INSTANCE.getPluginMeta().getVersion() + ""); + sender.sendRichMessage( + "Controller Version | " + + version.getValue() + ""); + sender.sendRichMessage( + "Controller Protocol Version | " + + protocol.getValue() + ""); + sender.sendMessage(Component.text("╚════════════════════", NamedTextColor.GRAY)); + }); return Command.SINGLE_SUCCESS; }) diff --git a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/DisposeCommand.java b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/DisposeCommand.java index d5c791ed..0516812d 100644 --- a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/DisposeCommand.java +++ b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/DisposeCommand.java @@ -1,7 +1,7 @@ package io.atomic.cloud.paper.command; import com.mojang.brigadier.Command; -import io.atomic.cloud.grpc.unit.TransferManagement; +import io.atomic.cloud.grpc.client.Transfer; import io.atomic.cloud.paper.CloudPlugin; import io.atomic.cloud.paper.permission.Permissions; import io.papermc.paper.command.brigadier.Commands; @@ -19,15 +19,15 @@ public static void register(@NotNull Commands 
commands) { var sender = context.getSource().getSender(); var connection = CloudPlugin.INSTANCE.connection(); - sender.sendRichMessage("Marking unit as not ready"); - connection.markNotReady().thenRun(() -> { - sender.sendRichMessage("Requesting to transfer all users to new units..."); - connection.transferUsers(TransferManagement.TransferUsersRequest.newBuilder() - .addAllUserUuids(Bukkit.getOnlinePlayers().stream() + sender.sendRichMessage("Marking server as not ready"); + connection.setReady(false).thenRun(() -> { + sender.sendRichMessage("Requesting to transfer all users to new servers..."); + connection.transferUsers(Transfer.TransferReq.newBuilder() + .addAllIds(Bukkit.getOnlinePlayers().stream() .map(item -> item.getUniqueId().toString()) .toList()) - .setTarget(TransferManagement.TransferTargetValue.newBuilder() - .setTargetType(TransferManagement.TransferTargetValue.TargetType.FALLBACK) + .setTarget(Transfer.Target.newBuilder() + .setType(Transfer.Target.Type.FALLBACK) .build()) .build()); CloudPlugin.SCHEDULER.schedule( diff --git a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/SendCommand.java b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/SendCommand.java index 9038045b..93708793 100644 --- a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/SendCommand.java +++ b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/SendCommand.java @@ -1,7 +1,7 @@ package io.atomic.cloud.paper.command; import com.mojang.brigadier.Command; -import io.atomic.cloud.grpc.unit.TransferManagement; +import io.atomic.cloud.grpc.client.Transfer; import io.atomic.cloud.paper.CloudPlugin; import io.atomic.cloud.paper.command.argument.TransferTargetArgument; import io.atomic.cloud.paper.permission.Permissions; @@ -26,15 +26,14 @@ public static void register(@NotNull Commands commands) { var users = context.getArgument("user", PlayerSelectorArgumentResolver.class) .resolve(context.getSource()); var userCount = users.size(); - 
var target = - context.getArgument("target", TransferManagement.TransferTargetValue.class); + var target = context.getArgument("target", Transfer.Target.class); sender.sendRichMessage("Transferring " + userCount + " users to " + formatTarget(target) + "..."); connection - .transferUsers(TransferManagement.TransferUsersRequest.newBuilder() - .addAllUserUuids(users.stream() + .transferUsers(Transfer.TransferReq.newBuilder() + .addAllIds(users.stream() .map(item -> item.getUniqueId() .toString()) .toList()) @@ -56,16 +55,16 @@ public static void register(@NotNull Commands commands) { } @Contract(pure = true) - private static @NotNull String formatTarget(TransferManagement.@NotNull TransferTargetValue target) { - switch (target.getTargetType()) { - case TransferManagement.TransferTargetValue.TargetType.FALLBACK -> { + private static @NotNull String formatTarget(Transfer.@NotNull Target target) { + switch (target.getType()) { + case Transfer.Target.Type.FALLBACK -> { return "fallback"; } - case TransferManagement.TransferTargetValue.TargetType.UNIT -> { - return "unit:" + target.getTarget(); + case Transfer.Target.Type.SERVER -> { + return "server:" + target.getTarget(); } - case TransferManagement.TransferTargetValue.TargetType.DEPLOYMENT -> { - return "deployment:" + target.getTarget(); + case Transfer.Target.Type.GROUP -> { + return "group:" + target.getTarget(); } } return "unknown"; diff --git a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/argument/TransferTargetArgument.java b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/argument/TransferTargetArgument.java index 1e7fa48e..716bb1a0 100644 --- a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/argument/TransferTargetArgument.java +++ b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/command/argument/TransferTargetArgument.java @@ -7,9 +7,9 @@ import com.mojang.brigadier.exceptions.SimpleCommandExceptionType; import com.mojang.brigadier.suggestion.Suggestions; 
import com.mojang.brigadier.suggestion.SuggestionsBuilder; -import io.atomic.cloud.grpc.unit.DeploymentInformation; -import io.atomic.cloud.grpc.unit.TransferManagement; -import io.atomic.cloud.grpc.unit.UnitInformation; +import io.atomic.cloud.grpc.client.Group; +import io.atomic.cloud.grpc.client.Server; +import io.atomic.cloud.grpc.client.Transfer; import io.atomic.cloud.paper.CloudPlugin; import io.papermc.paper.command.brigadier.MessageComponentSerializer; import io.papermc.paper.command.brigadier.argument.CustomArgumentType; @@ -20,17 +20,15 @@ import org.jetbrains.annotations.NotNull; @SuppressWarnings("UnstableApiUsage") -public class TransferTargetArgument - implements CustomArgumentType.Converted { +public class TransferTargetArgument implements CustomArgumentType.Converted { public static final TransferTargetArgument INSTANCE = new TransferTargetArgument(); @Override - public TransferManagement.@NotNull TransferTargetValue convert(@NotNull String value) - throws CommandSyntaxException { + public Transfer.@NotNull Target convert(@NotNull String value) throws CommandSyntaxException { if (value.equalsIgnoreCase("fallback")) { - return TransferManagement.TransferTargetValue.newBuilder() - .setTargetType(TransferManagement.TransferTargetValue.TargetType.FALLBACK) + return Transfer.Target.newBuilder() + .setType(Transfer.Target.Type.FALLBACK) .build(); } var valueSplit = value.split(":"); @@ -40,27 +38,27 @@ public class TransferTargetArgument } var type = valueSplit[0]; var identifier = valueSplit[1]; - if (type.equalsIgnoreCase("unit")) { - var cached = CloudPlugin.INSTANCE.connection().getUnitsNow(); - if (cached.isEmpty()) throw createException("Fetching available units..."); - var unit = cached.get().getUnitsList().stream() + if (type.equalsIgnoreCase("server")) { + var cached = CloudPlugin.INSTANCE.connection().getServersNow(); + if (cached.isEmpty()) throw createException("Fetching available servers..."); + var server = 
cached.get().getServersList().stream() .filter(item -> item.getName().equalsIgnoreCase(identifier)) .findFirst(); - if (unit.isEmpty()) throw createException("\"" + identifier + "\" does not exist"); - return TransferManagement.TransferTargetValue.newBuilder() - .setTargetType(TransferManagement.TransferTargetValue.TargetType.UNIT) - .setTarget(unit.get().getUuid()) + if (server.isEmpty()) throw createException("\"" + identifier + "\" does not exist"); + return Transfer.Target.newBuilder() + .setType(Transfer.Target.Type.SERVER) + .setTarget(server.get().getId()) .build(); - } else if (type.equalsIgnoreCase("deployment")) { - var cached = CloudPlugin.INSTANCE.connection().getDeploymentsNow(); - if (cached.isEmpty()) throw createException("Fetching available deployments..."); - var deployment = cached.get().getDeploymentsList().stream() + } else if (type.equalsIgnoreCase("group")) { + var cached = CloudPlugin.INSTANCE.connection().getGroupsNow(); + if (cached.isEmpty()) throw createException("Fetching available groups..."); + var group = cached.get().getGroupsList().stream() .filter(item -> item.equalsIgnoreCase(identifier)) .findFirst(); - if (deployment.isEmpty()) throw createException("\"" + identifier + "\" does not exist"); - return TransferManagement.TransferTargetValue.newBuilder() - .setTargetType(TransferManagement.TransferTargetValue.TargetType.DEPLOYMENT) - .setTarget(deployment.get()) + if (group.isEmpty()) throw createException("\"" + identifier + "\" does not exist"); + return Transfer.Target.newBuilder() + .setType(Transfer.Target.Type.GROUP) + .setTarget(group.get()) .build(); } throw createException("Unknown transfer target type: " + type); @@ -71,28 +69,27 @@ public class TransferTargetArgument @NotNull CommandContext context, @NotNull SuggestionsBuilder builder) { return CloudPlugin.INSTANCE .connection() - .getUnits() - .thenCombine(CloudPlugin.INSTANCE.connection().getDeployments(), SuggestionsData::new) + .getServers() + 
.thenCombine(CloudPlugin.INSTANCE.connection().getGroups(), SuggestionsData::new) .thenCompose(response -> { - response.units - .getUnitsList() - .forEach(unit -> builder.suggest( - "unit:" + unit.getName(), + response.servers + .getServersList() + .forEach(server -> builder.suggest( + "server:" + server.getName(), MessageComponentSerializer.message() - .serialize(Component.text(unit.getUuid()) + .serialize(Component.text(server.getId()) .color(NamedTextColor.BLUE)))); - response.deployments - .getDeploymentsList() - .forEach(deployment -> builder.suggest( - "deployment:" + deployment, + response.groups + .getGroupsList() + .forEach(group -> builder.suggest( + "group:" + group, MessageComponentSerializer.message() - .serialize( - Component.text(deployment).color(NamedTextColor.BLUE)))); + .serialize(Component.text(group).color(NamedTextColor.BLUE)))); builder.suggest( "fallback", MessageComponentSerializer.message() .serialize(Component.text( - "This option will try to transfer all users to a fallback unit") + "This option will try to transfer all users to a fallback server") .color(NamedTextColor.BLUE))); return builder.buildFuture(); }); @@ -108,6 +105,5 @@ public class TransferTargetArgument return new CommandSyntaxException(new SimpleCommandExceptionType(() -> message), () -> message); } - private record SuggestionsData( - UnitInformation.UnitListResponse units, DeploymentInformation.DeploymentListResponse deployments) {} + private record SuggestionsData(Server.List servers, Group.List groups) {} } diff --git a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/listener/PlayerEventsListener.java b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/listener/PlayerEventsListener.java index 7789e0f3..63e5300c 100644 --- a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/listener/PlayerEventsListener.java +++ b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/listener/PlayerEventsListener.java @@ -1,6 +1,6 @@ package io.atomic.cloud.paper.listener; 
-import io.atomic.cloud.grpc.unit.UserManagement; +import io.atomic.cloud.grpc.client.User; import io.atomic.cloud.paper.CloudPlugin; import org.bukkit.event.EventHandler; import org.bukkit.event.EventPriority; @@ -14,9 +14,9 @@ public class PlayerEventsListener implements Listener { @EventHandler(priority = EventPriority.LOWEST) public void onPlayerJoin(@NotNull PlayerJoinEvent event) { var player = event.getPlayer(); - var user = UserManagement.UserConnectedRequest.newBuilder() + var user = User.ConnectedReq.newBuilder() .setName(player.getName()) - .setUuid(player.getUniqueId().toString()) + .setId(player.getUniqueId().toString()) .build(); CloudPlugin.INSTANCE.connection().userConnected(user); } @@ -24,8 +24,8 @@ public void onPlayerJoin(@NotNull PlayerJoinEvent event) { @EventHandler(priority = EventPriority.HIGHEST) private void onPlayerLeft(@NotNull PlayerQuitEvent event) { var player = event.getPlayer(); - var user = UserManagement.UserDisconnectedRequest.newBuilder() - .setUuid(player.getUniqueId().toString()) + var user = User.DisconnectedReq.newBuilder() + .setId(player.getUniqueId().toString()) .build(); CloudPlugin.INSTANCE.connection().userDisconnected(user); } diff --git a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/transfer/TransferHandler.java b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/transfer/TransferHandler.java index 051d8132..483e4480 100644 --- a/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/transfer/TransferHandler.java +++ b/clients/jvm/paper/src/main/java/io/atomic/cloud/paper/transfer/TransferHandler.java @@ -1,14 +1,14 @@ package io.atomic.cloud.paper.transfer; import io.atomic.cloud.common.connection.CloudConnection; -import io.atomic.cloud.grpc.unit.TransferManagement; +import io.atomic.cloud.grpc.client.Transfer; import io.atomic.cloud.paper.CloudPlugin; import io.grpc.stub.StreamObserver; import java.util.UUID; import lombok.RequiredArgsConstructor; @RequiredArgsConstructor -public class TransferHandler 
implements StreamObserver { +public class TransferHandler implements StreamObserver { private final CloudConnection cloudConnection; @@ -18,14 +18,13 @@ public void enable() { } @Override - public void onNext(TransferManagement.ResolvedTransferResponse resolvedTransfer) { + public void onNext(Transfer.TransferRes resolvedTransfer) { try { - var uuid = UUID.fromString(resolvedTransfer.getUserUuid()); + var uuid = UUID.fromString(resolvedTransfer.getId()); var player = CloudPlugin.INSTANCE.getServer().getPlayer(uuid); if (player == null) { CloudPlugin.LOGGER.error( - "Failed to handle transfer request for user {}: Player not found", - resolvedTransfer.getUserUuid()); + "Failed to handle transfer request for user {}: Player not found", resolvedTransfer.getId()); return; } @@ -37,7 +36,7 @@ public void onNext(TransferManagement.ResolvedTransferResponse resolvedTransfer) resolvedTransfer.getPort()); } catch (Throwable throwable) { CloudPlugin.LOGGER.error( - "Failed to handle transfer request for user {}: {}", resolvedTransfer.getUserUuid(), throwable); + "Failed to handle transfer request for user {}: {}", resolvedTransfer.getId(), throwable); } } diff --git a/clients/wrapper/Cargo.toml b/clients/wrapper/Cargo.toml index 12fc481c..63829be4 100644 --- a/clients/wrapper/Cargo.toml +++ b/clients/wrapper/Cargo.toml @@ -17,10 +17,10 @@ anyhow = "1.0.95" ctrlc = "3.4.5" # User system -uuid = { version = "1.12.0", features = ["v4"] } +uuid = { version = "1.12.1", features = ["v4"] } # Command line arguments -clap = { version = "4.5.26", features = ["derive"] } +clap = { version = "4.5.27", features = ["derive"] } # Regex parsing regex = "1.11.1" diff --git a/clients/wrapper/build.rs b/clients/wrapper/build.rs index 09a6dea2..aaf63977 100644 --- a/clients/wrapper/build.rs +++ b/clients/wrapper/build.rs @@ -79,6 +79,6 @@ fn get_protocol_version_info() -> Result> { fn generate_grpc_code() -> Result<(), Box> { tonic_build::configure() .build_server(false) - 
.compile_protos(&[format!("{}/unit/unit.proto", PROTO_PATH)], &[PROTO_PATH])?; + .compile_protos(&[format!("{}/client/service.proto", PROTO_PATH)], &[PROTO_PATH])?; Ok(()) } diff --git a/clients/wrapper/src/application/network.rs b/clients/wrapper/src/application/network.rs index 6cc4f41a..00d6d541 100644 --- a/clients/wrapper/src/application/network.rs +++ b/clients/wrapper/src/application/network.rs @@ -7,7 +7,7 @@ use url::Url; use proto::{ transfer_management::ResolvedTransferResponse, - unit_service_client::UnitServiceClient, + server_service_client::UnitServiceClient, user_management::{UserConnectedRequest, UserDisconnectedRequest}, }; use tonic::{transport::Channel, Request, Response, Status, Streaming}; @@ -16,7 +16,7 @@ use tonic::{transport::Channel, Request, Response, Status, Streaming}; pub mod proto { use tonic::include_proto; - include_proto!("unit"); + include_proto!("server"); } pub type CloudConnectionHandle = Arc; @@ -55,7 +55,7 @@ impl CloudConnection { token = Some(value); } else { error!( - "Missing UNIT_TOKEN environment variable. Please set it to the token of this unit" + "Missing UNIT_TOKEN environment variable. 
Please set it to the token of this server" ); exit(1); } diff --git a/clients/wrapper/src/application/user.rs b/clients/wrapper/src/application/user.rs index 647b10b6..76065f88 100644 --- a/clients/wrapper/src/application/user.rs +++ b/clients/wrapper/src/application/user.rs @@ -22,7 +22,7 @@ impl Users { } pub async fn handle_connect(&mut self, name: String, uuid: Uuid) { - info!("{} connected to unit", name); + info!("{} connected to server", name); if let Err(error) = self .connection @@ -39,7 +39,7 @@ impl Users { pub async fn handle_disconnect(&mut self, name: String) { if let Some(user) = self.users.remove(&name) { - info!("{} disconnected from unit", user.name); + info!("{} disconnected from server", user.name); if let Err(error) = self .connection diff --git a/common/Cargo.toml b/common/Cargo.toml index d85cbefb..89975a4b 100644 --- a/common/Cargo.toml +++ b/common/Cargo.toml @@ -12,7 +12,7 @@ colored = "3.0.0" anyhow = "1.0.95" # Command line arguments -clap = { version = "4.5.26", features = ["derive"] } +clap = { version = "4.5.27", features = ["derive"] } # Configuration serde = { version = "1.0.217", features = ["derive"] } diff --git a/common/src/allocator.rs b/common/src/allocator.rs index 79e4d2fe..6be7b912 100644 --- a/common/src/allocator.rs +++ b/common/src/allocator.rs @@ -4,6 +4,30 @@ use std::{ ops::{AddAssign, Range}, }; +/// A generic number allocator that manages a range of values. +/// +/// It sequentially allocates numbers within a given half-open range (`[start, end)`). +/// When a number is released, it is stored and later reused before new numbers are allocated. +/// +/// # Type Parameters +/// +/// * `T` - A numeric type that implements `Copy`, `Ord`, `Hash`, `AddAssign`, and `From`. +/// +/// # Example +/// +/// ``` +/// use common::allocator::NumberAllocator; +/// +/// // Create an allocator for numbers 1 through 10 (exclusive). +/// let mut allocator = NumberAllocator::new(1..10); +/// +/// // Allocate a number. 
+/// let num = allocator.allocate().expect("Allocation failed"); +/// println!("Allocated: {}", num); +/// +/// // Release the number back to the allocator. +/// allocator.release(num); +/// ``` pub struct NumberAllocator { next: T, max: T, @@ -15,6 +39,9 @@ impl NumberAllocator where T: Copy + Ord + Hash + AddAssign + From, { + /// Constructs a new `NumberAllocator` with the specified range. + /// + /// The allocator will provide numbers starting from `range.start` up to, but not including, `range.end`. pub fn new(range: Range) -> Self { Self { next: range.start, @@ -24,24 +51,34 @@ where } } + /// Allocates and returns a number. + /// + /// If there are any numbers that have been released previously, the smallest one is reused. + /// Otherwise, the next sequential number is allocated. + /// + /// Returns `None` if no numbers are available (i.e. all numbers in the range are allocated). pub fn allocate(&mut self) -> Option { - if let Some(&id) = self.available.iter().next() { - self.available.remove(&id); - self.active.insert(id); - Some(id) + if let Some(&number) = self.available.iter().next() { + self.available.remove(&number); + self.active.insert(number); + Some(number) } else if self.next < self.max { - let id = self.next; + let number = self.next; self.next += T::from(1); - self.active.insert(id); - Some(id) + self.active.insert(number); + Some(number) } else { None } } - pub fn release(&mut self, value: T) { - if self.active.remove(&value) { - self.available.insert(value); + /// Releases a previously allocated number back to the allocator. + /// + /// If the number was active, it is removed from the active set and added to the available pool. + /// Released numbers are reused before new sequential numbers are allocated. 
+ pub fn release(&mut self, number: T) { + if self.active.remove(&number) { + self.available.insert(number); } } } diff --git a/common/src/cache.rs b/common/src/cache.rs new file mode 100644 index 00000000..89d6165f --- /dev/null +++ b/common/src/cache.rs @@ -0,0 +1,36 @@ +use std::collections::VecDeque; + +pub struct FixedSizeCache { + items: VecDeque, + size: usize, +} + +impl FixedSizeCache { + pub fn new(size: usize) -> Self { + Self { + items: VecDeque::with_capacity(size), + size, + } + } + + pub fn push(&mut self, item: T) { + if self.items.len() == self.size { + self.items.pop_front(); + } + self.items.push_back(item); + } + + pub fn extend(&mut self, items: Vec) { + for item in items { + self.push(item); + } + } + + pub fn clone_items(&self) -> Vec { + self.items.iter().cloned().collect() + } + + pub fn has_data(&self) -> bool { + !self.items.is_empty() + } +} diff --git a/common/src/error.rs b/common/src/error.rs new file mode 100644 index 00000000..ef97907b --- /dev/null +++ b/common/src/error.rs @@ -0,0 +1,38 @@ +use std::backtrace::BacktraceStatus; + +use anyhow::Error; +use simplelog::error; + +pub struct FancyError(); + +impl FancyError { + pub fn print_fancy(error: &Error, critical: bool) { + let exit_message = if critical { + "An error occurred causing the application to exit. The application cannot continue after this error." + } else { + "An error occurred, but the application can continue. The application may not function as expected." 
+ }; + + error!("{}", exit_message); + error!("If you believe this error was not caused by the runtime, for example: a missing network connection, please report this error to the developers."); + error!("Create a new issue on the GitHub repository at the following link: https://github.com/HttpRafa/atomic-cloud with the information below:"); + + error!("Error: {}", error); + error + .chain() + .skip(1) + .for_each(|error| error!(" Caused by: {}", error)); + + match error.backtrace().status() { + BacktraceStatus::Captured => { + error!("Backtrace:"); + format!("{}", error.backtrace()) + .lines() + .for_each(|line| error!("{}", line)); + } + _ => { + error!("Backtrace is not available. Ensure you run the program with `RUST_BACKTRACE=1` to enable backtraces."); + } + } + } +} diff --git a/common/src/config.rs b/common/src/file.rs similarity index 66% rename from common/src/config.rs rename to common/src/file.rs index 689084ca..3f9f340a 100644 --- a/common/src/config.rs +++ b/common/src/file.rs @@ -3,8 +3,8 @@ use std::{fs, path::Path}; use anyhow::Result; use serde::{de::DeserializeOwned, Serialize}; -pub trait SaveToTomlFile: Serialize { - fn save_to_file(&self, path: &Path, create_parent: bool) -> Result<()> { +pub trait SyncSaveToTomlFile: Serialize { + fn save(&self, path: &Path, create_parent: bool) -> Result<()> { if create_parent { if let Some(parent) = path.parent() { fs::create_dir_all(parent)?; @@ -15,8 +15,8 @@ pub trait SaveToTomlFile: Serialize { } } -pub trait LoadFromTomlFile: DeserializeOwned { - fn load_from_file(path: &Path) -> Result { +pub trait SyncLoadFromTomlFile: DeserializeOwned { + fn from_file(path: &Path) -> Result { let data = fs::read_to_string(path)?; let config = toml::from_str(&data)?; Ok(config) diff --git a/common/src/init.rs b/common/src/init.rs index c90c1edc..1eadcf52 100644 --- a/common/src/init.rs +++ b/common/src/init.rs @@ -94,7 +94,7 @@ impl CloudInit { "«{}» {} | {} by {}", "*".blue(), application.blue(), - format!("v{}", 
version).blue(), + format!("v{version}").blue(), authors.join(", ").blue() ); println!(); diff --git a/common/src/lib.rs b/common/src/lib.rs index c93acd08..663b6c62 100644 --- a/common/src/lib.rs +++ b/common/src/lib.rs @@ -1,6 +1,9 @@ pub mod allocator; -pub mod config; +pub mod cache; +pub mod error; +pub mod file; pub mod init; pub mod name; +pub mod network; pub mod tick; pub mod version; diff --git a/common/src/name.rs b/common/src/name.rs index 2cdb15cf..21b29d4a 100644 --- a/common/src/name.rs +++ b/common/src/name.rs @@ -7,12 +7,14 @@ pub struct TimedName { } impl TimedName { + #[must_use] pub fn new(cloud_identifier: &str, name: &str, permanent: bool) -> Self { Self { raw_name: name.to_string(), name: Self::generate(Some(cloud_identifier.to_string()), name, permanent), } } + #[must_use] pub fn new_no_identifier(name: &str, permanent: bool) -> Self { Self { raw_name: name.to_string(), @@ -26,19 +28,22 @@ impl TimedName { .unwrap() .as_secs(); match (cloud_identifier, permanent) { - (Some(identifier), true) => format!("{}@{}", name, identifier), + (Some(identifier), true) => format!("{name}@{identifier}"), (None, true) => name.to_string(), - (Some(identifier), false) => format!("{}@{}#{}", name, identifier, timestamp), - (None, false) => format!("{}#{}", name, timestamp), + (Some(identifier), false) => format!("{name}@{identifier}#{timestamp}"), + (None, false) => format!("{name}#{timestamp}"), } } + #[must_use] pub fn get_name(&self) -> &str { &self.name } + #[must_use] pub fn get_name_cloned(&self) -> String { self.name.clone() } + #[must_use] pub fn get_raw_name(&self) -> &str { &self.raw_name } diff --git a/common/src/network.rs b/common/src/network.rs new file mode 100644 index 00000000..317e6f37 --- /dev/null +++ b/common/src/network.rs @@ -0,0 +1,22 @@ +use std::fmt::Display; + +use serde::{Deserialize, Serialize}; + +#[derive(Serialize, Deserialize, Clone)] +pub struct HostAndPort { + pub host: S, + pub port: u16, +} + +impl HostAndPort { + 
#[must_use] + pub fn new(host: String, port: u16) -> Self { + Self { host, port } + } +} + +impl Display for HostAndPort { + fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!(formatter, "{}:{}", self.host, self.port) + } +} diff --git a/controller/Cargo.toml b/controller/Cargo.toml index 6ede94d9..3afd3614 100644 --- a/controller/Cargo.toml +++ b/controller/Cargo.toml @@ -13,39 +13,44 @@ simplelog = { version = "0.12.2", features = ["paris"] } # Error handling anyhow = "1.0.95" +# Getters and bitflags +getset = "0.1.4" +bitflags = "2.8.0" + # Signal handling ctrlc = "3.4.5" # Unit system -uuid = { version = "1.12.0", features = ["v4"] } +uuid = { version = "1.13.1", features = ["v4"] } # Command line arguments -clap = { version = "4.5.26", features = ["derive"] } +clap = { version = "4.5.29", features = ["derive"] } # Regex parsing regex = "1.11.1" # Configuration serde = { version = "1.0.217", features = ["derive"] } -toml = "0.8.19" +toml = "0.8.20" # Async runtime -tokio = { version = "1.43.0", features = ["rt", "rt-multi-thread", "macros"] } +tokio = { version = "1.43.0", features = ["rt", "rt-multi-thread", "fs", "process", "macros"] } tokio-stream = "0.1.17" +futures = "0.3.31" # API url = { version = "2.5.4", features = ["serde"] } -prost = "0.13.4" +prost = "0.13.5" tonic = "0.12.3" # Drivers wasmtime = { version = "29.0.1", default-features = false, features = ["runtime", "component-model", "cranelift", "parallel-compilation", "cache"], optional = true } wasmtime-wasi = { version = "29.0.1", optional = true } -minreq = { version = "2.13.0", features = ["https-rustls"], optional = true } +minreq = { version = "2.13.2", features = ["https-rustls"], optional = true } [build-dependencies] -toml = "0.8.19" +toml = "0.8.20" tonic-build = "0.12.3" [features] -wasm-drivers = ["dep:wasmtime", "dep:wasmtime-wasi", "dep:minreq"] +wasm-plugins = ["dep:wasmtime", "dep:wasmtime-wasi", "dep:minreq"] \ No newline at end of file diff 
--git a/controller/build.rs b/controller/build.rs index 17a5c7a0..b2e79c4b 100644 --- a/controller/build.rs +++ b/controller/build.rs @@ -1,12 +1,12 @@ use std::{ env, fs::{self, File}, - io::Write, + io::Write as _, }; const PROTO_PATH: &str = "../protocol/grpc"; -fn main() -> Result<(), Box> { +fn main() -> Result<(), Box> { generate_build_info(); generate_grpc_code()?; Ok(()) @@ -14,10 +14,10 @@ fn main() -> Result<(), Box> { fn generate_build_info() { let out_dir = env::var("OUT_DIR").unwrap(); - let mut file = File::create(format!("{}/build_info.rs", out_dir)).unwrap(); + let mut file = File::create(format!("{out_dir}/build_info.rs")).unwrap(); - let commit = env::var("CURRENT_COMMIT").unwrap_or_else(|_| "unknown".to_string()); - let build = env::var("CURRENT_BUILD").unwrap_or_else(|_| "0".to_string()); + let commit = env::var("CURRENT_COMMIT").unwrap_or_else(|_| "unknown".to_owned()); + let build = env::var("CURRENT_BUILD").unwrap_or_else(|_| "0".to_owned()); let version = get_version_info().expect("Unable to get version information"); let protocol_version = @@ -28,14 +28,14 @@ fn generate_build_info() { writeln!(file, " major: {},", version.0).unwrap(); writeln!(file, " minor: {},", version.1).unwrap(); writeln!(file, " patch: {},", version.2).unwrap(); - writeln!(file, " build: {},", build).unwrap(); - writeln!(file, " commit: \"{}\",", commit).unwrap(); + writeln!(file, " build: {build},").unwrap(); + writeln!(file, " commit: \"{commit}\",").unwrap(); writeln!(file, " stage: Stage::{},", version.3).unwrap(); - writeln!(file, " protocol: {},", protocol_version).unwrap(); + writeln!(file, " protocol: {protocol_version},").unwrap(); writeln!(file, "}};").unwrap(); } -fn get_version_info() -> Result<(u16, u16, u16, String), Box> { +fn get_version_info() -> Result<(u16, u16, u16, String), Box> { let cargo_toml_content = fs::read_to_string("Cargo.toml")?; let cargo_toml: toml::Value = toml::from_str(&cargo_toml_content)?; @@ -53,7 +53,7 @@ fn get_version_info() 
-> Result<(u16, u16, u16, String), Box 1 { version_parts[1][0..1].to_uppercase() + &version_parts[1][1..] } else { - "Stable".to_string() + "Stable".to_owned() }; Ok(( version_numbers[0], @@ -66,7 +66,7 @@ fn get_version_info() -> Result<(u16, u16, u16, String), Box Result> { +fn get_protocol_version_info() -> Result> { let cargo_toml_content = fs::read_to_string("../Cargo.toml")?; let cargo_toml: toml::Value = toml::from_str(&cargo_toml_content)?; @@ -76,13 +76,13 @@ fn get_protocol_version_info() -> Result> { value.ok_or("Unable to get protocol version from Cargo.toml".into()) } -fn generate_grpc_code() -> Result<(), Box> { +fn generate_grpc_code() -> Result<(), Box> { tonic_build::configure() .build_client(false) .compile_protos( &[ - format!("{}/admin/admin.proto", PROTO_PATH), - format!("{}/unit/unit.proto", PROTO_PATH), + format!("{PROTO_PATH}/manage/service.proto"), + format!("{PROTO_PATH}/client/service.proto"), ], &[PROTO_PATH], )?; diff --git a/controller/configs/config.toml b/controller/configs/config.toml new file mode 100644 index 00000000..ee26e37e --- /dev/null +++ b/controller/configs/config.toml @@ -0,0 +1,37 @@ +# This identifier is used for the controller. +# It is useful when you have centralized storage for all controllers. +identifier = "%RANDOM%" + +[network] +# The controller will listen on this address. +bind = "0.0.0.0:8080" + +# The maximum time the controller will wait for the instance to start up. +# If this timeout is reached, the startup will be considered as failed. +[timeouts.startup] +secs = 150 +nanos = 0 + +# The maximum time the controller will wait for the instance to restart. +# If this timeout is reached, the restart will be considered as failed. +[timeouts.restart] +secs = 120 +nanos = 0 + +# The interval between heartbeats sent by the controller. +# NOTE: If you change this value, make sure to update the instance's heartbeat timeout as well. 
+[timeouts.heartbeat] +secs = 15 +nanos = 0 + +# The maximum time the controller will wait for a user to transfer to a different instance. +# If this timeout is reached, the transfer will be considered as failed. +[timeouts.transfer] +secs = 10 +nanos = 0 + +# The maximum time the controller will wait for an empty server to be filled. +# If this timeout is reached, the server will be stopped. +[timeouts.empty_server] +secs = 60 +nanos = 0 \ No newline at end of file diff --git a/controller/configs/wasm-engine.toml b/controller/configs/wasm-engine.toml new file mode 100644 index 00000000..5ac420d2 --- /dev/null +++ b/controller/configs/wasm-engine.toml @@ -0,0 +1,5 @@ +# For more settings, please refer to the documentation: +# https://bytecodealliance.github.io/wasmtime/cli-cache.html + +[cache] +enabled = true \ No newline at end of file diff --git a/controller/configs/wasm-plugins.toml b/controller/configs/wasm-plugins.toml new file mode 100644 index 00000000..52f04d66 --- /dev/null +++ b/controller/configs/wasm-plugins.toml @@ -0,0 +1,26 @@ +# This configuration is crucial for granting the plugins their required permissions +# https://httprafa.github.io/atomic-cloud/controller/plugins/wasm/permissions/ + +[[plugins]] +name = "local" +inherit_stdio = false +inherit_args = false +inherit_env = false +inherit_network = false +allow_ip_name_lookup = false +allow_http = false +allow_process = true +allow_remove_dir_all = true +mounts = [] + +[[plugins]] +name = "pelican" +inherit_stdio = false +inherit_args = false +inherit_env = false +inherit_network = true +allow_ip_name_lookup = true +allow_http = true +allow_process = false +allow_remove_dir_all = false +mounts = [] \ No newline at end of file diff --git a/controller/src/application.rs b/controller/src/application.rs index d401d831..11760657 100644 --- a/controller/src/application.rs +++ b/controller/src/application.rs @@ -1,219 +1,265 @@ -use anyhow::Error; -use auth::Auth; -use cloudlet::Cloudlets; -use 
deployment::Deployments; -use driver::Drivers; -use event::EventBus; -use simplelog::info; -use std::sync::atomic::{AtomicBool, Ordering}; -use std::sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard, Weak}; -use std::thread; -use std::time::{Duration, Instant}; -use tokio::runtime::{Builder, Runtime}; -use unit::Units; -use user::Users; - -use crate::config::Config; -use crate::network::NetworkStack; +use std::{ + sync::{ + atomic::{AtomicU8, Ordering}, + Arc, + }, + time::Duration, +}; + +use anyhow::Result; +use auth::manager::AuthManager; +use getset::{Getters, MutGetters}; +use group::manager::GroupManager; +use node::manager::NodeManager; +use plugin::manager::PluginManager; +use server::{manager::ServerManager, screen::manager::ScreenManager}; +use simplelog::{error, info}; +use subscriber::manager::SubscriberManager; +use tokio::{ + select, + sync::{mpsc, watch}, + time::{interval, Instant, MissedTickBehavior}, +}; +use user::manager::UserManager; + +use crate::{config::Config, network::NetworkStack, task::Task}; pub mod auth; -pub mod cloudlet; -pub mod deployment; -pub mod driver; -pub mod event; -pub mod unit; +pub mod group; +pub mod node; +pub mod plugin; +pub mod server; +pub mod subscriber; pub mod user; -static STARTUP_SLEEP: Duration = Duration::from_secs(1); -static SHUTDOWN_WAIT: Duration = Duration::from_secs(10); +const TICK_RATE: u64 = 10; +const TASK_BUFFER: usize = 128; -const TICK_RATE: u64 = 1; - -pub type ControllerHandle = Arc; -pub type WeakControllerHandle = Weak; +pub type TaskSender = mpsc::Sender; +#[derive(Getters, MutGetters)] pub struct Controller { - handle: WeakControllerHandle, - - /* Immutable */ - pub(crate) configuration: Config, - pub(crate) drivers: Drivers, + /* State */ + state: State, - /* Runtime State */ - runtime: RwLock>, - running: AtomicBool, + /* Tasks */ + tasks: (TaskSender, mpsc::Receiver), - /* Authentication */ - auth: Auth, + /* Shared Components */ + pub shared: Arc, - /* Accessed rarely */ - 
cloudlets: RwLock, - deployments: RwLock, + /* Components */ + pub plugins: PluginManager, + pub nodes: NodeManager, + pub groups: GroupManager, + pub servers: ServerManager, + pub users: UserManager, - /* Accessed frequently */ - units: Units, - users: Users, + /* Config */ + #[getset(get = "pub")] + config: Config, +} - /* Event Bus */ - event_bus: EventBus, +pub struct Shared { + pub auth: AuthManager, + pub subscribers: SubscriberManager, + pub screens: ScreenManager, } impl Controller { - pub fn new(configuration: Config) -> Arc { - Arc::new_cyclic(move |handle| { - let auth = Auth::load_all(); - let drivers = Drivers::load_all(configuration.identifier.as_ref().unwrap()); - let cloudlets = Cloudlets::load_all(handle.clone(), &drivers); - let deployments = Deployments::load_all(handle.clone(), &cloudlets); - let units = Units::new(handle.clone()); - let users = Users::new(handle.clone()); - let event_bus = EventBus::new(/*handle.clone()*/); - Self { - handle: handle.clone(), - configuration, - drivers, - runtime: RwLock::new(Some( - Builder::new_multi_thread() - .enable_all() - .build() - .expect("Failed to create Tokio runtime"), - )), - running: AtomicBool::new(true), - auth, - cloudlets: RwLock::new(cloudlets), - deployments: RwLock::new(deployments), - units, - users, - event_bus, - } + pub async fn init(config: Config) -> Result { + let shared = Arc::new(Shared { + auth: AuthManager::init().await?, + subscribers: SubscriberManager::init(), + screens: ScreenManager::init(), + }); + + let plugins = PluginManager::init(&config).await?; + let nodes = NodeManager::init(&plugins).await?; + let groups = GroupManager::init(&nodes).await?; + + let servers = ServerManager::init(); + let users = UserManager::init(); + + Ok(Self { + state: State::new(), + tasks: mpsc::channel(TASK_BUFFER), + shared, + plugins, + nodes, + groups, + servers, + users, + config, }) } - pub fn start(&self) { - // Set up signal handlers - self.setup_interrupts(); - - let network_handle = 
NetworkStack::start(self.handle.upgrade().unwrap()); - let tick_duration = Duration::from_millis(1000 / TICK_RATE); + pub async fn run(&mut self) -> Result<()> { + // Setup signal handlers + self.setup_handlers()?; - // Wait for 1 second before starting the tick loop - thread::sleep(STARTUP_SLEEP); + let network = NetworkStack::start(&self.config, &self.shared, &self.tasks.0); - while self.running.load(Ordering::Relaxed) { - let start_time = Instant::now(); - self.tick(); + // Main loop + let mut interval = interval(Duration::from_millis(1000 / TICK_RATE)); + interval.set_missed_tick_behavior(MissedTickBehavior::Delay); + while self.state.running { + self.state.tick(); // Check for exit votes - let elapsed_time = start_time.elapsed(); - if elapsed_time < tick_duration { - thread::sleep(tick_duration - elapsed_time); + select! { + _ = interval.tick() => self.tick().await?, + task = self.tasks.1.recv() => if let Some(task) = task { + task.run(self).await?; + }, + _ = self.state.signal.1.changed() => self.shutdown()?, } } - // Stop all units - info!("Stopping all units..."); - self.units.stop_all_instant(); + // Cleanup + self.cleanup(network).await?; - // Stop network stack - info!("Stopping network stack..."); - network_handle.shutdown(); + info!("Shutdown complete. Bye :)"); + Ok(()) + } - // Wait for all tokio task to finish - info!("Stopping async runtime..."); - (*self.runtime.write().unwrap()) - .take() - .unwrap() - .shutdown_timeout(SHUTDOWN_WAIT); + async fn tick(&mut self) -> Result<()> { + let start = Instant::now(); - // Let the drivers cleanup there messes - info!("Letting the drivers cleanup..."); - self.drivers.cleanup(); - } + // Tick plugin manager + self.plugins.tick().await?; - pub fn request_stop(&self) { - info!("Controller stop requested. 
Stopping..."); - self.running.store(false, Ordering::Relaxed); - } + // Tick node manager + self.nodes.tick()?; - pub fn lock_cloudlets(&self) -> RwLockReadGuard { - self.cloudlets - .read() - .expect("Failed to get lock to cloudlets") - } + // Tick group manager + self.groups.tick(&self.config, &mut self.servers)?; - pub fn lock_deployments(&self) -> RwLockReadGuard { - self.deployments - .read() - .expect("Failed to get lock to deployments") - } + // Tick server manager + self.servers + .tick( + &self.config, + &self.nodes, + &mut self.groups, + &mut self.users, + &self.shared, + ) + .await?; - pub fn lock_cloudlets_mut(&self) -> RwLockWriteGuard { - self.cloudlets - .write() - .expect("Failed to get lock to cloudlets") - } + // Tick user manager + self.users.tick(&self.config)?; - pub fn lock_deployments_mut(&self) -> RwLockWriteGuard { - self.deployments - .write() - .expect("Failed to get lock to deployments") - } + // Tick subscriber manager + self.shared.subscribers.tick().await?; - pub fn get_drivers(&self) -> &Drivers { - &self.drivers - } + // Tick screen manager + self.shared.screens.tick().await?; - pub fn get_auth(&self) -> &Auth { - &self.auth - } + // Check if tick took longer than expected + let elapsed = start.elapsed(); + #[allow(clippy::cast_possible_truncation)] + if elapsed.as_millis() as u64 > 1000 / TICK_RATE { + info!("Tick took longer than expected: {:?}", elapsed); + } - pub fn get_units(&self) -> &Units { - &self.units + Ok(()) } - pub fn get_users(&self) -> &Users { - &self.users - } + fn shutdown(&mut self) -> Result<()> { + info!("Starting shutdown sequence..."); - pub fn get_event_bus(&self) -> &EventBus { - &self.event_bus - } + // Shutdown group manager + self.groups.shutdown(self.state.vote())?; - pub fn get_runtime(&self) -> RwLockReadGuard> { - self.runtime.read().expect("Failed to get lock to runtime") + // Shutdown server manager + self.servers.shutdown(self.state.vote())?; + Ok(()) } - fn tick(&self) { - // Tick all drivers - 
self.drivers.tick(); + async fn cleanup(&mut self, network: NetworkStack) -> Result<()> { + info!("Starting cleanup sequence..."); + + // Cleanup user manager + self.users.cleanup()?; - // Tick all driver cloudlets - self.lock_cloudlets().tick(); + // Cleanup server manager + self.servers.cleanup()?; - // Check if all deployments have started there units etc.. - self.lock_deployments().tick(&self.units); + // Cleanup group manager + self.groups.cleanup()?; - // Check if all units have sent their heartbeats and start requested units if we can - self.units.tick(); + // Cleanup node manager + self.nodes.cleanup().await?; - // Check state of all users - self.users.tick(); + // Cleanup screen manager + self.shared.screens.cleanup().await?; + + // Cleanup plugin manager + self.plugins.cleanup().await?; + + // Shutdown network stack + network.shutdown().await?; + Ok(()) } - fn setup_interrupts(&self) { - // Set up signal handlers - let controller = self.handle.clone(); + fn setup_handlers(&self) -> Result<()> { + let sender = self.state.signal.0.clone(); ctrlc::set_handler(move || { - info!("Interrupt signal received. 
Stopping..."); - if let Some(controller) = controller.upgrade() { - controller.request_stop(); + info!("Received SIGINT, shutting down..."); + if let Err(error) = sender.send(false) { + error!("Failed to send shutdown signal: {}", error); } }) - .expect("Failed to set Ctrl+C handler"); + .map_err(std::convert::Into::into) + } + + pub fn signal_shutdown(&self) { + if let Err(error) = self.state.signal.0.send(false) { + error!("Failed to send shutdown signal: {}", error); + } + } +} + +struct State { + pub running: bool, + pub signal: (watch::Sender, watch::Receiver), + pub votes: (bool, u8, Arc), +} + +pub struct Voter(bool, Arc); +pub type OptVoter = Option; + +impl Voter { + pub fn vote(&mut self) -> bool { + if self.0 { + self.1.fetch_add(1, Ordering::Relaxed); + self.0 = false; + true + } else { + false + } } } -pub enum CreationResult { - Created, - AlreadyExists, - Denied(Error), +impl State { + #[must_use] + fn new() -> Self { + Self { + running: true, + signal: watch::channel(true), + votes: (false, 0, Arc::new(AtomicU8::new(0))), + } + } + + fn tick(&mut self) { + if self.votes.0 && self.votes.1 <= self.votes.2.load(Ordering::Relaxed) { + info!("Received enough votes to exit, initiating..."); + self.running = false; + } + } + + fn vote(&mut self) -> Voter { + self.votes.0 = true; + self.votes.1 += 1; + Voter(true, self.votes.2.clone()) + } } diff --git a/controller/src/application/auth.rs b/controller/src/application/auth.rs index 90396542..42a4e632 100644 --- a/controller/src/application/auth.rs +++ b/controller/src/application/auth.rs @@ -1,208 +1,40 @@ -use std::{ - collections::HashMap, - fs, - sync::{Arc, RwLock}, -}; +use std::sync::Arc; -use common::config::{LoadFromTomlFile, SaveToTomlFile}; -use simplelog::{error, info, warn}; -use stored::StoredUser; -use uuid::Uuid; +use server::AuthServer; +use user::AdminUser; -use crate::storage::Storage; +pub mod manager; +pub mod permissions; -use super::unit::{UnitHandle, WeakUnitHandle}; +pub mod server; 
+pub mod user; const DEFAULT_ADMIN_USERNAME: &str = "admin"; -pub type AuthUserHandle = Arc; -pub type AuthUnitHandle = Arc; +pub type AuthToken = String; +pub type OwnedAuthorization = Box; +pub type Authorization = Arc; -pub struct AuthUser { - pub username: String, - pub token: String, -} +pub trait GenericAuthorization { + fn get_server(&self) -> Option<&AuthServer>; + #[allow(unused)] + fn get_user(&self) -> Option<&AdminUser>; + fn is_type(&self, auth: AuthType) -> bool; -pub struct AuthUnit { - pub unit: WeakUnitHandle, - pub token: String, -} + #[allow(unused)] + fn is_allowed(&self, flag: u32) -> bool; -pub struct Auth { - pub users: RwLock>, - pub units: RwLock>, + fn recreate(&self) -> OwnedAuthorization; } -impl Auth { - pub fn new(users: HashMap) -> Self { - Auth { - users: RwLock::new(users), - units: RwLock::new(HashMap::new()), - } - } - - pub fn load_all() -> Self { - info!("Loading users..."); - - let users_directory = Storage::get_users_folder(); - if !users_directory.exists() { - if let Err(error) = fs::create_dir_all(&users_directory) { - warn!( - "Failed to create users directory: {}", - &error - ); - } - } - - let mut users = HashMap::new(); - let entries = match fs::read_dir(&users_directory) { - Ok(entries) => entries, - Err(error) => { - error!("Failed to read users directory: {}", &error); - return Auth::new(users); - } - }; - - for entry in entries { - let entry = match entry { - Ok(entry) => entry, - Err(error) => { - error!("Failed to read user entry: {}", &error); - continue; - } - }; - - let path = entry.path(); - if path.is_dir() { - continue; - } - - let name = match path.file_stem() { - Some(name) => name.to_string_lossy().to_string(), - None => continue, - }; - - let user = match StoredUser::load_from_file(&path) { - Ok(user) => user, - Err(error) => { - error!( - "Failed to read user {} from file({:?}): {}", - &name, - &path, - &error - ); - continue; - } - }; - - let user = AuthUser { - username: name.clone(), - token: 
user.token, - }; - if users - .values() - .any(|u| u.username.eq_ignore_ascii_case(&user.username)) - { - error!("User with the name {} already exists", &name); - continue; - } - users.insert(user.token.clone(), Arc::new(user)); - info!("Loaded user {}", &name); - } - - let amount = users.len(); - let auth = Auth::new(users); - if amount == 0 { - let user = auth - .register_user(DEFAULT_ADMIN_USERNAME) - .expect("Failed to create default admin user"); - info!("-----------------------------------"); - info!("No users found, created default admin user"); - info!("Username: {}", DEFAULT_ADMIN_USERNAME); - info!("Token: {}", &user.token); - info!("-----------------------------------"); - info!("Welcome to Atomic Cloud"); - info!("-----------------------------------"); - } - - info!("Loaded {} user(s)", amount); - auth - } - - pub fn get_user(&self, token: &str) -> Option { - self.users.read().unwrap().get(token).cloned() - } - - pub fn get_unit(&self, token: &str) -> Option { - self.units.read().unwrap().get(token).cloned() - } - - pub fn register_unit(&self, unit: WeakUnitHandle) -> AuthUnitHandle { - let token = format!( - "sctl_{}{}", - Uuid::new_v4().as_simple(), - Uuid::new_v4().as_simple() - ); - - let unit = Arc::new(AuthUnit { - unit, - token: token.clone(), - }); - self.units - .write() - .unwrap() - .insert(token.clone(), unit.clone()); - - unit - } - - pub fn unregister_unit(&self, unit: &UnitHandle) { - self.units.write().unwrap().retain(|_, value| { - if let Some(ref_unit) = value.unit.upgrade() { - !Arc::ptr_eq(&ref_unit, unit) - } else { - true - } - }) - } - - pub fn register_user(&self, username: &str) -> Option { - let token = format!( - "actl_{}{}", - Uuid::new_v4().as_simple(), - Uuid::new_v4().as_simple() - ); - let stored_user = StoredUser { - token: token.to_string(), - }; - let user_path = Storage::get_user_file(username); - if stored_user.save_to_file(&user_path, true).is_err() { - error!( - "Failed to save user to file: {}", - 
&user_path.display() - ); - return None; - } - - let user = Arc::new(AuthUser { - username: username.to_string(), - token: token.clone(), - }); - self.users.write().unwrap().insert(token, user.clone()); - - Some(user) - } +#[derive(PartialEq)] +pub enum AuthType { + User, + Server, } -mod stored { - use common::config::{LoadFromTomlFile, SaveToTomlFile}; - use serde::{Deserialize, Serialize}; - - #[derive(Serialize, Deserialize)] - pub struct StoredUser { - pub token: String, - } - - impl LoadFromTomlFile for StoredUser {} - impl SaveToTomlFile for StoredUser {} +#[derive(PartialEq)] +pub enum ActionResult { + Allowed, + Denied, } diff --git a/controller/src/application/auth/manager.rs b/controller/src/application/auth/manager.rs new file mode 100644 index 00000000..0284c1cf --- /dev/null +++ b/controller/src/application/auth/manager.rs @@ -0,0 +1,121 @@ +use std::{collections::HashMap, sync::Arc}; + +use anyhow::Result; +use simplelog::info; +use stored::StoredUser; +use tokio::{fs, sync::RwLock}; +use uuid::Uuid; + +use crate::{ + application::auth::DEFAULT_ADMIN_USERNAME, + storage::{SaveToTomlFile, Storage}, +}; + +use super::{server::AuthServer, AdminUser, AuthToken, Authorization, OwnedAuthorization}; + +pub struct AuthManager { + tokens: RwLock>, +} + +impl AuthManager { + pub async fn init() -> Result { + info!("Loading users..."); + let mut tokens = HashMap::new(); + + let directory = Storage::users_directory(); + if !directory.exists() { + fs::create_dir_all(&directory).await?; + } + + for (_, _, name, value) in Storage::for_each_content_toml::( + &directory, + "Failed to read user from file", + ) + .await? 
+ { + info!("Loaded user {}", name); + tokens.insert(value.token().clone(), AdminUser::create(name)); + } + + if tokens.is_empty() { + let token = Self::create_user(DEFAULT_ADMIN_USERNAME).await?; + info!("-----------------------------------"); + info!("No users found, created default admin user"); + info!("Username: {}", DEFAULT_ADMIN_USERNAME); + info!("Token: {}", &token); + info!("-----------------------------------"); + info!("Welcome to Atomic Cloud"); + info!("-----------------------------------"); + tokens.insert(token, AdminUser::create(DEFAULT_ADMIN_USERNAME.to_string())); + } + + info!("Loaded {} user(s)", tokens.len()); + Ok(Self { + tokens: RwLock::new(tokens), + }) + } + + pub async fn has_access(&self, token: &str) -> Option { + self.tokens + .read() + .await + .get(token) + .map(|auth| Arc::new(auth.recreate())) + } + + pub async fn unregister(&self, token: &str) { + self.tokens.write().await.remove(token); + } + + pub async fn register_server(&self, uuid: Uuid) -> String { + let token = format!( + "sctl_{}{}", + Uuid::new_v4().as_simple(), + Uuid::new_v4().as_simple() + ); + + self.tokens + .write() + .await + .insert(token.clone(), AuthServer::create(uuid)); + + token + } + + async fn create_user(username: &str) -> Result { + let token = format!( + "actl_{}{}", + Uuid::new_v4().as_simple(), + Uuid::new_v4().as_simple() + ); + StoredUser::new(&token) + .save(&Storage::user_file(username), true) + .await?; + + Ok(token) + } +} + +mod stored { + use getset::Getters; + use serde::{Deserialize, Serialize}; + + use crate::storage::{LoadFromTomlFile, SaveToTomlFile}; + + #[derive(Serialize, Deserialize, Getters)] + pub struct StoredUser { + #[getset(get = "pub")] + token: String, + } + + impl StoredUser { + pub fn new(token: &str) -> Self { + Self { + token: token.to_string(), + } + } + } + + impl LoadFromTomlFile for StoredUser {} + impl SaveToTomlFile for StoredUser {} +} diff --git a/controller/src/application/auth/permissions.rs 
b/controller/src/application/auth/permissions.rs new file mode 100644 index 00000000..5c15722c --- /dev/null +++ b/controller/src/application/auth/permissions.rs @@ -0,0 +1,14 @@ +use bitflags::bitflags; + +bitflags! { + pub struct Permissions: u32 { + const READ = 1; + const WRITE = 1 << 1; + const EXECUTE = 1 << 2; + const DELETE = 1 << 3; + const CREATE = 1 << 4; + const MODIFY = 1 << 5; + const LIST = 1 << 6; + const ALL = Self::READ.bits() | Self::WRITE.bits() | Self::EXECUTE.bits() | Self::DELETE.bits() | Self::CREATE.bits() | Self::MODIFY.bits() | Self::LIST.bits(); + } +} diff --git a/controller/src/application/auth/server.rs b/controller/src/application/auth/server.rs new file mode 100644 index 00000000..085a8846 --- /dev/null +++ b/controller/src/application/auth/server.rs @@ -0,0 +1,36 @@ +use getset::Getters; +use uuid::Uuid; + +use super::{user::AdminUser, AuthType, GenericAuthorization, OwnedAuthorization}; + +#[derive(Getters)] +pub struct AuthServer { + #[getset(get = "pub")] + uuid: Uuid, +} + +impl GenericAuthorization for AuthServer { + fn is_allowed(&self, _flag: u32) -> bool { + true + } + + fn get_user(&self) -> Option<&AdminUser> { + None + } + fn get_server(&self) -> Option<&AuthServer> { + Some(self) + } + fn is_type(&self, auth: AuthType) -> bool { + auth == AuthType::Server + } + + fn recreate(&self) -> OwnedAuthorization { + AuthServer::create(self.uuid) + } +} + +impl AuthServer { + pub fn create(uuid: Uuid) -> OwnedAuthorization { + Box::new(Self { uuid }) + } +} diff --git a/controller/src/application/auth/user.rs b/controller/src/application/auth/user.rs new file mode 100644 index 00000000..27b9a271 --- /dev/null +++ b/controller/src/application/auth/user.rs @@ -0,0 +1,35 @@ +use getset::Getters; + +use super::{server::AuthServer, AuthType, GenericAuthorization, OwnedAuthorization}; + +#[derive(Getters)] +pub struct AdminUser { + #[getset(get = "pub")] + username: String, +} + +impl GenericAuthorization for AdminUser { + fn 
is_allowed(&self, _flag: u32) -> bool { + true + } + + fn get_user(&self) -> Option<&AdminUser> { + Some(self) + } + fn get_server(&self) -> Option<&AuthServer> { + None + } + fn is_type(&self, auth: AuthType) -> bool { + auth == AuthType::User + } + + fn recreate(&self) -> OwnedAuthorization { + AdminUser::create(self.username.clone()) + } +} + +impl AdminUser { + pub fn create(username: String) -> OwnedAuthorization { + Box::new(Self { username }) + } +} diff --git a/controller/src/application/cloudlet.rs b/controller/src/application/cloudlet.rs deleted file mode 100644 index 83328334..00000000 --- a/controller/src/application/cloudlet.rs +++ /dev/null @@ -1,468 +0,0 @@ -use std::{ - collections::HashMap, - fmt::Display, - fs, - sync::{Arc, RwLock, Weak}, -}; - -use anyhow::{anyhow, Result}; -use common::config::{LoadFromTomlFile, SaveToTomlFile}; -use serde::{Deserialize, Serialize}; -use simplelog::{error, info, warn}; -use stored::StoredCloudlet; -use url::Url; - -use super::{ - driver::{DriverCloudletHandle, DriverHandle, Drivers, GenericDriver}, - unit::{Resources, Spec, StartRequestHandle}, - CreationResult, WeakControllerHandle, -}; -use crate::storage::Storage; - -pub type CloudletHandle = Arc; -pub type WeakCloudletHandle = Weak; - -pub struct Cloudlets { - controller: WeakControllerHandle, - - cloudlets: HashMap, -} - -impl Cloudlets { - pub fn new(controller: WeakControllerHandle) -> Self { - Self { - controller, - cloudlets: HashMap::new(), - } - } - - /// This will try to load all the cloudletss stored as toml files from the cloudlets directory - /// - /// Any compilcations will be logged and the cloudlet will be skipped - pub fn load_all(controller: WeakControllerHandle, drivers: &Drivers) -> Self { - info!("Loading cloudlets..."); - - let mut cloudlets = Self::new(controller); - let cloudlets_directory = Storage::get_cloudlets_folder(); - if !cloudlets_directory.exists() { - if let Err(error) = fs::create_dir_all(&cloudlets_directory) { - warn!( - 
"Failed to create cloudlets directory: {}", - &error - ); - return cloudlets; - } - } - - let entries = match fs::read_dir(&cloudlets_directory) { - Ok(entries) => entries, - Err(error) => { - error!( - "Failed to read cloudlets directory: {}", - &error - ); - return cloudlets; - } - }; - - for entry in entries { - let entry = match entry { - Ok(entry) => entry, - Err(error) => { - error!("Failed to read cloudlet entry: {}", &error); - continue; - } - }; - - let path = entry.path(); - if path.is_dir() { - continue; - } - - let name = match path.file_stem() { - Some(name) => name.to_string_lossy().to_string(), - None => continue, - }; - - let cloudlet = match StoredCloudlet::load_from_file(&path) { - Ok(cloudlet) => cloudlet, - Err(error) => { - error!( - "Failed to read cloudlet {} from file({:?}): {}", - &name, - &path, - &error - ); - continue; - } - }; - - info!("Loading cloudlet {}", &name); - let cloudlet = match Cloudlet::try_from(&name, &cloudlet, drivers) { - Some(cloudlet) => cloudlet, - None => continue, - }; - - if let Err(error) = cloudlets.add_cloudlet(cloudlet) { - warn!( - "Failed to load cloudlet {} because it was denied by the driver", - &name - ); - warn!(" -> {}", &error); - } - } - - info!("Loaded {} cloudlet(s)", cloudlets.cloudlets.len()); - cloudlets - } - - pub fn tick(&self) { - for cloudlet in self.cloudlets.values() { - if let Err(error) = cloudlet.get_inner().tick() { - error!( - "Failed to tick cloudlet {}: {}", - cloudlet.name, error - ); - } - } - } - - pub fn get_amount(&self) -> usize { - self.cloudlets.len() - } - - pub fn get_cloudlets(&self) -> Vec { - self.cloudlets.values().cloned().collect() - } - - pub fn find_by_name(&self, name: &str) -> Option { - self.cloudlets.get(name).cloned() - } - - /// This can be used to retire or activate a cloudlet - /// - /// Retiring a cloudlet will remove it from the deployments that use it and stop all units on it - pub fn set_cloudlet_status( - &mut self, - cloudlet: &CloudletHandle, - 
status: LifecycleStatus, - ) -> Result<()> { - match status { - LifecycleStatus::Inactive => { - self.retire_cloudlet(cloudlet); - info!("Inactive cloudlet {}", cloudlet.name); - } - LifecycleStatus::Active => { - self.activate_cloudlet(cloudlet); - info!("Activated cloudlet {}", cloudlet.name); - } - } - *cloudlet.status.write().unwrap() = status; - cloudlet.mark_dirty()?; - Ok(()) - } - - /// This should only be called from set_cloudlet_status and delete_cloudlet - fn retire_cloudlet(&mut self, cloudlet: &CloudletHandle) { - let controller = self - .controller - .upgrade() - .expect("The controller is dead while still running code that requires it"); - { - controller - .lock_deployments() - .search_and_remove_cloudlet(cloudlet); - controller.get_units().stop_all_on_cloudlet(cloudlet); - } - } - - /// This should only be called from set_cloudlet_status - fn activate_cloudlet(&mut self, _cloudlet: &CloudletHandle) {} - - pub fn delete_cloudlet(&mut self, cloudlet: &CloudletHandle) -> Result<()> { - if *cloudlet - .status - .read() - .expect("Failed to lock status of cloudlet") - != LifecycleStatus::Inactive - { - return Err(anyhow!("Cloudlet is not inactive")); - } - self.retire_cloudlet(cloudlet); // Just to be sure - cloudlet.delete_file()?; - self.remove_cloudlet(cloudlet); - - let ref_count = Arc::strong_count(cloudlet); - if ref_count > 1 { - warn!( - "Cloudlet {} still has strong references[{}] this chould indicate a memory leak!", - cloudlet.name, - ref_count - ); - } - - info!("Deleted cloudlet {}", cloudlet.name); - Ok(()) - } - - pub fn create_cloudlet( - &mut self, - name: &str, - driver: Arc, - capabilities: Capabilities, - controller: RemoteController, - ) -> Result { - if self.cloudlets.contains_key(name) { - return Ok(CreationResult::AlreadyExists); - } - - let stored_cloudlet = StoredCloudlet { - driver: driver.name().to_string(), - capabilities, - status: LifecycleStatus::Inactive, - controller, - }; - let cloudlet = Cloudlet::from(name, 
&stored_cloudlet, driver); - - match self.add_cloudlet(cloudlet) { - Ok(_) => { - stored_cloudlet.save_to_file(&Storage::get_cloudlet_file(name), true)?; - info!("Created cloudlet {}", name); - Ok(CreationResult::Created) - } - Err(error) => Ok(CreationResult::Denied(error)), - } - } - - fn add_cloudlet(&mut self, mut cloudlet: Cloudlet) -> Result<()> { - match cloudlet.init() { - Ok(_) => { - self.cloudlets - .insert(cloudlet.name.clone(), Arc::new(cloudlet)); - Ok(()) - } - Err(error) => Err(error), - } - } - - fn remove_cloudlet(&mut self, cloudlet: &CloudletHandle) { - self.cloudlets.remove(&cloudlet.name); - } -} - -pub type AllocationHandle = Arc; - -pub struct Allocation { - pub addresses: Vec, - pub resources: Resources, - pub spec: Spec, -} - -impl Allocation { - pub fn primary_address(&self) -> &HostAndPort { - &self.addresses[0] - } -} - -pub struct Cloudlet { - /* Settings */ - pub name: String, - pub capabilities: Capabilities, - pub status: RwLock, - - /* Controller */ - pub controller: RemoteController, - - /* Driver handles */ - pub driver: DriverHandle, - inner: Option, - - /* Allocations made on this cloudlet */ - pub allocations: RwLock>, -} - -impl Cloudlet { - fn from(name: &str, stored_cloudlet: &StoredCloudlet, driver: Arc) -> Self { - Self { - name: name.to_string(), - capabilities: stored_cloudlet.capabilities.clone(), - status: RwLock::new(stored_cloudlet.status.clone()), - controller: stored_cloudlet.controller.clone(), - driver, - inner: None, - allocations: RwLock::new(Vec::new()), - } - } - - fn try_from(name: &str, stored_cloudlet: &StoredCloudlet, drivers: &Drivers) -> Option { - drivers - .find_by_name(&stored_cloudlet.driver) - .map(|driver| Self::from(name, stored_cloudlet, driver)) - .or_else(|| { - error!( - "Failed to load cloudlet {} because there is no loaded driver with the name {}", - &name, - &stored_cloudlet.driver - ); - None - }) - } - - pub fn init(&mut self) -> Result<()> { - match self.driver.init_cloudlet(self) { - 
Ok(value) => { - self.inner = Some(value); - Ok(()) - } - Err(error) => Err(error), - } - } - - pub fn allocate(&self, request: &StartRequestHandle) -> Result { - if *self.status.read().unwrap() == LifecycleStatus::Inactive { - warn!( - "Attempted to allocate resources on inactive cloudlet {}", - self.name - ); - return Err(anyhow!("Can not allocate resources on inactive cloudlet")); - } - - let mut allocations = self - .allocations - .write() - .expect("Failed to lock allocations"); - - if let Some(max_memory) = self.capabilities.memory { - let used_memory: u32 = allocations - .iter() - .map(|allocation| allocation.resources.memory) - .sum(); - if used_memory > max_memory { - return Err(anyhow!("Cloudlet has reached the memory limit")); - } - } - - if let Some(max_allocations) = self.capabilities.max_allocations { - if allocations.len() + 1 > max_allocations as usize { - return Err(anyhow!( - "Cloudlet has reached the maximum amount of allocations" - )); - } - } - - let addresses = self.inner.as_ref().unwrap().allocate_addresses(request)?; - if addresses.len() < request.resources.addresses as usize { - return Err(anyhow!( - "Cloudlet did not allocate the required amount of addresses" - )); - } - - let allocation = Arc::new(Allocation { - addresses, - resources: request.resources.clone(), - spec: request.spec.clone(), - }); - allocations.push(allocation.clone()); - Ok(allocation) - } - - pub fn deallocate(&self, allocation: &AllocationHandle) { - if let Err(error) = self - .inner - .as_ref() - .unwrap() - .deallocate_addresses(allocation.addresses.clone()) - { - error!("Failed to deallocate addresses: {}", &error); - } - self.allocations - .write() - .expect("Failed to lock allocations") - .retain(|alloc| !Arc::ptr_eq(alloc, allocation)); - } - - pub fn get_inner(&self) -> &DriverCloudletHandle { - self.inner.as_ref().unwrap() - } - - pub fn mark_dirty(&self) -> Result<()> { - self.save_to_file() - } - - fn delete_file(&self) -> Result<()> { - let file_path = 
Storage::get_cloudlet_file(&self.name); - if file_path.exists() { - fs::remove_file(file_path)?; - } - Ok(()) - } - - fn save_to_file(&self) -> Result<()> { - let stored_cloudlet = StoredCloudlet { - driver: self.driver.name().to_string(), - capabilities: self.capabilities.clone(), - status: self.status.read().unwrap().clone(), - controller: self.controller.clone(), - }; - stored_cloudlet.save_to_file(&Storage::get_cloudlet_file(&self.name), true) - } -} - -#[derive(Serialize, Deserialize, Clone, Default)] -pub struct Capabilities { - pub memory: Option, - pub max_allocations: Option, - pub child: Option, -} - -#[derive(Serialize, Deserialize, Clone, Default, PartialEq)] -pub enum LifecycleStatus { - #[serde(rename = "inactive")] - #[default] - Inactive, - #[serde(rename = "active")] - Active, -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct RemoteController { - pub address: Url, -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct HostAndPort { - pub host: S, - pub port: u16, -} - -impl HostAndPort { - pub fn new(host: String, port: u16) -> Self { - Self { host, port } - } -} - -impl Display for HostAndPort { - fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { - write!(formatter, "{}:{}", self.host, self.port) - } -} - -mod stored { - use super::{Capabilities, LifecycleStatus, RemoteController}; - use common::config::{LoadFromTomlFile, SaveToTomlFile}; - use serde::{Deserialize, Serialize}; - - #[derive(Serialize, Deserialize)] - pub struct StoredCloudlet { - /* Settings */ - pub driver: String, - pub capabilities: Capabilities, - pub status: LifecycleStatus, - - /* Controller */ - pub controller: RemoteController, - } - - impl LoadFromTomlFile for StoredCloudlet {} - impl SaveToTomlFile for StoredCloudlet {} -} diff --git a/controller/src/application/deployment.rs b/controller/src/application/deployment.rs deleted file mode 100644 index a8984825..00000000 --- a/controller/src/application/deployment.rs +++ /dev/null @@ 
-1,554 +0,0 @@ -use std::{ - collections::HashMap, - fs, - sync::{ - atomic::{AtomicBool, Ordering}, - Arc, RwLock, Weak, - }, - time::Instant, -}; - -use anyhow::{anyhow, Result}; -use common::{ - allocator::NumberAllocator, - config::{LoadFromTomlFile, SaveToTomlFile}, -}; -use serde::{Deserialize, Serialize}; -use shared::StoredDeployment; -use simplelog::{debug, error, info, warn}; - -use crate::storage::Storage; - -use super::{ - cloudlet::{CloudletHandle, Cloudlets, LifecycleStatus, WeakCloudletHandle}, - unit::{DeploymentRef, Resources, Spec, StartRequest, StartRequestHandle, UnitHandle, Units}, - CreationResult, WeakControllerHandle, -}; - -pub type DeploymentHandle = Arc; -pub type WeakDeploymentHandle = Weak; - -pub struct Deployments { - controller: WeakControllerHandle, - - deployments: HashMap, -} - -impl Deployments { - pub fn new(controller: WeakControllerHandle) -> Self { - Self { - controller, - deployments: HashMap::new(), - } - } - - pub fn load_all(controller: WeakControllerHandle, cloudlets: &Cloudlets) -> Self { - info!("Loading deployments..."); - - let mut deployments = Self::new(controller); - let deployments_directory = Storage::get_deployments_folder(); - if !deployments_directory.exists() { - if let Err(error) = fs::create_dir_all(&deployments_directory) { - warn!( - "Failed to create deployments directory: {}", - &error - ); - return deployments; - } - } - - let entries = match fs::read_dir(&deployments_directory) { - Ok(entries) => entries, - Err(error) => { - error!( - "Failed to read deployments directory: {}", - &error - ); - return deployments; - } - }; - - for entry in entries { - let entry = match entry { - Ok(entry) => entry, - Err(error) => { - error!( - "Failed to read deployment entry: {}", - &error - ); - continue; - } - }; - - let path = entry.path(); - if path.is_dir() { - continue; - } - - let name = match path.file_stem() { - Some(name) => name.to_string_lossy().to_string(), - None => continue, - }; - - let deployment = 
match StoredDeployment::load_from_file(&path) { - Ok(deployment) => deployment, - Err(error) => { - error!( - "Failed to read deployment {} from file({:?}): {}", - &name, - &path, - &error - ); - continue; - } - }; - - let deployment = match Deployment::try_from(&name, &deployment, cloudlets) { - Some(deployment) => deployment, - None => continue, - }; - - deployments.add_deployment(deployment); - info!("Loaded deployment {}", &name); - } - - info!( - "Loaded {} deployment(s)", - deployments.deployments.len() - ); - deployments - } - - pub fn get_amount(&self) -> usize { - self.deployments.len() - } - - pub fn get_deployments(&self) -> &HashMap { - &self.deployments - } - - pub fn tick(&self, units: &Units) { - for deployment in self.deployments.values() { - deployment.tick(&self.controller, units); - } - } - - pub fn find_by_name(&self, name: &str) -> Option { - self.deployments.get(name).cloned() - } - - pub fn set_deployment_status( - &mut self, - deployment: &DeploymentHandle, - status: LifecycleStatus, - ) -> Result<()> { - match status { - LifecycleStatus::Inactive => { - self.retire_deployment(deployment); - info!( - "Inactive deployment {}", - deployment.name - ); - } - LifecycleStatus::Active => { - self.activate_deployment(deployment); - info!( - "Activated deployment {}", - deployment.name - ); - } - } - *deployment.status.write().unwrap() = status; - deployment.mark_dirty()?; - Ok(()) - } - - fn retire_deployment(&mut self, deployment: &DeploymentHandle) { - let controller = self - .controller - .upgrade() - .expect("The controller is dead while still running code that requires it"); - { - let unit_manager = controller.get_units(); - let mut units = deployment.units.write().unwrap(); - for unit in units.iter() { - if let AssociatedUnit::Active(unit) = unit { - unit_manager.checked_unit_stop(unit); - } else if let AssociatedUnit::Queueing(request) = unit { - request.canceled.store(true, Ordering::Relaxed); - } - } - units.clear(); - } - } - - fn 
activate_deployment(&mut self, _deployment: &DeploymentHandle) {} - - pub fn delete_deployment(&mut self, deployment: &DeploymentHandle) -> Result<()> { - if *deployment - .status - .read() - .expect("Failed to lock status of deployment") - != LifecycleStatus::Inactive - { - return Err(anyhow!("Deployment is not inactive")); - } - self.retire_deployment(deployment); // Make sure all units are stopped - deployment.delete_file()?; - self.remove_deployment(deployment); - - let ref_count = Arc::strong_count(deployment); - if ref_count > 1 { - warn!( - "Deployment {} still has strong references[{}] this chould indicate a memory leak!", - deployment.name, - ref_count - ); - } - - info!("Deleted deployment {}", deployment.name); - Ok(()) - } - - pub fn create_deployment( - &mut self, - name: &str, - cloudlet_handles: Vec, - constraints: StartConstraints, - scaling: ScalingPolicy, - resources: Resources, - spec: Spec, - ) -> Result { - if cloudlet_handles.is_empty() { - return Ok(CreationResult::Denied(anyhow!("No cloudlet provided"))); - } - - if self.deployments.contains_key(name) { - return Ok(CreationResult::AlreadyExists); - } - - let cloudlets: Vec = cloudlet_handles - .iter() - .map(|cloudlet| cloudlet.name.clone()) - .collect(); - - let stored_deployment = StoredDeployment { - status: LifecycleStatus::Inactive, - cloudlets, - constraints, - scaling, - resources, - spec, - }; - let deployment = Deployment::from( - name, - &stored_deployment, - cloudlet_handles.iter().map(Arc::downgrade).collect(), - ); - - self.add_deployment(deployment); - stored_deployment.save_to_file(&Storage::get_deployment_file(name), true)?; - info!("Created deployment {}", name); - Ok(CreationResult::Created) - } - - pub fn search_and_remove_cloudlet(&self, cloudlet: &CloudletHandle) { - for deployment in self.deployments.values() { - deployment - .cloudlets - .write() - .expect("Failed to lock cloudlets list of deployment") - .retain(|handle| { - if let Some(strong_cloudlet) = 
handle.upgrade() { - return !Arc::ptr_eq(&strong_cloudlet, cloudlet); - } - false - }); - deployment - .mark_dirty() - .expect("Failed to mark deployment as dirty"); - } - } - - fn add_deployment(&mut self, deployment: DeploymentHandle) { - self.deployments - .insert(deployment.name.to_string(), deployment); - } - - fn remove_deployment(&mut self, deployment: &DeploymentHandle) { - self.deployments.remove(&deployment.name); - } -} - -#[derive(Serialize, Deserialize, Clone, Copy, Default)] -pub struct StartConstraints { - pub minimum: u32, - pub maximum: u32, - pub priority: i32, -} - -#[derive(Serialize, Deserialize, Clone, Copy, Default)] -pub struct ScalingPolicy { - pub enabled: bool, - pub start_threshold: f32, - pub stop_empty_units: bool, -} - -pub enum AssociatedUnit { - Queueing(StartRequestHandle), - Active(UnitHandle), -} - -pub struct Deployment { - handle: WeakDeploymentHandle, - - /* Settings */ - pub name: String, - pub status: RwLock, - - /* Where? */ - pub cloudlets: RwLock>, - pub constraints: StartConstraints, - pub scaling: ScalingPolicy, - - /* How? */ - pub resources: Resources, - pub spec: Spec, - - /* What do i need to know? 
*/ - id_allocator: RwLock>, - units: RwLock>, -} - -impl Deployment { - fn from( - name: &str, - stored_deployment: &StoredDeployment, - cloudlets: Vec, - ) -> DeploymentHandle { - Arc::new_cyclic(|handle| Self { - handle: handle.clone(), - name: name.to_string(), - status: RwLock::new(stored_deployment.status.clone()), - cloudlets: RwLock::new(cloudlets), - constraints: stored_deployment.constraints, - scaling: stored_deployment.scaling, - resources: stored_deployment.resources.clone(), - spec: stored_deployment.spec.clone(), - id_allocator: RwLock::new(NumberAllocator::new(1..usize::MAX)), - units: RwLock::new(Vec::new()), - }) - } - - fn try_from( - name: &str, - stored_deployment: &StoredDeployment, - cloudlets: &Cloudlets, - ) -> Option { - let cloudlet_handles: Vec = stored_deployment - .cloudlets - .iter() - .filter_map(|name| { - cloudlets - .find_by_name(name) - .map(|handle| Arc::downgrade(&handle)) - }) - .collect(); - if cloudlet_handles.is_empty() { - return None; - } - Some(Self::from(name, stored_deployment, cloudlet_handles)) - } - - fn tick(&self, controller: &WeakControllerHandle, units: &Units) { - if *self.status.read().unwrap() == LifecycleStatus::Inactive { - // Do not tick this deployment because it is inactive - return; - } - - let mut deployment_units = self.units.write().expect("Failed to lock units"); - let mut id_allocator = self - .id_allocator - .write() - .expect("Failed to lock id allocator"); - let mut target_unit_count = self.constraints.minimum; - - // Apply scaling policy - if self.scaling.enabled { - for unit in deployment_units.iter() { - if let AssociatedUnit::Active(unit) = unit { - let player_ratio = unit.get_user_count() as f32 / self.spec.max_players as f32; - if player_ratio >= self.scaling.start_threshold { - target_unit_count += 1; // Unit has reached the threshold, start a new one - } - } - } - - if self.scaling.stop_empty_units && deployment_units.len() as u32 > target_unit_count { - let mut amount_to_stop = 
deployment_units.len() as u32 - target_unit_count; - - // We have more units than needed - // Check if a unit is empty and stop it after the configured timeout - if let Some(controller) = controller.upgrade() { - for unit in deployment_units.iter() { - if let AssociatedUnit::Active(unit) = unit { - let mut stop_flag = - unit.flags.stop.write().expect("Failed to lock stop flag"); - if unit.get_user_count() == 0 { - if let Some(stop_time) = stop_flag.as_ref() { - if &Instant::now() > stop_time && amount_to_stop > 0 { - debug!( - "Unit {} is empty and reached the timeout, stopping it...", - unit.name - ); - controller.get_units().checked_unit_stop(unit); - amount_to_stop -= 1; - } - } else { - debug!( - "Unit {} is empty, starting stop timer...", - unit.name - ); - stop_flag.replace( - Instant::now() - + controller.configuration.timings.empty_unit.unwrap(), - ); - } - } else if stop_flag.is_some() { - debug!( - "Unit {} is no longer empty, clearing stop timer...", - unit.name - ); - stop_flag.take(); - } - } - } - } - } - } - - // Check if we need to start more units - for requested in 0..(target_unit_count as usize).saturating_sub(deployment_units.len()) { - if (deployment_units.len() + requested) >= target_unit_count as usize { - break; - } - - let unit_id = id_allocator - .allocate() - .expect("We reached the maximum unit count. 
Wow this is a lot of units"); - let request = units.queue_unit(StartRequest { - canceled: AtomicBool::new(false), - when: None, - name: format!("{}-{}", self.name, unit_id), - cloudlets: self.cloudlets.read().unwrap().clone(), - deployment: Some(DeploymentRef { - unit_id, - deployment: self.handle.clone(), - }), - resources: self.resources.clone(), - spec: self.spec.clone(), - priority: self.constraints.priority, - }); - - // Add queueing unit to deployment - deployment_units.push(AssociatedUnit::Queueing(request)); - } - } - - pub fn set_unit_active(&self, unit: UnitHandle, request: &StartRequestHandle) { - let mut units = self.units.write().expect("Failed to lock units"); - units.retain(|queued_unit| { - if let AssociatedUnit::Queueing(start_request) = queued_unit { - return !Arc::ptr_eq(start_request, request); - } - true - }); - units.push(AssociatedUnit::Active(unit)); - } - - pub fn remove_unit(&self, unit: &UnitHandle) { - self.units - .write() - .expect("Failed to lock units") - .retain(|handle| { - if let AssociatedUnit::Active(s) = handle { - return !Arc::ptr_eq(s, unit); - } - true - }); - self.id_allocator - .write() - .expect("Failed to lock id allocator") - .release(unit.deployment.as_ref().unwrap().unit_id); - } - - pub fn get_free_unit(&self) -> Option { - let units = self.units.read().expect("Failed to lock units"); - for unit in units.iter() { - if let AssociatedUnit::Active(unit) = unit { - return Some(unit.clone()); - } - } - None - } - - pub fn mark_dirty(&self) -> Result<()> { - self.save_to_file() - } - - fn delete_file(&self) -> Result<()> { - let file_path = Storage::get_deployment_file(&self.name); - if file_path.exists() { - fs::remove_file(file_path)?; - } - Ok(()) - } - - fn save_to_file(&self) -> Result<()> { - let stored_deployment = StoredDeployment { - status: self.status.read().unwrap().clone(), - cloudlets: self - .cloudlets - .read() - .unwrap() - .iter() - .map(|cloudlet| cloudlet.upgrade().unwrap().name.clone()) - .collect(), - 
constraints: self.constraints, - scaling: self.scaling, - resources: self.resources.clone(), - spec: self.spec.clone(), - }; - stored_deployment.save_to_file(&Storage::get_deployment_file(&self.name), true) - } -} - -mod shared { - use common::config::{LoadFromTomlFile, SaveToTomlFile}; - use serde::{Deserialize, Serialize}; - - use crate::application::{ - cloudlet::LifecycleStatus, - unit::{Resources, Spec}, - }; - - use super::{ScalingPolicy, StartConstraints}; - - #[derive(Serialize, Deserialize)] - pub struct StoredDeployment { - /* Settings */ - pub status: LifecycleStatus, - - /* Where? */ - pub cloudlets: Vec, - pub constraints: StartConstraints, - pub scaling: ScalingPolicy, - - /* How? */ - pub resources: Resources, - pub spec: Spec, - } - - impl LoadFromTomlFile for StoredDeployment {} - impl SaveToTomlFile for StoredDeployment {} -} diff --git a/controller/src/application/driver.rs b/controller/src/application/driver.rs deleted file mode 100644 index c0686457..00000000 --- a/controller/src/application/driver.rs +++ /dev/null @@ -1,136 +0,0 @@ -use anyhow::Result; -use simplelog::error; -use simplelog::info; -use std::sync::Arc; -use tonic::async_trait; - -use crate::application::cloudlet::Cloudlet; -use crate::application::unit::StartRequestHandle; -use crate::application::unit::UnitHandle; - -#[cfg(feature = "wasm-drivers")] -use crate::application::driver::wasm::WasmDriver; - -use super::cloudlet::HostAndPort; - -mod process; - -#[cfg(feature = "wasm-drivers")] -mod wasm; - -pub struct Information { - authors: Vec, - version: String, - ready: bool, -} - -#[async_trait] -pub trait GenericDriver: Send + Sync { - fn name(&self) -> &String; - fn init(&self) -> Result; - fn init_cloudlet(&self, cloudlet: &Cloudlet) -> Result; - - /* Cleanup */ - fn cleanup(&self) -> Result<()>; - - /* Ticking */ - fn tick(&self) -> Result<()>; -} - -#[async_trait] -pub trait GenericCloudlet: Send + Sync { - /* Ticking */ - fn tick(&self) -> Result<()>; - - /* Prepare */ - 
fn allocate_addresses(&self, request: &StartRequestHandle) -> Result>; - fn deallocate_addresses(&self, addresses: Vec) -> Result<()>; - - /* Unitss */ - fn start_unit(&self, unit: &UnitHandle) -> Result<()>; - fn restart_unit(&self, unit: &UnitHandle) -> Result<()>; - fn stop_unit(&self, unit: &UnitHandle) -> Result<()>; -} - -pub type DriverHandle = Arc; -pub type DriverCloudletHandle = Arc; - -pub struct Drivers { - drivers: Vec, -} - -impl Drivers { - pub fn load_all(cloud_identifier: &str) -> Self { - info!("Loading drivers..."); - - let mut drivers = Vec::new(); - - #[cfg(feature = "wasm-drivers")] - WasmDriver::load_all(cloud_identifier, &mut drivers); - - info!("Loaded {} driver(s)", drivers.len()); - Self { drivers } - } - - pub fn cleanup(&self) { - for driver in &self.drivers { - if let Err(error) = driver.cleanup() { - error!( - "Failed to dispose resources of driver {}: {}", - driver.name(), - error - ); - } - } - } - - pub fn tick(&self) { - for driver in &self.drivers { - if let Err(error) = driver.tick() { - error!( - "Failed to tick driver {}: {}", - driver.name(), - error - ); - } - } - } - - pub fn find_by_name(&self, name: &str) -> Option> { - self.drivers - .iter() - .find(|driver| driver.name().eq_ignore_ascii_case(name)) - .cloned() - } - - pub fn get_drivers(&self) -> Vec { - self.drivers.clone() - } -} - -#[cfg(feature = "wasm-drivers")] -mod source { - use anyhow::Result; - use std::fmt::{Display, Formatter}; - use std::fs; - use std::path::{Path, PathBuf}; - - pub struct Source { - pub path: PathBuf, - pub code: Vec, - } - - impl Display for Source { - fn fmt(&self, formatter: &mut Formatter<'_>) -> std::fmt::Result { - write!(formatter, "{}", self.path.display()) - } - } - - impl Source { - pub fn from_file(path: &Path) -> Result { - let path = path.to_owned(); - let code = fs::read(&path)?; - Ok(Source { path, code }) - } - } -} diff --git a/controller/src/application/driver/process.rs b/controller/src/application/driver/process.rs 
deleted file mode 100644 index 8634ddf2..00000000 --- a/controller/src/application/driver/process.rs +++ /dev/null @@ -1,114 +0,0 @@ -use std::sync::mpsc::{self, Receiver}; -use std::thread; -use std::{ - io::{BufRead, BufReader, BufWriter, Read}, - marker::PhantomData, - process::{Child, ChildStderr, ChildStdin, ChildStdout}, - sync::Mutex, -}; - -use anyhow::{anyhow, Result}; - -pub struct DriverProcess { - /* Process */ - process: Child, - - /* Std Readers */ - stdout: ProcessStream, - stderr: ProcessStream, - - /* StdIn Writer */ - stdin: BufWriter, -} - -impl DriverProcess { - pub fn new(mut process: Child, direct: bool) -> Result { - let stdout = BufReader::new( - process - .stdout - .take() - .ok_or(anyhow!("Failed to take stdout from child process"))?, - ); - let stderr = BufReader::new( - process - .stderr - .take() - .ok_or(anyhow!("Failed to take stderr from child process"))?, - ); - let stdin = BufWriter::new( - process - .stdin - .take() - .ok_or(anyhow!("Failed to take stdin from child process"))?, - ); - - let (stdout, stderr) = if direct { - (ProcessStream::Direct(stdout), ProcessStream::Direct(stderr)) - } else { - ( - ProcessStream::Async(AsyncBufReader::new(stdout)), - ProcessStream::Async(AsyncBufReader::new(stderr)), - ) - }; - - Ok(Self { - process, - stdout, - stderr, - stdin, - }) - } - - pub fn get_process(&mut self) -> &mut Child { - &mut self.process - } - - pub fn get_stdout(&mut self) -> &mut ProcessStream { - &mut self.stdout - } - - pub fn get_stderr(&mut self) -> &mut ProcessStream { - &mut self.stderr - } - - pub fn get_stdin(&mut self) -> &mut BufWriter { - &mut self.stdin - } -} - -pub enum ProcessStream { - Direct(BufReader), - Async(AsyncBufReader), -} - -pub struct AsyncBufReader { - receiver: Mutex>, - phantom: PhantomData, -} - -impl AsyncBufReader { - pub fn new(mut reader: BufReader) -> Self { - let (sender, receiver) = mpsc::channel(); - - thread::spawn(move || { - let mut buffer = String::new(); - while 
reader.read_line(&mut buffer).unwrap_or(0) > 0 { - sender.send(buffer.clone()).unwrap(); - buffer.clear(); - } - }); - - AsyncBufReader { - receiver: Mutex::new(receiver), - phantom: PhantomData, - } - } - - pub fn try_recv(&self) -> Option { - self.receiver - .lock() - .expect("Failed to lock reader for readline") - .try_recv() - .ok() - } -} diff --git a/controller/src/application/driver/wasm.rs b/controller/src/application/driver/wasm.rs deleted file mode 100644 index 65f53bc7..00000000 --- a/controller/src/application/driver/wasm.rs +++ /dev/null @@ -1,481 +0,0 @@ -use std::collections::HashMap; -use std::fs; -use std::sync::{Arc, Mutex, RwLock, Weak}; - -use anyhow::{anyhow, Result}; -use cloudlet::WasmCloudlet; -use common::config::LoadFromTomlFile; -use config::WasmConfig; -use generated::exports::cloudlet::driver::bridge; -use generated::Driver; -use simplelog::{error, info, warn}; -use wasmtime::component::{Component, Linker, ResourceAny}; -use wasmtime::{Config, Engine, Store}; -use wasmtime_wasi::{DirPerms, FilePerms, ResourceTable, WasiCtx, WasiCtxBuilder, WasiView}; - -use super::process::DriverProcess; -use super::source::Source; -use super::{DriverCloudletHandle, GenericDriver, Information}; -use crate::application::cloudlet::{Capabilities, Cloudlet, HostAndPort, RemoteController}; -use crate::storage::Storage; - -mod config; - -mod cloudlet; -mod file; -mod http; -mod log; -mod platform; -mod process; - -pub mod generated { - use wasmtime::component::bindgen; - - bindgen!({ - world: "driver", - path: "../protocol/wit/", - }); -} - -/* Caching of compiled wasm artifacts and other configuration */ -const CONFIG_FILE: &str = "wasm.toml"; -const ENGINE_CONFIG_FILE: &str = "wasm-engine.toml"; -const DEFAULT_ENGINE_CONFIG: &str = r#"# For more settings, please refer to the documentation: -# https://bytecodealliance.github.io/wasmtime/cli-cache.html - -[cache] -enabled = true"#; -const DEFAULT_CONFIG: &str = r#"# This configuration is crucial for granting 
the drivers their required permissions -# https://httprafa.github.io/atomic-cloud/controller/drivers/wasm/permissions/ - -[[drivers]] -name = "pterodactyl" -inherit_stdio = false -inherit_args = false -inherit_env = false -inherit_network = true -allow_ip_name_lookup = true -allow_http = true -allow_process = false -allow_remove_dir_all = false -mounts = []"#; - -struct WasmDriverState { - handle: Weak, - wasi: WasiCtx, - table: ResourceTable, -} - -impl WasiView for WasmDriverState { - fn ctx(&mut self) -> &mut WasiCtx { - &mut self.wasi - } - fn table(&mut self) -> &mut ResourceTable { - &mut self.table - } -} - -impl generated::cloudlet::driver::types::Host for WasmDriverState {} - -impl generated::cloudlet::driver::api::Host for WasmDriverState { - fn get_name(&mut self) -> String { - self.handle.upgrade().unwrap().name.clone() - } -} - -struct WasmDriverHandle { - store: Store, - resource: ResourceAny, // This is delete when the store is dropped -} - -impl WasmDriverHandle { - fn new(store: Store, resource: ResourceAny) -> Self { - WasmDriverHandle { store, resource } - } - - fn get(&mut self) -> (ResourceAny, &mut Store) { - (self.resource, &mut self.store) - } -} - -pub struct WasmDriverData { - processes: RwLock>, -} - -pub struct WasmDriver { - own: Weak, - - name: String, - bindings: Driver, - handle: Mutex>, - - data: WasmDriverData, -} - -impl WasmDriver { - fn get_resource_and_store( - handle: &mut Option, - ) -> (ResourceAny, &mut Store) { - handle.as_mut().unwrap().get() - } -} - -impl GenericDriver for WasmDriver { - fn name(&self) -> &String { - &self.name - } - - fn init(&self) -> Result { - let mut handle = self.handle.lock().unwrap(); - let (resource, store) = Self::get_resource_and_store(&mut handle); - match self - .bindings - .cloudlet_driver_bridge() - .generic_driver() - .call_init(store, resource) - { - Ok(information) => Ok(information.into()), - Err(error) => Err(error), - } - } - - fn init_cloudlet(&self, cloudlet: &Cloudlet) -> Result 
{ - let mut handle = self.handle.lock().unwrap(); - let (resource, store) = Self::get_resource_and_store(&mut handle); - match self - .bindings - .cloudlet_driver_bridge() - .generic_driver() - .call_init_cloudlet( - store, - resource, - &cloudlet.name, - &(&cloudlet.capabilities).into(), - &(&cloudlet.controller).into(), - )? { - Ok(cloudlet) => Ok(Arc::new(WasmCloudlet { - handle: self.own.clone(), - resource: cloudlet, - })), - Err(error) => Err(anyhow!(error)), - } - } - - fn cleanup(&self) -> Result<()> { - let mut handle = self.handle.lock().unwrap(); - let (resource, store) = Self::get_resource_and_store(&mut handle); - match self - .bindings - .cloudlet_driver_bridge() - .generic_driver() - .call_cleanup(store, resource) - { - Ok(result) => result.map_err(|errors| { - anyhow!(errors - .iter() - .map(|error| format!("Scope: {}, Message: {}", error.scope, error.message)) - .collect::>() - .join("\n")) - }), - Err(error) => Err(error), - } - } - - fn tick(&self) -> Result<()> { - let mut handle = self.handle.lock().unwrap(); - let (resource, store) = Self::get_resource_and_store(&mut handle); - match self - .bindings - .cloudlet_driver_bridge() - .generic_driver() - .call_tick(store, resource) - { - Ok(result) => result.map_err(|errors| { - anyhow!(errors - .iter() - .map(|error| format!("Scope: {}, Message: {}", error.scope, error.message)) - .collect::>() - .join("\n")) - }), - Err(error) => Err(error), - } - } -} - -impl WasmDriver { - fn new( - config: &WasmConfig, - cloud_identifier: &str, - name: &str, - source: &Source, - ) -> Result> { - let config_directory = Storage::get_config_folder_for_driver(name); - let data_directory = Storage::get_data_folder_for_driver(name); - if !config_directory.exists() { - fs::create_dir_all(&config_directory).unwrap_or_else(|error| { - warn!( - "Failed to create configs directory for driver {}: {}", - &name, &error - ) - }); - } - if !data_directory.exists() { - fs::create_dir_all(&data_directory).unwrap_or_else(|error| 
{ - warn!( - "Failed to create data directory for driver {}: {}", - &name, &error - ) - }); - } - - let mut engine_config = Config::new(); - engine_config.wasm_component_model(true); - if let Err(error) = - engine_config.cache_config_load(Storage::get_configs_folder().join(ENGINE_CONFIG_FILE)) - { - warn!( - "Failed to enable caching for wasmtime engine: {}", - &error - ); - } - - let engine = Engine::new(&engine_config)?; - let component = Component::from_binary(&engine, &source.code)?; - - let mut linker = Linker::new(&engine); - wasmtime_wasi::add_to_linker_sync(&mut linker)?; - Driver::add_to_linker(&mut linker, |state: &mut WasmDriverState| state)?; - - let mut wasi = WasiCtxBuilder::new(); - if let Some(config) = config.get_config(name) { - if config.inherit_stdio { - wasi.inherit_stdio(); - } - if config.inherit_args { - wasi.inherit_args(); - } - if config.inherit_env { - wasi.inherit_env(); - } - if config.inherit_network { - wasi.inherit_network(); - } - if config.allow_ip_name_lookup { - wasi.allow_ip_name_lookup(true); - } - for mount in &config.mounts { - wasi.preopened_dir(&mount.host, &mount.guest, DirPerms::all(), FilePerms::all())?; - } - } - let wasi = wasi - .preopened_dir( - &config_directory, - "/configs/", - DirPerms::all(), - FilePerms::all(), - )? - .preopened_dir(&data_directory, "/data/", DirPerms::all(), FilePerms::all())? 
- .build(); - - let table = ResourceTable::new(); - - let mut store = Store::new( - &engine, - WasmDriverState { - handle: Weak::new(), - wasi, - table, - }, - ); - let bindings = Driver::instantiate(&mut store, &component, &linker)?; - let driver = Arc::new_cyclic(|handle| { - store.data_mut().handle = handle.clone(); - WasmDriver { - own: handle.clone(), - name: name.to_string(), - bindings, - handle: Mutex::new(None), - data: WasmDriverData { - processes: RwLock::new(HashMap::new()), - }, - } - }); - let driver_resource = driver - .bindings - .cloudlet_driver_bridge() - .generic_driver() - .call_constructor(&mut store, cloud_identifier)?; - driver - .handle - .lock() - .unwrap() - .replace(WasmDriverHandle::new(store, driver_resource)); - Ok(driver) - } - - pub fn load_all( - cloud_identifier: &str, - drivers: &mut Vec>, - ) -> WasmConfig { - // Check if cache configuration exists - { - let engine_config_file = Storage::get_configs_folder().join(ENGINE_CONFIG_FILE); - if !engine_config_file.exists() { - fs::write(&engine_config_file, DEFAULT_ENGINE_CONFIG).unwrap_or_else(|error| { - warn!( - "Failed to create default wasmtime configuration file: {}", - &error - ) - }); - } - } - let config_file = Storage::get_configs_folder().join(CONFIG_FILE); - if !config_file.exists() { - fs::write(&config_file, DEFAULT_CONFIG).unwrap_or_else(|error| { - warn!( - "Failed to create default wasm configuration file: {}", - &error - ) - }); - } - - let config = WasmConfig::load_from_file(&config_file).unwrap_or_else(|error| { - warn!( - "Failed to load wasm configuration file: {}", - &error - ); - WasmConfig::default() - }); - - let old_loaded = drivers.len(); - - let drivers_directory = Storage::get_drivers_folder(); - if !drivers_directory.exists() { - fs::create_dir_all(&drivers_directory).unwrap_or_else(|error| { - warn!( - "Failed to create drivers directory: {}", - &error - ) - }); - } - - let entries = match fs::read_dir(&drivers_directory) { - Ok(entries) => entries, - 
Err(error) => { - error!( - "Failed to read driver directory: {}", - &error - ); - return config; - } - }; - - for entry in entries { - let entry = match entry { - Ok(entry) => entry, - Err(error) => { - error!("Failed to read driver entry: {}", &error); - continue; - } - }; - - let path = entry.path(); - if path.is_dir() - || !path - .file_name() - .unwrap() - .to_string_lossy() - .ends_with(".wasm") - { - continue; - } - - let name = path.file_stem().unwrap().to_string_lossy().to_string(); - let source = match Source::from_file(&path) { - Ok(source) => source, - Err(error) => { - error!( - "Failed to read source code for driver {} from file({:?}): {}", - &name, - &path, - &error - ); - continue; - } - }; - - info!("Compiling driver {}...", &name); - let driver = WasmDriver::new(&config, cloud_identifier, &name, &source); - match driver { - Ok(driver) => match driver.init() { - Ok(info) => { - if info.ready { - info!( - "Loaded driver {} v{} by {}", - &driver.name, &info.version, - &info.authors.join(", ") - ); - drivers.push(driver); - } else { - warn!( - "Driver {} marked itself as not ready, skipping...", - &driver.name - ); - } - } - Err(error) => error!("Failed to load driver {}: {}", &name, &error), - }, - Err(error) => error!( - "Failed to compile driver {} at location {}: {}", - &name, - &source, - &error - ), - } - } - - if old_loaded == drivers.len() { - warn!( - "The Wasm driver feature is enabled, but no Wasm drivers were loaded." 
- ); - } - config - } -} - -impl From for Information { - fn from(val: bridge::Information) -> Self { - Information { - authors: val.authors, - version: val.version, - ready: val.ready, - } - } -} - -impl From<&Capabilities> for bridge::Capabilities { - fn from(val: &Capabilities) -> Self { - bridge::Capabilities { - memory: val.memory, - max_allocations: val.max_allocations, - child: val.child.clone(), - } - } -} - -impl From<&RemoteController> for bridge::RemoteController { - fn from(val: &RemoteController) -> Self { - bridge::RemoteController { - address: val.address.to_string(), - } - } -} - -impl From<&HostAndPort> for bridge::Address { - fn from(val: &HostAndPort) -> Self { - bridge::Address { - host: val.host.clone(), - port: val.port, - } - } -} diff --git a/controller/src/application/driver/wasm/cloudlet.rs b/controller/src/application/driver/wasm/cloudlet.rs deleted file mode 100644 index 686f0a13..00000000 --- a/controller/src/application/driver/wasm/cloudlet.rs +++ /dev/null @@ -1,231 +0,0 @@ -use std::sync::{Arc, Weak}; - -use anyhow::{anyhow, Result}; -use wasmtime::component::ResourceAny; - -use crate::application::{ - auth::AuthUnit, - cloudlet::{Allocation, HostAndPort}, - driver::GenericCloudlet, - unit::{ - KeyValue, Resources, Retention, Spec, StartRequest, StartRequestHandle, Unit, UnitHandle, - }, -}; - -use super::{ - generated::exports::cloudlet::driver::bridge::{self, Address}, - WasmDriver, -}; - -pub struct WasmCloudlet { - pub handle: Weak, - pub resource: ResourceAny, // This is delete if the handle is dropped -} - -impl GenericCloudlet for WasmCloudlet { - fn tick(&self) -> Result<()> { - if let Some(driver) = self.handle.upgrade() { - let mut handle = driver.handle.lock().unwrap(); - let (_, store) = WasmDriver::get_resource_and_store(&mut handle); - match driver - .bindings - .cloudlet_driver_bridge() - .generic_cloudlet() - .call_tick(store, self.resource) - { - Ok(result) => result.map_err(|errors| { - anyhow!(errors - .iter() - 
.map(|error| format!("Scope: {}, Message: {}", error.scope, error.message)) - .collect::>() - .join("\n")) - }), - Err(error) => Err(error), - } - } else { - Err(anyhow!("Failed to get handle to wasm driver")) - } - } - - fn allocate_addresses(&self, request: &StartRequestHandle) -> Result> { - if let Some(driver) = self.handle.upgrade() { - let mut handle = driver.handle.lock().unwrap(); - let (_, store) = WasmDriver::get_resource_and_store(&mut handle); - match driver - .bindings - .cloudlet_driver_bridge() - .generic_cloudlet() - .call_allocate_addresses(store, self.resource, &(request.into())) - { - Ok(Ok(addresses)) => addresses - .into_iter() - .map(|address| Ok(HostAndPort::new(address.host, address.port))) - .collect::>>(), - Ok(Err(error)) => Err(anyhow!(error)), - Err(error) => Err(error), - } - } else { - Err(anyhow!("Failed to get handle to wasm driver")) - } - } - - fn deallocate_addresses(&self, addresses: Vec) -> Result<()> { - if let Some(driver) = self.handle.upgrade() { - let mut handle = driver.handle.lock().unwrap(); - let (_, store) = WasmDriver::get_resource_and_store(&mut handle); - driver - .bindings - .cloudlet_driver_bridge() - .generic_cloudlet() - .call_deallocate_addresses( - store, - self.resource, - &addresses - .iter() - .map(|address| address.into()) - .collect::>(), - ) - } else { - Err(anyhow!("Failed to get handle to wasm driver")) - } - } - - fn start_unit(&self, unit: &UnitHandle) -> Result<()> { - if let Some(driver) = self.handle.upgrade() { - let mut handle = driver.handle.lock().unwrap(); - let (_, store) = WasmDriver::get_resource_and_store(&mut handle); - driver - .bindings - .cloudlet_driver_bridge() - .generic_cloudlet() - .call_start_unit(store, self.resource, &unit.into()) - } else { - Err(anyhow!("Failed to get handle to wasm driver")) - } - } - - fn restart_unit(&self, unit: &UnitHandle) -> Result<()> { - if let Some(driver) = self.handle.upgrade() { - let mut handle = driver.handle.lock().unwrap(); - let (_, store) 
= WasmDriver::get_resource_and_store(&mut handle); - driver - .bindings - .cloudlet_driver_bridge() - .generic_cloudlet() - .call_restart_unit(store, self.resource, &unit.into()) - } else { - Err(anyhow!("Failed to get handle to wasm driver")) - } - } - - fn stop_unit(&self, unit: &UnitHandle) -> Result<()> { - if let Some(driver) = self.handle.upgrade() { - let mut handle = driver.handle.lock().unwrap(); - let (_, store) = WasmDriver::get_resource_and_store(&mut handle); - driver - .bindings - .cloudlet_driver_bridge() - .generic_cloudlet() - .call_stop_unit(store, self.resource, &unit.into()) - } else { - Err(anyhow!("Failed to get handle to wasm driver")) - } - } -} - -impl From<&KeyValue> for bridge::KeyValue { - fn from(val: &KeyValue) -> Self { - bridge::KeyValue { - key: val.key.clone(), - value: val.value.clone(), - } - } -} - -impl From<&Retention> for bridge::Retention { - fn from(val: &Retention) -> Self { - match val { - Retention::Permanent => bridge::Retention::Permanent, - Retention::Temporary => bridge::Retention::Temporary, - } - } -} - -impl From<&Spec> for bridge::Spec { - fn from(val: &Spec) -> Self { - bridge::Spec { - settings: val.settings.iter().map(|setting| setting.into()).collect(), - environment: val.environment.iter().map(|env| env.into()).collect(), - disk_retention: (&val.disk_retention).into(), - image: val.image.clone(), - } - } -} - -impl From for bridge::Resources { - fn from(val: Resources) -> Self { - bridge::Resources { - memory: val.memory, - swap: val.swap, - cpu: val.cpu, - io: val.io, - disk: val.disk, - addresses: val.addresses, - } - } -} - -impl From> for bridge::Allocation { - fn from(val: Arc) -> Self { - bridge::Allocation { - addresses: val.addresses.iter().map(|address| address.into()).collect(), - resources: val.resources.clone().into(), - spec: (&val.spec).into(), - } - } -} - -impl From> for bridge::Auth { - fn from(val: Arc) -> Self { - bridge::Auth { - token: val.token.clone(), - } - } -} - -impl From<&Arc> for 
bridge::Unit { - fn from(val: &Arc) -> Self { - bridge::Unit { - name: val.name.clone(), - uuid: val.uuid.to_string(), - deployment: val.deployment.as_ref().map(|deployment| { - deployment - .deployment - .upgrade() - .expect("Deployment dropped while units of the deployment are still active") - .name - .clone() - }), - allocation: val.allocation.clone().into(), - auth: val.auth.clone().into(), - } - } -} - -impl From<&Arc> for bridge::UnitProposal { - fn from(val: &Arc) -> Self { - bridge::UnitProposal { - name: val.name.clone(), - deployment: val.deployment.as_ref().map(|deployment| { - deployment - .deployment - .upgrade() - .expect("Deployment dropped while units of the deployment are still active") - .name - .clone() - }), - resources: val.resources.clone().into(), - spec: (&val.spec).into(), - } - } -} diff --git a/controller/src/application/driver/wasm/config.rs b/controller/src/application/driver/wasm/config.rs deleted file mode 100644 index e833614f..00000000 --- a/controller/src/application/driver/wasm/config.rs +++ /dev/null @@ -1,64 +0,0 @@ -use common::config::{LoadFromTomlFile, SaveToTomlFile}; -use regex::Regex; -use serde::{Deserialize, Serialize}; -use simplelog::warn; - -#[derive(Serialize, Deserialize, Default)] -pub struct WasmConfig { - pub drivers: Vec, -} - -impl LoadFromTomlFile for WasmConfig {} -impl SaveToTomlFile for WasmConfig {} - -impl WasmConfig { - pub fn get_config(&self, name: &str) -> Option<&DriverConfig> { - self.drivers - .iter() - .find(|driver| match Regex::new(&driver.name) { - Ok(regex) => regex.is_match(name), - Err(error) => { - warn!("Failed to compile driver name regex: {}", error); - false - } - }) - } -} - -#[derive(Serialize, Deserialize)] -pub struct DriverConfig { - pub name: String, - pub inherit_stdio: bool, - pub inherit_args: bool, - pub inherit_env: bool, - pub inherit_network: bool, - pub allow_ip_name_lookup: bool, - pub allow_http: bool, - pub allow_process: bool, - pub allow_remove_dir_all: bool, - - pub 
mounts: Vec, -} - -impl Default for DriverConfig { - fn default() -> Self { - Self { - name: String::new(), - inherit_stdio: true, - inherit_args: true, - inherit_env: true, - inherit_network: true, - allow_ip_name_lookup: true, - allow_http: true, - allow_process: true, - allow_remove_dir_all: true, - mounts: Vec::new(), - } - } -} - -#[derive(Serialize, Deserialize)] -pub struct MountConfig { - pub host: String, - pub guest: String, -} diff --git a/controller/src/application/driver/wasm/file.rs b/controller/src/application/driver/wasm/file.rs deleted file mode 100644 index c548d9c1..00000000 --- a/controller/src/application/driver/wasm/file.rs +++ /dev/null @@ -1,17 +0,0 @@ -use std::fs; - -use super::{ - generated::cloudlet::driver::{ - self, - types::{Directory, ErrorMessage}, - }, - WasmDriverState, -}; - -impl driver::file::Host for WasmDriverState { - fn remove_dir_all(&mut self, directory: Directory) -> Result<(), ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let path = self.get_directory(&driver.name, &directory)?; - fs::remove_dir_all(path).map_err(|error| format!("Failed to remove directory: {}", error)) - } -} diff --git a/controller/src/application/driver/wasm/http.rs b/controller/src/application/driver/wasm/http.rs deleted file mode 100644 index b1a866a9..00000000 --- a/controller/src/application/driver/wasm/http.rs +++ /dev/null @@ -1,57 +0,0 @@ -use simplelog::warn; - -use super::{ - generated::cloudlet::driver::{ - self, - http::{Header, Method, Response}, - }, - WasmDriverState, -}; - -impl driver::http::Host for WasmDriverState { - fn send_http_request( - &mut self, - method: Method, - url: String, - headers: Vec
, - body: Option>, - ) -> Option { - let driver = self.handle.upgrade().unwrap(); - let mut request = match method { - Method::Get => minreq::get(url), - Method::Patch => minreq::patch(url), - Method::Post => minreq::post(url), - Method::Put => minreq::put(url), - Method::Delete => minreq::delete(url), - }; - if let Some(body) = body { - request = request.with_body(body); - } - for header in headers { - request = request.with_header(&header.key, &header.value); - } - let response = match request.send() { - Ok(response) => response, - Err(error) => { - warn!( - "Failed to send HTTP request for driver {}: {}", - &driver.name, error - ); - return None; - } - }; - Some(Response { - status_code: response.status_code as u32, - reason_phrase: response.reason_phrase.clone(), - headers: response - .headers - .iter() - .map(|header| Header { - key: header.0.clone(), - value: header.1.clone(), - }) - .collect(), - bytes: response.into_bytes(), - }) - } -} diff --git a/controller/src/application/driver/wasm/log.rs b/controller/src/application/driver/wasm/log.rs deleted file mode 100644 index b52782b5..00000000 --- a/controller/src/application/driver/wasm/log.rs +++ /dev/null @@ -1,33 +0,0 @@ -use simplelog::{debug, error, info, warn}; - -use super::{ - generated::cloudlet::driver::{self, log::Level}, - WasmDriverState, -}; - -impl driver::log::Host for WasmDriverState { - fn log_string(&mut self, level: Level, message: String) { - match level { - Level::Info => info!( - "[{}] {}", - &self.handle.upgrade().unwrap().name.to_uppercase(), - message - ), - Level::Warn => warn!( - "[{}] {}", - &self.handle.upgrade().unwrap().name.to_uppercase(), - message - ), - Level::Error => error!( - "[{}] {}", - &self.handle.upgrade().unwrap().name.to_uppercase(), - message - ), - Level::Debug => debug!( - "[{}] {}", - &self.handle.upgrade().unwrap().name.to_uppercase(), - message - ), - } - } -} diff --git a/controller/src/application/driver/wasm/platform.rs 
b/controller/src/application/driver/wasm/platform.rs deleted file mode 100644 index 0b328f31..00000000 --- a/controller/src/application/driver/wasm/platform.rs +++ /dev/null @@ -1,14 +0,0 @@ -use super::{ - generated::cloudlet::driver::{self, platform::Os}, - WasmDriverState, -}; - -impl driver::platform::Host for WasmDriverState { - fn get_os(&mut self) -> Os { - if cfg!(target_os = "windows") { - Os::Windows - } else { - Os::Unix - } - } -} diff --git a/controller/src/application/driver/wasm/process.rs b/controller/src/application/driver/wasm/process.rs deleted file mode 100644 index 6bee2444..00000000 --- a/controller/src/application/driver/wasm/process.rs +++ /dev/null @@ -1,339 +0,0 @@ -use std::{ - collections::HashMap, - io::{BufRead, Read, Write}, - path::PathBuf, - process::{Command, Stdio}, -}; - -use simplelog::debug; - -use crate::{ - application::driver::process::{DriverProcess, ProcessStream}, - storage::Storage, -}; - -use super::{ - generated::cloudlet::driver::{ - self, - process::{Directory, KeyValue, ReaderMode, StdReader}, - types::{ErrorMessage, Reference}, - }, - WasmDriverState, -}; - -impl driver::process::Host for WasmDriverState { - fn spawn_process( - &mut self, - command: String, - args: Vec, - environment: Vec, - directory: Directory, - mode: ReaderMode, - ) -> Result { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let process_dir = self.get_directory(&driver.name, &directory)?; - let environment: HashMap<_, _> = environment - .into_iter() - .map(|kv| (kv.key, kv.value)) - .collect(); - - debug!("Spawning process: {} {:?}", command, args); - let mut command = Command::new(command); - command - .args(args) - .current_dir(process_dir) - .envs(environment) - .stdin(Stdio::piped()) - .stdout(Stdio::piped()) - .stderr(Stdio::piped()); - - let process = command - .spawn() - .map_err(|e| format!("Failed to spawn process: {}", e))?; - let pid = process.id(); - - driver - .data - .processes - .write() - .map_err(|_| 
"Failed to acquire write lock on processes")? - .insert( - pid, - DriverProcess::new( - process, - match mode { - ReaderMode::Direct => true, - ReaderMode::Async => false, - }, - ) - .map_err(|error| error.to_string())?, - ); - - Ok(pid) - } - - fn kill_process(&mut self, pid: u32) -> Result<(), ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - debug!("Killing process: {}", pid); - if let Some(mut process) = processes.remove(&pid) { - process - .get_process() - .kill() - .map_err(|e| format!("Failed to kill process: {}", e)) - } else { - Ok(()) - } - } - - fn drop_process(&mut self, pid: u32) -> Result { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - debug!("Dropping process: {}", pid); - Ok(processes.remove(&pid).is_some()) - } - - fn try_wait(&mut self, pid: u32) -> Result, ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - if let Some(process) = processes.get_mut(&pid) { - process - .get_process() - .try_wait() - .map_err(|e| format!("Failed to wait for process: {}", e)) - .map(|status| status.and_then(|s| s.code())) - } else { - Ok(None) - } - } - - fn read_line_direct( - &mut self, - pid: u32, - std: StdReader, - ) -> Result<(u32, String), ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - if let Some(process) = processes.get_mut(&pid) { - let mut buffer = String::new(); - let bytes = match std { - StdReader::Stdout => 
match process.get_stdout() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .read_line(&mut buffer), - StdReader::Stderr => match process.get_stderr() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .read_line(&mut buffer), - } - .map_err(|error| format!("Failed to read from process: {}", error))?; - Ok((bytes as u32, buffer)) - } else { - Err("Process does not exist".to_string()) - } - } - - fn has_data_left_direct(&mut self, pid: u32, std: StdReader) -> Result { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - if let Some(process) = processes.get_mut(&pid) { - let has_data = match std { - StdReader::Stdout => match process.get_stdout() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .has_data_left(), - StdReader::Stderr => match process.get_stderr() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .has_data_left(), - } - .map_err(|error| format!("Failed to check buffer of process: {}", error))?; - Ok(has_data) - } else { - Err("Process does not exist".to_string()) - } - } - - fn read_direct( - &mut self, - pid: u32, - buf_size: u32, - std: StdReader, - ) -> Result<(u32, Vec), ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - if let Some(process) = processes.get_mut(&pid) { - let mut buffer = 
Vec::with_capacity(buf_size as usize); - let bytes = match std { - StdReader::Stdout => match process.get_stdout() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .read(&mut buffer), - StdReader::Stderr => match process.get_stderr() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .read(&mut buffer), - } - .map_err(|e| format!("Failed to read from process: {}", e))?; - Ok((bytes as u32, buffer)) - } else { - Err("Process does not exist".to_string()) - } - } - - fn read_to_end_direct( - &mut self, - pid: u32, - std: StdReader, - ) -> Result<(u32, Vec), ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - if let Some(process) = processes.get_mut(&pid) { - let mut buffer = Vec::new(); - let bytes = match std { - StdReader::Stdout => match process.get_stdout() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .read_to_end(&mut buffer), - StdReader::Stderr => match process.get_stderr() { - ProcessStream::Direct(stream) => stream, - ProcessStream::Async(_) => { - return Err("Cannot read from stream that is handeled async".to_string()) - } - } - .read_to_end(&mut buffer), - } - .map_err(|e| format!("Failed to read from process: {}", e))?; - Ok((bytes as u32, buffer)) - } else { - Err("Process does not exist".to_string()) - } - } - - fn read_line_async( - &mut self, - pid: u32, - std: StdReader, - ) -> Result, ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to 
acquire write lock on processes")?; - - if let Some(process) = processes.get_mut(&pid) { - Ok(match std { - StdReader::Stdout => match process.get_stdout() { - ProcessStream::Direct(_) => { - return Err("Cannot read from stream that is handeled directly".to_string()) - } - ProcessStream::Async(stream) => stream, - } - .try_recv(), - StdReader::Stderr => match process.get_stderr() { - ProcessStream::Direct(_) => { - return Err("Cannot read from stream that is handeled directly".to_string()) - } - ProcessStream::Async(stream) => stream, - } - .try_recv(), - }) - } else { - Err("Process does not exist".to_string()) - } - } - - fn write_stdin(&mut self, pid: u32, data: Vec) -> Result<(), ErrorMessage> { - let driver = self.handle.upgrade().ok_or("Failed to upgrade handle")?; - let mut processes = driver - .data - .processes - .write() - .map_err(|_| "Failed to acquire write lock on processes")?; - - if let Some(process) = processes.get_mut(&pid) { - process - .get_stdin() - .write_all(&data) - .map_err(|e| format!("Failed to write to stdin of process: {}", e))?; - Ok(()) - } else { - Err("Process does not exist".to_string()) - } - } -} - -impl WasmDriverState { - pub fn get_directory( - &self, - driver_name: &str, - directory: &Directory, - ) -> Result { - match &directory.reference { - Reference::Controller => Ok(PathBuf::from(".").join(&directory.path)), - Reference::Data => { - Ok(Storage::get_data_folder_for_driver(driver_name).join(&directory.path)) - } - Reference::Configs => { - Ok(Storage::get_config_folder_for_driver(driver_name).join(&directory.path)) - } - } - } -} diff --git a/controller/src/application/event.rs b/controller/src/application/event.rs deleted file mode 100644 index d5acd787..00000000 --- a/controller/src/application/event.rs +++ /dev/null @@ -1,151 +0,0 @@ -use simplelog::debug; -use uuid::Uuid; - -use super::unit::{UnitHandle, WeakUnitHandle}; - -use std::{ - any::Any, - collections::HashMap, - fmt::Debug, - hash::Hash, - sync::{Arc, 
RwLock}, -}; - -pub mod channel; -pub mod transfer; - -#[derive(Eq, PartialEq)] -pub enum EventKey { - Channel(String), - Transfer(Uuid), - //Custom(TypeId), -} - -pub trait Event: Any + Send + Sync + Debug {} - -pub type EventListener = Box; - -struct RegisteredListener { - unit: Option, - listener: Box, -} - -pub struct EventBus { - listeners: RwLock>>, -} - -impl EventBus { - pub fn new() -> Self { - Self { - listeners: RwLock::new(HashMap::new()), - } - } - - /*pub fn register_listener(&self, key: EventKey, listener: EventListener) { - let registered_listener = RegisteredListener { - unit: None, - listener: Box::new(listener), - }; - self.listeners - .write() - .unwrap() - .entry(key) - .or_default() - .push(registered_listener); - }*/ - - pub fn register_listener_under_unit( - &self, - key: EventKey, - unit: WeakUnitHandle, - listener: EventListener, - ) { - let registered_listener = RegisteredListener { - unit: Some(unit), - listener: Box::new(listener), - }; - self.listeners - .write() - .unwrap() - .entry(key) - .or_default() - .push(registered_listener); - } - - pub fn unregister_listener(&self, key: EventKey, unit: &UnitHandle) { - let mut listeners = self.listeners.write().unwrap(); - if let Some(registered_listeners) = listeners.get_mut(&key) { - registered_listeners.retain(|registered_listener| { - if let Some(weak_unit) = ®istered_listener.unit { - if let Some(strong_unit) = weak_unit.upgrade() { - if Arc::ptr_eq(unit, &strong_unit) { - return false; - } - } else { - return false; // Unit is dead - } - } - true - }); - } - } - - pub fn cleanup_unit(&self, unit: &UnitHandle) { - let mut listeners = self.listeners.write().unwrap(); - for (_, registered_listeners) in listeners.iter_mut() { - registered_listeners.retain(|registered_listener| { - if let Some(weak_unit) = ®istered_listener.unit { - if let Some(strong_unit) = weak_unit.upgrade() { - if Arc::ptr_eq(unit, &strong_unit) { - return false; - } - } else { - return false; // Unit is dead - } - } - 
true - }); - } - } - - pub fn dispatch(&self, key: &EventKey, event: &E) -> u32 { - debug!("[EVENTS] Dispatching event: {:?}", event); - - let mut count = 0; - let listeners = self.listeners.read().unwrap(); - if let Some(registered_listeners) = listeners.get(key) { - for registered_listener in registered_listeners { - if let Some(listener) = registered_listener - .listener - .downcast_ref::>() - { - listener(event); - count += 1; - } - } - } - count - } - - /*pub fn dispatch_custom(&self, event: &E) -> u32 { - self.dispatch(&EventKey::Custom(TypeId::of::()), event) - }*/ -} - -impl Hash for EventKey { - fn hash(&self, state: &mut H) { - match self { - EventKey::Channel(channel) => { - state.write_u8(0); - channel.hash(state); - } - EventKey::Transfer(unit) => { - state.write_u8(1); - unit.hash(state); - } /*EventKey::Custom(type_id) => { - state.write_u8(2); - type_id.hash(state); - }*/ - } - } -} diff --git a/controller/src/application/event/channel.rs b/controller/src/application/event/channel.rs deleted file mode 100644 index 3d1dcced..00000000 --- a/controller/src/application/event/channel.rs +++ /dev/null @@ -1,10 +0,0 @@ -use crate::network::unit::proto::channel_management::ChannelMessageValue; - -use super::Event; - -#[derive(Debug)] -pub struct ChannelMessageSended { - pub message: ChannelMessageValue, -} - -impl Event for ChannelMessageSended {} diff --git a/controller/src/application/event/transfer.rs b/controller/src/application/event/transfer.rs deleted file mode 100644 index 2e319e6a..00000000 --- a/controller/src/application/event/transfer.rs +++ /dev/null @@ -1,10 +0,0 @@ -use crate::application::user::transfer::Transfer; - -use super::Event; - -#[derive(Debug)] -pub struct UserTransferRequested { - pub transfer: Transfer, -} - -impl Event for UserTransferRequested {} diff --git a/controller/src/application/group.rs b/controller/src/application/group.rs new file mode 100644 index 00000000..50d1439c --- /dev/null +++ 
b/controller/src/application/group.rs @@ -0,0 +1,279 @@ +use anyhow::{anyhow, Result}; +use common::allocator::NumberAllocator; +use getset::Getters; +use manager::stored::StoredGroup; +use serde::{Deserialize, Serialize}; +use simplelog::{debug, info}; +use tokio::fs; +use uuid::Uuid; + +use crate::{ + application::server::manager::StopRequest, + config::Config, + resource::DeleteResourceError, + storage::{SaveToTomlFile, Storage}, +}; + +use super::{ + node::LifecycleStatus, + server::{ + manager::{ServerManager, StartRequest}, + NameAndUuid, Resources, Server, Spec, + }, +}; + +pub mod manager; + +#[derive(Getters)] +pub struct Group { + /* Settings */ + #[getset(get = "pub")] + name: String, + #[getset(get = "pub")] + status: LifecycleStatus, + + /* Where? */ + #[getset(get = "pub")] + nodes: Vec, + #[getset(get = "pub")] + constraints: StartConstraints, + #[getset(get = "pub")] + scaling: ScalingPolicy, + + /* How? */ + #[getset(get = "pub")] + resources: Resources, + #[getset(get = "pub")] + spec: Spec, + + /* What do i need to know? 
*/ + id_allocator: NumberAllocator, + servers: Vec, +} + +impl Group { + #[allow( + clippy::cast_precision_loss, + clippy::cast_possible_truncation, + reason = "I have to clean this up" + )] + pub fn tick(&mut self, config: &Config, servers: &mut ServerManager) -> Result<()> { + if self.status == LifecycleStatus::Inactive { + // Do not tick this group because it is inactive + return Ok(()); + } + + let mut target_count = self.constraints.minimum; + + // Apply scaling policy + if self.scaling.enabled { + self.servers.retain(|server| match server { + AssociatedServer::Active(server) => { + servers.get_server(server.uuid()).is_some_and(|server| { + if *server.connected_users() as f32 / *self.spec.max_players() as f32 + >= self.scaling.start_threshold + { + target_count += 1; + } + true + }) + } + AssociatedServer::Queueing(_) => true, + }); + + if self.scaling.stop_empty_servers && self.servers.len() as u32 > target_count { + let mut to_stop = self.servers.len() as u32 - target_count; + let mut requests = vec![]; + self.servers.retain(|server| match server { + AssociatedServer::Active(server) => { + servers.get_server_mut(server.uuid()).is_some_and(|server| { + if server.connected_users() == &0 { + if server.flags().should_stop() && to_stop > 0 { + debug!( + "Server {} is empty and reached the timeout, stopping it...", + server.id() + ); + requests.push(StopRequest::new(None, server.id().clone())); + to_stop -= 1; + } else { + debug!( + "Server {} is empty, starting stop timer...", + server.id() + ); + server + .flags_mut() + .replace_stop(*config.empty_server_timeout()); + } + } else if server.flags().is_stop_set() { + debug!( + "Server {} is no longer empty, clearing stop timer...", + server.id() + ); + server.flags_mut().clear_stop(); + } + true + }) + } + AssociatedServer::Queueing(_) => true, + }); + servers.schedule_stops(requests); + } + } + + for requested in 0..(target_count as usize).saturating_sub(self.servers.len()) { + if (self.servers.len() + requested) 
>= target_count as usize { + break; + } + + let id = self.id_allocator.allocate().ok_or(anyhow!( + "We reached the maximum server count. Wow this is a lot of servers" + ))?; + let request = StartRequest::new( + None, + self.constraints.priority, + format!("{}-{}", self.name, id), + Some(self.name.clone()), + &self.nodes, + &self.resources, + &self.spec, + ); + self.servers + .push(AssociatedServer::Queueing(request.id().clone())); + debug!( + "Scheduled server({}) start for group {}", + request.id(), + self.name + ); + servers.schedule_start(request); + } + + Ok(()) + } + + pub async fn delete(&mut self) -> Result<(), DeleteResourceError> { + if self.status == LifecycleStatus::Active { + return Err(DeleteResourceError::StillActive); + } + if !self.servers.is_empty() { + return Err(DeleteResourceError::StillInUse); + } + let path = Storage::group_file(&self.name); + if path.exists() { + fs::remove_file(path) + .await + .map_err(|error| DeleteResourceError::Error(error.into()))?; + } + + Ok(()) + } + + pub async fn set_active(&mut self, active: bool, servers: &mut ServerManager) -> Result<()> { + if active && self.status == LifecycleStatus::Inactive { + // Activate group + + self.status = LifecycleStatus::Active; + self.save().await?; + info!("Group {} is now active", self.name); + } else if !active && self.status == LifecycleStatus::Active { + // Retire group + // Stop all servers and cancel all starts + self.servers.retain(|server| match server { + AssociatedServer::Active(server) => { + servers.schedule_stop(StopRequest::new(None, server.clone())); + true + } + AssociatedServer::Queueing(server) => { + servers.cancel_start(server.uuid()); + false + } + }); + + self.status = LifecycleStatus::Inactive; + self.save().await?; + info!("Group {} is now inactive", self.name); + } + + Ok(()) + } + + pub fn find_free_server<'a>(&self, servers: &'a ServerManager) -> Option<&'a Server> { + self.servers.iter().find_map(|server| match server { + 
AssociatedServer::Active(server) => servers.get_server(server.uuid()), + AssociatedServer::Queueing(_) => None, + }) + } + + pub fn set_server_active(&mut self, id: &NameAndUuid) { + self.servers.retain(|server| { + if let AssociatedServer::Queueing(server) = server { + if server.uuid() == id.uuid() { + return false; + } + } + true + }); + self.servers.push(AssociatedServer::Active(id.clone())); + } + + pub fn remove_server(&mut self, uuid: &Uuid) { + self.servers.retain(|server| { + if let AssociatedServer::Active(server) = server { + if server.uuid() == uuid { + return false; + } + } + true + }); + } + + pub async fn save(&self) -> Result<()> { + StoredGroup::from(self) + .save(&Storage::group_file(&self.name), true) + .await + } +} + +#[derive(Serialize, Deserialize, Clone, Getters)] +pub struct StartConstraints { + #[getset(get = "pub")] + minimum: u32, + #[getset(get = "pub")] + maximum: u32, + #[getset(get = "pub")] + priority: i32, +} + +#[derive(Serialize, Deserialize, Clone, Default, Getters)] +pub struct ScalingPolicy { + #[getset(get = "pub")] + enabled: bool, + #[getset(get = "pub")] + start_threshold: f32, + #[getset(get = "pub")] + stop_empty_servers: bool, +} + +pub enum AssociatedServer { + Queueing(NameAndUuid), + Active(NameAndUuid), +} + +impl StartConstraints { + pub fn new(minimum: u32, maximum: u32, priority: i32) -> Self { + Self { + minimum, + maximum, + priority, + } + } +} + +impl ScalingPolicy { + pub fn new(enabled: bool, start_threshold: f32, stop_empty_servers: bool) -> Self { + Self { + enabled, + start_threshold, + stop_empty_servers, + } + } +} diff --git a/controller/src/application/group/manager.rs b/controller/src/application/group/manager.rs new file mode 100644 index 00000000..c1799b19 --- /dev/null +++ b/controller/src/application/group/manager.rs @@ -0,0 +1,234 @@ +use std::{collections::HashMap, vec}; + +use anyhow::Result; +use common::allocator::NumberAllocator; +use simplelog::{info, warn}; +use stored::StoredGroup; +use 
tokio::fs; + +use crate::{ + application::{ + node::manager::NodeManager, + server::{manager::ServerManager, Resources, Spec}, + OptVoter, Voter, + }, + config::Config, + resource::{CreateResourceError, DeleteResourceError}, + storage::Storage, +}; + +use super::{Group, ScalingPolicy, StartConstraints}; + +pub struct GroupManager { + voter: OptVoter, + + groups: HashMap, +} + +impl GroupManager { + pub async fn init(nodes: &NodeManager) -> Result { + info!("Loading groups..."); + let mut groups = HashMap::new(); + + let directory = Storage::groups_directory(); + if !directory.exists() { + fs::create_dir_all(&directory).await?; + } + + for (_, _, name, mut value) in Storage::for_each_content_toml::( + &directory, + "Failed to read group from file", + ) + .await? + { + info!("Loading group {}", name); + + value.nodes_mut().retain(|node| { + if !nodes.has_node(node) { + warn!("Node {} is not loaded, skipping node {}", node, name); + return false; + } + true + }); + groups.insert(name.clone(), Group::new(&name, &value)); + } + + info!("Loaded {} group(s)", groups.len()); + Ok(Self { + voter: None, + groups, + }) + } + + pub async fn delete_group(&mut self, name: &str) -> Result<(), DeleteResourceError> { + let group = self + .get_group_mut(name) + .ok_or(DeleteResourceError::NotFound)?; + group.delete().await?; + self.groups.remove(name); + info!("Deleted group {}", name); + Ok(()) + } + + #[allow(clippy::too_many_arguments)] + pub async fn create_group( + &mut self, + name: &str, + constraints: &StartConstraints, + scaling: &ScalingPolicy, + resources: &Resources, + spec: &Spec, + g_nodes: &[String], + nodes: &NodeManager, + ) -> Result<(), CreateResourceError> { + if self.groups.contains_key(name) { + return Err(CreateResourceError::AlreadyExists); + } + + if nodes.verify_nodes(g_nodes) { + return Err(CreateResourceError::RequiredNodeNotLoaded); + } + let group = StoredGroup::new( + g_nodes.to_vec(), + constraints.clone(), + scaling.clone(), + resources.clone(), + 
spec.clone(), + ); + + let group = Group::new(name, &group); + group.save().await.map_err(CreateResourceError::Error)?; + self.groups.insert(name.to_string(), group); + info!("Created group {}", name); + Ok(()) + } + + pub fn is_node_used(&self, name: &str) -> bool { + let name = name.to_string(); + self.groups + .values() + .any(|group| group.nodes.contains(&name)) + } + + pub fn get_groups(&self) -> Vec<&Group> { + self.groups.values().collect() + } + + pub fn get_group(&self, name: &str) -> Option<&Group> { + self.groups.get(name) + } + pub fn get_group_mut(&mut self, name: &str) -> Option<&mut Group> { + self.groups.get_mut(name) + } +} + +impl Group { + pub fn new(name: &str, group: &StoredGroup) -> Self { + Self { + name: name.to_string(), + status: group.status().clone(), + nodes: group.nodes().clone(), + constraints: group.constraints().clone(), + scaling: group.scaling().clone(), + resources: group.resources().clone(), + spec: group.spec().clone(), + id_allocator: NumberAllocator::new(1..usize::MAX), + servers: vec![], + } + } +} + +// Ticking +impl GroupManager { + pub fn tick(&mut self, config: &Config, servers: &mut ServerManager) -> Result<()> { + if self.voter.is_some() { + // Do not tick if we are shutting down + return Ok(()); + } + + for group in self.groups.values_mut() { + group.tick(config, servers)?; + } + Ok(()) + } + + #[allow(clippy::unused_self, clippy::unnecessary_wraps)] + pub fn shutdown(&mut self, mut voter: Voter) -> Result<()> { + voter.vote(); + self.voter = Some(voter); + Ok(()) + } + + #[allow(clippy::unnecessary_wraps, clippy::unused_self)] + pub fn cleanup(&mut self) -> Result<()> { + Ok(()) + } +} + +pub(super) mod stored { + use getset::{Getters, MutGetters}; + use serde::{Deserialize, Serialize}; + + use crate::{ + application::{ + group::{Group, ScalingPolicy, StartConstraints}, + node::LifecycleStatus, + server::{Resources, Spec}, + }, + storage::{LoadFromTomlFile, SaveToTomlFile}, + }; + + #[derive(Serialize, Deserialize, 
Getters, MutGetters)] + pub struct StoredGroup { + /* Settings */ + #[getset(get = "pub", get_mut = "pub")] + status: LifecycleStatus, + + /* Where? */ + #[getset(get = "pub", get_mut = "pub")] + nodes: Vec, + #[getset(get = "pub", get_mut = "pub")] + constraints: StartConstraints, + #[getset(get = "pub", get_mut = "pub")] + scaling: ScalingPolicy, + + /* How? */ + #[getset(get = "pub", get_mut = "pub")] + resources: Resources, + #[getset(get = "pub", get_mut = "pub")] + spec: Spec, + } + + impl StoredGroup { + pub fn new( + nodes: Vec, + constraints: StartConstraints, + scaling: ScalingPolicy, + resources: Resources, + spec: Spec, + ) -> Self { + Self { + status: LifecycleStatus::Inactive, + nodes, + constraints, + scaling, + resources, + spec, + } + } + + pub fn from(group: &Group) -> Self { + Self { + status: group.status.clone(), + nodes: group.nodes.clone(), + constraints: group.constraints.clone(), + scaling: group.scaling.clone(), + resources: group.resources.clone(), + spec: group.spec.clone(), + } + } + } + + impl LoadFromTomlFile for StoredGroup {} + impl SaveToTomlFile for StoredGroup {} +} diff --git a/controller/src/application/node.rs b/controller/src/application/node.rs new file mode 100644 index 00000000..44e7f2a7 --- /dev/null +++ b/controller/src/application/node.rs @@ -0,0 +1,175 @@ +use anyhow::Result; +use common::network::HostAndPort; +use getset::Getters; +use manager::stored::StoredNode; +use serde::{Deserialize, Serialize}; +use simplelog::info; +use tokio::{fs, task::JoinHandle}; +use tonic::Status; +use url::Url; + +use crate::{ + resource::DeleteResourceError, + storage::{SaveToTomlFile, Storage}, +}; + +use super::{ + plugin::BoxedNode, + server::{manager::StartRequest, screen::BoxedScreen, Resources, Server, Spec}, +}; + +pub mod manager; + +#[derive(Getters)] +pub struct Node { + /* Plugin */ + #[getset(get = "pub")] + plugin: String, + instance: BoxedNode, + + /* Settings */ + #[getset(get = "pub")] + name: String, + #[getset(get = 
"pub")] + capabilities: Capabilities, + #[getset(get = "pub")] + status: LifecycleStatus, + + /* Controller */ + #[getset(get = "pub")] + controller: Url, +} + +impl Node { + #[allow(clippy::unnecessary_wraps)] + pub fn tick(&self) -> Result<()> { + // Always tick the node in the plugin + self.instance.tick(); + + if self.status == LifecycleStatus::Inactive { + // Do not tick this node because it is inactive + return Ok(()); + } + + Ok(()) + } + + pub async fn delete(&mut self) -> Result<(), DeleteResourceError> { + if self.status == LifecycleStatus::Active { + return Err(DeleteResourceError::StillActive); + } + let path = Storage::group_file(&self.name); + if path.exists() { + fs::remove_file(path) + .await + .map_err(|error| DeleteResourceError::Error(error.into()))?; + } + + Ok(()) + } + + pub async fn set_active(&mut self, active: bool) -> Result<()> { + if active && self.status == LifecycleStatus::Inactive { + // Activate node + + self.status = LifecycleStatus::Active; + self.save().await?; + info!("Node {} is now active", self.name); + } else if !active && self.status == LifecycleStatus::Active { + // Retire node + + self.status = LifecycleStatus::Inactive; + self.save().await?; + info!("Node {} is now inactive", self.name); + } + + Ok(()) + } + + pub fn allocate(&self, request: &StartRequest) -> JoinHandle>>> { + self.instance.allocate(request) + } + pub fn free(&self, ports: &[HostAndPort]) -> JoinHandle> { + self.instance.free(ports) + } + pub fn start(&self, server: &Server) -> JoinHandle> { + self.instance.start(server) + } + pub fn restart(&self, server: &Server) -> JoinHandle> { + self.instance.restart(server) + } + pub fn stop(&self, server: &Server) -> JoinHandle> { + self.instance.stop(server) + } + + pub async fn save(&self) -> Result<()> { + StoredNode::from(self) + .save(&Storage::node_file(&self.name), true) + .await + } + + pub async fn cleanup(&mut self) -> Result<()> { + self.instance.cleanup().await + } +} + +#[derive(Getters)] +pub struct 
Allocation { + #[getset(get = "pub")] + pub ports: Vec, + #[getset(get = "pub")] + pub resources: Resources, + #[getset(get = "pub")] + pub spec: Spec, +} + +#[derive(Serialize, Deserialize, Clone, Default, Getters)] +pub struct Capabilities { + #[getset(get = "pub")] + memory: Option, + #[getset(get = "pub")] + max_servers: Option, + #[getset(get = "pub")] + child: Option, +} + +#[derive(Serialize, Deserialize, Clone, Default, PartialEq)] +pub enum LifecycleStatus { + #[serde(rename = "inactive")] + #[default] + Inactive, + #[serde(rename = "active")] + Active, +} + +pub enum SetActiveError { + NodeInUseByGroup, + NodeInUseByServer, + Error(anyhow::Error), +} + +impl Allocation { + pub fn primary_port(&self) -> Option<&HostAndPort> { + self.ports.first() + } +} + +impl Capabilities { + pub fn new(memory: Option, max_servers: Option, child: Option) -> Self { + Self { + memory, + max_servers, + child, + } + } +} + +impl From for Status { + fn from(val: SetActiveError) -> Self { + match val { + SetActiveError::NodeInUseByGroup => Status::unavailable("Node in use by some group"), + SetActiveError::NodeInUseByServer => Status::unavailable("Node in use by some server"), + SetActiveError::Error(error) => Status::internal(format!("Error: {error}")), + } + } +} diff --git a/controller/src/application/node/manager.rs b/controller/src/application/node/manager.rs new file mode 100644 index 00000000..7398a2ac --- /dev/null +++ b/controller/src/application/node/manager.rs @@ -0,0 +1,229 @@ +use std::collections::HashMap; + +use anyhow::Result; +use simplelog::{error, info, warn}; +use stored::StoredNode; +use tokio::fs; +use url::Url; + +use crate::{ + application::{ + group::manager::GroupManager, + plugin::{manager::PluginManager, BoxedNode}, + server::manager::ServerManager, + }, + resource::{CreateResourceError, DeleteResourceError}, + storage::Storage, +}; + +use super::{Capabilities, Node}; + +pub struct NodeManager { + nodes: HashMap, +} + +impl NodeManager { + pub async 
fn init(plugins: &PluginManager) -> Result { + info!("Loading nodes..."); + let mut nodes = HashMap::new(); + + let directory = Storage::nodes_directory(); + if !directory.exists() { + fs::create_dir_all(&directory).await?; + } + + for (_, _, name, value) in Storage::for_each_content_toml::( + &directory, + "Failed to read node from file", + ) + .await? + { + info!("Loading node {}", name); + + let Some(plugin) = plugins.get_plugin(value.plugin()) else { + warn!( + "Plugin {} is not loaded, skipping node {}", + value.plugin(), + name + ); + continue; + }; + + match plugin + .init_node(&name, value.capabilities(), value.controller()) + .await + { + Ok(instance) => { + nodes.insert(name.clone(), Node::new(&name, &value, instance)); + } + Err(error) => error!("Failed to initialize node {}: {}", name, error), + } + } + + info!("Loaded {} node(s)", nodes.len()); + Ok(Self { nodes }) + } + + pub async fn delete_node( + &mut self, + name: &str, + servers: &ServerManager, + groups: &GroupManager, + ) -> Result<(), DeleteResourceError> { + if servers.is_node_used(name) { + return Err(DeleteResourceError::StillInUse); + } + if groups.is_node_used(name) { + return Err(DeleteResourceError::StillInUse); + } + let node = self + .get_node_mut(name) + .ok_or(DeleteResourceError::NotFound)?; + node.delete().await?; + if let Some(mut node) = self.nodes.remove(name) { + // Before we can drop the node we have to drop the wasm resources first + node.cleanup().await.map_err(DeleteResourceError::Error)?; + drop(node); // Drop the node + } + info!("Deleted node {}", name); + Ok(()) + } + + pub async fn create_node( + &mut self, + name: &str, + p_name: &str, + capabilities: &Capabilities, + controller: &Url, + plugins: &PluginManager, + ) -> Result<(), CreateResourceError> { + if self.nodes.contains_key(name) { + return Err(CreateResourceError::AlreadyExists); + } + + let Some(plugin) = plugins.get_plugin(p_name) else { + return Err(CreateResourceError::RequiredPluginNotLoaded); + }; + let 
node = StoredNode::new(p_name, capabilities.clone(), controller.clone()); + + let instance = match plugin + .init_node(name, node.capabilities(), node.controller()) + .await + { + Ok(instance) => instance, + Err(error) => return Err(CreateResourceError::Error(error)), + }; + + let node = Node::new(name, &node, instance); + node.save().await.map_err(CreateResourceError::Error)?; + self.nodes.insert(name.to_string(), node); + info!("Created node {}", name); + Ok(()) + } + + pub fn verify_nodes(&self, names: &[String]) -> bool { + for name in names { + if !self.nodes.contains_key(name) { + return true; + } + } + false + } + + pub fn get_nodes(&self) -> Vec<&Node> { + self.nodes.values().collect() + } + + pub fn has_node(&self, name: &str) -> bool { + self.nodes.contains_key(name) + } + + pub fn get_node(&self, name: &str) -> Option<&Node> { + self.nodes.get(name) + } + pub fn get_node_mut(&mut self, name: &str) -> Option<&mut Node> { + self.nodes.get_mut(name) + } +} + +impl Node { + pub fn new(name: &str, node: &StoredNode, instance: BoxedNode) -> Self { + Self { + plugin: node.plugin().to_string(), + instance, + name: name.to_owned(), + capabilities: node.capabilities().clone(), + status: node.status().clone(), + controller: node.controller().clone(), + } + } +} + +// Ticking +impl NodeManager { + pub fn tick(&mut self) -> Result<()> { + for node in self.nodes.values() { + node.tick()?; + } + Ok(()) + } + + pub async fn cleanup(&mut self) -> Result<()> { + for (_, mut node) in self.nodes.drain() { + // Before we can drop the node we have to drop the wasm resources first + node.cleanup().await?; + drop(node); // Drop the node + } + + Ok(()) + } +} + +pub(super) mod stored { + use getset::Getters; + use serde::{Deserialize, Serialize}; + use url::Url; + + use crate::{ + application::node::{Capabilities, LifecycleStatus, Node}, + storage::{LoadFromTomlFile, SaveToTomlFile}, + }; + + #[derive(Serialize, Deserialize, Getters)] + pub struct StoredNode { + /* Settings */ + 
#[getset(get = "pub")] + plugin: String, + #[getset(get = "pub")] + capabilities: Capabilities, + #[getset(get = "pub")] + status: LifecycleStatus, + + /* Controller */ + #[getset(get = "pub")] + controller: Url, + } + + impl StoredNode { + pub fn new(plugin: &str, capabilities: Capabilities, controller: Url) -> Self { + Self { + plugin: plugin.to_string(), + capabilities, + status: LifecycleStatus::Inactive, + controller, + } + } + + pub fn from(node: &Node) -> Self { + Self { + plugin: node.plugin.clone(), + capabilities: node.capabilities.clone(), + status: node.status.clone(), + controller: node.controller.clone(), + } + } + } + + impl LoadFromTomlFile for StoredNode {} + impl SaveToTomlFile for StoredNode {} +} diff --git a/controller/src/application/plugin.rs b/controller/src/application/plugin.rs new file mode 100644 index 00000000..1c41d190 --- /dev/null +++ b/controller/src/application/plugin.rs @@ -0,0 +1,70 @@ +use anyhow::Result; +use bitflags::bitflags; +use common::network::HostAndPort; +use tokio::task::JoinHandle; +use tonic::async_trait; +use url::Url; + +use super::{ + node::Capabilities, + server::{manager::StartRequest, screen::BoxedScreen, Server}, +}; + +pub mod manager; +mod runtime; + +pub type BoxedPlugin = Box; +pub type BoxedNode = Box; + +#[async_trait] +pub trait GenericPlugin { + async fn init(&self) -> Result; + async fn init_node( + &self, + name: &str, + capabilities: &Capabilities, + controller: &Url, + ) -> Result; + + /* Shutdown */ + fn shutdown(&self) -> JoinHandle>; + + /* Ticking */ + fn tick(&self) -> JoinHandle>; + + /* Management */ + async fn cleanup(&mut self) -> Result<()>; +} + +#[async_trait] +pub trait GenericNode { + /* Ticking */ + fn tick(&self) -> JoinHandle>; + + /* Prepare */ + fn allocate(&self, request: &StartRequest) -> JoinHandle>>; + fn free(&self, ports: &[HostAndPort]) -> JoinHandle>; + + /* Servers */ + fn start(&self, server: &Server) -> JoinHandle>; + fn restart(&self, server: &Server) -> JoinHandle>; 
+ fn stop(&self, server: &Server) -> JoinHandle>; + + /* Memory */ + async fn cleanup(&mut self) -> Result<()>; +} + +pub struct Information { + authors: Vec, + version: String, + #[allow(unused)] + features: Features, + ready: bool, +} + +bitflags! { + pub struct Features: u32 { + const NODE = 1; + const ALL = Self::NODE.bits(); + } +} diff --git a/controller/src/application/plugin/manager.rs b/controller/src/application/plugin/manager.rs new file mode 100644 index 00000000..92c99399 --- /dev/null +++ b/controller/src/application/plugin/manager.rs @@ -0,0 +1,74 @@ +use std::collections::HashMap; + +use anyhow::Result; +use futures::future::join_all; +use simplelog::info; +use tick::Ticker; + +use crate::config::Config; + +use super::BoxedPlugin; + +#[cfg(feature = "wasm-plugins")] +use crate::application::plugin::runtime::wasm::init::init_wasm_plugins; + +mod tick; + +pub struct PluginManager { + plugins: HashMap, + + ticker: Ticker, +} + +impl PluginManager { + pub async fn init(config: &Config) -> Result { + info!("Loading plugins..."); + + let mut plugins = HashMap::new(); + + #[cfg(feature = "wasm-plugins")] + init_wasm_plugins(config, &mut plugins).await?; + + info!("Loaded {} plugin(s)", plugins.len()); + Ok(Self { + plugins, + ticker: Ticker::new(), + }) + } + + pub fn get_plugins_keys(&self) -> Vec<&String> { + self.plugins.keys().collect() + } + + pub fn get_plugin(&self, name: &str) -> Option<&BoxedPlugin> { + self.plugins.get(name) + } +} + +// Ticking +impl PluginManager { + #[allow(clippy::unnecessary_wraps)] + pub async fn tick(&mut self) -> Result<()> { + self.ticker.tick(&self.plugins).await?; + Ok(()) + } + + pub async fn cleanup(&mut self) -> Result<()> { + let tasks = join_all(self.plugins.values().map(|plugin| plugin.shutdown())).await; + + for task in tasks { + if let Err(error) = task { + return Err(error.into()); + } else if let Ok(Err(error)) = task { + return Err(error); + } + } + + for (_, mut plugin) in self.plugins.drain() { + // Before 
we can drop the plugin we have to drop the wasm resources first + plugin.cleanup().await?; + drop(plugin); // Drop the plugin + } + Ok(()) + } +} diff --git a/controller/src/application/plugin/manager/tick.rs b/controller/src/application/plugin/manager/tick.rs new file mode 100644 index 00000000..f40f64b6 --- /dev/null +++ b/controller/src/application/plugin/manager/tick.rs @@ -0,0 +1,40 @@ +use std::collections::HashMap; + +use anyhow::Result; +use common::error::FancyError; +use futures::future::join_all; +use simplelog::warn; +use tokio::task::JoinHandle; + +use crate::application::plugin::BoxedPlugin; + +pub struct Ticker(HashMap>>); + +impl Ticker { + pub fn new() -> Self { + Self(HashMap::new()) + } + + pub async fn tick(&mut self, plugins: &HashMap) -> Result<()> { + for result in join_all( + self.0 + .extract_if(|_, handle| handle.is_finished()) + .map(|(_, handle)| handle), + ) + .await + { + match result.map_err(Into::into) { + Ok(Ok(())) => {} + Ok(Err(error)) | Err(error) => { + warn!("Plugin failed to tick: {:?}", error); + FancyError::print_fancy(&error, false); + } + } + } + + for (name, plugin) in plugins { + self.0.entry(name.clone()).or_insert_with(|| plugin.tick()); + } + Ok(()) + } +} diff --git a/controller/src/application/plugin/runtime.rs b/controller/src/application/plugin/runtime.rs new file mode 100644 index 00000000..02d5effb --- /dev/null +++ b/controller/src/application/plugin/runtime.rs @@ -0,0 +1,37 @@ +#[cfg(feature = "wasm-plugins")] +pub(crate) mod wasm; + +#[cfg(feature = "wasm-plugins")] +pub(crate) mod source { + use std::{ + fmt::{self, Display, Formatter}, + fs, + path::{Path, PathBuf}, + }; + + use anyhow::Result; + + pub struct Source { + path: PathBuf, + source: Vec, + } + + impl Display for Source { + fn fmt(&self, formatter: &mut Formatter<'_>) -> fmt::Result { + write!(formatter, "{}", self.path.display()) + } + } + + impl Source { + pub fn from_file(path: &Path) -> Result { + Ok(Source { + path: path.to_owned(), + 
source: fs::read(path)?, + }) + } + + pub fn get_source(&self) -> &[u8] { + &self.source + } + } +} diff --git a/controller/src/application/plugin/runtime/wasm.rs b/controller/src/application/plugin/runtime/wasm.rs new file mode 100644 index 00000000..f18b62bd --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm.rs @@ -0,0 +1,209 @@ +use std::sync::Arc; + +use anyhow::{anyhow, Result}; +use generated::exports::plugin::system::bridge; +use node::PluginNode; +use simplelog::error; +use tokio::{spawn, sync::Mutex, task::JoinHandle}; +use tonic::async_trait; +use url::Url; +use wasmtime::{component::ResourceAny, AsContextMut, Engine, Store}; +use wasmtime_wasi::{ResourceTable, WasiCtx, WasiView}; + +use crate::application::{ + node::Capabilities, + plugin::{BoxedNode, Features, GenericPlugin, Information}, +}; + +pub(crate) mod config; +mod epoch; +pub mod ext; +pub mod init; +mod node; + +#[allow(clippy::all)] +pub mod generated { + use wasmtime::component::bindgen; + + bindgen!({ + world: "plugin", + path: "../protocol/wit/", + async: true, + trappable_imports: true, + with: { + "plugin:system/process/process-builder": super::ext::process::ProcessBuilder, + "plugin:system/process/process": super::ext::process::Process, + } + }); +} + +pub(crate) struct PluginState { + /* Plugin */ + name: String, + + /* Wasmtime */ + wasi: WasiCtx, + resources: ResourceTable, +} + +pub(crate) struct Plugin { + dropped: bool, + + #[allow(unused)] + engine: Engine, + bindings: Arc, + store: Arc>>, + instance: ResourceAny, +} + +#[async_trait] +impl GenericPlugin for Plugin { + async fn init(&self) -> Result { + let (bindings, store, instance) = self.get(); + let mut store = store.lock().await; + match bindings + .plugin_system_bridge() + .generic_plugin() + .call_init(store.as_context_mut(), instance) + .await + { + Ok(information) => Ok(information.into()), + Err(error) => Err(error), + } + } + + async fn init_node( + &self, + name: &str, + capabilities: &Capabilities, + 
controller: &Url, + ) -> Result { + let (bindings, store, instance) = self.get(); + match bindings + .plugin_system_bridge() + .generic_plugin() + .call_init_node( + store.clone().lock().await.as_context_mut(), + instance, + name, + &capabilities.into(), + controller.as_ref(), + ) + .await? + { + Ok(instance) => Ok(Box::new(PluginNode::new(bindings, store, instance)) as BoxedNode), + Err(error) => Err(anyhow!(error)), + } + } + + fn shutdown(&self) -> JoinHandle> { + let (bindings, store, instance) = self.get(); + spawn(async move { + match bindings + .plugin_system_bridge() + .generic_plugin() + .call_shutdown(store.lock().await.as_context_mut(), instance) + .await + { + Ok(result) => result.map_err(|errors| { + anyhow!(errors + .iter() + .map(|error| format!("Scope: {}, Message: {}", error.scope, error.message)) + .collect::>() + .join("\n")) + }), + Err(error) => Err(error), + } + }) + } + + fn tick(&self) -> JoinHandle> { + let (bindings, store, instance) = self.get(); + spawn(async move { + match bindings + .plugin_system_bridge() + .generic_plugin() + .call_tick(store.lock().await.as_context_mut(), instance) + .await + { + Ok(result) => result.map_err(|errors| { + anyhow!(errors + .iter() + .map(|error| format!("Scope: {}, Message: {}", error.scope, error.message)) + .collect::>() + .join("\n")) + }), + Err(error) => Err(error), + } + }) + } + + async fn cleanup(&mut self) -> Result<()> { + self.instance + .resource_drop_async(self.store.lock().await.as_context_mut()) + .await?; + self.dropped = true; + + Ok(()) + } +} + +impl Drop for Plugin { + fn drop(&mut self) { + if !self.dropped { + error!("Resource was not dropped before being deallocated (memory leak)"); + } + } +} + +impl Plugin { + fn get( + &self, + ) -> ( + Arc, + Arc>>, + ResourceAny, + ) { + (self.bindings.clone(), self.store.clone(), self.instance) + } +} + +impl WasiView for PluginState { + fn ctx(&mut self) -> &mut WasiCtx { + &mut self.wasi + } + fn table(&mut self) -> &mut ResourceTable { 
+ &mut self.resources + } +} + +impl From for Information { + fn from(val: bridge::Information) -> Self { + Information { + authors: val.authors, + version: val.version, + features: val.features.into(), + ready: val.ready, + } + } +} + +impl From for Features { + fn from(value: bridge::Features) -> Self { + let mut features = Features::empty(); + if value.contains(bridge::Features::NODE) { + features.insert(Features::NODE); + } + features + } +} + +impl From<&Capabilities> for bridge::Capabilities { + fn from(val: &Capabilities) -> Self { + bridge::Capabilities { + memory: *val.memory(), + max_servers: *val.max_servers(), + child: val.child().as_ref().map(std::string::ToString::to_string), + } + } +} diff --git a/controller/src/application/plugin/runtime/wasm/config.rs b/controller/src/application/plugin/runtime/wasm/config.rs new file mode 100644 index 00000000..1f596b98 --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/config.rs @@ -0,0 +1,129 @@ +use std::path::PathBuf; + +use anyhow::Result; +use regex::Regex; +use serde::{Deserialize, Serialize}; +use simplelog::warn; +use tokio::fs; + +use crate::storage::{LoadFromTomlFile, Storage}; + +const DEFAULT_PLUGINS_CONFIG: &str = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/configs/wasm-plugins.toml" +)); +const DEFAULT_ENGINE_CONFIG: &str = include_str!(concat!( + env!("CARGO_MANIFEST_DIR"), + "/configs/wasm-engine.toml" +)); + +#[derive(Serialize, Deserialize)] +pub struct PluginsConfig { + plugins: Vec, +} + +#[allow( + clippy::struct_excessive_bools, + reason = "Mybe refactor this in the future to use bitflags" +)] +#[derive(Serialize, Deserialize)] +pub struct PluginConfig { + name: String, + inherit_stdio: bool, + inherit_args: bool, + inherit_env: bool, + inherit_network: bool, + allow_ip_name_lookup: bool, + allow_http: bool, + allow_process: bool, + allow_remove_dir_all: bool, + + mounts: Vec, +} + +impl PluginConfig { + pub fn has_inherit_stdio(&self) -> bool { + 
self.inherit_stdio + } + pub fn has_inherit_args(&self) -> bool { + self.inherit_args + } + pub fn has_inherit_env(&self) -> bool { + self.inherit_env + } + pub fn has_inherit_network(&self) -> bool { + self.inherit_network + } + pub fn has_allow_ip_name_lookup(&self) -> bool { + self.allow_ip_name_lookup + } + pub fn _has_allow_http(&self) -> bool { + self.allow_http + } + pub fn _has_allow_process(&self) -> bool { + self.allow_process + } + pub fn _has_allow_remove_dir_all(&self) -> bool { + self.allow_remove_dir_all + } + pub fn get_mounts(&self) -> &[Mount] { + &self.mounts + } +} + +#[derive(Serialize, Deserialize)] +pub struct Mount { + host: String, + guest: String, +} + +impl Mount { + pub fn get_host(&self) -> &str { + &self.host + } + pub fn get_guest(&self) -> &str { + &self.guest + } +} + +impl PluginsConfig { + pub async fn parse() -> Result { + let path = Storage::wasm_plugins_config_file(); + if path.exists() { + Self::from_file(&path).await + } else { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).await?; + } + fs::write(&path, DEFAULT_PLUGINS_CONFIG).await?; + Self::from_file(&path).await + } + } + + pub fn find_config(&self, name: &str) -> Option<&PluginConfig> { + self.plugins + .iter() + .find(|plugin| match Regex::new(&plugin.name) { + Ok(regex) => regex.is_match(name), + Err(error) => { + warn!("Failed to compile plugin name regex: {}", error); + false + } + }) + } +} + +pub async fn verify_engine_config() -> Result { + let path = Storage::wasm_engine_config_file(); + if path.exists() { + Ok(path) + } else { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).await?; + } + fs::write(&path, DEFAULT_ENGINE_CONFIG).await?; + Ok(path) + } +} + +impl LoadFromTomlFile for PluginsConfig {} diff --git a/controller/src/application/plugin/runtime/wasm/epoch.rs b/controller/src/application/plugin/runtime/wasm/epoch.rs new file mode 100644 index 00000000..68501551 --- /dev/null +++ 
b/controller/src/application/plugin/runtime/wasm/epoch.rs @@ -0,0 +1,43 @@ +use std::{thread, time::Duration}; + +use simplelog::debug; +use wasmtime::{Engine, EngineWeak}; + +const INCRMENT_EPOCH_INTERVAL: Duration = Duration::from_millis(100); + +pub struct EpochInvoker { + engines: Vec, +} + +impl EpochInvoker { + #[must_use] + pub fn new() -> Self { + Self { engines: vec![] } + } + + pub fn push(&mut self, engine: &Engine) { + self.engines.push(engine.weak()); + } + + pub fn spawn(mut self) { + debug!( + "Starting epoch invoker to increment epoch every {:?}", + INCRMENT_EPOCH_INTERVAL + ); + thread::spawn(move || loop { + thread::sleep(INCRMENT_EPOCH_INTERVAL); + self.engines.retain(|engine| { + if let Some(engine) = engine.upgrade() { + engine.increment_epoch(); + true + } else { + false + } + }); + if self.engines.is_empty() { + debug!("All engines dropped, stopping epoch invoker"); + break; + } + }); + } +} diff --git a/controller/src/application/plugin/runtime/wasm/ext.rs b/controller/src/application/plugin/runtime/wasm/ext.rs new file mode 100644 index 00000000..b2c63b9e --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/ext.rs @@ -0,0 +1,30 @@ +use std::path::PathBuf; + +use crate::storage::Storage; + +use super::{ + generated::plugin::system::{ + self, + types::{Directory, Reference}, + }, + PluginState, +}; + +mod file; +mod http; +mod log; +mod platform; +pub mod process; +pub mod screen; + +impl system::types::Host for PluginState {} + +impl PluginState { + pub fn get_directory(name: &str, directory: &Directory) -> PathBuf { + match &directory.reference { + Reference::Controller => PathBuf::from(".").join(&directory.path), + Reference::Data => Storage::data_directory_for_plugin(name).join(&directory.path), + Reference::Configs => Storage::config_directory_for_plugin(name).join(&directory.path), + } + } +} diff --git a/controller/src/application/plugin/runtime/wasm/ext/file.rs b/controller/src/application/plugin/runtime/wasm/ext/file.rs 
new file mode 100644 index 00000000..50e1b89e --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/ext/file.rs @@ -0,0 +1,18 @@ +use anyhow::Result; +use tokio::fs::remove_dir_all; + +use crate::application::plugin::runtime::wasm::{ + generated::plugin::system::{ + self, + types::{Directory, ErrorMessage}, + }, + PluginState, +}; + +impl system::file::Host for PluginState { + async fn remove_dir_all(&mut self, directory: Directory) -> Result> { + Ok(remove_dir_all(Self::get_directory(&self.name, &directory)) + .await + .map_err(|error| format!("Failed to remove directory: {error}"))) + } +} diff --git a/controller/src/application/plugin/runtime/wasm/ext/http.rs b/controller/src/application/plugin/runtime/wasm/ext/http.rs new file mode 100644 index 00000000..0e13e280 --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/ext/http.rs @@ -0,0 +1,63 @@ +use anyhow::Result; +use simplelog::warn; +use tokio::task::spawn_blocking; + +use crate::application::plugin::runtime::wasm::{ + generated::plugin::system::{ + self, + http::{Header, Method, Response}, + }, + PluginState, +}; + +impl system::http::Host for PluginState { + // TODO: Rewrite this function to use the reqwest crate instead of minreq + async fn send_http_request( + &mut self, + method: Method, + url: String, + headers: Vec
, + body: Option>, + ) -> Result> { + let name = self.name.clone(); + Ok(spawn_blocking(move || { + let mut request = match method { + Method::Get => minreq::get(url), + Method::Patch => minreq::patch(url), + Method::Post => minreq::post(url), + Method::Put => minreq::put(url), + Method::Delete => minreq::delete(url), + }; + if let Some(body) = body { + request = request.with_body(body); + } + for header in headers { + request = request.with_header(&header.key, &header.value); + } + let response = match request.send() { + Ok(response) => response, + Err(error) => { + warn!("Failed to send HTTP request for plugin {}: {}", name, error); + return None; + } + }; + Some(Response { + #[allow(clippy::cast_sign_loss)] + status_code: response.status_code as u32, + reason_phrase: response.reason_phrase.clone(), + headers: response + .headers + .iter() + .map(|header| Header { + key: header.0.clone(), + value: header.1.clone(), + }) + .collect(), + bytes: response.into_bytes(), + }) + }) + .await + .ok() + .flatten()) + } +} diff --git a/controller/src/application/plugin/runtime/wasm/ext/log.rs b/controller/src/application/plugin/runtime/wasm/ext/log.rs new file mode 100644 index 00000000..2e447fa7 --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/ext/log.rs @@ -0,0 +1,20 @@ +use anyhow::Result; +use simplelog::{debug, error, info, warn}; + +use crate::application::plugin::runtime::wasm::{ + generated::plugin::system::{self, log::Level}, + PluginState, +}; + +impl system::log::Host for PluginState { + async fn log_string(&mut self, level: Level, message: String) -> Result<()> { + match level { + Level::Info => info!("[{}] {}", self.name.to_uppercase(), message), + Level::Warn => warn!("[{}] {}", self.name.to_uppercase(), message), + Level::Error => error!("[{}] {}", self.name.to_uppercase(), message), + Level::Debug => debug!("[{}] {}", self.name.to_uppercase(), message), + } + + Ok(()) + } +} diff --git 
a/controller/src/application/plugin/runtime/wasm/ext/platform.rs b/controller/src/application/plugin/runtime/wasm/ext/platform.rs new file mode 100644 index 00000000..6589bfc5 --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/ext/platform.rs @@ -0,0 +1,16 @@ +use anyhow::Result; + +use crate::application::plugin::runtime::wasm::{ + generated::plugin::system::{self, platform::Os}, + PluginState, +}; + +impl system::platform::Host for PluginState { + async fn get_os(&mut self) -> Result { + if cfg!(target_os = "windows") { + Ok(Os::Windows) + } else { + Ok(Os::Unix) + } + } +} diff --git a/controller/src/application/plugin/runtime/wasm/ext/process.rs b/controller/src/application/plugin/runtime/wasm/ext/process.rs new file mode 100644 index 00000000..162ca041 --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/ext/process.rs @@ -0,0 +1,230 @@ +use std::{collections::HashMap, os::unix::process::ExitStatusExt, process::Stdio}; + +use anyhow::Result; +use simplelog::debug; +use tokio::{ + io::{AsyncBufReadExt, AsyncWriteExt, BufReader, BufWriter}, + process::{Child, ChildStderr, ChildStdin, ChildStdout, Command}, + spawn, + sync::mpsc::{channel, Receiver}, + task::JoinHandle, +}; +use wasmtime::component::Resource; + +use crate::application::plugin::runtime::wasm::{ + generated::plugin::system::{ + self, + process::ExitStatus, + types::{Directory, ErrorMessage}, + }, + PluginState, +}; + +const STREAM_BUFFER: usize = 64; + +pub struct ProcessBuilder { + command: String, + args: Vec, + environment: HashMap, + directory: Option, +} +pub struct Process { + child: Child, + streams: Streams, +} + +struct Streams { + tasks: (JoinHandle<()>, JoinHandle<()>), + stdin: Option>, + receiver: Receiver, +} + +impl Streams { + pub fn new( + stdin: Option, + stdout: Option, + stderr: Option, + ) -> Self { + let stdin = stdin.map(BufWriter::new); + let stdout = stdout.map(BufReader::new); + let stderr = stderr.map(BufReader::new); + + let (sender, 
receiver) = channel(STREAM_BUFFER); + + let stdout_task = spawn(Self::handle_stream(stdout, sender.clone(), "stdout")); + let stderr_task = spawn(Self::handle_stream(stderr, sender, "stderr")); + + Self { + tasks: (stdout_task, stderr_task), + stdin, + receiver, + } + } + + pub fn abort(&self) { + self.tasks.0.abort(); + self.tasks.1.abort(); + } + + async fn handle_stream( + mut reader: Option>, + sender: tokio::sync::mpsc::Sender, + stream_name: &str, + ) where + R: tokio::io::AsyncRead + Unpin, + { + if let Some(reader) = reader.as_mut() { + let mut buffer = String::new(); + loop { + match reader.read_line(&mut buffer).await { + Ok(0) => break, // EOF reached + Ok(_) => { + if let Err(error) = sender.send(buffer.clone()).await { + debug!("Failed to send {} line: {}", stream_name, error); + break; + } + buffer.clear(); + } + Err(error) => { + debug!("Error reading from {}: {}", stream_name, error); + break; + } + } + } + } + } +} + +impl system::process::Host for PluginState {} + +impl system::process::HostProcessBuilder for PluginState { + async fn new(&mut self, command: String) -> Result> { + Ok(self.resources.push(ProcessBuilder { + command, + args: Vec::new(), + environment: HashMap::new(), + directory: None, + })?) + } + async fn args(&mut self, instance: Resource, args: Vec) -> Result<()> { + self.resources.get_mut(&instance)?.args.extend(args); + Ok(()) + } + async fn environment( + &mut self, + instance: Resource, + environment: Vec<(String, String)>, + ) -> Result<()> { + self.resources + .get_mut(&instance)? 
+ .environment + .extend(environment); + Ok(()) + } + async fn directory( + &mut self, + instance: Resource, + directory: Directory, + ) -> Result<()> { + self.resources.get_mut(&instance)?.directory = Some(directory); + Ok(()) + } + async fn spawn( + &mut self, + instance: Resource, + ) -> Result, ErrorMessage>> { + let builder = self.resources.get(&instance)?; + debug!("Spawning process: {} {:?}", builder.command, builder.args); + + let mut command = Command::new(&builder.command); + if let Some(directory) = &builder.directory { + command.current_dir(Self::get_directory(&self.name, directory)); + } + command + .args(&builder.args) + .envs(&builder.environment) + .stdin(Stdio::piped()) + .stdout(Stdio::piped()) + .stderr(Stdio::piped()); + let mut child = match command.spawn() { + Ok(child) => child, + Err(error) => return Ok(Err(format!("Failed to spawn process: {error}"))), + }; + + let streams = Streams::new(child.stdin.take(), child.stdout.take(), child.stderr.take()); + Ok(Ok(self.resources.push(Process { child, streams })?)) + } + async fn drop(&mut self, instance: Resource) -> Result<()> { + self.resources.delete(instance)?; + Ok(()) + } +} + +impl system::process::HostProcess for PluginState { + async fn kill(&mut self, instance: Resource) -> Result> { + Ok(self + .resources + .get_mut(&instance)? + .child + .kill() + .await + .map_err(|error| format!("Failed to kill process: {error}"))) + } + async fn try_wait( + &mut self, + instance: Resource, + ) -> Result, ErrorMessage>> { + Ok(self + .resources + .get_mut(&instance)? 
+ .child + .try_wait() + .map(|status| { + status.map(|status| { + if let Some(code) = status.code() { + ExitStatus::Code(code) + } else if let Some(signal) = status.signal() { + ExitStatus::Signal(signal) + } else { + ExitStatus::Unknown + } + }) + }) + .map_err(|error| format!("Failed to try waiting for process: {error}"))) + } + async fn read_lines(&mut self, instance: Resource) -> Result> { + let process = self.resources.get_mut(&instance)?; + let mut lines = vec![]; + while let Ok(line) = process.streams.receiver.try_recv() { + lines.push(line); + } + Ok(lines) + } + async fn write_all( + &mut self, + instance: Resource, + data: Vec, + ) -> Result> { + if let Some(stdin) = &mut self.resources.get_mut(&instance)?.streams.stdin { + return Ok(stdin + .write_all(&data) + .await + .map_err(|error| format!("Failed to write to process: {error}"))); + } + Ok(Err("Process stdin is not available".to_string())) + } + async fn flush(&mut self, instance: Resource) -> Result> { + if let Some(stdin) = &mut self.resources.get_mut(&instance)?.streams.stdin { + return Ok(stdin + .flush() + .await + .map_err(|error| format!("Failed to write to process: {error}"))); + } + Ok(Err("Process stdin is not available".to_string())) + } + async fn drop(&mut self, instance: Resource) -> Result<()> { + let process = self.resources.delete(instance)?; + process.streams.abort(); + Ok(()) + } +} diff --git a/controller/src/application/plugin/runtime/wasm/ext/screen.rs b/controller/src/application/plugin/runtime/wasm/ext/screen.rs new file mode 100644 index 00000000..cb725c62 --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/ext/screen.rs @@ -0,0 +1,125 @@ +use std::{mem::replace, sync::Arc}; + +use anyhow::{anyhow, Result}; +use simplelog::error; +use tokio::{spawn, sync::Mutex}; +use tonic::async_trait; +use wasmtime::{component::ResourceAny, AsContextMut, Store}; + +use crate::application::{ + plugin::runtime::wasm::{ + generated::{self, 
exports::plugin::system::bridge::ScreenType}, + PluginState, + }, + server::screen::{GenericScreen, ScreenError, ScreenPullJoinHandle, ScreenWriteJoinHandle}, +}; + +pub struct PluginScreen { + dropped: bool, + + bindings: Arc, + store: Arc>>, + instance: ScreenType, +} + +impl PluginScreen { + pub fn new( + bindings: Arc, + store: Arc>>, + instance: ScreenType, + ) -> Self { + Self { + dropped: false, + bindings, + store, + instance, + } + } + + fn get( + &self, + ) -> ( + Arc, + Arc>>, + Option, + ) { + ( + self.bindings.clone(), + self.store.clone(), + match self.instance { + ScreenType::Supported(instance) => Some(instance), + ScreenType::Unsupported => None, + }, + ) + } +} + +#[async_trait] +impl GenericScreen for PluginScreen { + fn is_supported(&self) -> bool { + match &self.instance { + ScreenType::Unsupported => false, + ScreenType::Supported(_) => true, + } + } + + fn pull(&self) -> ScreenPullJoinHandle { + let (bindings, store, instance) = self.get(); + let Some(instance) = instance else { + return spawn(async { Err(ScreenError::Unsupported) }); + }; + spawn(async move { + match bindings + .plugin_system_screen() + .generic_screen() + .call_pull(store.lock().await.as_context_mut(), instance) + .await + .map_err(ScreenError::Error)? + { + Ok(result) => Ok(result), + Err(error) => Err(ScreenError::Error(anyhow!(error))), + } + }) + } + + fn write(&self, data: &[u8]) -> ScreenWriteJoinHandle { + let (bindings, store, instance) = self.get(); + let Some(instance) = instance else { + return spawn(async { Err(ScreenError::Unsupported) }); + }; + let data = data.to_vec(); + spawn(async move { + match bindings + .plugin_system_screen() + .generic_screen() + .call_write(store.lock().await.as_context_mut(), instance, &data) + .await + .map_err(ScreenError::Error)? 
+ { + Ok(result) => Ok(result), + Err(error) => Err(ScreenError::Error(anyhow!(error))), + } + }) + } + + async fn cleanup(&mut self) -> Result<()> { + if let ScreenType::Supported(instance) = + replace(&mut self.instance, ScreenType::Unsupported) + { + instance + .resource_drop_async(self.store.lock().await.as_context_mut()) + .await?; + } + self.dropped = true; + + Ok(()) + } +} + +impl Drop for PluginScreen { + fn drop(&mut self) { + if !self.dropped { + error!("Resource was not dropped before being deallocated (memory leak)"); + } + } +} diff --git a/controller/src/application/plugin/runtime/wasm/init.rs b/controller/src/application/plugin/runtime/wasm/init.rs new file mode 100644 index 00000000..e2aedeea --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/init.rs @@ -0,0 +1,228 @@ +use std::{collections::HashMap, path::Path, sync::Arc}; + +use anyhow::Result; +use common::error::FancyError; +use simplelog::{error, info, warn}; +use tokio::{fs, sync::Mutex}; +use wasmtime::{ + component::{Component, Linker}, + Engine, Store, +}; +use wasmtime_wasi::{DirPerms, FilePerms, ResourceTable, WasiCtxBuilder}; + +use crate::{ + application::plugin::{runtime::source::Source, BoxedPlugin, GenericPlugin}, + config::Config, + storage::Storage, +}; + +use super::{ + config::{verify_engine_config, PluginsConfig}, + epoch::EpochInvoker, + generated, Plugin, PluginState, +}; + +pub async fn init_wasm_plugins( + global_config: &Config, + plugins: &mut HashMap, +) -> Result<()> { + // Verify and load required configuration files + verify_engine_config().await?; + let plugins_config = PluginsConfig::parse().await?; + + let directory = Storage::plugins_directory(); + if !directory.exists() { + fs::create_dir_all(&directory).await?; + } + + let amount = plugins.len(); + let mut invoker = EpochInvoker::new(); + for (path, _, name) in Storage::for_each_content(&directory).await? 
{ + if !path + .extension() + .is_some_and(|ext| ext.eq_ignore_ascii_case("wasm")) + { + continue; + } + + let source = match Source::from_file(&path) { + Ok(source) => source, + Err(error) => { + error!( + "Failed to read source code for plugin {} from file({:?}): {}", + name, path, error + ); + continue; + } + }; + + let config_directory = Storage::config_directory_for_plugin(&name); + let data_directory = Storage::data_directory_for_plugin(&name); + if !config_directory.exists() { + fs::create_dir_all(&config_directory) + .await + .unwrap_or_else(|error| { + warn!( + "Failed to create configs directory for plugin {}: {}", + name, error + ); + }); + } + if !data_directory.exists() { + fs::create_dir_all(&data_directory) + .await + .unwrap_or_else(|error| { + warn!( + "Failed to create data directory for plugin {}: {}", + name, error + ); + }); + } + + info!("Compiling plugin {}...", name); + let plugin = Plugin::new( + &name, + &source, + global_config, + &plugins_config, + &data_directory, + &config_directory, + &mut invoker, + ) + .await; + match plugin { + Ok(mut plugin) => match plugin.init().await { + Ok(information) => { + if information.ready { + info!( + "Loaded plugin {} v{} by {}", + name, + information.version, + information.authors.join(", ") + ); + plugins.insert(name, Box::new(plugin)); + } else { + warn!("Plugin {} marked itself as not ready, skipping...", name); + if let Err(error) = plugin.cleanup().await { + error!("Failed to drop resources for plugin {}: {}", name, error); + FancyError::print_fancy(&error, false); + } + } + } + Err(error) => { + error!("Failed to initialize plugin {}: {}", name, error); + FancyError::print_fancy(&error, false); + } + }, + Err(error) => { + error!( + "Failed to compile plugin {} at location {}: {}", + name, source, error + ); + FancyError::print_fancy(&error, false); + } + } + } + + if amount == plugins.len() { + warn!("The Wasm plugins feature is enabled, but no Wasm plugins were loaded."); + } else { + 
invoker.spawn(); + } + + Ok(()) +} + +impl Plugin { + async fn new( + name: &str, + source: &Source, + global_config: &Config, + plugins_config: &PluginsConfig, + data_directory: &Path, + config_directory: &Path, + invoker: &mut EpochInvoker, + ) -> Result { + let mut engine_config = wasmtime::Config::new(); + engine_config + .wasm_component_model(true) + .async_support(true) + .epoch_interruption(true); + if let Err(error) = engine_config.cache_config_load(Storage::wasm_engine_config_file()) { + warn!("Failed to enable caching for wasmtime engine: {}", error); + } + + let engine = Engine::new(&engine_config)?; + let component = Component::from_binary(&engine, source.get_source())?; + + let mut linker = Linker::new(&engine); + wasmtime_wasi::add_to_linker_async(&mut linker)?; + generated::Plugin::add_to_linker(&mut linker, |state: &mut PluginState| state)?; + + let mut wasi = WasiCtxBuilder::new(); + if let Some(config) = plugins_config.find_config(name) { + if config.has_inherit_stdio() { + wasi.inherit_stdio(); + } + if config.has_inherit_args() { + wasi.inherit_args(); + } + if config.has_inherit_env() { + wasi.inherit_env(); + } + if config.has_inherit_network() { + wasi.inherit_network(); + } + if config.has_allow_ip_name_lookup() { + wasi.allow_ip_name_lookup(true); + } + for mount in config.get_mounts() { + wasi.preopened_dir( + mount.get_host(), + mount.get_guest(), + DirPerms::all(), + FilePerms::all(), + )?; + } + } + let wasi = wasi + .preopened_dir( + config_directory, + "/configs", + DirPerms::all(), + FilePerms::all(), + )? + .preopened_dir(data_directory, "/data", DirPerms::all(), FilePerms::all())? 
+ .build(); + + let resources = ResourceTable::new(); + let mut store = Store::new( + &engine, + PluginState { + name: name.to_string(), + wasi, + resources, + }, + ); + store.epoch_deadline_async_yield_and_update(2); + + let bindings = + generated::Plugin::instantiate_async(&mut store, &component, &linker).await?; + let instance = bindings + .plugin_system_bridge() + .generic_plugin() + .call_constructor(&mut store, global_config.identifier()) + .await?; + + // Start thread that calls the increment epoch function + invoker.push(&engine); + + Ok(Plugin { + dropped: false, + engine, + bindings: Arc::new(bindings), + store: Arc::new(Mutex::new(store)), + instance, + }) + } +} diff --git a/controller/src/application/plugin/runtime/wasm/node.rs b/controller/src/application/plugin/runtime/wasm/node.rs new file mode 100644 index 00000000..932d0b4d --- /dev/null +++ b/controller/src/application/plugin/runtime/wasm/node.rs @@ -0,0 +1,276 @@ +use std::sync::Arc; + +use anyhow::{anyhow, Result}; +use common::network::HostAndPort; +use simplelog::error; +use tokio::{spawn, sync::Mutex, task::JoinHandle}; +use tonic::async_trait; +use wasmtime::{component::ResourceAny, AsContextMut, Store}; + +use crate::application::{ + node::Allocation, + plugin::{BoxedScreen, GenericNode}, + server::{manager::StartRequest, DiskRetention, Resources, Server, Spec}, +}; + +use super::{ + ext::screen::PluginScreen, + generated::{self, exports::plugin::system::bridge}, + PluginState, +}; + +pub struct PluginNode { + dropped: bool, + + bindings: Arc, + store: Arc>>, + instance: ResourceAny, +} + +impl PluginNode { + pub fn new( + bindings: Arc, + store: Arc>>, + instance: ResourceAny, + ) -> Self { + Self { + dropped: false, + bindings, + store, + instance, + } + } + + fn get( + &self, + ) -> ( + Arc, + Arc>>, + ResourceAny, + ) { + (self.bindings.clone(), self.store.clone(), self.instance) + } +} + +#[async_trait] +impl GenericNode for PluginNode { + fn tick(&self) -> JoinHandle> { + let 
(bindings, store, instance) = self.get(); + spawn(async move { + match bindings + .plugin_system_bridge() + .generic_node() + .call_tick(store.lock().await.as_context_mut(), instance) + .await + { + Ok(result) => result.map_err(|errors| { + anyhow!(errors + .iter() + .map(|error| format!("Scope: {}, Message: {}", error.scope, error.message)) + .collect::>() + .join("\n")) + }), + Err(error) => Err(error), + } + }) + } + + fn allocate(&self, request: &StartRequest) -> JoinHandle>> { + let proposal: bridge::ServerProposal = request.into(); + + let (bindings, store, instance) = self.get(); + spawn(async move { + match bindings + .plugin_system_bridge() + .generic_node() + .call_allocate(store.lock().await.as_context_mut(), instance, &proposal) + .await + { + Ok(result) => result + .map(|ports| { + ports + .into_iter() + .map(|port| HostAndPort::new(port.host, port.port)) + .collect() + }) + .map_err(|error| anyhow!(error)), + Err(error) => Err(error), + } + }) + } + + fn free(&self, ports: &[HostAndPort]) -> JoinHandle> { + let ports = ports + .iter() + .map(std::convert::Into::into) + .collect::>(); + + let (bindings, store, instance) = self.get(); + spawn(async move { + match bindings + .plugin_system_bridge() + .generic_node() + .call_free(store.lock().await.as_context_mut(), instance, &ports) + .await + { + Ok(()) => Ok(()), + Err(error) => Err(error), + } + }) + } + + fn start(&self, server: &Server) -> JoinHandle> { + let server = server.into(); + + let (bindings, store, instance) = self.get(); + spawn(async move { + match bindings + .plugin_system_bridge() + .generic_node() + .call_start(store.lock().await.as_context_mut(), instance, &server) + .await + { + Ok(screen) => Ok(Box::new(PluginScreen::new( + bindings.clone(), + store.clone(), + screen, + )) as BoxedScreen), + Err(error) => Err(error), + } + }) + } + + fn restart(&self, server: &Server) -> JoinHandle> { + let server = server.into(); + + let (bindings, store, instance) = self.get(); + spawn(async move 
{ + match bindings + .plugin_system_bridge() + .generic_node() + .call_restart(store.lock().await.as_context_mut(), instance, &server) + .await + { + Ok(()) => Ok(()), + Err(error) => Err(error), + } + }) + } + + fn stop(&self, server: &Server) -> JoinHandle> { + let server = server.into(); + + let (bindings, store, instance) = self.get(); + spawn(async move { + match bindings + .plugin_system_bridge() + .generic_node() + .call_stop(store.lock().await.as_context_mut(), instance, &server) + .await + { + Ok(()) => Ok(()), + Err(error) => Err(error), + } + }) + } + + async fn cleanup(&mut self) -> Result<()> { + self.instance + .resource_drop_async(self.store.lock().await.as_context_mut()) + .await?; + self.dropped = true; + + Ok(()) + } +} + +impl Drop for PluginNode { + fn drop(&mut self) { + if !self.dropped { + error!("Resource was not dropped before being deallocated (memory leak)"); + } + } +} + +impl From<&HostAndPort> for bridge::Address { + fn from(val: &HostAndPort) -> Self { + bridge::Address { + host: val.host.clone(), + port: val.port, + } + } +} + +impl From<&DiskRetention> for bridge::DiskRetention { + fn from(val: &DiskRetention) -> Self { + match val { + DiskRetention::Permanent => bridge::DiskRetention::Permanent, + DiskRetention::Temporary => bridge::DiskRetention::Temporary, + } + } +} + +impl From<&Spec> for bridge::Spec { + fn from(val: &Spec) -> Self { + bridge::Spec { + settings: val + .settings() + .iter() + .map(|(key, value)| (key.clone(), value.clone())) + .collect(), + environment: val + .environment() + .iter() + .map(|(key, value)| (key.clone(), value.clone())) + .collect(), + disk_retention: val.disk_retention().into(), + image: val.image().clone(), + } + } +} + +impl From<&Resources> for bridge::Resources { + fn from(val: &Resources) -> Self { + bridge::Resources { + memory: *val.memory(), + swap: *val.swap(), + cpu: *val.cpu(), + io: *val.io(), + disk: *val.disk(), + ports: *val.ports(), + } + } +} + +impl From<&Allocation> for 
bridge::Allocation { + fn from(val: &Allocation) -> Self { + bridge::Allocation { + ports: val.ports.iter().map(std::convert::Into::into).collect(), + resources: val.resources().into(), + spec: (&val.spec).into(), + } + } +} + +impl From<&Server> for bridge::Server { + fn from(val: &Server) -> Self { + bridge::Server { + name: val.id().name().clone(), + uuid: val.id().uuid().to_string(), + group: val.group().clone(), + allocation: val.allocation().into(), + token: val.token().clone(), + } + } +} + +impl From<&StartRequest> for bridge::ServerProposal { + fn from(val: &StartRequest) -> Self { + bridge::ServerProposal { + name: val.id().name().clone(), + group: val.group().clone(), + resources: val.resources().into(), + spec: val.spec().into(), + } + } +} diff --git a/controller/src/application/server.rs b/controller/src/application/server.rs new file mode 100644 index 00000000..d529eab8 --- /dev/null +++ b/controller/src/application/server.rs @@ -0,0 +1,221 @@ +use std::{ + collections::HashMap, + fmt::{self, Display, Formatter}, + time::Duration, +}; + +use getset::{Getters, MutGetters, Setters}; +use serde::{Deserialize, Serialize}; +use tokio::time::Instant; +use uuid::Uuid; + +use crate::network::client::TransferMsg; + +use super::node::Allocation; + +pub mod manager; +pub mod screen; + +#[derive(Getters, Setters, MutGetters)] +pub struct Server { + /* Settings */ + #[getset(get = "pub")] + id: NameAndUuid, + #[getset(get = "pub")] + group: Option, + #[getset(get = "pub")] + node: String, + #[getset(get = "pub")] + allocation: Allocation, + #[getset(get = "pub")] + token: String, + + /* Users */ + #[getset(get = "pub", set = "pub")] + connected_users: u32, + + /* States */ + #[getset(get = "pub", get_mut = "pub")] + heart: Heart, + #[getset(get = "pub", get_mut = "pub")] + flags: Flags, + #[getset(get = "pub", get_mut = "pub", set = "pub")] + state: State, + #[getset(get = "pub", set = "pub")] + ready: bool, +} + +#[derive(Clone, Getters, MutGetters)] +pub struct 
NameAndUuid { + #[getset(get = "pub", get_mut = "pub")] + name: String, + #[getset(get = "pub", get_mut = "pub")] + uuid: Uuid, +} + +#[derive(Serialize, Deserialize, Clone, Default, Getters)] +pub struct Resources { + #[getset(get = "pub")] + memory: u32, + #[getset(get = "pub")] + swap: u32, + #[getset(get = "pub")] + cpu: u32, + #[getset(get = "pub")] + io: u32, + #[getset(get = "pub")] + disk: u32, + #[getset(get = "pub")] + ports: u32, +} + +#[derive(Serialize, Deserialize, Clone, Default)] +pub enum DiskRetention { + #[serde(rename = "temporary")] + #[default] + Temporary, + #[serde(rename = "permanent")] + Permanent, +} + +#[derive(PartialEq, Clone)] +pub enum State { + Starting, + Restarting, + Running, + Stopping, +} + +#[derive(Serialize, Deserialize, Clone, Default, Getters)] +pub struct FallbackPolicy { + #[getset(get = "pub")] + enabled: bool, + #[getset(get = "pub")] + priority: i32, +} + +#[derive(Serialize, Deserialize, Clone, Default, Getters)] +pub struct Spec { + #[getset(get = "pub")] + settings: HashMap, + #[getset(get = "pub")] + environment: HashMap, + #[getset(get = "pub")] + disk_retention: DiskRetention, + #[getset(get = "pub")] + image: String, + + #[getset(get = "pub")] + max_players: u32, + #[getset(get = "pub")] + fallback: FallbackPolicy, +} + +pub struct Heart { + next_beat: Instant, + timeout: Duration, +} + +impl Server { + pub fn new_transfer(&self, user: &Uuid) -> Option { + let port = self.allocation.primary_port()?; + Some(TransferMsg { + id: user.to_string(), + host: port.host.clone(), + port: u32::from(port.port), + }) + } +} + +#[derive(Default)] +pub struct Flags { + /* Required for the group system */ + pub stop: Option, +} + +impl Flags { + pub fn is_stop_set(&self) -> bool { + self.stop.is_some() + } + pub fn should_stop(&self) -> bool { + self.stop.is_some_and(|stop| stop < Instant::now()) + } + pub fn replace_stop(&mut self, timeout: Duration) { + self.stop = Some(Instant::now() + timeout); + } + pub fn clear_stop(&mut 
self) { + self.stop = None; + } +} + +impl Heart { + pub fn new(startup_time: Duration, timeout: Duration) -> Self { + Self { + next_beat: Instant::now() + startup_time, + timeout, + } + } + pub fn beat(&mut self) { + self.next_beat = Instant::now() + self.timeout; + } + pub fn is_dead(&self) -> bool { + Instant::now() > self.next_beat + } +} + +impl NameAndUuid { + pub fn generate(name: String) -> Self { + Self { + name, + uuid: Uuid::new_v4(), + } + } + pub fn new(name: String, uuid: Uuid) -> Self { + Self { name, uuid } + } +} + +impl Resources { + pub fn new(memory: u32, swap: u32, cpu: u32, io: u32, disk: u32, ports: u32) -> Self { + Self { + memory, + swap, + cpu, + io, + disk, + ports, + } + } +} + +impl FallbackPolicy { + pub fn new(enabled: bool, priority: i32) -> Self { + Self { enabled, priority } + } +} + +impl Spec { + pub fn new( + settings: HashMap, + environment: HashMap, + disk_retention: DiskRetention, + image: String, + max_players: u32, + fallback: FallbackPolicy, + ) -> Self { + Self { + settings, + environment, + disk_retention, + image, + max_players, + fallback, + } + } +} + +impl Display for NameAndUuid { + fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result { + write!(f, "{}", self.name) + } +} diff --git a/controller/src/application/server/manager.rs b/controller/src/application/server/manager.rs new file mode 100644 index 00000000..dd586484 --- /dev/null +++ b/controller/src/application/server/manager.rs @@ -0,0 +1,337 @@ +use std::{ + collections::{BinaryHeap, HashMap}, + sync::Arc, +}; + +use anyhow::Result; +use common::network::HostAndPort; +use getset::Getters; +use simplelog::{info, warn}; +use tokio::{task::JoinHandle, time::Instant}; +use uuid::Uuid; + +use crate::{ + application::{ + group::manager::GroupManager, node::manager::NodeManager, user::manager::UserManager, + OptVoter, Shared, Voter, + }, + config::Config, +}; + +use super::{screen::BoxedScreen, NameAndUuid, Resources, Server, Spec, State}; + +mod action; +mod 
restart; +mod start; +mod stop; + +pub struct ServerManager { + voter: OptVoter, + + /* Servers */ + servers: HashMap, + + /* Requests */ + start_requests: BinaryHeap, + restart_requests: Vec, + stop_requests: Vec, +} + +impl ServerManager { + pub fn init() -> Self { + Self { + voter: None, + servers: HashMap::new(), + start_requests: BinaryHeap::new(), + restart_requests: vec![], + stop_requests: vec![], + } + } + + pub fn is_node_used(&self, name: &str) -> bool { + self.servers.values().any(|server| server.node == name) + } + + pub fn find_fallback_server(&self, ignore: &Uuid) -> Option<&Server> { + self.servers + .values() + .filter(|server| { + server.id.uuid() != ignore + && server.ready + && server.state == State::Running + && server.allocation.spec.fallback.enabled + }) + .max_by_key(|server| server.allocation.spec.fallback.priority) + } + + pub fn get_servers(&self) -> Vec<&Server> { + self.servers.values().collect() + } + + pub fn get_server(&self, uuid: &Uuid) -> Option<&Server> { + self.servers.get(uuid) + } + pub fn get_server_mut(&mut self, uuid: &Uuid) -> Option<&mut Server> { + self.servers.get_mut(uuid) + } + + pub fn resolve_server(&self, uuid: &Uuid) -> Option { + self.servers.get(uuid).map(|server| server.id.clone()) + } + + pub fn cancel_start(&mut self, uuid: &Uuid) { + self.start_requests + .retain(|request| request.id.uuid() != uuid); + } + + pub fn schedule_start(&mut self, request: StartRequest) { + if self.voter.is_some() { + warn!( + "Ignoring start request for server {} as the server manager is shutting down.", + request.id + ); + return; + } + self.start_requests.push(request); + } + pub fn _schedule_restart(&mut self, request: RestartRequest) { + self.restart_requests.push(request); + } + pub fn schedule_stop(&mut self, request: StopRequest) { + self.stop_requests.push(request); + } + pub fn schedule_stops(&mut self, requests: Vec) { + self.stop_requests.extend(requests); + } +} + +// Ticking +impl ServerManager { + 
#[allow(clippy::too_many_lines)] + pub async fn tick( + &mut self, + config: &Config, + nodes: &NodeManager, + groups: &mut GroupManager, + users: &mut UserManager, + shared: &Arc, + ) -> Result<()> { + // Check health of servers + for server in self.servers.values() { + if server.heart.is_dead() { + match server.state { + State::Starting | State::Running => { + warn!("Unit {} failed to establish online status within the expected startup time of {:.2?}.", server.id, config.restart_timeout()); + } + _ => { + warn!("Server {} has not checked in for {:.2?}, indicating a potential error.", server.id, server.heart.timeout); + } + } + self.restart_requests + .push(RestartRequest::new(None, server.id().clone())); + } + } + + // Stop all servers that have been requested to stop + { + let mut requests = Vec::with_capacity(self.stop_requests.len()); + for mut request in self.stop_requests.drain(..) { + if Self::handle_stop_request( + &mut request, + &mut self.servers, + nodes, + groups, + users, + shared, + ) + .await? + { + requests.push(request); + } + } + self.stop_requests.extend(requests); + } + + // Restart all servers that have been requested to restart + { + let mut requests = Vec::with_capacity(self.restart_requests.len()); + for mut request in self.restart_requests.drain(..) { + if Self::handle_restart_request(&mut request, &mut self.servers, config, nodes) + .await? + { + requests.push(request); + } + } + self.restart_requests.extend(requests); + } + + // Start all servers that have been requested to start + { + let mut requests = Vec::with_capacity(self.start_requests.len()); + for mut request in self.start_requests.drain_sorted() { + if Self::handle_start_request( + &mut request, + &mut self.servers, + config, + nodes, + groups, + shared, + ) + .await? + { + requests.push(request); + } + } + self.start_requests.extend(requests); + } + + if let Some(voter) = &mut self.voter { + if self.servers.is_empty() && voter.vote() { + info!("All servers have been stopped. 
Ready to stop..."); + } + } + + Ok(()) + } + + #[allow(clippy::unnecessary_wraps, clippy::unused_self)] + pub fn shutdown(&mut self, voter: Voter) -> Result<()> { + self.voter = Some(voter); + + info!("Canceling all start requests..."); + self.start_requests.clear(); + self.restart_requests.clear(); + info!("Shutting down all servers..."); + let mut requests = Vec::with_capacity(self.servers.len()); + for server in self.servers.values() { + requests.push(StopRequest::new(None, server.id().clone())); + } + self.schedule_stops(requests); + + Ok(()) + } + + #[allow(clippy::unnecessary_wraps, clippy::unused_self)] + pub fn cleanup(&mut self) -> Result<()> { + Ok(()) + } +} + +#[derive(Getters)] +pub struct StartRequest { + /* Request */ + when: Option, + + /* Server */ + #[getset(get = "pub")] + id: NameAndUuid, + #[getset(get = "pub")] + group: Option, + #[getset(get = "pub")] + nodes: Vec, + #[getset(get = "pub")] + resources: Resources, + #[getset(get = "pub")] + spec: Spec, + #[getset(get = "pub")] + priority: i32, + + /* Stage */ + #[getset(get = "pub")] + stage: StartStage, +} + +#[derive(Getters)] +pub struct RestartRequest { + /* Request */ + when: Option, + server: NameAndUuid, + + /* Stage */ + #[getset(get = "pub")] + stage: ActionStage, +} + +#[derive(Getters)] +pub struct StopRequest { + /* Request */ + when: Option, + server: NameAndUuid, + + /* Stage */ + #[getset(get = "pub")] + stage: ActionStage, +} + +enum ActionStage { + Queued, + Freeing(JoinHandle>), + Running(JoinHandle>), +} + +enum StartStage { + Queued, + Allocating((usize, JoinHandle>>)), + Creating(JoinHandle>), +} + +impl StartRequest { + pub fn new( + when: Option, + priority: i32, + name: String, + group: Option, + nodes: &[String], + resources: &Resources, + spec: &Spec, + ) -> Self { + Self { + id: NameAndUuid::generate(name), + when, + priority, + group, + nodes: nodes.to_vec(), + resources: resources.clone(), + spec: spec.clone(), + stage: StartStage::Queued, + } + } +} + +impl 
RestartRequest { + pub fn new(when: Option, server: NameAndUuid) -> Self { + Self { + when, + server, + stage: ActionStage::Queued, + } + } +} + +impl StopRequest { + pub fn new(when: Option, server: NameAndUuid) -> Self { + Self { + when, + server, + stage: ActionStage::Queued, + } + } +} + +impl Ord for StartRequest { + fn cmp(&self, other: &Self) -> std::cmp::Ordering { + self.priority.cmp(&other.priority) + } +} +impl PartialOrd for StartRequest { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Eq for StartRequest {} +impl PartialEq for StartRequest { + fn eq(&self, other: &Self) -> bool { + self.priority == other.priority + } +} diff --git a/controller/src/application/server/manager/action.rs b/controller/src/application/server/manager/action.rs new file mode 100644 index 00000000..4758b14f --- /dev/null +++ b/controller/src/application/server/manager/action.rs @@ -0,0 +1,184 @@ +use std::{collections::HashMap, sync::Arc}; + +use anyhow::{anyhow, Result}; +use common::network::HostAndPort; +use simplelog::{error, warn}; +use tokio::task::JoinHandle; +use uuid::Uuid; + +use crate::{ + application::{ + group::manager::GroupManager, + node::{manager::NodeManager, Allocation}, + server::{screen::BoxedScreen, Flags, Heart, Server, State}, + user::manager::UserManager, + Shared, + }, + config::Config, +}; + +use super::{RestartRequest, ServerManager, StartRequest, StopRequest}; + +impl ServerManager { + pub fn allocate( + index: usize, + request: &StartRequest, + nodes: &NodeManager, + ) -> Result>>> { + if let Some(name) = request.nodes.get(index) { + let node = nodes.get_node(name); + if let Some(node) = node { + Ok(node.allocate(request)) + } else { + Err(anyhow!( + "Node {} not found while trying to allocate ports for server {}", + name, + request.id + )) + } + } else { + Err(anyhow!( + "Index of node in request is out of bounds. Has someone tampered with the request?" 
+ )) + } + } + #[allow(clippy::too_many_arguments)] + pub async fn start( + index: usize, + request: &StartRequest, + ports: Vec, + servers: &mut HashMap, + config: &Config, + nodes: &NodeManager, + groups: &mut GroupManager, + shared: &Arc, + ) -> Result>> { + if let Some(name) = request.nodes.get(index) { + let node = nodes.get_node(name); + if let Some(node) = node { + let mut server = Server { + id: request.id.clone(), + group: request.group.clone(), + node: name.clone(), + allocation: Allocation { + ports, + resources: request.resources.clone(), + spec: request.spec.clone(), + }, + connected_users: 0, + token: shared.auth.register_server(request.id.uuid).await, + heart: Heart::new(*config.startup_timeout(), *config.heartbeat_timeout()), + state: State::Starting, + flags: Flags::default(), + ready: false, + }; + let handle = node.start(&server); + if let Some(group) = &server.group { + if let Some(group) = groups.get_group_mut(group) { + group.set_server_active(&server.id); + } else { + warn!("Group {} not found while trying to start server {}. Removing group from server", group, request.id); + server.group = None; + } + } + servers.insert(server.id.uuid, server); + Ok(handle) + } else { + Err(anyhow!( + "Node {} not found while trying to allocate ports for server {}", + name, + request.id + )) + } + } else { + Err(anyhow!( + "Index of node in request is out of bounds. Has someone tampered with the request?" 
+ )) + } + } + pub fn restart( + request: &RestartRequest, + servers: &mut HashMap, + config: &Config, + nodes: &NodeManager, + ) -> Result>> { + if let Some(server) = servers.get_mut(request.server.uuid()) { + if let Some(node) = nodes.get_node(&server.node) { + server.state = State::Restarting; + server.heart = Heart::new(*config.startup_timeout(), *config.heartbeat_timeout()); + Ok(node.restart(server)) + } else { + Err(anyhow!( + "Node {} not found while trying to restart {}", + server.node, + request.server + )) + } + } else { + Err(anyhow!( + "Server {} not found while trying to restart", + request.server + )) + } + } + pub fn free( + request: &StopRequest, + servers: &mut HashMap, + nodes: &NodeManager, + ) -> Result>> { + if let Some(server) = servers.get_mut(request.server.uuid()) { + server.state = State::Stopping; + if let Some(node) = nodes.get_node(&server.node) { + Ok(node.free(&server.allocation.ports)) + } else { + Err(anyhow!( + "Node {} not found while trying to free resources {}", + server.node, + request.server + )) + } + } else { + Err(anyhow!( + "Server {} not found while trying to free resources", + request.server + )) + } + } + pub async fn stop( + request: &StopRequest, + servers: &mut HashMap, + nodes: &NodeManager, + groups: &mut GroupManager, + users: &mut UserManager, + shared: &Arc, + ) -> Result>> { + if let Some(server) = servers.get_mut(request.server.uuid()) { + if let Some(node) = nodes.get_node(&server.node) { + if let Some(group) = &server.group { + if let Some(group) = groups.get_group_mut(group) { + group.remove_server(server.id.uuid()); + } else { + error!("Group {} not found while trying to stop server {}. 
Removing group from server", group, server.id); + server.group = None; + } + } + shared.auth.unregister(&server.token).await; + + users.remove_users_on_server(server.id.uuid()); + + Ok(node.stop(server)) + } else { + Err(anyhow!( + "Node {} not found while trying to stop {}", + server.node, + request.server + )) + } + } else { + Err(anyhow!( + "Server {} not found while trying to stop", + request.server + )) + } + } +} diff --git a/controller/src/application/server/manager/restart.rs b/controller/src/application/server/manager/restart.rs new file mode 100644 index 00000000..9563d69e --- /dev/null +++ b/controller/src/application/server/manager/restart.rs @@ -0,0 +1,57 @@ +use std::{collections::HashMap, mem::replace}; + +use anyhow::Result; +use simplelog::{debug, warn}; +use tokio::time::Instant; +use uuid::Uuid; + +use crate::{ + application::{node::manager::NodeManager, server::Server}, + config::Config, +}; + +use super::{ActionStage, RestartRequest, ServerManager}; + +impl ServerManager { + // Return true if the request should be ticked again. 
+ pub async fn handle_restart_request( + request: &mut RestartRequest, + servers: &mut HashMap, + config: &Config, + nodes: &NodeManager, + ) -> Result { + if let Some(when) = request.when { + if when > Instant::now() { + return Ok(true); + } + } + + // Cache old stage to compute the new stage based on the old stage + let stage = replace(&mut request.stage, ActionStage::Queued); + request.stage = match stage { + ActionStage::Queued => { + debug!("Restarting server {}", request.server); + match Self::restart(request, servers, config, nodes) { + Ok(handle) => ActionStage::Running(handle), + Err(error) => { + warn!("Failed to restart server {}: {}", request.server, error); + return Ok(false); + } + } + } + ActionStage::Running(handle) => { + if handle.is_finished() { + handle.await??; + debug!("Server {} has been restarted", request.server); + return Ok(false); + } + ActionStage::Running(handle) + } + ActionStage::Freeing(_) => { + warn!("Server {} is in an invalid state", request.server); + return Ok(false); + } + }; + Ok(true) + } +} diff --git a/controller/src/application/server/manager/start.rs b/controller/src/application/server/manager/start.rs new file mode 100644 index 00000000..b9983da2 --- /dev/null +++ b/controller/src/application/server/manager/start.rs @@ -0,0 +1,114 @@ +use std::{collections::HashMap, mem::replace, sync::Arc}; + +use anyhow::Result; +use simplelog::{debug, error, info, warn}; +use tokio::time::Instant; +use uuid::Uuid; + +use crate::{ + application::{ + group::manager::GroupManager, node::manager::NodeManager, server::Server, Shared, + }, + config::Config, +}; + +use super::{ServerManager, StartRequest, StartStage}; + +impl ServerManager { + // Return true if the request should be ticked again. 
+ pub async fn handle_start_request( + request: &mut StartRequest, + servers: &mut HashMap, + config: &Config, + nodes: &NodeManager, + groups: &mut GroupManager, + shared: &Arc, + ) -> Result { + if request.nodes.is_empty() { + warn!("Server {} has no nodes available to start on.", request.id); + return Ok(false); + } + + if let Some(when) = request.when { + if when > Instant::now() { + return Ok(true); + } + } + + // Cache old stage to compute the new stage based on the old stage + let stage = replace(&mut request.stage, StartStage::Queued); + request.stage = match stage { + StartStage::Queued => { + debug!("Allocating resources for server {}", request.id); + match Self::allocate(0, request, nodes) { + Ok(handle) => StartStage::Allocating((0, handle)), + Err(error) => { + warn!( + "Failed to allocate resources for server {}: {}", + request.id, error + ); + return Ok(false); + } + } + } + StartStage::Allocating((index, handle)) => { + if handle.is_finished() { + let ports = handle.await?; + if let Ok(ports) = ports { + if let Some(port) = ports.first() { + info!("Starting server {} listening on port {}", request.id, port); + } + match Self::start( + index, request, ports, servers, config, nodes, groups, shared, + ) + .await + { + Ok(handle) => StartStage::Creating(handle), + Err(error) => { + warn!("Failed to create server {}: {}", request.id, error); + return Ok(false); + } + } + } else { + debug!( + "Driver failed to allocate resources for server {} on node {}", + request.id, request.nodes[index] + ); + if index + 1 >= request.nodes.len() { + error!( + "No more nodes to try for server {}. 
Giving up...", + request.id + ); + return Ok(false); + } + match Self::allocate(index + 1, request, nodes) { + Ok(handle) => StartStage::Allocating((index + 1, handle)), + Err(error) => { + warn!( + "Failed to allocate resources for server {}: {}", + request.id, error + ); + return Ok(false); + } + } + } + } else { + StartStage::Allocating((index, handle)) + } + } + StartStage::Creating(handle) => { + if handle.is_finished() { + // Register the screen with the shared screen manager + shared + .screens + .register_screen(request.id.uuid(), handle.await??) + .await; + debug!("Server {} has been started", request.id); + return Ok(false); + } + StartStage::Creating(handle) + } + }; + Ok(true) + } +} diff --git a/controller/src/application/server/manager/stop.rs b/controller/src/application/server/manager/stop.rs new file mode 100644 index 00000000..7049ae55 --- /dev/null +++ b/controller/src/application/server/manager/stop.rs @@ -0,0 +1,76 @@ +use std::{collections::HashMap, mem::replace, sync::Arc}; + +use anyhow::Result; +use simplelog::{debug, info, warn}; +use tokio::time::Instant; +use uuid::Uuid; + +use crate::application::{ + group::manager::GroupManager, node::manager::NodeManager, server::Server, + user::manager::UserManager, Shared, +}; + +use super::{ActionStage, ServerManager, StopRequest}; + +impl ServerManager { + // Return true if the request should be ticked again. 
+ pub async fn handle_stop_request( + request: &mut StopRequest, + servers: &mut HashMap, + nodes: &NodeManager, + groups: &mut GroupManager, + users: &mut UserManager, + shared: &Arc, + ) -> Result { + if let Some(when) = request.when { + if when > Instant::now() { + return Ok(true); + } + } + + // Cache old stage to compute the new stage based on the old stage + let stage = replace(&mut request.stage, ActionStage::Queued); + request.stage = match stage { + ActionStage::Queued => { + debug!("Freeing resources for server {}", request.server); + match Self::free(request, servers, nodes) { + Ok(handle) => ActionStage::Freeing(handle), + Err(error) => { + warn!("Failed to free server {}: {}", request.server, error); + return Ok(false); + } + } + } + ActionStage::Freeing(handle) => { + if handle.is_finished() { + handle.await??; + info!("Stopping server {}", request.server); + match Self::stop(request, servers, nodes, groups, users, shared).await { + Ok(handle) => ActionStage::Running(handle), + Err(error) => { + warn!("Failed to stop server {}: {}", request.server, error); + return Ok(false); + } + } + } else { + ActionStage::Freeing(handle) + } + } + ActionStage::Running(handle) => { + if handle.is_finished() { + handle.await??; + // Remove the screen from the shared screen manager + shared + .screens + .unregister_screen(request.server.uuid()) + .await?; + servers.remove(request.server.uuid()); + debug!("Server {} has been stopped", request.server); + return Ok(false); + } + ActionStage::Running(handle) + } + }; + Ok(true) + } +} diff --git a/controller/src/application/server/screen.rs b/controller/src/application/server/screen.rs new file mode 100644 index 00000000..7c5f9a89 --- /dev/null +++ b/controller/src/application/server/screen.rs @@ -0,0 +1,35 @@ +use anyhow::Result; +use tokio::task::JoinHandle; +use tonic::{async_trait, Status}; + +pub mod manager; + +pub type BoxedScreen = Box; +pub type ScreenPullJoinHandle = JoinHandle, ScreenError>>; +pub type 
ScreenWriteJoinHandle = JoinHandle>; + +#[async_trait] +pub trait GenericScreen { + fn is_supported(&self) -> bool; + fn pull(&self) -> ScreenPullJoinHandle; + fn write(&self, data: &[u8]) -> ScreenWriteJoinHandle; + + /* Memory */ + async fn cleanup(&mut self) -> Result<()>; +} + +pub enum ScreenError { + Unsupported, + Error(anyhow::Error), +} + +impl From for Status { + fn from(val: ScreenError) -> Self { + match val { + ScreenError::Unsupported => { + Status::unimplemented("This feature is not supported by the plugin") + } + ScreenError::Error(error) => Status::internal(format!("Error: {error}")), + } + } +} diff --git a/controller/src/application/server/screen/manager.rs b/controller/src/application/server/screen/manager.rs new file mode 100644 index 00000000..81bcd8b6 --- /dev/null +++ b/controller/src/application/server/screen/manager.rs @@ -0,0 +1,180 @@ +use std::{collections::HashMap, time::Duration}; + +use anyhow::Result; +use common::cache::FixedSizeCache; +use futures::FutureExt; +use simplelog::warn; +use tokio::{ + sync::RwLock, + time::{interval, Interval, MissedTickBehavior}, +}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::Status; +use uuid::Uuid; + +use crate::{application::subscriber::Subscriber, network::manage::ScreenLines}; + +use super::{BoxedScreen, ScreenPullJoinHandle, ScreenWriteJoinHandle}; + +const SCREEN_TICK_RATE: u64 = 2; + +type SubscriberHolder = Vec>; + +pub struct ScreenManager { + screens: RwLock>, +} + +impl ScreenManager { + pub fn init() -> Self { + Self { + screens: RwLock::new(HashMap::new()), + } + } + + pub async fn register_screen(&self, server: &Uuid, screen: BoxedScreen) { + if !screen.is_supported() { + return; + } + + self.screens + .write() + .await + .insert(*server, ActiveScreen::new(screen)); + } + + pub async fn unregister_screen(&self, server: &Uuid) -> Result<()> { + if let Some(mut screen) = self.screens.write().await.remove(server) { + // Before we can drop the screen we have to drop the wasm 
resources first + screen.cleanup().await?; + drop(screen); // Drop the screen + } + + Ok(()) + } + + pub async fn write(&self, server: &Uuid, data: &[u8]) -> Result { + let screens = self.screens.read().await; + let screen = screens.get(server).ok_or(Status::unimplemented( + "The plugin that handles this screen does not support it", + ))?; + Ok(screen.write(data)) + } + + pub async fn subscribe_screen( + &self, + server: &Uuid, + ) -> Result>, Status> { + let mut screens = self.screens.write().await; + let screen = screens.get_mut(server).ok_or(Status::unimplemented( + "The plugin that handles this screen does not support it", + ))?; + + let (subscriber, receiver) = Subscriber::create(); + screen.push(subscriber).await; + Ok(receiver) + } +} + +// Ticking +impl ScreenManager { + pub async fn tick(&self) -> Result<()> { + for screen in self.screens.write().await.values_mut() { + screen.tick().await?; + } + Ok(()) + } + + pub async fn cleanup(&self) -> Result<()> { + for (_, mut screen) in self.screens.write().await.drain() { + // Before we can drop the screen we have to drop the wasm resources first + screen.cleanup().await?; + drop(screen); // Drop the screen + } + Ok(()) + } +} + +struct ActiveScreen { + interval: Interval, + screen: BoxedScreen, + handle: Option, + subscribers: SubscriberHolder, + cache: FixedSizeCache, +} + +impl ActiveScreen { + pub fn new(screen: BoxedScreen) -> Self { + let mut interval = interval(Duration::from_millis(1000 / SCREEN_TICK_RATE)); + interval.set_missed_tick_behavior(MissedTickBehavior::Skip); + Self { + interval, + screen, + handle: None, + subscribers: vec![], + cache: FixedSizeCache::new(91), + } + } + + pub fn write(&self, data: &[u8]) -> ScreenWriteJoinHandle { + self.screen.write(data) + } + + pub async fn push(&mut self, subscriber: Subscriber) { + if self.cache.has_data() { + if let Err(error) = subscriber + .0 + .send(Ok(ScreenLines { + lines: self.cache.clone_items(), + })) + .await + { + warn!( + "Failed to send 
initial screen data to subscriber: {}", + error + ); + return; + } + } + + self.subscribers.push(subscriber); + } + + pub async fn tick(&mut self) -> Result<()> { + if self.interval.tick().now_or_never().is_none() { + // Skip tick + return Ok(()); + } + + // Remove all dead subscribers + self.subscribers.retain(Subscriber::is_alive); + + if self.subscribers.is_empty() { + // If no one is watching dont pull + return Ok(()); + } + + self.handle = match self.handle.take() { + Some(handle) if handle.is_finished() => { + let lines = handle.await?.map_err(Into::into); + { + let lines = lines.clone().map(|lines| ScreenLines { lines }); + for subscriber in &self.subscribers { + subscriber.0.send(lines.clone()).await?; + } + } + if let Ok(lines) = lines { + self.cache.extend(lines); + } + None + } + Some(handle) => Some(handle), + None => Some(self.screen.pull()), + }; + + Ok(()) + } + + pub async fn cleanup(&mut self) -> Result<()> { + self.screen.cleanup().await + } +} diff --git a/controller/src/application/subscriber.rs b/controller/src/application/subscriber.rs new file mode 100644 index 00000000..e72372f4 --- /dev/null +++ b/controller/src/application/subscriber.rs @@ -0,0 +1,20 @@ +use tokio::sync::mpsc::{channel, Sender}; +use tokio_stream::wrappers::ReceiverStream; +use tonic::Status; + +pub mod manager; + +const SUBSCRIPTION_BUFFER: usize = 64; + +pub struct Subscriber(pub Sender>); + +impl Subscriber { + pub fn create() -> (Self, ReceiverStream>) { + let (subscriber, receiver) = channel(SUBSCRIPTION_BUFFER); + (Self(subscriber), ReceiverStream::new(receiver)) + } + + pub fn is_alive(&self) -> bool { + !self.0.is_closed() + } +} diff --git a/controller/src/application/subscriber/manager.rs b/controller/src/application/subscriber/manager.rs new file mode 100644 index 00000000..88134456 --- /dev/null +++ b/controller/src/application/subscriber/manager.rs @@ -0,0 +1,102 @@ +use std::collections::HashMap; + +use anyhow::Result; +use simplelog::debug; +use 
tokio::sync::RwLock; +use tokio_stream::wrappers::ReceiverStream; +use tonic::Status; +use uuid::Uuid; + +use crate::network::client::{ChannelMsg, TransferMsg}; + +use super::Subscriber; + +type SubscriberHolder = RwLock>>>; + +pub struct SubscriberManager { + transfer: SubscriberHolder, + channel: SubscriberHolder, +} + +impl SubscriberManager { + pub fn init() -> Self { + Self { + transfer: RwLock::new(HashMap::new()), + channel: RwLock::new(HashMap::new()), + } + } + + pub async fn subscribe_transfer( + &self, + server: Uuid, + ) -> ReceiverStream> { + let (subscriber, receiver) = Subscriber::create(); + self.transfer + .write() + .await + .entry(server) + .or_default() + .push(subscriber); + receiver + } + + pub async fn subscribe_channel( + &self, + channel: String, + ) -> ReceiverStream> { + let (subscriber, receiver) = Subscriber::create(); + self.channel + .write() + .await + .entry(channel) + .or_default() + .push(subscriber); + receiver + } + + pub async fn publish_transfer(&self, server: &Uuid, message: TransferMsg) -> u32 { + let mut count = 0; + if let Some(subscribers) = self.transfer.read().await.get(server) { + for subscriber in subscribers { + if let Err(error) = subscriber.0.send(Ok(message.clone())).await { + debug!("Failed to send transfer message: {}", error); + continue; + } + count += 1; + } + } + count + } + + pub async fn publish_channel(&self, message: ChannelMsg) -> u32 { + let mut count = 0; + if let Some(subscribers) = self.channel.read().await.get(&message.channel) { + for subscriber in subscribers { + if let Err(error) = subscriber.0.send(Ok(message.clone())).await { + debug!("Failed to send channel message: {}", error); + continue; + } + count += 1; + } + } + count + } +} + +// Ticking +impl SubscriberManager { + pub async fn tick(&self) -> Result<()> { + // Cleanup dead subscribers + Self::cleanup(&self.channel).await; + Self::cleanup(&self.transfer).await; + + Ok(()) + } + + async fn cleanup(holder: &SubscriberHolder) { + 
holder.write().await.retain(|_, value| { + value.retain(Subscriber::is_alive); + !value.is_empty() + }); + } +} diff --git a/controller/src/application/unit.rs b/controller/src/application/unit.rs deleted file mode 100644 index 96b7bb95..00000000 --- a/controller/src/application/unit.rs +++ /dev/null @@ -1,560 +0,0 @@ -use std::{ - collections::{HashMap, VecDeque}, - sync::{ - atomic::{AtomicBool, AtomicU32, Ordering}, - Arc, RwLock, RwLockReadGuard, Weak, - }, - time::{Duration, Instant}, -}; - -use serde::{Deserialize, Serialize}; -use simplelog::{debug, error, info, warn}; -use uuid::Uuid; - -use super::{ - auth::AuthUnitHandle, - cloudlet::{AllocationHandle, CloudletHandle, WeakCloudletHandle}, - deployment::WeakDeploymentHandle, - ControllerHandle, WeakControllerHandle, -}; - -pub type UnitHandle = Arc; -pub type WeakUnitHandle = Weak; -pub type StartRequestHandle = Arc; - -pub struct Units { - controller: WeakControllerHandle, - - /* Units started by this atomic cloud instance */ - units: RwLock>, - - /* Units that should be started/stopped next controller tick */ - start_requests: RwLock>, - stop_requests: RwLock>, -} - -impl Units { - pub fn new(controller: WeakControllerHandle) -> Self { - Self { - controller, - units: RwLock::new(HashMap::new()), - start_requests: RwLock::new(VecDeque::new()), - stop_requests: RwLock::new(VecDeque::new()), - } - } - - pub fn tick(&self) { - // Get Controller handle - let controller = self - .controller - .upgrade() - .expect("Failed to upgrade controller"); - - // Check health of units - { - let dead_units = self.units.read().unwrap().values().filter(|unit| { - let health = unit.health.read().unwrap(); - if health.is_dead() { - match *unit.state.read().unwrap() { - State::Starting | State::Restarting => { - warn!("Unit {} failed to establish online status within the expected startup time of {:.2?}.", unit.name, controller.configuration.timings.restart.unwrap()); - } - _ => { - warn!("Unit {} has not checked in for {:.2?}, 
indicating a potential error.", unit.name, health.timeout); - } - } - true - } else { - false - } - }).cloned().collect::>(); - for unit in dead_units { - self.restart_unit(&unit); - } - } - - // Stop all units that have to be stopped - { - let mut requests = self.stop_requests.write().unwrap(); - requests.retain(|request| { - if let Some(when) = request.when { - if when > Instant::now() { - return true; - } - } - - self.stop_unit_nolock(request, &mut self.units.write().unwrap()); - false - }); - } - - // Sort requests by priority and process them - { - let mut requests = self.start_requests.write().unwrap(); - { - let contiguous = requests.make_contiguous(); - contiguous.sort_unstable_by_key(|req| req.priority); - contiguous.reverse(); - } - requests.retain(|request| { - if request.canceled.load(Ordering::Relaxed) { - debug!( - "Canceled start of unit {}", - request.name - ); - return false; - } - - if let Some(when) = request.when { - if when > Instant::now() { - return true; - } - } - - if request.cloudlets.is_empty() { - warn!( - "Failed to allocate resources for unit {} because no cloudlets were specified", - request.name - ); - return true; - } - - // Collect and sort cloudlets by the number of allocations - for cloudlet in &request.cloudlets { - let cloudlet = cloudlet.upgrade().unwrap(); - // Try to allocate resources on cloudlets - if let Ok(allocation) = cloudlet.allocate(request) { - // Start unit on the cloudlet - self.start_unit(request, allocation, &cloudlet); - return false; - } - } - warn!( - "Failed to allocate resources for unit {}", - request.name - ); - true - }); - } - } - - pub fn queue_unit(&self, request: StartRequest) -> StartRequestHandle { - let arc = Arc::new(request); - self.start_requests.write().unwrap().push_back(arc.clone()); - arc - } - - pub fn stop_all_instant(&self) { - self.units.write().unwrap().drain().for_each(|(_, unit)| { - self.stop_unit_internal(&StopRequest { when: None, unit }); - }); - } - - pub fn 
stop_all_on_cloudlet(&self, cloudlet: &CloudletHandle) { - self.units - .read() - .unwrap() - .values() - .filter(|unit| Arc::ptr_eq(&unit.cloudlet.upgrade().unwrap(), cloudlet)) - .for_each(|unit| { - self.stop_unit_now(unit.clone()); - }); - } - - fn stop_unit_nolock(&self, request: &StopRequest, units: &mut HashMap) { - self.stop_unit_internal(request); - units.remove(&request.unit.uuid); - } - - fn stop_unit_internal(&self, request: &StopRequest) { - let unit = &request.unit; - info!("Stopping unit {}", unit.name); - - // Remove resources allocated by unit from cloudlet - if let Some(cloudlet) = unit.cloudlet.upgrade() { - cloudlet.deallocate(&unit.allocation); - } - - // Send start request to cloudlet - // We do this async because the driver chould be running blocking code like network requests - let controller = self - .controller - .upgrade() - .expect("The controller is dead while still running code that requires it"); - { - let unit = unit.clone(); - controller - .get_runtime() - .as_ref() - .unwrap() - .spawn_blocking(move || stop_thread(unit)); - } - - // Remove unit from deployment and units list - if let Some(deployment) = &unit.deployment { - deployment.remove_unit(unit); - } - if let Some(controller) = self.controller.upgrade() { - controller.get_auth().unregister_unit(unit); - } - - // Remove users connected to the unit - controller.get_users().cleanup_users(unit); - // Remove subscribers from channels - controller.get_event_bus().cleanup_unit(unit); - - fn stop_thread(unit: UnitHandle) { - if let Some(cloudlet) = unit.cloudlet.upgrade() { - if let Err(error) = cloudlet.get_inner().stop_unit(&unit) { - error!( - "Failed to stop unit {}: {}", - unit.name, error - ); - } - } - } - } - - pub fn stop_unit_now(&self, unit: UnitHandle) { - self.stop_requests - .write() - .unwrap() - .push_back(StopRequest { when: None, unit }); - } - - pub fn _stop_unit(&self, when: Instant, unit: UnitHandle) { - self.stop_requests.write().unwrap().push_back(StopRequest { 
- when: Some(when), - unit, - }); - } - - pub fn restart_unit(&self, unit: &UnitHandle) { - info!("Restarting unit {}", unit.name); - - let controller = self - .controller - .upgrade() - .expect("Failed to upgrade controller"); - - *unit.state.write().unwrap() = State::Restarting; - *unit.health.write().unwrap() = Health::new( - controller.configuration.timings.restart.unwrap(), - controller.configuration.timings.healthbeat.unwrap(), - ); - - // Send restart request to cloudlet - // We do this async because the driver chould be running blocking code like network requests - if let Some(controller) = self.controller.upgrade() { - let unit = unit.clone(); - let copy = controller.clone(); - controller - .get_runtime() - .as_ref() - .unwrap() - .spawn_blocking(move || restart_thread(copy, unit)); - } - - fn restart_thread(controller: ControllerHandle, unit: UnitHandle) { - if let Some(cloudlet) = unit.cloudlet.upgrade() { - if let Err(error) = &cloudlet.get_inner().restart_unit(&unit) { - error!( - "Failed to restart unit {}: {}", - unit.name, error - ); - controller.get_units().stop_unit_now(unit); - } - } - } - } - - pub fn handle_heart_beat(&self, unit: &UnitHandle) { - debug!("Received heartbeat from unit {}", &unit.name); - - // Reset health - unit.health.write().unwrap().reset(); - - // Check were the unit is in the state machine - let mut state = unit.state.write().unwrap(); - if *state == State::Starting || *state == State::Restarting { - *state = State::Preparing; - info!("The unit {} is now loading", unit.name); - } - } - - pub fn mark_ready(&self, unit: &UnitHandle) { - if !unit.rediness.load(Ordering::Relaxed) { - debug!("The unit {} is ready", unit.name); - unit.rediness.store(true, Ordering::Relaxed); - } - } - - pub fn mark_not_ready(&self, unit: &UnitHandle) { - if unit.rediness.load(Ordering::Relaxed) { - debug!("The unit {} is no longer ready", unit.name); - unit.rediness.store(false, Ordering::Relaxed); - } - } - - pub fn mark_running(&self, unit: 
&UnitHandle) { - let mut state = unit.state.write().unwrap(); - if *state == State::Preparing { - info!("The unit {} is now running", unit.name); - *state = State::Running; - } - } - - pub fn checked_unit_stop(&self, unit: &UnitHandle) { - let mut state = unit.state.write().unwrap(); - if *state != State::Stopping { - self.mark_not_ready(unit); - *state = State::Stopping; - self.stop_unit_now(unit.clone()); - } - } - - pub fn find_fallback_unit(&self, excluded: &UnitHandle) -> Option { - // TODO: Also check if the unit have free slots - self.units - .read() - .unwrap() - .values() - .filter(|unit| { - !Arc::ptr_eq(unit, excluded) - && unit.allocation.spec.fallback.enabled - && unit.rediness.load(Ordering::Relaxed) - && *unit.state.read().unwrap() == State::Running - }) - .max_by_key(|unit| unit.allocation.spec.fallback.priority) - .cloned() - } - - pub fn get_unit(&self, uuid: Uuid) -> Option { - self.units.read().unwrap().get(&uuid).cloned() - } - - pub fn get_units(&self) -> RwLockReadGuard> { - self.units.read().expect("Failed to lock units") - } - - fn start_unit( - &self, - request: &StartRequestHandle, - allocation: AllocationHandle, - cloudlet: &CloudletHandle, - ) { - let controller = self - .controller - .upgrade() - .expect("Failed to upgrade controller"); - - info!( - "Spinning up unit {} on cloudlet {} listening on port {}", - request.name, - cloudlet.name, - allocation.primary_address().to_string() - ); - let unit = Arc::new_cyclic(|handle| { - // Create a token for the unit - let auth = self - .controller - .upgrade() - .expect("WAIT. 
We are creating a unit while the controller is dead?") - .get_auth() - .register_unit(handle.clone()); - - Unit { - name: request.name.clone(), - uuid: Uuid::new_v4(), - deployment: request.deployment.clone(), - cloudlet: Arc::downgrade(cloudlet), - allocation, - connected_users: AtomicU32::new(0), - auth, - health: RwLock::new(Health::new( - controller.configuration.timings.startup.unwrap(), - controller.configuration.timings.healthbeat.unwrap(), - )), - state: RwLock::new(State::Starting), - rediness: AtomicBool::new(false), - flags: Flags { - stop: RwLock::new(None), - }, - } - }); - - if let Some(deployment) = &request.deployment { - deployment.set_active(unit.clone(), request); - } - self.units.write().unwrap().insert(unit.uuid, unit.clone()); - - // Print unit information to the console for debugging - debug!("-----------------------------------"); - debug!("New unit added to controller"); - debug!("Name: {}", unit.name); - debug!("UUID: {}", unit.uuid.to_string()); - debug!("Token: {}", unit.auth.token); - debug!("-----------------------------------"); - - // Send start request to cloudlet - // We do this async because the driver chould be running blocking code like network requests - if let Some(controller) = self.controller.upgrade() { - let copy = controller.clone(); - controller - .get_runtime() - .as_ref() - .unwrap() - .spawn_blocking(move || start_thread(copy, unit)); - } - - fn start_thread(controller: ControllerHandle, unit: UnitHandle) { - if let Some(cloudlet) = unit.cloudlet.upgrade() { - if let Err(error) = cloudlet.get_inner().start_unit(&unit) { - error!( - "Failed to start unit {}: {}", - unit.name, error - ); - controller.get_units().stop_unit_now(unit); - } - } - } - } -} - -pub struct Unit { - pub name: String, - pub uuid: Uuid, - pub deployment: Option, - pub cloudlet: WeakCloudletHandle, - pub allocation: AllocationHandle, - - /* Users */ - pub connected_users: AtomicU32, - - /* Auth */ - pub auth: AuthUnitHandle, - - /* Health and State 
of the unit */ - pub health: RwLock, - pub state: RwLock, - pub flags: Flags, - pub rediness: AtomicBool, -} - -impl Unit { - pub fn get_user_count(&self) -> u32 { - self.connected_users.load(Ordering::Relaxed) - } -} - -pub struct Health { - pub next_checkin: Instant, - pub timeout: Duration, -} - -impl Health { - pub fn new(startup_time: Duration, timeout: Duration) -> Self { - Self { - next_checkin: Instant::now() + startup_time, - timeout, - } - } - pub fn reset(&mut self) { - self.next_checkin = Instant::now() + self.timeout; - } - pub fn is_dead(&self) -> bool { - Instant::now() > self.next_checkin - } -} - -pub struct StartRequest { - pub canceled: AtomicBool, - pub when: Option, - pub name: String, - pub deployment: Option, - pub cloudlets: Vec, - pub resources: Resources, - pub spec: Spec, - pub priority: i32, -} - -pub struct StopRequest { - pub when: Option, - pub unit: UnitHandle, -} - -#[derive(PartialEq, Clone)] -pub enum State { - Starting, - Preparing, - Restarting, - Running, - Stopping, -} - -pub struct Flags { - /* Required for the deployment system */ - pub stop: RwLock>, -} - -#[derive(Clone)] -pub struct DeploymentRef { - pub unit_id: usize, - pub deployment: WeakDeploymentHandle, -} - -impl DeploymentRef { - pub fn remove_unit(&self, unit: &UnitHandle) { - if let Some(deployment) = self.deployment.upgrade() { - deployment.remove_unit(unit); - } - } - - pub fn set_active(&self, unit: UnitHandle, request: &StartRequestHandle) { - if let Some(deployment) = self.deployment.upgrade() { - deployment.set_unit_active(unit, request); - } - } -} - -#[derive(Serialize, Deserialize, Default, Clone)] -pub struct Resources { - pub memory: u32, - pub swap: u32, - pub cpu: u32, - pub io: u32, - pub disk: u32, - pub addresses: u32, -} - -#[derive(Serialize, Deserialize, Clone, Default)] -pub enum Retention { - #[serde(rename = "temporary")] - #[default] - Temporary, - #[serde(rename = "permanent")] - Permanent, -} - -#[derive(Serialize, Deserialize, Clone, 
Default)] -pub struct FallbackPolicy { - pub enabled: bool, - pub priority: i32, -} - -#[derive(Serialize, Deserialize, Clone)] -pub struct KeyValue { - pub key: String, - pub value: String, -} - -#[derive(Serialize, Deserialize, Clone, Default)] -pub struct Spec { - pub settings: Vec, - pub environment: Vec, - pub disk_retention: Retention, - pub image: String, - - pub max_players: u32, - pub fallback: FallbackPolicy, -} diff --git a/controller/src/application/user.rs b/controller/src/application/user.rs index f68f550d..5949eb2e 100644 --- a/controller/src/application/user.rs +++ b/controller/src/application/user.rs @@ -1,200 +1,19 @@ -use std::{ - collections::HashMap, - ops::Deref, - sync::{atomic::Ordering, Arc, RwLock, Weak}, - time::Instant, -}; +use getset::Getters; +use tokio::time::Instant; -use simplelog::{debug, info, warn}; -use transfer::Transfer; -use uuid::Uuid; - -use super::{ - unit::{UnitHandle, WeakUnitHandle}, - WeakControllerHandle, -}; +use super::server::NameAndUuid; +pub mod manager; pub mod transfer; -pub type UserHandle = Arc; -pub type WeakUserHandle = Weak; - -pub struct Users { - controller: WeakControllerHandle, - - /* Users that joined some started unit */ - users: RwLock>, -} - -impl Users { - pub fn new(controller: WeakControllerHandle) -> Self { - Self { - controller, - users: RwLock::new(HashMap::new()), - } - } - - pub fn tick(&self) { - let controller = self - .controller - .upgrade() - .expect("Failed to upgrade controller"); - - let mut users = self.users.write().unwrap(); - users.retain(|_, user| { - if let CurrentUnit::Transfering(transfer) = user.unit.read().unwrap().deref() { - if Instant::now().duration_since(transfer.timestamp) - > controller.configuration.timings.transfer.unwrap() - { - if let Some(to) = transfer.to.upgrade() { - warn!( - "User {}[{}] failed to transfer to unit {} in time", - user.name, - user.uuid.to_string(), - to.name - ); - } - return false; - } - } - true - }); - } - - pub fn 
handle_user_connected(&self, unit: UnitHandle, name: String, uuid: Uuid) { - // Update unit that the user is connected to - unit.connected_users.fetch_add(1, Ordering::Relaxed); - - // Update internal user list - let mut users = self.users.write().unwrap(); - if let Some(user) = users.get(&uuid) { - let mut current_unit = user.unit.write().unwrap(); - match current_unit.deref() { - CurrentUnit::Connected(_) => { - *current_unit = CurrentUnit::Connected(Arc::downgrade(&unit)); - warn!( - "User {}[{}] was never flagged as transferring but switched to unit {}", - name, - uuid.to_string(), - unit.name - ); - } - CurrentUnit::Transfering(_) => { - *current_unit = CurrentUnit::Connected(Arc::downgrade(&unit)); - info!( - "User {}[{}] successfully transferred to unit {}", - name, - uuid.to_string(), - unit.name - ); - } - } - } else { - info!( - "User {}[{}] connected to unit {}", - name, - uuid.to_string(), - unit.name - ); - users.insert(uuid, self.create_user(name, uuid, &unit)); - } - } - - pub fn handle_user_disconnected(&self, unit: UnitHandle, uuid: Uuid) { - // Update unit that the user was connected to - unit.connected_users.fetch_sub(1, Ordering::Relaxed); - - // Update internal user list - let mut users = self.users.write().unwrap(); - if let Some(user) = users.get(&uuid).cloned() { - if let CurrentUnit::Connected(weak_unit) = user.unit.read().unwrap().deref() { - if let Some(strong_unit) = weak_unit.upgrade() { - // Verify if the user is connected to the unit that is saying he is disconnecting - if Arc::ptr_eq(&strong_unit, &unit) { - info!( - "User {}[{}] disconnected from unit {}", - user.name, - user.uuid.to_string(), - strong_unit.name, - ); - users.remove(&user.uuid); - } - } - } - } - } - - pub fn cleanup_users(&self, dead_unit: &UnitHandle) -> u32 { - let mut amount = 0; - self.users.write().unwrap().retain(|_, user| { - if let CurrentUnit::Connected(weak_unit) = user.unit.read().unwrap().deref() { - if let Some(unit) = weak_unit.upgrade() { - if 
Arc::ptr_eq(&unit, dead_unit) { - info!( - "User {}[{}] disconnected from unit {}", - user.name, - user.uuid.to_string(), - unit.name, - ); - amount += 1; - return false; - } - } else { - debug!( - "User {}[{}] is connected to a dead unit removing him", - user.name, - user.uuid.to_string() - ); - amount += 1; - return false; - } - } - true - }); - amount - } - - pub fn get_users(&self) -> Vec { - self.users.read().unwrap().values().cloned().collect() - } - - pub fn _get_users_on_unit(&self, unit: &UnitHandle) -> Vec { - self.users - .read() - .unwrap() - .values() - .filter(|user| { - if let CurrentUnit::Connected(weak_unit) = user.unit.read().unwrap().deref() { - if let Some(strong_unit) = weak_unit.upgrade() { - return Arc::ptr_eq(&strong_unit, unit); - } - } - false - }) - .cloned() - .collect() - } - - pub fn get_user(&self, uuid: Uuid) -> Option { - self.users.read().unwrap().get(&uuid).cloned() - } - - fn create_user(&self, name: String, uuid: Uuid, unit: &UnitHandle) -> UserHandle { - Arc::new(User { - name, - uuid, - unit: RwLock::new(CurrentUnit::Connected(Arc::downgrade(unit))), - }) - } -} - -pub enum CurrentUnit { - Connected(WeakUnitHandle), - Transfering(Transfer), +#[derive(Getters)] +pub struct User { + #[getset(get = "pub")] + id: NameAndUuid, + server: CurrentServer, } -pub struct User { - pub name: String, - pub uuid: Uuid, - pub unit: RwLock, +pub enum CurrentServer { + Connected(NameAndUuid), + Transfering((Instant, NameAndUuid)), } diff --git a/controller/src/application/user/manager.rs b/controller/src/application/user/manager.rs new file mode 100644 index 00000000..06516ecb --- /dev/null +++ b/controller/src/application/user/manager.rs @@ -0,0 +1,151 @@ +use std::collections::HashMap; + +use anyhow::Result; +use simplelog::{info, warn}; +use uuid::Uuid; + +use crate::{ + application::{ + auth::ActionResult, + server::{NameAndUuid, Server}, + }, + config::Config, +}; + +use super::{CurrentServer, User}; + +pub struct UserManager { + users: 
HashMap, +} + +impl UserManager { + pub fn init() -> Self { + Self { + users: HashMap::new(), + } + } + + pub fn remove_users_on_server(&mut self, server: &Uuid) -> u32 { + let mut amount = 0; + self.users.retain(|_, user| { + if let CurrentServer::Connected(current) = &user.server { + if current.uuid() == server { + info!( + "User {}[{}] disconnected from server {}", + user.id, + user.id.uuid().to_string(), + current.name(), + ); + amount += 1; + return false; + } + } + true + }); + amount + } + + pub fn user_connected(&mut self, server: &mut Server, id: NameAndUuid) { + // Update server user count + server.set_connected_users(server.connected_users() + 1); + + // Update internal user list + if let Some(user) = self.users.get_mut(id.uuid()) { + match &user.server { + CurrentServer::Connected(_) => { + warn!( + "User {}[{}] was never flagged as transferring but switched to server {}", + id, + id.uuid().to_string(), + server.id(), + ); + } + CurrentServer::Transfering(_) => { + info!( + "User {}[{}] successfully transferred to server {}", + id, + id.uuid().to_string(), + server.id(), + ); + } + } + user.server = CurrentServer::Connected(server.id().clone()); + } else { + info!( + "User {}[{}] connected to server {}", + id, + id.uuid().to_string(), + server.id() + ); + self.users.insert( + *id.uuid(), + User { + id, + server: CurrentServer::Connected(server.id().clone()), + }, + ); + } + } + + pub fn user_disconnected(&mut self, server: &mut Server, uuid: &Uuid) -> ActionResult { + // Update server user count + server.set_connected_users(server.connected_users() - 1); + + // Update internal user list + if let Some(user) = self.users.get(uuid) { + if let CurrentServer::Connected(current) = &user.server { + // Verify that the user is connected to the server + if current.uuid() == server.id().uuid() { + info!( + "User {}[{}] disconnected from server {}", + user.id, + user.id.uuid().to_string(), + server.id(), + ); + self.users.remove(uuid); + } else { + return 
ActionResult::Denied; + } + } + } + ActionResult::Allowed + } + + pub fn get_users(&self) -> Vec<&User> { + self.users.values().collect() + } + + pub fn _get_user(&self, uuid: &Uuid) -> Option<&User> { + self.users.get(uuid) + } + pub fn get_user_mut(&mut self, uuid: &Uuid) -> Option<&mut User> { + self.users.get_mut(uuid) + } +} + +// Ticking +impl UserManager { + #[allow(clippy::unnecessary_wraps)] + pub fn tick(&mut self, config: &Config) -> Result<()> { + self.users.retain(|_, user| { + if let CurrentServer::Transfering((timestamp, to)) = &user.server { + if timestamp.elapsed() >= *config.transfer_timeout() { + warn!( + "User {}[{}] transfer to server {} timed out", + user.id, + user.id.uuid(), + to, + ); + return false; + } + } + true + }); + Ok(()) + } + + #[allow(clippy::unnecessary_wraps, clippy::unused_self)] + pub fn cleanup(&mut self) -> Result<()> { + Ok(()) + } +} diff --git a/controller/src/application/user/transfer.rs b/controller/src/application/user/transfer.rs index 7b36cc27..8c1b0c38 100644 --- a/controller/src/application/user/transfer.rs +++ b/controller/src/application/user/transfer.rs @@ -1,126 +1,130 @@ -use std::time::Instant; -use std::{ops::Deref, sync::Arc}; +use std::sync::Arc; -use simplelog::{error, info, warn}; +use simplelog::info; +use tokio::time::Instant; +use tonic::Status; +use uuid::Uuid; -use crate::application::deployment::DeploymentHandle; use crate::application::{ - event::{transfer::UserTransferRequested, EventKey}, - unit::{UnitHandle, WeakUnitHandle}, + auth::Authorization, + group::manager::GroupManager, + server::{manager::ServerManager, NameAndUuid, Server}, + Shared, }; -use super::{CurrentUnit, UserHandle, Users, WeakUserHandle}; +use super::{CurrentServer, User}; -impl Users { - pub fn resolve_transfer(&self, user: &UserHandle, target: &TransferTarget) -> Option { - let from = { - let unit = user.unit.read().unwrap(); - if let CurrentUnit::Connected(from) = unit.deref() { - from.clone() +impl<'a> Transfer<'a> { + 
pub fn resolve( + auth: &Authorization, + user: &'a mut User, + target: &TransferTarget, + servers: &'a ServerManager, + groups: &GroupManager, + ) -> Result, ResolveError> { + // Check if auth is allowed to transfer user + if let Some(server) = auth.get_server() { + if let CurrentServer::Connected(current) = &user.server { + if current.uuid() != server.uuid() { + return Err(ResolveError::AccessDenied); + } } else { - return None; + return Err(ResolveError::AccessDenied); } + } + + let CurrentServer::Connected(from) = &user.server else { + return Err(ResolveError::UserNotFound); }; - match target { - TransferTarget::Unit(to) => { - return Some(Transfer::new( - Arc::downgrade(user), - from.clone(), - Arc::downgrade(to), - )); - } - TransferTarget::Deployment(deployment) => { - if let Some(to) = deployment.get_free_unit() { - return Some(Transfer::new( - Arc::downgrade(user), - from.clone(), - Arc::downgrade(&to), - )); - } else { - warn!("Failed to find free unit in deployment {} while resolving transfer of user {}", deployment.name, user.name); - } + let to = match target { + TransferTarget::Server(to) => { + servers.get_server(to).ok_or(ResolveError::ServerNotFound)? } - TransferTarget::Fallback => { - let controller = self - .controller - .upgrade() - .expect("Failed to upgrade controller. This should never happen"); - if let Some(fallback) = controller - .get_units() - .find_fallback_unit( - &from - .upgrade() - .expect("Failed to upgrade unit. This should never happen"), - ) - .map(TransferTarget::Unit) - { - return self.resolve_transfer(user, &fallback); - } else { - warn!("Failed to find fallback unit while resolving transfer of user {}", user.name); - } + TransferTarget::Group(group) => { + let group = groups.get_group(group).ok_or(ResolveError::GroupNotFound)?; + group + .find_free_server(servers) + .ok_or(ResolveError::NotServerAvailable)? 
} - } + TransferTarget::Fallback => servers + .find_fallback_server(from.uuid()) + .ok_or(ResolveError::NotServerAvailable)?, + }; - None + Ok(Transfer::new(user, from.clone(), to, Instant::now())) } - pub fn transfer_user(&self, transfer: Transfer) -> bool { - if let Some((user, from, to)) = transfer.get_strong() { - info!( - "Transfering user {} from {} to unit {}", - user.name, from.name, to.name - ); + pub async fn transfer_user( + transfer: &mut Transfer<'a>, + shared: &Arc, + ) -> Result<(), Status> { + info!( + "Transfering user {} from {} to server {}", + transfer.user.id, + transfer.from, + transfer.to.id() + ); + if let Some(data) = transfer.to.new_transfer(transfer.user.id.uuid()) { + shared + .subscribers + .publish_transfer(transfer.from.uuid(), data) + .await; - let controller = self - .controller - .upgrade() - .expect("Failed to upgrade controller. This should never happen"); - controller.get_event_bus().dispatch( - &EventKey::Transfer(from.uuid), - &UserTransferRequested { - transfer: transfer.clone(), - }, - ); - - *user.unit.write().unwrap() = CurrentUnit::Transfering(transfer); - return true; + transfer.user.server = + CurrentServer::Transfering((transfer.timestamp, transfer.to.id().clone())); + Ok(()) } else { - error!("Failed to transfer user because some required information is missing",); + Err(Status::unavailable( + "Target server seems to have no network address", + )) } - - false } } +pub enum ResolveError { + UserNotFound, + ServerNotFound, + NotServerAvailable, + GroupNotFound, + + AccessDenied, +} + pub enum TransferTarget { - Unit(UnitHandle), - Deployment(DeploymentHandle), + Server(Uuid), + Group(String), Fallback, } -#[derive(Clone, Debug)] -pub struct Transfer { - pub timestamp: Instant, - pub user: WeakUserHandle, - pub from: WeakUnitHandle, - pub to: WeakUnitHandle, +pub struct Transfer<'a> { + user: &'a mut User, + from: NameAndUuid, + to: &'a Server, + timestamp: Instant, } -impl Transfer { - pub fn new(user: WeakUserHandle, 
from: WeakUnitHandle, to: WeakUnitHandle) -> Self { +impl<'a> Transfer<'a> { + fn new(user: &'a mut User, from: NameAndUuid, to: &'a Server, timestamp: Instant) -> Self { Self { - timestamp: Instant::now(), user, from, to, + timestamp, } } +} - pub fn get_strong(&self) -> Option<(UserHandle, UnitHandle, UnitHandle)> { - let user = self.user.upgrade()?; - let from = self.from.upgrade()?; - let to = self.to.upgrade()?; - Some((user, from, to)) +impl From for Status { + fn from(val: ResolveError) -> Self { + match val { + ResolveError::UserNotFound => Status::not_found("User not found"), + ResolveError::ServerNotFound => Status::not_found("Server not found"), + ResolveError::NotServerAvailable => Status::unavailable("Server not available"), + ResolveError::GroupNotFound => Status::not_found("Group not found"), + ResolveError::AccessDenied => { + Status::permission_denied("Missing permissions to transfer user") + } + } } } diff --git a/controller/src/args.rs b/controller/src/args.rs deleted file mode 100644 index b141bad2..00000000 --- a/controller/src/args.rs +++ /dev/null @@ -1,7 +0,0 @@ -use clap::{ArgAction, Parser}; - -#[derive(Parser)] -pub struct Args { - #[clap(short, long, help = "Enable debug mode", action = ArgAction::SetTrue)] - pub debug: bool, -} diff --git a/controller/src/config.rs b/controller/src/config.rs index 5153845a..52e7445d 100644 --- a/controller/src/config.rs +++ b/controller/src/config.rs @@ -1,122 +1,76 @@ -use std::net::SocketAddr; -use std::time::Duration; +use std::{net::SocketAddr, time::Duration}; -use common::config::{LoadFromTomlFile, SaveToTomlFile}; -use serde::{Deserialize, Serialize}; -use simplelog::{error, warn}; -use uuid::Uuid; +use anyhow::Result; +use serde::Deserialize; +use tokio::fs; -use crate::storage::Storage; +use crate::storage::{LoadFromTomlFile, Storage}; -const DEFAULT_EXPECTED_STARTUP_TIME: Duration = Duration::from_secs(130); -const DEFAULT_EXPECTED_RESTART_TIME: Duration = Duration::from_secs(120); -const 
DEFAULT_HEALTH_CHECK_TIMEOUT: Duration = Duration::from_secs(15); -const DEFAULT_TRANSFER_TIMEOUT: Duration = Duration::from_secs(10); -const DEFAULT_EMPTY_UNIT_TIMEOUT: Duration = Duration::from_secs(120); +const DEFAULT_CONFIG: &str = + include_str!(concat!(env!("CARGO_MANIFEST_DIR"), "/configs/config.toml")); -const DEFAULT_BIND_ADDRESS: &str = "0.0.0.0"; -const DEFAULT_BIND_PORT: u16 = 12892; - -#[derive(Deserialize, Serialize, Default)] -pub struct NetworkConfig { - pub bind: Option, +#[derive(Deserialize)] +struct Network { + bind: SocketAddr, } -#[derive(Deserialize, Serialize, Default)] -pub struct Timings { - pub startup: Option, - pub restart: Option, - pub healthbeat: Option, - pub transfer: Option, - pub empty_unit: Option, +#[derive(Deserialize)] +struct Timeouts { + startup: Duration, + restart: Duration, + heartbeat: Duration, + transfer: Duration, + empty_server: Duration, } -#[derive(Deserialize, Serialize, Default)] +#[derive(Deserialize)] pub struct Config { - /* Cloud Identification */ - pub identifier: Option, - - /* Network */ - pub network: NetworkConfig, - - /* Timings */ - pub timings: Timings, + identifier: String, + network: Network, + timeouts: Timeouts, } impl Config { - fn load_or_empty() -> Self { - let path = Storage::get_primary_config_file(); - if !path.exists() { - return Self::default(); + pub async fn parse() -> Result { + let path = Storage::primary_config_file(); + if path.exists() { + Self::from_file(&path).await + } else { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).await?; + } + fs::write(&path, DEFAULT_CONFIG).await?; + Self::from_file(&path).await } - Self::load_from_file(&path).unwrap_or_else(|error| { - warn!( - "Failed to read configuration from file: {}", - error - ); - Self::default() - }) } - pub fn new_filled() -> Self { - let mut config = Self::load_or_empty(); + pub fn identifier(&self) -> &str { + &self.identifier + } - let mut save = false; - if config.identifier.is_none() { - 
config.identifier = Some(Uuid::new_v4().to_string()); - save = true; - } - if config.network.bind.is_none() { - config.network.bind = Some(SocketAddr::new( - DEFAULT_BIND_ADDRESS.parse().unwrap(), - DEFAULT_BIND_PORT, - )); - save = true; - } - if config.timings.startup.is_none() { - config.timings.startup = Some(DEFAULT_EXPECTED_STARTUP_TIME); - save = true; - } - if config.timings.restart.is_none() { - config.timings.restart = Some(DEFAULT_EXPECTED_RESTART_TIME); - save = true; - } - if config.timings.healthbeat.is_none() { - config.timings.healthbeat = Some(DEFAULT_HEALTH_CHECK_TIMEOUT); - save = true; - } - if config.timings.transfer.is_none() { - config.timings.transfer = Some(DEFAULT_TRANSFER_TIMEOUT); - save = true; - } - if config.timings.empty_unit.is_none() { - config.timings.empty_unit = Some(DEFAULT_EMPTY_UNIT_TIMEOUT); - save = true; - } - if save { - if let Err(error) = config.save_to_file(&Storage::get_primary_config_file(), true) { - error!( - "Failed to save generated configuration to file: {}", - &error - ); - } - } + pub fn network_bind(&self) -> &SocketAddr { + &self.network.bind + } - // Check config values are overridden by environment variables - if let Ok(identifier) = std::env::var("INSTANCE_IDENTIFIER") { - config.identifier = Some(identifier); - } - if let Ok(address) = std::env::var("BIND_ADDRESS") { - if let Ok(address) = address.parse() { - config.network.bind.replace(address); - } else { - error!("Failed to parse BIND_ADDRESS environment variable"); - } - } + pub fn startup_timeout(&self) -> &Duration { + &self.timeouts.startup + } + + pub fn restart_timeout(&self) -> &Duration { + &self.timeouts.restart + } + + pub fn heartbeat_timeout(&self) -> &Duration { + &self.timeouts.heartbeat + } + + pub fn transfer_timeout(&self) -> &Duration { + &self.timeouts.transfer + } - config + pub fn empty_server_timeout(&self) -> &Duration { + &self.timeouts.empty_server } } -impl SaveToTomlFile for Config {} impl LoadFromTomlFile for Config {} diff 
--git a/controller/src/main.rs b/controller/src/main.rs index e6963dca..a2f6553f 100644 --- a/controller/src/main.rs +++ b/controller/src/main.rs @@ -1,38 +1,53 @@ -#![feature(buf_read_has_data_left)] - -use std::time::Instant; - -use args::Args; -use clap::Parser; -use common::init::CloudInit; +#![feature(extract_if, hash_extract_if, binary_heap_drain_sorted)] +#![warn(clippy::all, clippy::pedantic)] +#![allow(clippy::ref_option)] + +use anyhow::Result; +use application::Controller; +use clap::{ArgAction, Parser}; +use common::{error::FancyError, init::CloudInit}; +use config::Config; use simplelog::info; use storage::Storage; - -use crate::application::Controller; -use crate::config::Config; +use tokio::time::Instant; mod application; -mod args; mod config; mod network; +mod resource; mod storage; +mod task; // Include the build information generated by build.rs include!(concat!(env!("OUT_DIR"), "/build_info.rs")); pub const AUTHORS: [&str; 1] = ["HttpRafa"]; -fn main() { - let args = Args::parse(); - CloudInit::init_logging(args.debug, false, Storage::get_latest_log_file()); - CloudInit::print_ascii_art("Atomic Cloud", &VERSION, &AUTHORS); +#[tokio::main] +async fn main() { + async fn run() -> Result<()> { + let arguments = Arguments::parse(); + CloudInit::init_logging(arguments.debug, false, Storage::latest_log_file()); + CloudInit::print_ascii_art("Atomic Cloud", &VERSION, &AUTHORS); + + let beginning = Instant::now(); + info!("Starting cloud version v{}...", VERSION); + info!("Initializing controller..."); - let start_time = Instant::now(); - info!("Starting cloud version v{}...", VERSION); - info!("Loading configuration..."); + let mut controller = Controller::init(Config::parse().await?).await?; + info!("Loaded cloud in {:.2?}", beginning.elapsed()); + controller.run().await?; + + Ok(()) + } + + if let Err(error) = run().await { + FancyError::print_fancy(&error, true); + } +} - let configuration = Config::new_filled(); - let controller = 
Controller::new(configuration); - info!("Loaded cloud in {:.2?}", start_time.elapsed()); - controller.start(); +#[derive(Parser)] +struct Arguments { + #[clap(short, long, help = "Enable debug mode", action = ArgAction::SetTrue)] + debug: bool, } diff --git a/controller/src/network.rs b/controller/src/network.rs index f44e187a..606a9bcf 100644 --- a/controller/src/network.rs +++ b/controller/src/network.rs @@ -1,97 +1,88 @@ +use std::{net::SocketAddr, sync::Arc}; + use anyhow::Result; -use auth::{AdminInterceptor, UnitInterceptor}; -use simplelog::{error, info}; -use std::sync::Arc; +use auth::AuthInterceptor; +use client::ClientServiceImpl; +use common::error::FancyError; +use manage::ManageServiceImpl; +use proto::{ + client::client_service_server::ClientServiceServer, + manage::manage_service_server::ManageServiceServer, +}; +use simplelog::info; use tokio::{ - sync::watch::{self, Receiver, Sender}, + spawn, + sync::watch::{channel, Receiver, Sender}, task::JoinHandle, }; use tonic::transport::Server; -use crate::application::{Controller, WeakControllerHandle}; -use admin::{proto::admin_service_server::AdminServiceServer, AdminServiceImpl}; -use unit::{proto::unit_service_server::UnitServiceServer, UnitServiceImpl}; +use crate::{ + application::{Shared, TaskSender}, + config::Config, +}; -mod admin; mod auth; -mod stream; -pub mod unit; +pub mod client; +pub mod manage; +mod proto; pub struct NetworkStack { shutdown: Sender, handle: JoinHandle<()>, - controller: WeakControllerHandle, } impl NetworkStack { - pub fn start(controller: Arc) -> Self { - info!("Starting networking stack..."); - - let (sender, receiver) = watch::channel(false); - - return NetworkStack { - shutdown: sender, - handle: controller - .get_runtime() - .as_ref() - .unwrap() - .spawn(launch_server(controller.clone(), receiver)), - controller: Arc::downgrade(&controller), - }; - - async fn launch_server(controller: Arc, shutdown: Receiver) { - if let Err(error) = run(controller, shutdown).await 
{ - error!("Failed to start gRPC server: {}", error); - } - } - - async fn run(controller: Arc, mut shutdown: Receiver) -> Result<()> { - let address = controller - .configuration - .network - .bind - .expect("No bind address found in the config"); - - let admin_service = AdminServiceImpl { - controller: Arc::clone(&controller), - }; - let unit_service = UnitServiceImpl { - controller: Arc::clone(&controller), - }; - - info!("Controller listening on {}", address); + pub fn start(config: &Config, shared: &Arc, queue: &TaskSender) -> Self { + async fn run( + bind: SocketAddr, + shared: Arc, + queue: TaskSender, + mut shutdown: Receiver, + ) -> Result<()> { + let auth_interceptor = AuthInterceptor(shared.clone()); + info!("Controller listening on {}", bind); Server::builder() - .add_service(AdminServiceServer::with_interceptor( - admin_service, - AdminInterceptor { - controller: Arc::clone(&controller), - }, + .add_service(ManageServiceServer::with_interceptor( + ManageServiceImpl(queue.clone(), shared.clone()), + auth_interceptor.clone(), )) - .add_service(UnitServiceServer::with_interceptor( - unit_service, - UnitInterceptor { controller }, + .add_service(ClientServiceServer::with_interceptor( + ClientServiceImpl(queue, shared), + auth_interceptor, )) - .serve_with_shutdown(address, async { + .serve_with_shutdown(bind, async { shutdown.changed().await.ok(); }) .await?; Ok(()) } - } - pub fn shutdown(self) { - self.shutdown - .send(true) - .expect("Failed to send shutdown signal"); - if let Some(controller) = self.controller.upgrade() { - controller - .get_runtime() - .as_ref() - .unwrap() - .block_on(self.handle) - .expect("Failed to shutdown network stack"); + info!("Starting network stack..."); + + let (sender, receiver) = channel(false); + let bind = *config.network_bind(); + let shared = shared.clone(); + let queue = queue.clone(); + + let task = spawn(async move { + if let Err(error) = run(bind, shared, queue, receiver).await { + FancyError::print_fancy(&error, 
false); + } + }); + + Self { + shutdown: sender, + handle: task, } } + + pub async fn shutdown(self) -> Result<()> { + info!("Stopping network stack..."); + let _ = self.shutdown.send(true); // Ignore error if receiver is dropped + self.handle.await?; + Ok(()) + } } diff --git a/controller/src/network/admin.rs b/controller/src/network/admin.rs deleted file mode 100644 index 60fbb92b..00000000 --- a/controller/src/network/admin.rs +++ /dev/null @@ -1,624 +0,0 @@ -use std::{str::FromStr, sync::atomic::Ordering}; - -use proto::{admin_service_server::AdminService, user_management::UserValue}; -use tonic::{async_trait, Request, Response, Status}; -use uuid::Uuid; - -use crate::{ - application::{ - cloudlet::{Capabilities, LifecycleStatus, RemoteController}, - deployment::{ScalingPolicy, StartConstraints}, - unit::{FallbackPolicy, KeyValue, Resources, Retention, Spec}, - user::transfer::TransferTarget, - ControllerHandle, CreationResult, - }, - VERSION, -}; - -#[allow(clippy::all)] -pub mod proto { - use tonic::include_proto; - - include_proto!("admin"); -} - -pub struct AdminServiceImpl { - pub controller: ControllerHandle, -} - -#[async_trait] -impl AdminService for AdminServiceImpl { - async fn request_stop(&self, _request: Request<()>) -> Result, Status> { - self.controller.request_stop(); - Ok(Response::new(())) - } - - async fn set_resource_status( - &self, - request: Request, - ) -> Result, Status> { - let resource = request.into_inner(); - let status = match proto::resource_management::ResourceStatus::try_from(resource.status) { - Ok(proto::resource_management::ResourceStatus::Active) => LifecycleStatus::Active, - Ok(proto::resource_management::ResourceStatus::Inactive) => LifecycleStatus::Inactive, - _ => return Err(Status::invalid_argument("Invalid resource status")), - }; - match proto::resource_management::ResourceCategory::try_from(resource.category) { - Ok(proto::resource_management::ResourceCategory::Cloudlet) => { - let mut handle = 
self.controller.lock_cloudlets_mut(); - let cloudlet = handle - .find_by_name(&resource.id) - .ok_or(Status::not_found("Cloudlet not found"))?; - match handle.set_cloudlet_status(&cloudlet, status) { - Ok(()) => Ok(Response::new(())), - Err(error) => Err(Status::internal(error.to_string())), - } - } - Ok(proto::resource_management::ResourceCategory::Deployment) => { - let mut handle = self.controller.lock_deployments_mut(); - let deployment: std::sync::Arc = handle - .find_by_name(&resource.id) - .ok_or(Status::not_found("Deployment not found"))?; - match handle.set_deployment_status(&deployment, status) { - Ok(()) => Ok(Response::new(())), - Err(error) => Err(Status::internal(error.to_string())), - } - } - Err(_) => Err(Status::not_found("Invalid resource category")), - _ => Err(Status::not_found( - "This action is not possible with this resource category", - )), - } - } - - async fn delete_resource( - &self, - request: Request, - ) -> Result, Status> { - let resource = request.into_inner(); - match proto::resource_management::ResourceCategory::try_from(resource.category) { - Ok(proto::resource_management::ResourceCategory::Cloudlet) => { - let mut handle = self.controller.lock_cloudlets_mut(); - let cloudlet = handle - .find_by_name(&resource.id) - .ok_or(Status::not_found("Cloudlet not found"))?; - match handle.delete_cloudlet(&cloudlet) { - Ok(()) => Ok(Response::new(())), - Err(error) => Err(Status::internal(error.to_string())), - } - } - Ok(proto::resource_management::ResourceCategory::Deployment) => { - let mut handle = self.controller.lock_deployments_mut(); - let deployment: std::sync::Arc = handle - .find_by_name(&resource.id) - .ok_or(Status::not_found("Deployment not found"))?; - match handle.delete_deployment(&deployment) { - Ok(()) => Ok(Response::new(())), - Err(error) => Err(Status::internal(error.to_string())), - } - } - Ok(proto::resource_management::ResourceCategory::Unit) => { - let uuid = Uuid::from_str(&resource.id).map_err(|error| { - 
Status::invalid_argument(format!("Failed to parse UUID of the unit: {}", error)) - })?; - let units = self.controller.get_units(); - let unit = units - .get_unit(uuid) - .ok_or(Status::not_found("Unit not found"))?; - units.checked_unit_stop(&unit); - Ok(Response::new(())) - } - Err(_) => Err(Status::not_found("Invalid resource category")), - } - } - - async fn get_drivers( - &self, - _request: Request<()>, - ) -> Result, Status> { - let drivers = self - .controller - .get_drivers() - .get_drivers() - .iter() - .map(|driver| driver.name().clone()) - .collect(); - - Ok(Response::new( - proto::driver_management::DriverListResponse { drivers }, - )) - } - - async fn create_cloudlet( - &self, - request: Request, - ) -> Result, Status> { - let cloudlet = request.into_inner(); - let name = &cloudlet.name; - let driver = &cloudlet.driver; - - let capabilities = Capabilities { - memory: cloudlet.memory, - max_allocations: cloudlet.max_allocations, - child: cloudlet.child, - }; - - let controller = RemoteController { - address: cloudlet.controller_address.parse().map_err(|_| { - Status::invalid_argument("The controller address is not a valid URL") - })?, - }; - - let driver = match self.controller.drivers.find_by_name(driver) { - Some(driver) => driver, - None => return Err(Status::invalid_argument("The driver does not exist")), - }; - - let mut cloudlets = self.controller.lock_cloudlets_mut(); - match cloudlets.create_cloudlet(name, driver, capabilities, controller) { - Ok(result) => match result { - CreationResult::Created => Ok(Response::new(())), - CreationResult::AlreadyExists => { - Err(Status::already_exists("Cloudlet already exists")) - } - CreationResult::Denied(error) => { - Err(Status::failed_precondition(error.to_string())) - } - }, - Err(error) => Err(Status::internal(error.to_string())), - } - } - - async fn get_cloudlet( - &self, - request: Request, - ) -> Result, Status> { - let handle = self.controller.lock_cloudlets(); - let cloudlet = handle - 
.find_by_name(&request.into_inner()) - .ok_or(Status::not_found("Cloudlet not found"))?; - - Ok(Response::new(proto::cloudlet_management::CloudletValue { - name: cloudlet.name.to_owned(), - driver: cloudlet.driver.name().to_owned(), - memory: cloudlet.capabilities.memory, - max_allocations: cloudlet.capabilities.max_allocations, - child: cloudlet.capabilities.child.clone(), - controller_address: cloudlet.controller.address.to_string(), - })) - } - - async fn get_cloudlets( - &self, - _request: Request<()>, - ) -> Result, Status> { - let handle = self.controller.lock_cloudlets(); - let mut cloudlets = Vec::with_capacity(handle.get_amount()); - for cloudlet in handle.get_cloudlets() { - cloudlets.push(cloudlet.name.clone()); - } - - Ok(Response::new( - proto::cloudlet_management::CloudletListResponse { cloudlets }, - )) - } - - async fn create_deployment( - &self, - request: Request, - ) -> Result, Status> { - let deployment = request.into_inner(); - let name = &deployment.name; - - /* Constraints */ - let constraints = match &deployment.constraints { - Some(constraints) => StartConstraints { - minimum: constraints.minimum, - maximum: constraints.maximum, - priority: constraints.priority, - }, - None => StartConstraints::default(), - }; - - /* Scaling */ - let scaling = match &deployment.scaling { - Some(scaling) => ScalingPolicy { - enabled: true, - start_threshold: scaling.start_threshold, - stop_empty_units: scaling.stop_empty_units, - }, - None => ScalingPolicy::default(), - }; - - /* Resources */ - let resources = match &deployment.resources { - Some(resources) => Resources { - memory: resources.memory, - swap: resources.swap, - cpu: resources.cpu, - io: resources.io, - disk: resources.disk, - addresses: resources.addresses, - }, - None => Resources::default(), - }; - - /* Spec */ - let mut spec = Spec::default(); - if let Some(value) = deployment.spec { - spec.image.clone_from(&value.image); - spec.max_players = value.max_players; - spec.settings = value - 
.settings - .iter() - .map(|setting| KeyValue { - key: setting.key.clone(), - value: setting.value.clone(), - }) - .collect(); - spec.environment = value - .environment - .iter() - .map(|setting| KeyValue { - key: setting.key.clone(), - value: setting.value.clone(), - }) - .collect(); - if let Some(value) = value.disk_retention { - spec.disk_retention = - match proto::unit_management::unit_spec::Retention::try_from(value) { - Ok(proto::unit_management::unit_spec::Retention::Permanent) => { - Retention::Permanent - } - _ => Retention::Temporary, - }; - } - if let Some(value) = value.fallback { - spec.fallback = FallbackPolicy { - enabled: value.enabled, - priority: value.priority, - }; - } - } - - /* Cloudlets */ - let mut cloudlet_handles = Vec::with_capacity(deployment.cloudlets.len()); - for cloudlet in &deployment.cloudlets { - let cloudlet = match self.controller.lock_cloudlets().find_by_name(cloudlet) { - Some(cloudlet) => cloudlet, - None => { - return Err(Status::invalid_argument(format!( - "Cloudlet {} does not exist", - cloudlet - ))) - } - }; - cloudlet_handles.push(cloudlet); - } - - let mut deployments = self.controller.lock_deployments_mut(); - match deployments.create_deployment( - name, - cloudlet_handles, - constraints, - scaling, - resources, - spec, - ) { - Ok(result) => match result { - CreationResult::Created => Ok(Response::new(())), - CreationResult::AlreadyExists => { - Err(Status::already_exists("Deployment already exists")) - } - CreationResult::Denied(error) => { - Err(Status::failed_precondition(error.to_string())) - } - }, - Err(error) => Err(Status::internal(error.to_string())), - } - } - - async fn get_deployment( - &self, - request: Request, - ) -> Result, Status> { - let handle = self.controller.lock_deployments(); - let deployment = handle - .find_by_name(&request.into_inner()) - .ok_or(Status::not_found("Deployment not found"))?; - let cloudlets = deployment - .cloudlets - .read() - .unwrap() - .iter() - .filter_map(|cloudlet| 
cloudlet.upgrade().map(|cloudlet| cloudlet.name.clone())) - .collect(); - - Ok(Response::new( - proto::deployment_management::DeploymentValue { - name: deployment.name.to_owned(), - cloudlets, - constraints: Some( - proto::deployment_management::deployment_value::Constraints { - minimum: deployment.constraints.minimum, - maximum: deployment.constraints.maximum, - priority: deployment.constraints.priority, - }, - ), - scaling: Some(proto::deployment_management::deployment_value::Scaling { - start_threshold: deployment.scaling.start_threshold, - stop_empty_units: deployment.scaling.stop_empty_units, - }), - resources: Some(proto::unit_management::UnitResources { - memory: deployment.resources.memory, - swap: deployment.resources.swap, - cpu: deployment.resources.cpu, - io: deployment.resources.io, - disk: deployment.resources.disk, - addresses: deployment.resources.addresses, - }), - spec: Some(proto::unit_management::UnitSpec { - image: deployment.spec.image.clone(), - max_players: deployment.spec.max_players, - settings: deployment - .spec - .settings - .iter() - .map(|setting| proto::common::KeyValue { - key: setting.key.clone(), - value: setting.value.clone(), - }) - .collect(), - environment: deployment - .spec - .environment - .iter() - .map(|setting| proto::common::KeyValue { - key: setting.key.clone(), - value: setting.value.clone(), - }) - .collect(), - disk_retention: Some(deployment.spec.disk_retention.clone() as i32), - fallback: Some(proto::unit_management::unit_spec::Fallback { - enabled: deployment.spec.fallback.enabled, - priority: deployment.spec.fallback.priority, - }), - }), - }, - )) - } - - async fn get_deployments( - &self, - _request: Request<()>, - ) -> Result, Status> { - let handle = self.controller.lock_deployments(); - let mut deployments = Vec::with_capacity(handle.get_amount()); - for name in handle.get_deployments().keys() { - deployments.push(name.clone()); - } - - Ok(Response::new( - 
proto::deployment_management::DeploymentListResponse { deployments }, - )) - } - - async fn get_unit( - &self, - request: Request, - ) -> Result, Status> { - let unit_uuid = Uuid::from_str(&request.into_inner()) - .map_err(|e| Status::invalid_argument(format!("Invalid unit UUID: {}", e)))?; - - let unit = self - .controller - .get_units() - .get_unit(unit_uuid) - .ok_or_else(|| Status::not_found("Unit not found"))?; - - let cloudlet = unit - .cloudlet - .upgrade() - .ok_or_else(|| Status::internal("Cloudlet is no longer usable"))?; - - let state = (unit - .state - .read() - .map_err(|_| Status::internal("Failed to lock unit state"))?) - .clone() as i32; - - Ok(Response::new(proto::unit_management::UnitValue { - name: unit.name.clone(), - uuid: unit.uuid.to_string(), - deployment: unit - .deployment - .as_ref() - .and_then(|g| g.deployment.upgrade().map(|grp| grp.name.clone())), - cloudlet: cloudlet.name.clone(), - connected_users: unit.connected_users.load(Ordering::Relaxed), - rediness: unit.rediness.load(Ordering::Relaxed), - auth_token: unit.auth.token.clone(), - allocation: Some(proto::unit_management::UnitAllocation { - addresses: unit - .allocation - .addresses - .iter() - .map(|addr| proto::common::Address { - host: addr.host.clone(), - port: addr.port as u32, - }) - .collect(), - resources: Some(proto::unit_management::UnitResources { - memory: unit.allocation.resources.memory, - swap: unit.allocation.resources.swap, - cpu: unit.allocation.resources.cpu, - io: unit.allocation.resources.io, - disk: unit.allocation.resources.disk, - addresses: unit.allocation.resources.addresses, - }), - spec: Some(proto::unit_management::UnitSpec { - image: unit.allocation.spec.image.clone(), - max_players: unit.allocation.spec.max_players, - settings: unit - .allocation - .spec - .settings - .iter() - .map(|kv| proto::common::KeyValue { - key: kv.key.clone(), - value: kv.value.clone(), - }) - .collect(), - environment: unit - .allocation - .spec - .environment - .iter() - 
.map(|kv| proto::common::KeyValue { - key: kv.key.clone(), - value: kv.value.clone(), - }) - .collect(), - disk_retention: Some(unit.allocation.spec.disk_retention.clone() as i32), - fallback: Some(proto::unit_management::unit_spec::Fallback { - enabled: unit.allocation.spec.fallback.enabled, - priority: unit.allocation.spec.fallback.priority, - }), - }), - }), - state, - })) - } - - async fn get_units( - &self, - _request: Request<()>, - ) -> Result, Status> { - let units = self - .controller - .get_units() - .get_units() - .values() - .filter_map(|unit| { - unit.cloudlet - .upgrade() - .map(|cloudlet| proto::unit_management::SimpleUnitValue { - name: unit.name.to_string(), - uuid: unit.uuid.to_string(), - deployment: unit - .deployment - .as_ref() - .and_then(|d| d.deployment.upgrade().map(|d| d.name.to_string())), - cloudlet: cloudlet.name.to_string(), - }) - }) - .collect(); - - Ok(Response::new(proto::unit_management::UnitListResponse { - units, - })) - } - - async fn get_users( - &self, - _request: Request<()>, - ) -> Result, Status> { - let users = self - .controller - .get_users() - .get_users() - .iter() - .map(|user| UserValue { - name: user.name.to_string(), - uuid: user.uuid.to_string(), - }) - .collect(); - - Ok(Response::new(proto::user_management::UserListResponse { - users, - })) - } - - async fn transfer_users( - &self, - request: Request, - ) -> Result, Status> { - let transfer = request.into_inner(); - let target = transfer - .target - .ok_or_else(|| Status::invalid_argument("Target must be provided"))?; - - let target = - match proto::transfer_management::transfer_target_value::TargetType::try_from( - target.target_type, - ) { - Ok(proto::transfer_management::transfer_target_value::TargetType::Deployment) => { - TransferTarget::Deployment( - self.controller - .lock_deployments() - .find_by_name(&target.target.ok_or_else(|| { - Status::invalid_argument("Target must be provided") - })?) 
- .ok_or_else(|| Status::not_found("Deployment does not exist"))?, - ) - } - Ok(proto::transfer_management::transfer_target_value::TargetType::Unit) => { - TransferTarget::Unit( - self.controller - .get_units() - .get_unit( - Uuid::from_str(&target.target.ok_or_else(|| { - Status::invalid_argument("Target must be provided") - })?) - .map_err(|error| { - Status::invalid_argument(format!( - "Failed to parse target UUID: {}", - error - )) - })?, - ) - .ok_or_else(|| Status::not_found("Unit does not exist"))?, - ) - } - Ok(proto::transfer_management::transfer_target_value::TargetType::Fallback) => { - TransferTarget::Fallback - } - Err(error) => return Err(Status::invalid_argument(error.to_string())), - }; - - let mut count = 0; - for user_uuid in &transfer.user_uuids { - let user_uuid = Uuid::from_str(user_uuid).map_err(|error| { - Status::invalid_argument(format!("Failed to parse user UUID: {}", error)) - })?; - - let user = self - .controller - .get_users() - .get_user(user_uuid) - .ok_or_else(|| Status::not_found("User is not connected to this controller"))?; - - let transfer = self - .controller - .get_users() - .resolve_transfer(&user, &target) - .ok_or_else(|| Status::not_found("Failed to resolve transfer"))?; - - if self.controller.get_users().transfer_user(transfer) { - count += 1; - } - } - Ok(Response::new(count)) - } - - async fn get_protocol_version(&self, _request: Request<()>) -> Result, Status> { - Ok(Response::new(VERSION.protocol)) - } - - async fn get_controller_version( - &self, - _request: Request<()>, - ) -> Result, Status> { - Ok(Response::new(VERSION.to_string())) - } -} diff --git a/controller/src/network/auth.rs b/controller/src/network/auth.rs index 664f035c..45d3b64e 100644 --- a/controller/src/network/auth.rs +++ b/controller/src/network/auth.rs @@ -1,53 +1,28 @@ +use futures::executor::block_on; use std::sync::Arc; - use tonic::{service::Interceptor, Request, Status}; -use crate::application::Controller; +use crate::application::Shared; 
#[derive(Clone)] -pub struct AdminInterceptor { - pub controller: Arc, -} - -impl Interceptor for AdminInterceptor { - fn call(&mut self, mut request: Request<()>) -> Result, Status> { - let metadata = request.metadata(); - let token = metadata.get("authorization").and_then(|t| t.to_str().ok()); - match token { - Some(token) => { - let user = self.controller.get_auth().get_user(token); - if let Some(user) = user { - request.extensions_mut().insert(user); - Ok(request) - } else { - Err(Status::unauthenticated("Invalid user token")) - } - } - None => Err(Status::unauthenticated("No user token provided")), - } - } -} - -#[derive(Clone)] -pub struct UnitInterceptor { - pub controller: Arc, -} +pub struct AuthInterceptor(pub Arc); -impl Interceptor for UnitInterceptor { +impl Interceptor for AuthInterceptor { fn call(&mut self, mut request: Request<()>) -> Result, Status> { let metadata = request.metadata(); let token = metadata.get("authorization").and_then(|t| t.to_str().ok()); - match token { - Some(token) => { - let unit = self.controller.get_auth().get_unit(token); - if let Some(unit) = unit { - request.extensions_mut().insert(unit); + if let Some(token) = token { + match block_on(self.0.auth.has_access(token)) { + Some(auth) => { + request.extensions_mut().insert(auth); Ok(request) - } else { - Err(Status::unauthenticated("Invalid unit token")) } + _ => Err(Status::unauthenticated( + "Invalid authorization token provided", + )), } - None => Err(Status::unauthenticated("No unit token provided")), + } else { + Err(Status::unauthenticated("No authorization token provided")) } } } diff --git a/controller/src/network/client.rs b/controller/src/network/client.rs new file mode 100644 index 00000000..e366444d --- /dev/null +++ b/controller/src/network/client.rs @@ -0,0 +1,246 @@ +use std::{str::FromStr, sync::Arc}; + +use anyhow::Result; +use beat::BeatTask; +use group::GetGroupsTask; +use health::{RequestStopTask, SetRunningTask}; +use ready::SetReadyTask; +use 
server::GetServersTask; +use tokio_stream::wrappers::ReceiverStream; +use tonic::{async_trait, Request, Response, Status}; +use user::{UserConnectedTask, UserDisconnectedTask}; +use uuid::Uuid; + +use crate::{ + application::{ + auth::AuthType, server::NameAndUuid, user::transfer::TransferTarget, Shared, TaskSender, + }, + task::Task, + VERSION, +}; + +use super::{ + manage::transfer::TransferUsersTask, + proto::client::{ + self, + channel::Msg, + client_service_server::ClientService, + transfer::{target::Type, TransferReq, TransferRes}, + user::{ConnectedReq, DisconnectedReq}, + }, +}; + +mod beat; +mod group; +mod health; +mod ready; +mod server; +mod user; + +pub type TransferMsg = TransferRes; +pub type ChannelMsg = Msg; + +pub struct ClientServiceImpl(pub TaskSender, pub Arc); + +#[async_trait] +impl ClientService for ClientServiceImpl { + type SubscribeToTransfersStream = ReceiverStream>; + type SubscribeToChannelStream = ReceiverStream>; + + // Heartbeat + async fn beat(&self, request: Request<()>) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::Server, &self.0, request, |_, auth| { + Ok(Box::new(BeatTask(auth))) + }) + .await?, + )) + } + + // Ready state + async fn set_ready(&self, request: Request) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::Server, &self.0, request, |request, auth| { + Ok(Box::new(SetReadyTask(auth, *request.get_ref()))) + }) + .await?, + )) + } + + // Health + async fn set_running(&self, request: Request<()>) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::Server, &self.0, request, |_, auth| { + Ok(Box::new(SetRunningTask(auth))) + }) + .await?, + )) + } + async fn request_stop(&self, request: Request<()>) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::Server, &self.0, request, |_, auth| { + Ok(Box::new(RequestStopTask(auth))) + }) + .await?, + )) + } + + // User + async fn user_connected(&self, request: 
Request) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::Server, &self.0, request, |request, auth| { + let request = request.into_inner(); + + let name = request.name; + let Ok(uuid) = Uuid::from_str(&request.id) else { + return Err(Status::invalid_argument("Invalid UUID provided")); + }; + + Ok(Box::new(UserConnectedTask( + auth, + NameAndUuid::new(name, uuid), + ))) + }) + .await?, + )) + } + async fn user_disconnected( + &self, + request: Request, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::Server, &self.0, request, |request, auth| { + let request = request.into_inner(); + + let Ok(uuid) = Uuid::from_str(&request.id) else { + return Err(Status::invalid_argument("Invalid UUID provided")); + }; + + Ok(Box::new(UserDisconnectedTask(auth, uuid))) + }) + .await?, + )) + } + + // Transfer + async fn transfer_users(&self, request: Request) -> Result, Status> { + Ok(Response::new( + Task::execute::(AuthType::Server, &self.0, request, |request, auth| { + let request = request.into_inner(); + + let target = match request.target { + Some(target) => match Type::try_from(target.r#type) { + Ok(r#type) => match (target.target, r#type) { + (Some(target), Type::Group) => TransferTarget::Group(target), + (Some(target), Type::Server) => { + TransferTarget::Server(match Uuid::from_str(&target) { + Ok(uuid) => uuid, + Err(_) => { + return Err(Status::invalid_argument( + "Invalid UUID provided", + )) + } + }) + } + (None, Type::Fallback) => TransferTarget::Fallback, + _ => { + return Err(Status::invalid_argument( + "Invalid target type combination", + )) + } + }, + Err(_) => { + return Err(Status::invalid_argument("Invalid target type provided")) + } + }, + None => return Err(Status::invalid_argument("Missing target")), + }; + let uuids = request + .ids + .into_iter() + .map(|id| match Uuid::from_str(&id) { + Ok(uuid) => Ok(uuid), + Err(_) => Err(Status::invalid_argument("Invalid UUID provided")), + }) + 
.collect::, _>>()?; + + Ok(Box::new(TransferUsersTask(auth, uuids, target))) + }) + .await?, + )) + } + async fn subscribe_to_transfers( + &self, + request: Request<()>, + ) -> Result, Status> { + let auth = Task::get_auth(AuthType::Server, &request)?; + let server = auth + .get_server() + .expect("Should be ok. Because type is checked in get_auth"); + + Ok(Response::new( + self.1.subscribers.subscribe_transfer(*server.uuid()).await, + )) + } + + // Channel + async fn publish_message(&self, request: Request) -> Result, Status> { + Ok(Response::new( + self.1 + .subscribers + .publish_channel(request.into_inner()) + .await, + )) + } + async fn subscribe_to_channel( + &self, + request: Request, + ) -> Result, Status> { + Ok(Response::new( + self.1 + .subscribers + .subscribe_channel(request.into_inner()) + .await, + )) + } + + // Server + async fn get_servers( + &self, + request: Request<()>, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::( + AuthType::Server, + &self.0, + request, + |_, _| Ok(Box::new(GetServersTask())), + ) + .await?, + )) + } + + // Group + async fn get_groups( + &self, + request: Request<()>, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::( + AuthType::Server, + &self.0, + request, + |_, _| Ok(Box::new(GetGroupsTask())), + ) + .await?, + )) + } + + // Version info + async fn get_proto_ver(&self, _request: Request<()>) -> Result, Status> { + Ok(Response::new(VERSION.protocol)) + } + async fn get_ctrl_ver(&self, _request: Request<()>) -> Result, Status> { + Ok(Response::new(format!("{VERSION}"))) + } +} diff --git a/controller/src/network/client/beat.rs b/controller/src/network/client/beat.rs new file mode 100644 index 00000000..1ff0339b --- /dev/null +++ b/controller/src/network/client/beat.rs @@ -0,0 +1,24 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::{auth::Authorization, Controller}, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct BeatTask(pub Authorization); + 
+#[async_trait] +impl GenericTask for BeatTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(server) = self + .0 + .get_server() + .and_then(|server| controller.servers.get_server_mut(server.uuid())) + else { + return Task::new_link_error(); + }; + server.heart_mut().beat(); + Task::new_empty() + } +} diff --git a/controller/src/network/client/group.rs b/controller/src/network/client/group.rs new file mode 100644 index 00000000..d49f45cf --- /dev/null +++ b/controller/src/network/client/group.rs @@ -0,0 +1,24 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::Controller, + network::proto::client::group::List, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct GetGroupsTask(); + +#[async_trait] +impl GenericTask for GetGroupsTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + Task::new_ok(List { + groups: controller + .groups + .get_groups() + .iter() + .map(|group| group.name().clone()) + .collect(), + }) + } +} diff --git a/controller/src/network/client/health.rs b/controller/src/network/client/health.rs new file mode 100644 index 00000000..65d1b733 --- /dev/null +++ b/controller/src/network/client/health.rs @@ -0,0 +1,46 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::{ + auth::Authorization, + server::{manager::StopRequest, State}, + Controller, + }, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct SetRunningTask(pub Authorization); +pub struct RequestStopTask(pub Authorization); + +#[async_trait] +impl GenericTask for SetRunningTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(server) = self + .0 + .get_server() + .and_then(|server| controller.servers.get_server_mut(server.uuid())) + else { + return Task::new_link_error(); + }; + server.set_state(State::Running); + Task::new_empty() + } +} + +#[async_trait] +impl GenericTask for RequestStopTask { + async fn run(&mut self, controller: &mut 
Controller) -> Result { + let Some(server) = self + .0 + .get_server() + .and_then(|server| controller.servers.resolve_server(server.uuid())) + else { + return Task::new_link_error(); + }; + controller + .servers + .schedule_stop(StopRequest::new(None, server)); + Task::new_empty() + } +} diff --git a/controller/src/network/client/ready.rs b/controller/src/network/client/ready.rs new file mode 100644 index 00000000..601fb67a --- /dev/null +++ b/controller/src/network/client/ready.rs @@ -0,0 +1,24 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::{auth::Authorization, Controller}, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct SetReadyTask(pub Authorization, pub bool); + +#[async_trait] +impl GenericTask for SetReadyTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(server) = self + .0 + .get_server() + .and_then(|server| controller.servers.get_server_mut(server.uuid())) + else { + return Task::new_link_error(); + }; + server.set_ready(self.1); + Task::new_empty() + } +} diff --git a/controller/src/network/client/server.rs b/controller/src/network/client/server.rs new file mode 100644 index 00000000..ad82a204 --- /dev/null +++ b/controller/src/network/client/server.rs @@ -0,0 +1,34 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::{server::Server, Controller}, + network::proto::client::server::{List, Short}, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct GetServersTask(); + +#[async_trait] +impl GenericTask for GetServersTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + Task::new_ok(List { + servers: controller + .servers + .get_servers() + .iter() + .map(std::convert::Into::into) + .collect(), + }) + } +} + +impl From<&&Server> for Short { + fn from(server: &&Server) -> Self { + Self { + id: server.id().uuid().to_string(), + name: server.id().name().clone(), + group: server.group().clone(), + } + } +} diff --git 
a/controller/src/network/client/user.rs b/controller/src/network/client/user.rs new file mode 100644 index 00000000..9c7b9a28 --- /dev/null +++ b/controller/src/network/client/user.rs @@ -0,0 +1,47 @@ +use anyhow::Result; +use tonic::async_trait; +use uuid::Uuid; + +use crate::{ + application::{ + auth::{ActionResult, Authorization}, + server::NameAndUuid, + Controller, + }, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct UserConnectedTask(pub Authorization, pub NameAndUuid); +pub struct UserDisconnectedTask(pub Authorization, pub Uuid); + +#[async_trait] +impl GenericTask for UserConnectedTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(server) = self + .0 + .get_server() + .and_then(|server| controller.servers.get_server_mut(server.uuid())) + else { + return Task::new_link_error(); + }; + controller.users.user_connected(server, self.1.clone()); + Task::new_empty() + } +} + +#[async_trait] +impl GenericTask for UserDisconnectedTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(server) = self + .0 + .get_server() + .and_then(|server| controller.servers.get_server_mut(server.uuid())) + else { + return Task::new_link_error(); + }; + if controller.users.user_disconnected(server, &self.1) == ActionResult::Denied { + return Task::new_permission_error("You are not allowed to disconnect this user"); + } + Task::new_empty() + } +} diff --git a/controller/src/network/manage.rs b/controller/src/network/manage.rs new file mode 100644 index 00000000..2b61c1fa --- /dev/null +++ b/controller/src/network/manage.rs @@ -0,0 +1,427 @@ +use std::{str::FromStr, sync::Arc}; + +use anyhow::Result; +use group::{CreateGroupTask, GetGroupTask, GetGroupsTask}; +use node::{CreateNodeTask, GetNodeTask, GetNodesTask}; +use plugin::GetPluginsTask; +use power::RequestStopTask; +use resource::{DeleteResourceTask, SetResourceTask}; +use server::{GetServerTask, GetServersTask}; +use 
tokio_stream::wrappers::ReceiverStream; +use tonic::{async_trait, Request, Response, Status}; +use transfer::TransferUsersTask; +use user::GetUsersTask; +use uuid::Uuid; + +use crate::{ + application::{ + auth::AuthType, + group::{ScalingPolicy, StartConstraints}, + node::Capabilities, + server::{DiskRetention, FallbackPolicy, Resources, Spec}, + user::transfer::TransferTarget, + Shared, TaskSender, + }, + task::Task, + VERSION, +}; + +use super::proto::manage::{ + self, + manage_service_server::ManageService, + resource::{Category, DelReq, SetReq}, + screen::{Lines, WriteReq}, + transfer::{target::Type, TransferReq}, +}; + +mod group; +mod node; +mod plugin; +mod power; +mod resource; +mod server; +pub mod transfer; +mod user; + +pub type ScreenLines = Lines; + +pub struct ManageServiceImpl(pub TaskSender, pub Arc); + +#[async_trait] +impl ManageService for ManageServiceImpl { + type SubscribeToScreenStream = ReceiverStream>; + + // Power + async fn request_stop(&self, request: Request<()>) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::User, &self.0, request, |_, _| { + Ok(Box::new(RequestStopTask())) + }) + .await?, + )) + } + + // Resource + async fn set_resource(&self, request: Request) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::User, &self.0, request, |request, _| { + let request = request.into_inner(); + + let Ok(category) = Category::try_from(request.category) else { + return Err(Status::invalid_argument("Invalid category provided")); + }; + + Ok(Box::new(SetResourceTask( + category, + request.id, + request.active, + ))) + }) + .await?, + )) + } + async fn delete_resource(&self, request: Request) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::User, &self.0, request, |request, _| { + let request = request.into_inner(); + + let Ok(category) = Category::try_from(request.category) else { + return Err(Status::invalid_argument("Invalid category provided")); + }; + 
+ Ok(Box::new(DeleteResourceTask(category, request.id))) + }) + .await?, + )) + } + + // Plugin + async fn get_plugins( + &self, + request: Request<()>, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::( + AuthType::User, + &self.0, + request, + |_, _| Ok(Box::new(GetPluginsTask())), + ) + .await?, + )) + } + + // Node + async fn create_node( + &self, + request: Request, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::User, &self.0, request, |request, _| { + let request = request.into_inner(); + + let capabilities = Capabilities::new(request.memory, request.max, request.child); + let controller = request + .ctrl_addr + .parse() + .map_err(|_| Status::invalid_argument("Invalid controller address provided"))?; + let plugin = request.plugin; + + Ok(Box::new(CreateNodeTask( + request.name, + plugin, + capabilities, + controller, + ))) + }) + .await?, + )) + } + async fn get_node( + &self, + request: Request, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::( + AuthType::User, + &self.0, + request, + |request, _| { + let request = request.into_inner(); + + Ok(Box::new(GetNodeTask(request))) + }, + ) + .await?, + )) + } + async fn get_nodes( + &self, + request: Request<()>, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::(AuthType::User, &self.0, request, |_, _| { + Ok(Box::new(GetNodesTask())) + }) + .await?, + )) + } + + // Group + async fn create_group( + &self, + request: Request, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::<(), _, _>(AuthType::User, &self.0, request, |request, _| { + let request = request.into_inner(); + + let constraints = match request.constraints { + Some(constrains) => { + StartConstraints::new(constrains.min, constrains.max, constrains.prio) + } + None => return Err(Status::invalid_argument("No constraints provided")), + }; + + let scaling = match request.scaling { + Some(scaling) => { + ScalingPolicy::new(true, scaling.start_threshold, 
scaling.stop_empty) + } + None => ScalingPolicy::default(), + }; + + let resources = match request.resources { + Some(resources) => Resources::new( + resources.memory, + resources.swap, + resources.cpu, + resources.io, + resources.disk, + resources.ports, + ), + None => return Err(Status::invalid_argument("No resources provided")), + }; + + let spec = match request.spec { + Some(spec) => { + let image = spec.img; + let max_players = spec.max_players; + let settings = spec + .settings + .iter() + .map(|key_value| (key_value.key.clone(), key_value.value.clone())) + .collect(); + let environment = spec + .env + .iter() + .map(|key_value| (key_value.key.clone(), key_value.value.clone())) + .collect(); + let disk_retention = if let Some(retention) = spec.retention { + match manage::server::DiskRetention::try_from(retention) { + Ok(manage::server::DiskRetention::Permanent) => { + DiskRetention::Permanent + } + Ok(manage::server::DiskRetention::Temporary) => { + DiskRetention::Temporary + } + Err(_) => { + return Err(Status::invalid_argument( + "Invalid disk retention provided", + )) + } + } + } else { + DiskRetention::Temporary + }; + let fallback = if let Some(fallback) = spec.fallback { + FallbackPolicy::new(true, fallback.prio) + } else { + FallbackPolicy::default() + }; + Spec::new( + settings, + environment, + disk_retention, + image, + max_players, + fallback, + ) + } + None => return Err(Status::invalid_argument("No spec provided")), + }; + + let nodes = request.nodes; + + Ok(Box::new(CreateGroupTask( + request.name, + constraints, + scaling, + resources, + spec, + nodes, + ))) + }) + .await?, + )) + } + async fn get_group( + &self, + request: Request, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::( + AuthType::User, + &self.0, + request, + |request, _| { + let request = request.into_inner(); + + Ok(Box::new(GetGroupTask(request))) + }, + ) + .await?, + )) + } + async fn get_groups( + &self, + request: Request<()>, + ) -> Result, Status> { + 
Ok(Response::new( + Task::execute::(AuthType::User, &self.0, request, |_, _| { + Ok(Box::new(GetGroupsTask())) + }) + .await?, + )) + } + + // Server + async fn get_server( + &self, + request: Request, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::( + AuthType::User, + &self.0, + request, + |request, _| { + let request = request.into_inner(); + + let Ok(uuid) = Uuid::parse_str(&request) else { + return Err(Status::invalid_argument("Invalid UUID provided")); + }; + + Ok(Box::new(GetServerTask(uuid))) + }, + ) + .await?, + )) + } + async fn get_servers( + &self, + request: Request<()>, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::( + AuthType::User, + &self.0, + request, + |_, _| Ok(Box::new(GetServersTask())), + ) + .await?, + )) + } + + // Screen + async fn write_to_screen(&self, request: Request) -> Result, Status> { + let request = request.into_inner(); + let Ok(uuid) = Uuid::from_str(&request.id) else { + return Err(Status::invalid_argument("Invalid UUID provided")); + }; + + match self.1.screens.write(&uuid, &request.data).await?.await { + Ok(Err(error)) => Err(error.into()), + Err(error) => Err(Status::internal(error.to_string())), + Ok(_) => Ok(Response::new(())), + } + } + async fn subscribe_to_screen( + &self, + request: Request, + ) -> Result, Status> { + let Ok(uuid) = Uuid::from_str(&request.into_inner()) else { + return Err(Status::invalid_argument("Invalid UUID provided")); + }; + + Ok(Response::new(self.1.screens.subscribe_screen(&uuid).await?)) + } + + // User + async fn get_users( + &self, + request: Request<()>, + ) -> Result, Status> { + Ok(Response::new( + Task::execute::(AuthType::User, &self.0, request, |_, _| { + Ok(Box::new(GetUsersTask())) + }) + .await?, + )) + } + + // Transfer + async fn transfer_users(&self, request: Request) -> Result, Status> { + Ok(Response::new( + Task::execute::(AuthType::User, &self.0, request, |request, auth| { + let request = request.into_inner(); + + let target = match 
request.target { + Some(target) => match Type::try_from(target.r#type) { + Ok(r#type) => match (target.target, r#type) { + (Some(target), Type::Group) => TransferTarget::Group(target), + (Some(target), Type::Server) => { + TransferTarget::Server(match Uuid::from_str(&target) { + Ok(uuid) => uuid, + Err(_) => { + return Err(Status::invalid_argument( + "Invalid UUID provided", + )) + } + }) + } + (None, Type::Fallback) => TransferTarget::Fallback, + _ => { + return Err(Status::invalid_argument( + "Invalid target type combination", + )) + } + }, + Err(_) => { + return Err(Status::invalid_argument("Invalid target type provided")) + } + }, + None => return Err(Status::invalid_argument("Missing target")), + }; + let uuids = request + .ids + .into_iter() + .map(|id| match Uuid::from_str(&id) { + Ok(uuid) => Ok(uuid), + Err(_) => Err(Status::invalid_argument("Invalid UUID provided")), + }) + .collect::, _>>()?; + + Ok(Box::new(TransferUsersTask(auth, uuids, target))) + }) + .await?, + )) + } + + // Version info + async fn get_proto_ver(&self, _request: Request<()>) -> Result, Status> { + Ok(Response::new(VERSION.protocol)) + } + async fn get_ctrl_ver(&self, _request: Request<()>) -> Result, Status> { + Ok(Response::new(format!("{VERSION}"))) + } +} diff --git a/controller/src/network/manage/group.rs b/controller/src/network/manage/group.rs new file mode 100644 index 00000000..cc6da91e --- /dev/null +++ b/controller/src/network/manage/group.rs @@ -0,0 +1,164 @@ +use anyhow::Result; +use tonic::{async_trait, Status}; + +use crate::{ + application::{ + group::{Group, ScalingPolicy, StartConstraints}, + server::{FallbackPolicy, Resources, Spec}, + Controller, + }, + network::proto::{ + common::KeyValue, + manage::{ + group::{Constraints, Item, List, Scaling}, + server::{self, Fallback}, + }, + }, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct CreateGroupTask( + pub String, + pub StartConstraints, + pub ScalingPolicy, + pub Resources, + pub Spec, + pub Vec, +); +pub 
struct GetGroupTask(pub String); +pub struct GetGroupsTask(); + +#[async_trait] +impl GenericTask for CreateGroupTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + if let Err(error) = controller + .groups + .create_group( + &self.0, + &self.1, + &self.2, + &self.3, + &self.4, + &self.5, + &controller.nodes, + ) + .await + { + return Task::new_err(error.into()); + } + Task::new_empty() + } +} + +#[async_trait] +impl GenericTask for GetGroupTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(group) = controller.groups.get_group(&self.0) else { + return Task::new_err(Status::not_found("Group not found")); + }; + + Task::new_ok(Item::from(group)) + } +} + +#[async_trait] +impl GenericTask for GetGroupsTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + Task::new_ok(List { + groups: controller + .groups + .get_groups() + .iter() + .map(|group| group.name().clone()) + .collect(), + }) + } +} + +impl From<&Group> for Item { + fn from(value: &Group) -> Self { + Self { + name: value.name().clone(), + nodes: value.nodes().clone(), + scaling: value.scaling().to_grpc(), + constraints: Some(value.constraints().into()), + resources: Some(value.resources().into()), + spec: Some(value.spec().into()), + } + } +} + +impl From<&StartConstraints> for Constraints { + fn from(value: &StartConstraints) -> Self { + Self { + min: *value.minimum(), + max: *value.maximum(), + prio: *value.priority(), + } + } +} + +impl ScalingPolicy { + pub fn to_grpc(&self) -> Option { + if *self.enabled() { + Some(Scaling { + start_threshold: *self.start_threshold(), + stop_empty: *self.stop_empty_servers(), + }) + } else { + None + } + } +} + +impl From<&Resources> for server::Resources { + fn from(value: &Resources) -> Self { + Self { + memory: *value.memory(), + swap: *value.swap(), + cpu: *value.cpu(), + io: *value.io(), + disk: *value.disk(), + ports: *value.ports(), + } + } +} + +impl From<&Spec> for server::Spec { + fn 
from(value: &Spec) -> Self { + Self { + img: value.image().clone(), + max_players: *value.max_players(), + settings: value + .settings() + .iter() + .map(|(key, value)| KeyValue { + key: key.clone(), + value: value.clone(), + }) + .collect(), + env: value + .environment() + .iter() + .map(|(key, value)| KeyValue { + key: key.clone(), + value: value.clone(), + }) + .collect(), + retention: Some(value.disk_retention().clone() as i32), + fallback: value.fallback().to_grpc(), + } + } +} + +impl FallbackPolicy { + pub fn to_grpc(&self) -> Option { + if *self.enabled() { + Some(Fallback { + prio: *self.priority(), + }) + } else { + None + } + } +} diff --git a/controller/src/network/manage/node.rs b/controller/src/network/manage/node.rs new file mode 100644 index 00000000..def531d4 --- /dev/null +++ b/controller/src/network/manage/node.rs @@ -0,0 +1,68 @@ +use anyhow::Result; +use tonic::{async_trait, Status}; +use url::Url; + +use crate::{ + application::{ + node::{Capabilities, Node}, + Controller, + }, + network::proto::manage::node::{Item, List}, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct CreateNodeTask(pub String, pub String, pub Capabilities, pub Url); +pub struct GetNodeTask(pub String); +pub struct GetNodesTask(); + +#[async_trait] +impl GenericTask for CreateNodeTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + if let Err(error) = controller + .nodes + .create_node(&self.0, &self.1, &self.2, &self.3, &controller.plugins) + .await + { + return Task::new_err(error.into()); + } + Task::new_empty() + } +} + +#[async_trait] +impl GenericTask for GetNodeTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(node) = controller.nodes.get_node(&self.0) else { + return Task::new_err(Status::not_found("Node not found")); + }; + + Task::new_ok(Item::from(node)) + } +} + +#[async_trait] +impl GenericTask for GetNodesTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + 
Task::new_ok(List { + nodes: controller + .nodes + .get_nodes() + .iter() + .map(|node| node.name().clone()) + .collect(), + }) + } +} + +impl From<&Node> for Item { + fn from(value: &Node) -> Self { + Self { + name: value.name().clone(), + plugin: value.plugin().to_string(), + memory: *value.capabilities().memory(), + max: *value.capabilities().max_servers(), + child: value.capabilities().child().clone(), + ctrl_addr: value.controller().to_string(), + } + } +} diff --git a/controller/src/network/manage/plugin.rs b/controller/src/network/manage/plugin.rs new file mode 100644 index 00000000..938b68ed --- /dev/null +++ b/controller/src/network/manage/plugin.rs @@ -0,0 +1,24 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::Controller, + network::proto::manage::plugin::List, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct GetPluginsTask(); + +#[async_trait] +impl GenericTask for GetPluginsTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + Task::new_ok(List { + plugins: controller + .plugins + .get_plugins_keys() + .iter() + .map(|plugin| (*plugin).clone()) + .collect(), + }) + } +} diff --git a/controller/src/network/manage/power.rs b/controller/src/network/manage/power.rs new file mode 100644 index 00000000..58c62e83 --- /dev/null +++ b/controller/src/network/manage/power.rs @@ -0,0 +1,17 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::Controller, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct RequestStopTask(); + +#[async_trait] +impl GenericTask for RequestStopTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + controller.signal_shutdown(); + Task::new_empty() + } +} diff --git a/controller/src/network/manage/resource.rs b/controller/src/network/manage/resource.rs new file mode 100644 index 00000000..813d2c3b --- /dev/null +++ b/controller/src/network/manage/resource.rs @@ -0,0 +1,80 @@ +use anyhow::Result; +use tonic::{async_trait, 
Status}; +use uuid::Uuid; + +use crate::{ + application::{server::manager::StopRequest, Controller}, + network::proto::manage::resource::Category, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct SetResourceTask(pub Category, pub String, pub bool); + +pub struct DeleteResourceTask(pub Category, pub String); + +#[async_trait] +impl GenericTask for SetResourceTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + match self.0 { + Category::Node => { + let node = controller + .nodes + .get_node_mut(&self.1) + .ok_or(Status::not_found("Node not found"))?; + if let Err(error) = node.set_active(self.2).await { + return Task::new_err(Status::internal(error.to_string())); + } + Task::new_empty() + } + Category::Group => { + let group = controller + .groups + .get_group_mut(&self.1) + .ok_or(Status::not_found("Group not found"))?; + if let Err(error) = group.set_active(self.2, &mut controller.servers).await { + return Task::new_err(Status::internal(error.to_string())); + } + Task::new_empty() + } + Category::Server => Task::new_err(Status::unimplemented( + "This category is not supported for this action", + )), + } + } +} + +#[async_trait] +impl GenericTask for DeleteResourceTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + match self.0 { + Category::Node => { + if let Err(error) = controller + .nodes + .delete_node(&self.1, &controller.servers, &controller.groups) + .await + { + return Task::new_err(error.into()); + } + Task::new_empty() + } + Category::Group => { + if let Err(error) = controller.groups.delete_group(&self.1).await { + return Task::new_err(error.into()); + } + Task::new_empty() + } + Category::Server => { + let Ok(uuid) = Uuid::parse_str(&self.1) else { + return Task::new_err(Status::invalid_argument("Invalid UUID")); + }; + let id = match controller.servers.get_server(&uuid) { + Some(server) => server.id().clone(), + None => return Task::new_err(Status::not_found("Server not found")), + }; + + 
controller.servers.schedule_stop(StopRequest::new(None, id)); + Task::new_empty() + } + } + } +} diff --git a/controller/src/network/manage/server.rs b/controller/src/network/manage/server.rs new file mode 100644 index 00000000..ee8627ca --- /dev/null +++ b/controller/src/network/manage/server.rs @@ -0,0 +1,84 @@ +use anyhow::Result; +use tonic::{async_trait, Status}; +use uuid::Uuid; + +use crate::{ + application::{node::Allocation, server::Server, Controller}, + network::proto::{ + common::Address, + manage::server::{self, Detail, List, Short}, + }, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct GetServerTask(pub Uuid); +pub struct GetServersTask(); + +#[async_trait] +impl GenericTask for GetServerTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let Some(server) = controller.servers.get_server(&self.0) else { + return Task::new_err(Status::not_found("Server not found")); + }; + + Task::new_ok(Detail::from(server)) + } +} + +#[async_trait] +impl GenericTask for GetServersTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + Task::new_ok(List { + servers: controller + .servers + .get_servers() + .iter() + .map(std::convert::Into::into) + .collect(), + }) + } +} + +impl From<&&Server> for Short { + fn from(server: &&Server) -> Self { + Self { + id: server.id().uuid().to_string(), + name: server.id().name().clone(), + group: server.group().clone(), + node: server.node().clone(), + } + } +} + +impl From<&Server> for Detail { + fn from(server: &Server) -> Self { + Self { + name: server.id().name().clone(), + id: server.id().uuid().to_string(), + group: server.group().clone(), + node: server.node().clone(), + allocation: Some(server.allocation().into()), + users: *server.connected_users(), + token: server.token().clone(), + state: server.state().clone() as i32, + ready: *server.ready(), + } + } +} + +impl From<&Allocation> for server::Allocation { + fn from(value: &Allocation) -> Self { + Self { + ports: value + 
.ports() + .iter() + .map(|port| Address { + host: port.host.clone(), + port: u32::from(port.port), + }) + .collect(), + resources: Some(value.resources().into()), + spec: Some(value.spec().into()), + } + } +} diff --git a/controller/src/network/manage/transfer.rs b/controller/src/network/manage/transfer.rs new file mode 100644 index 00000000..0937d356 --- /dev/null +++ b/controller/src/network/manage/transfer.rs @@ -0,0 +1,41 @@ +use anyhow::Result; +use tonic::async_trait; +use uuid::Uuid; + +use crate::{ + application::{ + auth::Authorization, + user::transfer::{Transfer, TransferTarget}, + Controller, + }, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct TransferUsersTask(pub Authorization, pub Vec, pub TransferTarget); + +#[async_trait] +impl GenericTask for TransferUsersTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + let mut count: u32 = 0; + for user in &self.1 { + let Some(user) = controller.users.get_user_mut(user) else { + continue; + }; + let mut transfer = match Transfer::resolve( + &self.0, + user, + &self.2, + &controller.servers, + &controller.groups, + ) { + Ok(transfer) => transfer, + Err(error) => return Task::new_err(error.into()), + }; + if let Err(error) = Transfer::transfer_user(&mut transfer, &controller.shared).await { + return Task::new_err(error); + } + count += 1; + } + Task::new_ok(count) + } +} diff --git a/controller/src/network/manage/user.rs b/controller/src/network/manage/user.rs new file mode 100644 index 00000000..ffe485a0 --- /dev/null +++ b/controller/src/network/manage/user.rs @@ -0,0 +1,33 @@ +use anyhow::Result; +use tonic::async_trait; + +use crate::{ + application::{user::User, Controller}, + network::proto::manage::user::{Item, List}, + task::{BoxedAny, GenericTask, Task}, +}; + +pub struct GetUsersTask(); + +#[async_trait] +impl GenericTask for GetUsersTask { + async fn run(&mut self, controller: &mut Controller) -> Result { + Task::new_ok(List { + users: controller + .users + 
.get_users() + .iter() + .map(Into::into) + .collect(), + }) + } +} + +impl From<&&User> for Item { + fn from(user: &&User) -> Self { + Self { + id: user.id().uuid().to_string(), + name: user.id().name().clone(), + } + } +} diff --git a/controller/src/network/proto.rs b/controller/src/network/proto.rs new file mode 100644 index 00000000..513bf413 --- /dev/null +++ b/controller/src/network/proto.rs @@ -0,0 +1,20 @@ +pub mod common { + #![allow(clippy::all, clippy::pedantic)] + use tonic::include_proto; + + include_proto!("common"); +} + +pub mod manage { + #![allow(clippy::all, clippy::pedantic)] + use tonic::include_proto; + + include_proto!("manage"); +} + +pub mod client { + #![allow(clippy::all, clippy::pedantic)] + use tonic::include_proto; + + include_proto!("client"); +} diff --git a/controller/src/network/stream.rs b/controller/src/network/stream.rs deleted file mode 100644 index f56eae40..00000000 --- a/controller/src/network/stream.rs +++ /dev/null @@ -1,32 +0,0 @@ -use std::{ - pin::Pin, - sync::mpsc::{Receiver, TryRecvError}, - task::{Context, Poll}, -}; - -use tokio_stream::Stream; - -pub struct StdReceiverStream { - receiver: Receiver, -} - -impl Stream for StdReceiverStream { - type Item = T; - - fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll> { - match self.receiver.try_recv() { - Ok(item) => Poll::Ready(Some(item)), - Err(TryRecvError::Empty) => { - cx.waker().wake_by_ref(); - Poll::Pending - } - Err(TryRecvError::Disconnected) => Poll::Ready(None), - } - } -} - -impl StdReceiverStream { - pub fn new(receiver: Receiver) -> Self { - Self { receiver } - } -} diff --git a/controller/src/network/unit.rs b/controller/src/network/unit.rs deleted file mode 100644 index 22be76b9..00000000 --- a/controller/src/network/unit.rs +++ /dev/null @@ -1,428 +0,0 @@ -use crate::{ - application::{ - auth::AuthUnitHandle, - event::{channel::ChannelMessageSended, transfer::UserTransferRequested, EventKey}, - user::{transfer::TransferTarget, 
CurrentUnit}, - ControllerHandle, - }, - VERSION, -}; - -use super::stream::StdReceiverStream; -use proto::unit_service_server::UnitService; -use tonic::{async_trait, Request, Response, Status}; -use uuid::Uuid; - -use std::{ - ops::Deref, - str::FromStr, - sync::{mpsc::channel, Arc}, -}; - -#[allow(clippy::all)] -pub mod proto { - use tonic::include_proto; - - include_proto!("unit"); -} - -pub struct UnitServiceImpl { - pub controller: ControllerHandle, -} - -#[async_trait] -impl UnitService for UnitServiceImpl { - async fn beat_heart(&self, request: Request<()>) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - self.controller - .get_units() - .handle_heart_beat(&requesting_unit); - Ok(Response::new(())) - } - - async fn mark_ready(&self, request: Request<()>) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - self.controller.get_units().mark_ready(&requesting_unit); - Ok(Response::new(())) - } - - async fn mark_not_ready(&self, request: Request<()>) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - self.controller.get_units().mark_not_ready(&requesting_unit); - Ok(Response::new(())) - } - - async fn mark_running(&self, request: Request<()>) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. 
Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - self.controller.get_units().mark_running(&requesting_unit); - Ok(Response::new(())) - } - - async fn request_stop(&self, request: Request<()>) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - self.controller - .get_units() - .checked_unit_stop(&requesting_unit); - Ok(Response::new(())) - } - - async fn user_connected( - &self, - request: Request, - ) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - let user = request.into_inner(); - self.controller.get_users().handle_user_connected( - requesting_unit, - user.name, - Uuid::from_str(&user.uuid).map_err(|error| { - Status::invalid_argument(format!("Failed to parse UUID: {}", error)) - })?, - ); - Ok(Response::new(())) - } - - async fn user_disconnected( - &self, - request: Request, - ) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. 
Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - let user = request.into_inner(); - self.controller.get_users().handle_user_disconnected( - requesting_unit, - Uuid::from_str(&user.uuid).map_err(|error| { - Status::invalid_argument(format!("Failed to parse UUID: {}", error)) - })?, - ); - Ok(Response::new(())) - } - - type SubscribeToTransfersStream = - StdReceiverStream>; - async fn subscribe_to_transfers( - &self, - request: Request<()>, - ) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - let (sender, receiver) = channel(); - self.controller - .get_event_bus() - .register_listener_under_unit( - EventKey::Transfer(requesting_unit.uuid), - Arc::downgrade(&requesting_unit), - Box::new(move |event: &UserTransferRequested| { - let transfer = &event.transfer; - if let Some((user, _, to)) = transfer.get_strong() { - let address = to.allocation.primary_address(); - - let transfer = proto::transfer_management::ResolvedTransferResponse { - user_uuid: user.uuid.to_string(), - host: address.host.clone(), - port: address.port as u32, - }; - sender - .send(Ok(transfer)) - .expect("Failed to send message to transfer stream"); - } - }), - ); - - Ok(Response::new(StdReceiverStream::new(receiver))) - } - - async fn transfer_users( - &self, - request: Request, - ) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. 
Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - let transfer = request.into_inner(); - let target = transfer - .target - .ok_or_else(|| Status::invalid_argument("Target must be provided"))?; - - let target = - match proto::transfer_management::transfer_target_value::TargetType::try_from( - target.target_type, - ) { - Ok(proto::transfer_management::transfer_target_value::TargetType::Deployment) => { - TransferTarget::Deployment( - self.controller - .lock_deployments() - .find_by_name(&target.target.ok_or_else(|| { - Status::invalid_argument("Target must be provided") - })?) - .ok_or_else(|| Status::not_found("Deployment does not exist"))?, - ) - } - Ok(proto::transfer_management::transfer_target_value::TargetType::Unit) => { - TransferTarget::Unit( - self.controller - .get_units() - .get_unit( - Uuid::from_str(&target.target.ok_or_else(|| { - Status::invalid_argument("Target must be provided") - })?) - .map_err(|error| { - Status::invalid_argument(format!( - "Failed to parse target UUID: {}", - error - )) - })?, - ) - .ok_or_else(|| Status::not_found("Unit does not exist"))?, - ) - } - Ok(proto::transfer_management::transfer_target_value::TargetType::Fallback) => { - TransferTarget::Fallback - } - Err(error) => return Err(Status::invalid_argument(error.to_string())), - }; - - let mut count = 0; - for user_uuid in &transfer.user_uuids { - let user_uuid = Uuid::from_str(user_uuid).map_err(|error| { - Status::invalid_argument(format!("Failed to parse user UUID: {}", error)) - })?; - - let user = self - .controller - .get_users() - .get_user(user_uuid) - .ok_or_else(|| { - Status::not_found(format!( - "User {} is not connected to this controller", - user_uuid - )) - })?; - - // Check if the user is connected to the unit that requested the transfer - if let CurrentUnit::Connected(unit) = user.unit.read().unwrap().deref() { - if let Some(unit) = unit.upgrade() { - if !Arc::ptr_eq(&unit, 
&requesting_unit) { - return Err(Status::permission_denied(format!( - "User {} is not connected to the requesting unit", - user_uuid - ))); - } - } - } else { - return Err(Status::permission_denied(format!( - "User {} is not connected to the requesting unit", - user_uuid - ))); - } - - let transfer = self - .controller - .get_users() - .resolve_transfer(&user, &target) - .ok_or_else(|| Status::not_found("Failed to resolve transfer"))?; - - if self.controller.get_users().transfer_user(transfer) { - count += 1; - } - } - - Ok(Response::new(count)) - } - - async fn send_message_to_channel( - &self, - request: Request, - ) -> Result, Status> { - let _requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - let message = request.into_inner(); - let count = self.controller.get_event_bus().dispatch( - &EventKey::Channel(message.channel.clone()), - &ChannelMessageSended { message }, - ); - Ok(Response::new(count)) - } - - async fn unsubscribe_from_channel( - &self, - request: Request, - ) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - self.controller - .get_event_bus() - .unregister_listener(EventKey::Channel(request.into_inner()), &requesting_unit); - - Ok(Response::new(())) - } - - type SubscribeToChannelStream = - StdReceiverStream>; - async fn subscribe_to_channel( - &self, - request: Request, - ) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. 
Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - let channel_name = &request.into_inner(); - - let (sender, receiver) = channel(); - self.controller - .get_event_bus() - .register_listener_under_unit( - EventKey::Channel(channel_name.clone()), - Arc::downgrade(&requesting_unit), - Box::new(move |event: &ChannelMessageSended| { - sender - .send(Ok(event.message.clone())) - .expect("Failed to send message to channel stream"); - }), - ); - - Ok(Response::new(StdReceiverStream::new(receiver))) - } - - async fn get_units( - &self, - _request: Request<()>, - ) -> Result, Status> { - let units = self - .controller - .get_units() - .get_units() - .values() - .map(|unit| proto::unit_information::SimpleUnitValue { - name: unit.name.clone(), - uuid: unit.uuid.to_string(), - deployment: unit - .deployment - .as_ref() - .and_then(|d| d.deployment.upgrade().map(|d| d.name.clone())), - }) - .collect(); - - Ok(Response::new(proto::unit_information::UnitListResponse { - units, - })) - } - - async fn get_deployments( - &self, - _request: Request<()>, - ) -> Result, Status> { - let handle = self.controller.lock_deployments(); - let mut deployments = Vec::with_capacity(handle.get_amount()); - for name in handle.get_deployments().keys() { - deployments.push(name.clone()); - } - - Ok(Response::new( - proto::deployment_information::DeploymentListResponse { deployments }, - )) - } - - async fn reset(&self, request: Request<()>) -> Result, Status> { - let requesting_unit = request - .extensions() - .get::() - .expect("Failed to get unit from extensions. 
Is tonic broken?") - .unit - .upgrade() - .ok_or_else(|| Status::not_found("The authenticated unit does not exist"))?; - - self.controller - .get_event_bus() - .cleanup_unit(&requesting_unit); - - Ok(Response::new(())) - } - - async fn get_protocol_version(&self, _request: Request<()>) -> Result, Status> { - Ok(Response::new(VERSION.protocol)) - } - - async fn get_controller_version( - &self, - _request: Request<()>, - ) -> Result, Status> { - Ok(Response::new(VERSION.to_string())) - } -} diff --git a/controller/src/resource.rs b/controller/src/resource.rs new file mode 100644 index 00000000..94cde7c1 --- /dev/null +++ b/controller/src/resource.rs @@ -0,0 +1,43 @@ +use tonic::Status; + +pub enum DeleteResourceError { + StillActive, + StillInUse, + NotFound, + Error(anyhow::Error), +} + +pub enum CreateResourceError { + RequiredNodeNotLoaded, + RequiredPluginNotLoaded, + AlreadyExists, + Error(anyhow::Error), +} + +impl From for Status { + fn from(val: DeleteResourceError) -> Self { + match val { + DeleteResourceError::StillActive => { + Status::unavailable("Resource is still set to active") + } + DeleteResourceError::StillInUse => Status::unavailable("Resource is still in use"), + DeleteResourceError::NotFound => Status::not_found("Resource not found"), + DeleteResourceError::Error(error) => Status::internal(format!("Error: {error}")), + } + } +} + +impl From for Status { + fn from(val: CreateResourceError) -> Self { + match val { + CreateResourceError::RequiredNodeNotLoaded => { + Status::failed_precondition("Required node is not loaded") + } + CreateResourceError::RequiredPluginNotLoaded => { + Status::failed_precondition("Required plugin is not loaded") + } + CreateResourceError::AlreadyExists => Status::already_exists("Resource already exists"), + CreateResourceError::Error(error) => Status::internal(format!("Error: {error}")), + } + } +} diff --git a/controller/src/storage.rs b/controller/src/storage.rs index 13523a49..d81b88ff 100644 --- 
a/controller/src/storage.rs +++ b/controller/src/storage.rs @@ -3,78 +3,169 @@ All the storage related functions are implemented here. This makes it easier to change them in the future */ -use std::path::PathBuf; +use std::path::{Path, PathBuf}; + +use anyhow::Result; +use serde::{de::DeserializeOwned, Serialize}; +use simplelog::warn; +use tokio::fs; /* Logs */ const LOGS_DIRECTORY: &str = "logs"; const LATEST_LOG_FILE: &str = "latest.log"; -/* Cloudlets */ -const CLOUDLETS_DIRECTORY: &str = "cloudlets"; +/* Nodes */ +const NODES_DIRECTORY: &str = "nodes"; -/* Deployments */ -const DEPLOYMENTS_DIRECTORY: &str = "deployments"; +/* Groups */ +const GROUPS_DIRECTORY: &str = "groups"; /* Auth */ -const AUTH_DIRECTORY: &str = "auth"; const USERS_DIRECTORY: &str = "users"; /* Configs */ const CONFIG_DIRECTORY: &str = "configs"; const PRIMARY_CONFIG_FILE: &str = "config.toml"; -/* Drivers */ -const DRIVERS_DIRECTORY: &str = "drivers"; +/* Wasm Configs */ +const WASM_PLUGINS_CONFIG_FILE: &str = "wasm-plugins.toml"; +const WASM_ENGINE_CONFIG_FILE: &str = "wasm-engine.toml"; + +/* Plugins */ +const PLUGINS_DIRECTORY: &str = "plugins"; const DATA_DIRECTORY: &str = "data"; pub struct Storage; impl Storage { /* Logs */ - pub fn get_latest_log_file() -> PathBuf { + pub fn latest_log_file() -> PathBuf { PathBuf::from(LOGS_DIRECTORY).join(LATEST_LOG_FILE) } - /* Cloudlets */ - pub fn get_cloudlets_folder() -> PathBuf { - PathBuf::from(CLOUDLETS_DIRECTORY) + /* Nodes */ + pub fn nodes_directory() -> PathBuf { + PathBuf::from(NODES_DIRECTORY) } - pub fn get_cloudlet_file(name: &str) -> PathBuf { - Storage::get_cloudlets_folder().join(format!("{}.toml", name)) + pub fn node_file(name: &str) -> PathBuf { + Storage::nodes_directory().join(format!("{name}.toml")) } - /* Deployments */ - pub fn get_deployments_folder() -> PathBuf { - PathBuf::from(DEPLOYMENTS_DIRECTORY) + /* Groups */ + pub fn groups_directory() -> PathBuf { + PathBuf::from(GROUPS_DIRECTORY) } - pub fn 
get_deployment_file(name: &str) -> PathBuf { - Storage::get_deployments_folder().join(format!("{}.toml", name)) + pub fn group_file(name: &str) -> PathBuf { + Storage::groups_directory().join(format!("{name}.toml")) } /* Auth */ - pub fn get_users_folder() -> PathBuf { - PathBuf::from(AUTH_DIRECTORY).join(USERS_DIRECTORY) + pub fn users_directory() -> PathBuf { + PathBuf::from(USERS_DIRECTORY) } - pub fn get_user_file(name: &str) -> PathBuf { - Storage::get_users_folder().join(format!("{}.toml", name)) + pub fn user_file(name: &str) -> PathBuf { + Storage::users_directory().join(format!("{name}.toml")) } /* Configs */ - pub fn get_configs_folder() -> PathBuf { + pub fn configs_directory() -> PathBuf { PathBuf::from(CONFIG_DIRECTORY) } - pub fn get_primary_config_file() -> PathBuf { - Storage::get_configs_folder().join(PRIMARY_CONFIG_FILE) + pub fn primary_config_file() -> PathBuf { + Storage::configs_directory().join(PRIMARY_CONFIG_FILE) + } + + /* Wasm Configs */ + pub fn wasm_plugins_config_file() -> PathBuf { + Storage::configs_directory().join(WASM_PLUGINS_CONFIG_FILE) + } + pub fn wasm_engine_config_file() -> PathBuf { + Storage::configs_directory().join(WASM_ENGINE_CONFIG_FILE) } - /* Drivers */ - pub fn get_drivers_folder() -> PathBuf { - PathBuf::from(DRIVERS_DIRECTORY) + /* Plugins */ + pub fn plugins_directory() -> PathBuf { + PathBuf::from(PLUGINS_DIRECTORY) } - pub fn get_data_folder_for_driver(name: &str) -> PathBuf { + pub fn data_directory_for_plugin(name: &str) -> PathBuf { PathBuf::from(DATA_DIRECTORY).join(name) } - pub fn get_config_folder_for_driver(name: &str) -> PathBuf { - Storage::get_configs_folder().join(name) + pub fn config_directory_for_plugin(name: &str) -> PathBuf { + Storage::configs_directory().join(name) + } + + pub async fn for_each_content(path: &Path) -> Result> { + let mut result = Vec::new(); + let mut directory = fs::read_dir(path).await?; + while let Some(entry) = directory.next_entry().await? 
{ + if entry.path().is_dir() { + continue; + } + let path = entry.path(); + match (path.file_name(), path.file_stem()) { + (Some(name), Some(stem)) => result.push(( + path.clone(), + name.to_string_lossy().to_string(), + stem.to_string_lossy().to_string(), + )), + _ => { + warn!("Failed to read file names: {:?}", path); + } + } + } + Ok(result) + } + + pub async fn for_each_content_toml( + path: &Path, + error_message: &str, + ) -> Result> { + let mut result = Vec::new(); + let mut directory = fs::read_dir(path).await?; + while let Some(entry) = directory.next_entry().await? { + if entry.path().is_dir() { + continue; + } + match T::from_file(&entry.path()).await { + Ok(value) => { + let path = entry.path(); + match (path.file_name(), path.file_stem()) { + (Some(name), Some(stem)) => result.push(( + path.clone(), + name.to_string_lossy().to_string(), + stem.to_string_lossy().to_string(), + value, + )), + _ => { + warn!("Failed to read file names: {:?}", path); + } + } + } + Err(error) => { + warn!("{}@{:?}: {:?}", error_message, entry.path(), error); + } + } + } + Ok(result) + } +} + +pub trait SaveToTomlFile: Serialize { + async fn save(&self, path: &Path, create_parent: bool) -> Result<()> { + if create_parent { + if let Some(parent) = path.parent() { + fs::create_dir_all(parent).await?; + } + } + fs::write(path, toml::to_string(self)?).await?; + Ok(()) + } +} + +pub trait LoadFromTomlFile: DeserializeOwned { + async fn from_file(path: &Path) -> Result { + let data = fs::read_to_string(path).await?; + let config = toml::from_str(&data)?; + Ok(config) } } diff --git a/controller/src/task.rs b/controller/src/task.rs new file mode 100644 index 00000000..eea51a1e --- /dev/null +++ b/controller/src/task.rs @@ -0,0 +1,106 @@ +use std::any::{type_name, Any}; + +use anyhow::{anyhow, Result}; +use common::error::FancyError; +use simplelog::debug; +use tokio::sync::oneshot::{channel, Sender}; +use tonic::{async_trait, Request, Status}; + +use crate::application::{ + 
auth::{AuthType, Authorization}, + Controller, TaskSender, +}; + +pub type BoxedTask = Box; +pub type BoxedAny = Box; + +pub struct Task { + task: BoxedTask, + sender: Sender>, +} + +impl Task { + pub fn get_auth(auth: AuthType, request: &Request) -> Result { + match request.extensions().get::() { + Some(data) if data.is_type(auth) => Ok(data.clone()), + _ => Err(Status::permission_denied("Not linked")), + } + } + + pub async fn execute( + auth: AuthType, + queue: &TaskSender, + request: Request, + task: F, + ) -> Result + where + F: FnOnce(Request, Authorization) -> Result, + { + let data = Self::get_auth(auth, &request)?; + debug!("Executing task with a return type of: {}", type_name::()); + match Task::create::(queue, task(request, data)?).await { + Ok(value) => value, + Err(error) => { + FancyError::print_fancy(&error, false); + Err(Status::internal(error.to_string())) + } + } + } + + pub async fn create( + queue: &TaskSender, + task: BoxedTask, + ) -> Result> { + let (sender, receiver) = channel(); + queue + .send(Task { task, sender }) + .await + .map_err(|_| anyhow!("Failed to send task to task queue"))?; + let result = receiver.await??; + match result.downcast::() { + Ok(result) => Ok(Ok(*result)), + Err(result) => match result.downcast::() { + Ok(result) => Ok(Err(*result)), + Err(_) => Err(anyhow!( + "Failed to downcast task result to the expected type. 
Check task implementation" + )), + }, + } + } + + pub async fn run(mut self, controller: &mut Controller) -> Result<()> { + let task = self.task.run(controller).await; + self.sender + .send(task) + .map_err(|_| anyhow!("Failed to send task result to the task sender")) + } + + #[allow(clippy::unnecessary_wraps)] + pub fn new_ok(value: T) -> Result { + Ok(Box::new(value)) + } + + pub fn new_empty() -> Result { + Self::new_ok(()) + } + + #[allow(clippy::unnecessary_wraps)] + pub fn new_err(value: Status) -> Result { + Ok(Box::new(value)) + } + + pub fn new_permission_error(message: &str) -> Result { + Self::new_err(Status::permission_denied(message)) + } + + pub fn new_link_error() -> Result { + Self::new_err(Status::failed_precondition( + "Your token is not linked to the required resource for this action", + )) + } +} + +#[async_trait] +pub trait GenericTask { + async fn run(&mut self, controller: &mut Controller) -> Result; +} diff --git a/docker-compose.yml b/docker-compose.yml index e5eab4eb..0752acab 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -15,6 +15,6 @@ services: - ./run/logs:/app/logs - ./run/auth:/app/auth - ./run/configs:/app/configs - - ./run/cloudlets:/app/cloudlets - - ./run/deployments:/app/deployments - - ./run/drivers:/app/drivers \ No newline at end of file + - ./run/nodes:/app/nodes + - ./run/groups:/app/groups + - ./run/plugins:/app/plugins \ No newline at end of file diff --git a/docs/features/backend.md b/docs/features/backend.md index c450e9a7..7eb813fa 100644 --- a/docs/features/backend.md +++ b/docs/features/backend.md @@ -1,6 +1,6 @@ # Modular Backend -Atomic Cloud is built on a versatile driver system that abstracts and streamlines the process of initiating servers. This modular approach allows the platform to support various backends, ensuring flexibility and scalability. 
Currently, Atomic Cloud supports several backend types, including: +Atomic Cloud is built on a versatile plugin system that abstracts and streamlines the process of initiating servers. This modular approach allows the platform to support various backends, ensuring flexibility and scalability. Currently, Atomic Cloud supports several backend types, including: - **Pterodactyl** - **Docker** diff --git a/docs/index.md b/docs/index.md index fc0902bc..2e1f0eff 100644 --- a/docs/index.md +++ b/docs/index.md @@ -4,13 +4,13 @@ Welcome to the **Atomic Cloud** project! Please note that the cloud is currently ## Documentation Status 🚧 -This documentation is a work in progress. We apologize for any grammatical errors or incomplete sections. We welcome contributions from the community—if you’d like to help improve the documentation, please consider submitting a Pull Request. +This documentation is a work in progress. We apologize for any grammatical errors or incomplete sections. We welcome contributions from the community—if you’d like to help improve the documentation, please consider submitting a Pull Request. ## Installation Atomic Cloud comprises two main components: the **Controller** and the **CLI**. Below are the recommended installation options: -- **Docker Installation**: We recommend installing via a Docker image for ease of deployment. Follow the instructions in our [Docker Installation Guide](installation/docker.md). +- **Docker Installation**: We recommend installing via a Docker image for ease of deployment. Follow the instructions in our [Docker Installation Guide](installation/docker.md). - **Standard Installation**: If you prefer not to use Docker, refer to our [Standard Installation Guide](installation/normal.md). 
### Controller diff --git a/docs/installation/docker.md b/docs/installation/docker.md index 2b61697e..e96bb04e 100644 --- a/docs/installation/docker.md +++ b/docs/installation/docker.md @@ -15,15 +15,15 @@ services: ports: - "12892:12892" environment: - - PTERODACTYL=true # Enable Pterodactyl driver installation - - LOCAL=true # Enable Local driver installation + - PTERODACTYL=true # Enable Pterodactyl plugin installation + - LOCAL=true # Enable Local plugin installation volumes: - ./logs:/app/logs - ./auth:/app/auth - ./configs:/app/configs - - ./cloudlets:/app/cloudlets - - ./deployments:/app/deployments - - ./drivers:/app/drivers + - ./nodes:/app/nodes + - ./groups:/app/groups + - ./plugins:/app/plugins ``` ## Step 2: Start the Container diff --git a/docs/usage/controller/index.md b/docs/usage/controller/index.md index e57bff9c..0ce766be 100644 --- a/docs/usage/controller/index.md +++ b/docs/usage/controller/index.md @@ -1,6 +1,6 @@ # What is the Controller? -The **Controller** is a vital component of the Atomic Cloud platform. It acts as the central management unit by: +The **Controller** is a vital component of the Atomic Cloud platform. It acts as the central management server by: - **Overseeing Nodes:** It monitors and manages the nodes in the cloud infrastructure. diff --git a/docs/usage/controller/plugins/index.md b/docs/usage/controller/plugins/index.md index 3c8b160c..3d011974 100644 --- a/docs/usage/controller/plugins/index.md +++ b/docs/usage/controller/plugins/index.md @@ -2,4 +2,4 @@ The Controller is built with extensibility at its core, supporting a wide array of plugin platforms. At present, the primary platform for developing plugins is **WebAssembly (WASM)**, leveraging the **WASI Preview 2** specification. This modern approach delivers a flexible and high-performance environment for plugin development. -If you prefer not to use a WASM plugin or have alternative ideas, you are welcome to implement your own plugin mechanism. 
We encourage community contributions—feel free to submit a pull request with your enhancements or alternative implementations. \ No newline at end of file +If you prefer not to use a WASM plugin or have alternative ideas, you are welcome to implement your own plugin mechanism. We encourage community contributions—feel free to submit a pull request with your enhancements or alternative implementations. \ No newline at end of file diff --git a/docs/usage/controller/templates/index.md b/docs/usage/controller/templates/index.md index b7a1048a..2983c602 100644 --- a/docs/usage/controller/templates/index.md +++ b/docs/usage/controller/templates/index.md @@ -19,7 +19,7 @@ Templates work by describing the desired state of your resources in a structured These define the actual components to be created. In a Minecraft server template, resources could include the server jar file, configuration files, mods, or plugins required to run the server. 3. **Outputs**: - These are values returned after the deployment is complete, such as resource IDs, connection URLs, or IP addresses that you might need to connect to your server. + These are values returned after the deployment is complete, such as resource IDs, connection URLs, or IP addresses that you might need to connect to your server. 
### Example Template Structure @@ -79,7 +79,7 @@ startupScripts = [ ] [outputs] -# Output values after deployment +# Output values after the group is deployed serverUrl = "http://{parameters.serverName}.atomiccloud.example.com:{parameters.serverPort}" ``` diff --git a/drivers/local/src/driver.rs b/drivers/local/src/driver.rs deleted file mode 100644 index 72947138..00000000 --- a/drivers/local/src/driver.rs +++ /dev/null @@ -1,183 +0,0 @@ -use std::{cell::UnsafeCell, fs, rc::Rc, sync::RwLock, time::Instant}; - -use cloudlet::LocalCloudlet; - -use common::{allocator::NumberAllocator, tick::TickResult}; -use config::{Config, CLEANUP_TIMEOUT}; -use template::Templates; - -use crate::{ - cloudlet::driver::{ - file::remove_dir_all, - types::{ErrorMessage, ScopedErrors}, - }, - debug, error, - exports::cloudlet::driver::bridge::{ - Capabilities, GenericCloudlet, GuestGenericCloudlet, GuestGenericDriver, Information, - RemoteController, - }, - info, - storage::Storage, -}; - -pub mod cloudlet; -mod config; -mod template; - -// Include the build information generated by build.rs -include!(concat!(env!("OUT_DIR"), "/build_info.rs")); - -pub const AUTHORS: [&str; 1] = ["HttpRafa"]; - -pub struct Local { - /* Cloud Identification */ - cloud_identifier: String, - - /* Config */ - config: UnsafeCell>>, - - /* Shared Resources */ - port_allocator: UnsafeCell>>>>, - - /* Templates */ - templates: Rc>, - - /* Cloudlets that this driver handles */ - cloudlets: RwLock>>, -} - -impl Local { - fn get_config(&self) -> &Rc { - // Safe as we are only borrowing the reference immutably - unsafe { &*self.config.get() }.as_ref().unwrap() - } - fn get_port_allocator(&self) -> &Rc>> { - // Safe as we are only borrowing the reference immutably - unsafe { &*self.port_allocator.get() }.as_ref().unwrap() - } -} - -impl GuestGenericDriver for Local { - fn new(cloud_identifier: String) -> Self { - Self { - cloud_identifier, - config: UnsafeCell::new(None), - port_allocator: UnsafeCell::new(None), - templates: 
Rc::new(RwLock::new(Templates::new())), - cloudlets: RwLock::new(Vec::new()), - } - } - - fn init(&self) -> Information { - let mut ready = true; - - let tmp_dir = Storage::get_temporary_folder(); - debug!("Checking directories..."); - if tmp_dir.exists() { - if let Err(error) = remove_dir_all(&Storage::get_temporary_folder_host_converted()) { - error!( - "Failed to remove temporary directory: {}", - error - ); - ready = false; - } - } - debug!("Directories are ready"); - - // Load configuration - { - let config = Config::new_filled(); - let allocator = NumberAllocator::new(config.ports.clone()); - unsafe { - *self.config.get() = Some(Rc::new(config)); - *self.port_allocator.get() = Some(Rc::new(RwLock::new(allocator))); - } - } - - // Load all templates - { - let mut templates = self - .templates - .write() - .expect("Failed to get lock on templates"); - templates.load_all(); - templates.prepare_all(); - } - - Information { - authors: AUTHORS.iter().map(|&author| author.to_string()).collect(), - version: VERSION.to_string(), - ready, - } - } - - fn init_cloudlet( - &self, - name: String, - capabilities: Capabilities, - controller: RemoteController, - ) -> Result { - let wrapper = LocalCloudletWrapper::new( - self.cloud_identifier.clone(), - name.clone(), - None, - capabilities, - controller, - ); - unsafe { - *wrapper.inner.config.get() = Some(self.get_config().clone()); - *wrapper.inner.port_allocator.get() = Some(self.get_port_allocator().clone()); - *wrapper.inner.templates.get() = Some(self.templates.clone()); - } - // Add cloudlet to cloudlets list - let mut cloudlets = self - .cloudlets - .write() - .expect("Failed to get lock on cloudlets"); - cloudlets.push(wrapper.inner.clone()); - info!("Cloudlet {} was added", name); - Ok(GenericCloudlet::new(wrapper)) - } - - fn cleanup(&self) -> Result<(), ScopedErrors> { - let cloudlets = self - .cloudlets - .read() - .expect("Failed to get lock on cloudlets"); - info!("Starting cleanup process..."); - let start_time 
= Instant::now(); - let mut last_attempt = false; - - while !last_attempt { - if start_time.elapsed() > CLEANUP_TIMEOUT { - last_attempt = true; - } - - let all_stopped = cloudlets.iter().try_fold(true, |all_stopped, cloudlet| { - match cloudlet.try_exit(last_attempt) { - Ok(TickResult::Ok) => Ok(false), - Ok(_) => Ok(all_stopped), - Err(error) => Err(error), - } - })?; - - if all_stopped { - break; - } - } - - info!("All units should be stopped now. Removing temporary files..."); - if let Err(error) = fs::remove_dir_all(Storage::get_temporary_folder()) { - error!("Failed to remove temporary directory: {}", error); - } - info!("Driver cleanup finished"); - Ok(()) - } - - fn tick(&self) -> Result<(), ScopedErrors> { - Ok(()) - } -} - -pub struct LocalCloudletWrapper { - pub inner: Rc, -} diff --git a/drivers/local/src/driver/cloudlet.rs b/drivers/local/src/driver/cloudlet.rs deleted file mode 100644 index 4e3b0db3..00000000 --- a/drivers/local/src/driver/cloudlet.rs +++ /dev/null @@ -1,281 +0,0 @@ -use std::{ - cell::UnsafeCell, - rc::Rc, - sync::{RwLock, RwLockReadGuard, RwLockWriteGuard}, -}; - -use anyhow::Result; -use common::{allocator::NumberAllocator, name::TimedName, tick::TickResult}; -use unit::LocalUnit; - -use crate::{ - cloudlet::driver::types::{ErrorMessage, ScopedError, ScopedErrors}, - error, - exports::cloudlet::driver::bridge::{ - Address, Capabilities, GuestGenericCloudlet, RemoteController, Retention, Unit, - UnitProposal, - }, - info, - storage::Storage, -}; - -use super::{config::Config, template::Templates, LocalCloudletWrapper}; - -pub mod unit; - -impl LocalCloudlet { - pub fn tick(&self) -> Result<(), ScopedErrors> { - let mut units = self.get_units_mut(); - let mut errors = ScopedErrors::new(); - units.retain_mut(|unit| match unit.tick() { - Ok(result) => result == TickResult::Ok, - Err(err) => { - errors.push(ScopedError { - scope: unit.name.get_raw_name().to_string(), - message: err.to_string(), - }); - true - } - }); - if 
errors.is_empty() { - Ok(()) - } else { - Err(errors) - } - } -} - -impl GuestGenericCloudlet for LocalCloudletWrapper { - fn new( - _cloud_identifier: String, - _name: String, - _id: Option, - _capabilities: Capabilities, - controller: RemoteController, - ) -> Self { - Self { - inner: Rc::new(LocalCloudlet { - _name, - config: UnsafeCell::new(None), - controller, - templates: UnsafeCell::new(None), - port_allocator: UnsafeCell::new(None), - units: RwLock::new(vec![]), - }), - } - } - - fn tick(&self) -> Result<(), ScopedErrors> { - self.inner.tick() - } - - fn allocate_addresses(&self, unit: UnitProposal) -> Result, ErrorMessage> { - let amount = unit.resources.addresses; - - let mut ports = Vec::with_capacity(amount as usize); - let mut allocator = self - .inner - .get_port_allocator() - .write() - .expect("Failed to lock port allocator"); - for _ in 0..amount { - if let Some(port) = allocator.allocate() { - ports.push(Address { - host: self.inner.get_config().address.clone(), - port, - }); - } else { - return Err("Failed to allocate ports".to_string()); - } - } - - Ok(ports) - } - - fn deallocate_addresses(&self, addresses: Vec
) { - let mut allocator = self - .inner - .get_port_allocator() - .write() - .expect("Failed to lock port allocator"); - for address in addresses { - allocator.release(address.port); - } - } - - fn start_unit(&self, unit: Unit) { - let spec = &unit.allocation.spec; - let name = - TimedName::new_no_identifier(&unit.name, spec.disk_retention == Retention::Permanent); - - let template = match self - .inner - .get_templates() - .read() - .expect("Failed to lock templates") - .get_template_by_name(&spec.image) - { - Some(template) => template, - None => { - error!( - "Template {} not found for unit {}", - &spec.image, - name.get_name() - ); - return; - } - }; - - let folder = Storage::get_unit_folder(&name, &spec.disk_retention); - if !folder.exists() { - if let Err(err) = template.copy_to_folder(&folder) { - error!( - "Failed to copy template for unit {}: {}", - name.get_name(), - err - ); - return; - } - } - - let mut local_unit = LocalUnit::new(self, unit, &name, template); - if let Err(err) = local_unit.start() { - error!( - "Failed to start unit {}: {}", - name.get_raw_name(), - err - ); - return; - } - - info!( - "Successfully created child process for unit {}", - name.get_raw_name() - ); - self.inner.get_units_mut().push(local_unit); - } - - fn restart_unit(&self, unit: Unit) { - let mut units = self.inner.get_units_mut(); - if let Some(local_unit) = units - .iter_mut() - .find(|u| u.name.get_raw_name() == unit.name) - { - if let Err(err) = local_unit.restart() { - error!( - "Failed to restart unit {}: {}", - unit.name, err - ); - return; - } - info!( - "Child process of unit {} is restarting", - unit.name - ); - } else { - error!("Failed to restart unit {}: Unit was never started by this driver", unit.name); - } - } - - fn stop_unit(&self, unit: Unit) { - let mut units = self.inner.get_units_mut(); - if let Some(local_unit) = units - .iter_mut() - .find(|u| u.name.get_raw_name() == unit.name) - { - if unit.allocation.spec.disk_retention == Retention::Temporary { 
- if let Err(err) = local_unit.kill() { - error!( - "Failed to stop unit {}: {}", - unit.name, err - ); - return; - } - info!( - "Child process of unit {} was killed", - unit.name - ); - } else { - if let Err(err) = local_unit.stop() { - error!( - "Failed to stop unit {}: {}", - unit.name, err - ); - return; - } - info!( - "Child process of unit {} is stopping", - unit.name - ); - } - } else { - error!("Failed to stop unit {}: Unit was never started by this driver", unit.name); - } - } -} - -pub struct LocalCloudlet { - /* Informations about the cloudlet */ - _name: String, - pub config: UnsafeCell>>, - controller: RemoteController, - - /* Templates */ - pub templates: UnsafeCell>>>, - - /* Dynamic Resources */ - pub port_allocator: UnsafeCell>>>>, - units: RwLock>, -} - -impl LocalCloudlet { - /* Dispose */ - pub fn try_exit(&self, force: bool) -> Result { - if force { - let mut units = self.get_units_mut(); - let mut errors = ScopedErrors::new(); - for unit in units.iter_mut() { - if let Err(error) = unit.kill() { - errors.push(ScopedError { - scope: unit.name.get_raw_name().to_string(), - message: error.to_string(), - }); - } - } - if !errors.is_empty() { - return Err(errors); - } - } - match self.tick() { - Ok(()) => { - if self.get_units().is_empty() { - Ok(TickResult::Drop) - } else { - Ok(TickResult::Ok) - } - } - Err(errors) => Err(errors), - } - } - - fn get_config(&self) -> &Rc { - // Safe as we are only borrowing the reference immutably - unsafe { &*self.config.get() }.as_ref().unwrap() - } - fn get_templates(&self) -> &Rc> { - // Safe as we are only borrowing the reference immutably - unsafe { &*self.templates.get() }.as_ref().unwrap() - } - fn get_port_allocator(&self) -> &Rc>> { - // Safe as we are only borrowing the reference immutably - unsafe { &*self.port_allocator.get() }.as_ref().unwrap() - } - fn get_units(&self) -> RwLockReadGuard> { - // Safe as we are only run on the same thread - self.units.read().unwrap() - } - fn get_units_mut(&self) -> 
RwLockWriteGuard> { - // Safe as we are only run on the same thread - self.units.write().unwrap() - } -} diff --git a/drivers/local/src/driver/cloudlet/unit.rs b/drivers/local/src/driver/cloudlet/unit.rs deleted file mode 100644 index c53895f8..00000000 --- a/drivers/local/src/driver/cloudlet/unit.rs +++ /dev/null @@ -1,180 +0,0 @@ -use std::{path::PathBuf, rc::Rc, time::Instant}; - -use anyhow::{anyhow, Result}; -use common::{name::TimedName, tick::TickResult}; - -use crate::{ - cloudlet::driver::{ - file::remove_dir_all, - process::{drop_process, kill_process, read_line_async, try_wait, StdReader}, - types::{Directory, KeyValue}, - }, - driver::{config::UNIT_STOP_TIMEOUT, template::Template, LocalCloudletWrapper}, - exports::cloudlet::driver::bridge::{Retention, Unit}, - info, - storage::Storage, - warn, -}; - -/* Variables */ -const CONTROLLER_ADDRESS: &str = "CONTROLLER_ADDRESS"; -const UNIT_TOKEN: &str = "UNIT_TOKEN"; -const UNIT_PORT: &str = "UNIT_PORT"; - -#[derive(PartialEq)] -pub enum UnitState { - Running, - Restarting, - Stopping, - Stopped, -} - -pub struct LocalUnit { - pub unit: Unit, - pub state: UnitState, - pub changed: Instant, - pub pid: Option, - pub name: TimedName, - pub _internal_folder: PathBuf, - pub host_folder: Directory, - pub template: Rc