diff --git a/docs/libraries/workflow/GLOSSARY.md b/docs/libraries/workflow/GLOSSARY.md
index c03544218..4cac9abbb 100644
--- a/docs/libraries/workflow/GLOSSARY.md
+++ b/docs/libraries/workflow/GLOSSARY.md
@@ -15,8 +15,9 @@ A collection of registered workflows. This is solely used for the worker to fetc
A series of fallible executions of code (also known as activities), signal listeners, signal transmitters,
or sub workflow triggers.

-Workflows can be though of as a list of tasks. The code defining a workflow only specifies what items should
-be ran; There is no complex logic (e.g. database queries) running within the top level of the workflow.
+Workflows can be thought of as an outline or a list of tasks. The code defining a workflow only specifies
+what items should be run; there is no complex logic (e.g. database queries) running within the top level of
+the workflow.

Upon an activity failure, workflow code can be reran without duplicate side effects because activities are
cached and re-read after they succeed.

@@ -51,6 +52,10 @@ this signal for it to be picked up, otherwise it will stay in the database indef
workflow. Signals do not have a response; another signal must be sent back from the workflow and listened to
by the sender.

+### Differences from messages
+
+Signals are effectively just messages that can only be consumed by workflows.
+
## Tagged Signal

Same as a signal except it is sent with a JSON blob as its "tags" instead of to a specific workflow. Any

@@ -65,6 +70,28 @@ See [the signals document](./SIGNALS.md).

A "one of" for signal listening. Allows for listening to multiple signals at once and receiving the first
one that gets sent.

+## Message
+
+A payload that can be sent out of a workflow. Includes a JSON blob of tags which can be subscribed to with a
+subscription.
+
+### Differences from signals
+
+Messages are effectively just signals that can only be consumed by non-workflows.
+
+## Subscription
+
+An entity that waits for messages with the same (not a superset/subset) tags as itself. Upon receiving a
+message, the message will be returned and the developer can choose to continue listening for more messages.
+
+## Tail
+
+Reads the last message without waiting. If none exists (all previous messages expired), `None` is returned.
+
+## Tail w/ Anchor
+
+Reads the earliest message after the given anchor timestamp, or waits for one to be published if none exist.
+
## Workflow Event

An action that gets executed in a workflow. An event can be a:
diff --git a/docs/libraries/workflow/SIGNALS.md b/docs/libraries/workflow/SIGNALS.md
deleted file mode 100644
index cc71587d3..000000000
--- a/docs/libraries/workflow/SIGNALS.md
+++ /dev/null
@@ -1,7 +0,0 @@
-# Signals
-
-## Tagged signals
-
-Tagged signals are consumed on a first-come-first-serve basis because a single signal being consumed by more
-than one workflow is not a supported design pattern. To work around this, consume the signal by a workflow
-then publish multiple signals from that workflow.
diff --git a/docs/libraries/workflow/SIGNALS_AND_MESSAGES.md b/docs/libraries/workflow/SIGNALS_AND_MESSAGES.md
new file mode 100644
index 000000000..e0ad13f3b
--- /dev/null
+++ b/docs/libraries/workflow/SIGNALS_AND_MESSAGES.md
@@ -0,0 +1,26 @@
+# Signals
+
+## Tagged signals
+
+Tagged signals are consumed on a first-come, first-served basis because a single signal being consumed by
+more than one workflow is not a supported design pattern. To work around this, have a single workflow
+consume the signal, then publish multiple signals from that workflow.
+
+# Choosing Between Signals and Messages
+
+> **Note**: The non-workflow ecosystem consists of the API layer, standalone services, operations, and old
+> workers.
+
+## Signal
+
+- Sending data from the non-workflow ecosystem to the workflow ecosystem
+- Sending data from the workflow ecosystem to somewhere else in the workflow ecosystem
+
+## Message
+
+- Sending data from the workflow ecosystem to the non-workflow ecosystem
+
+## Both Signals and Messages
+
+Sometimes you may need to listen for a particular event in both the workflow ecosystem and the non-workflow
+ecosystem. In this case you can publish both a signal and a message (you can derive `signal` and `message`
+on the same struct to make this easier), as shown in the sketch below.
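+
+A minimal sketch of the dual declaration. The struct, fields, and event name are hypothetical, and the exact
+derive requirements may differ; see the `signal` and `message` attribute macros for the authoritative form:
+
+```rust
+use chirp_workflow::prelude::*;
+
+// Listened for as a signal inside workflows, and consumed as a message by
+// subscribers in the non-workflow ecosystem. Each macro detects the other,
+// so the serde derives are only emitted once.
+#[signal("user_banned")]
+#[message("user_banned")]
+pub struct UserBanned {
+	pub user_id: Uuid,
+	pub reason: String,
+}
+```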
diff --git a/lib/bolt/core/src/tasks/gen.rs b/lib/bolt/core/src/tasks/gen.rs
index 9511dba8d..eac2916df 100644
--- a/lib/bolt/core/src/tasks/gen.rs
+++ b/lib/bolt/core/src/tasks/gen.rs
@@ -162,12 +162,6 @@ async fn generate_root(path: &Path) {
			}
		}
	}
-
-	// Utils lib
-	let util_path = pkg.path().join("util");
-	if fs::metadata(&util_path).await.is_ok() {
-		set_license(&util_path.join("Cargo.toml")).await;
-	}
}
diff --git a/lib/chirp-workflow/core/src/ctx/operation.rs b/lib/chirp-workflow/core/src/ctx/operation.rs
index 4094d77a2..ae960e082 100644
--- a/lib/chirp-workflow/core/src/ctx/operation.rs
+++ b/lib/chirp-workflow/core/src/ctx/operation.rs
@@ -4,6 +4,7 @@ use uuid::Uuid;

use crate::{DatabaseHandle, Operation, OperationInput, WorkflowError};

+#[derive(Clone)]
pub struct OperationCtx {
	ray_id: Uuid,
	name: &'static str,
diff --git a/lib/chirp-workflow/core/src/ctx/workflow.rs b/lib/chirp-workflow/core/src/ctx/workflow.rs
index a6dbd78f8..ea0eef801 100644
--- a/lib/chirp-workflow/core/src/ctx/workflow.rs
+++ b/lib/chirp-workflow/core/src/ctx/workflow.rs
@@ -9,7 +9,8 @@ use crate::{
	activity::ActivityId,
	event::Event,
	util::{self, Location},
-	Activity, ActivityCtx, ActivityInput, DatabaseHandle, Executable, Listen, PulledWorkflow,
+	executable::{closure, Executable, AsyncResult},
+	Activity, ActivityCtx, ActivityInput, DatabaseHandle, Listen, PulledWorkflow,
	RegistryHandle, Signal, SignalRow, Workflow, WorkflowError, WorkflowInput, WorkflowResult,
};
@@ -28,7 +29,8 @@
// Most db action retries
const MAX_DB_ACTION_RETRIES: usize = 5;

-// TODO: Use generics to store input instead of a string
+// TODO: Use generics to store input instead of a JSON value
+// NOTE: Cloneable because of inner `Arc`s
#[derive(Clone)]
pub struct WorkflowCtx {
	pub workflow_id: Uuid,
@@ -663,6 +665,17 @@
		exec.execute(self).await
	}

+	/// Spawns a new task to execute workflow steps in.
+	pub fn spawn<F, T: Send + 'static>(&mut self, f: F) -> tokio::task::JoinHandle<GlobalResult<T>>
+	where
+		F: for<'a> FnOnce(&'a mut WorkflowCtx) -> AsyncResult<'a, T> + Send + 'static,
+	{
+		let mut ctx = self.clone();
+		tokio::task::spawn(async move {
+			closure(f).execute(&mut ctx).await
+		})
+	}
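+
+	// Usage sketch (illustrative only; `MyActivityInput` is a hypothetical
+	// activity input). The spawned branch runs concurrently with the rest of
+	// the workflow body and is joined via the returned handle:
+	//
+	//   let handle = ctx.spawn(|ctx| Box::pin(async move {
+	//       ctx.activity(MyActivityInput {}).await
+	//   }));
+	//   // ... other workflow steps ...
+	//   let output = handle.await??; // join error, then workflow error
+
	/// Sends a signal.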
	pub async fn signal(
		&mut self,
diff --git a/lib/chirp-workflow/core/src/executable.rs b/lib/chirp-workflow/core/src/executable.rs
index e56e68785..dde5e56a1 100644
--- a/lib/chirp-workflow/core/src/executable.rs
+++ b/lib/chirp-workflow/core/src/executable.rs
@@ -14,9 +14,9 @@ pub trait Executable: Send {
	async fn execute(self, ctx: &mut WorkflowCtx) -> GlobalResult<Self::Output>;
}

-type AsyncResult<'a, T> = Pin<Box<dyn Future<Output = GlobalResult<T>> + Send + 'a>>;
+pub type AsyncResult<'a, T> = Pin<Box<dyn Future<Output = GlobalResult<T>> + Send + 'a>>;

-// Closure executuable impl
+// Closure executable impl
#[async_trait]
impl<F, T> Executable for F
where
@@ -76,7 +76,7 @@ struct TupleHelper {

// Must wrap all closured being used as executables in this function due to
// https://github.com/rust-lang/rust/issues/70263
-pub fn closure(f: F) -> F
+pub fn closure(f: F) -> F
where
	F: for<'a> FnOnce(&'a mut WorkflowCtx) -> AsyncResult<'a, T> + Send,
{
diff --git a/lib/chirp-workflow/macros/src/lib.rs b/lib/chirp-workflow/macros/src/lib.rs
index 51500abe5..7239399ec 100644
--- a/lib/chirp-workflow/macros/src/lib.rs
+++ b/lib/chirp-workflow/macros/src/lib.rs
@@ -284,8 +284,16 @@ pub fn signal(attr: TokenStream, item: TokenStream) -> TokenStream {

	let struct_ident = &item_struct.ident;

+	// If also a message, don't derive serde traits
+	let also_message = item_struct.attrs.iter().filter_map(|attr| attr.path().segments.last()).any(|seg| seg.ident == "message");
+	let serde_derive = if also_message {
+		quote! {}
+	} else {
+		quote!{ #[derive(serde::Serialize, serde::Deserialize)] }
+	};
+
	let expanded = quote! {
-		#[derive(serde::Serialize, serde::Deserialize)]
+		#serde_derive
		#item_struct

		impl Signal for #struct_ident {
@@ -295,7 +303,7 @@ pub fn signal(attr: TokenStream, item: TokenStream) -> TokenStream {
		#[async_trait::async_trait]
		impl Listen for #struct_ident {
			async fn listen(ctx: &mut chirp_workflow::prelude::WorkflowCtx) -> chirp_workflow::prelude::WorkflowResult<Self> {
-				let row = ctx.listen_any(&[Self::NAME]).await?;
+				let row = ctx.listen_any(&[<Self as Signal>::NAME]).await?;
				Self::parse(&row.signal_name, row.body)
			}

@@ -313,6 +321,14 @@ pub fn message(attr: TokenStream, item: TokenStream) -> TokenStream {
	let name = parse_macro_input!(attr as LitStr);
	let item_struct = parse_macro_input!(item as ItemStruct);

+	// If also a signal, don't derive serde traits
+	let also_signal = item_struct.attrs.iter().filter_map(|attr| attr.path().segments.last()).any(|seg| seg.ident == "signal");
+	let serde_derive = if also_signal {
+		quote! {}
+	} else {
+		quote!{ #[derive(serde::Serialize, serde::Deserialize)] }
+	};
+
	let config = match parse_msg_config(&item_struct.attrs) {
		Ok(x) => x,
		Err(err) => return err.into_compile_error().into(),
	};
@@ -322,25 +338,14 @@
	let tail_ttl = config.tail_ttl;

	let expanded = quote!
{ - #[derive(Debug, serde::Serialize, serde::Deserialize)] + #serde_derive + #[derive(Debug)] #item_struct impl Message for #struct_ident { const NAME: &'static str = #name; const TAIL_TTL: std::time::Duration = std::time::Duration::from_secs(#tail_ttl); } - - #[async_trait::async_trait] - impl Listen for #struct_ident { - async fn listen(ctx: &mut chirp_workflow::prelude::WorkflowCtx) -> chirp_workflow::prelude::WorkflowResult { - let row = ctx.listen_any(&[Self::NAME]).await?; - Self::parse(&row.signal_name, row.body) - } - - fn parse(_name: &str, body: serde_json::Value) -> chirp_workflow::prelude::WorkflowResult { - serde_json::from_value(body).map_err(WorkflowError::DeserializeActivityOutput) - } - } }; TokenStream::from(expanded) diff --git a/proto/backend/cluster.proto b/proto/backend/cluster.proto index 8c901f555..01b73df6d 100644 --- a/proto/backend/cluster.proto +++ b/proto/backend/cluster.proto @@ -94,3 +94,8 @@ message ServerFilter { bool filter_public_ips = 9; repeated string public_ips = 10; } + +// Helper proto for writing to sql +message Pools { + repeated rivet.backend.cluster.Pool pools = 1; +} diff --git a/svc/Cargo.lock b/svc/Cargo.lock index 275cac882..74f6e325a 100644 --- a/svc/Cargo.lock +++ b/svc/Cargo.lock @@ -103,14 +103,7 @@ dependencies = [ "chirp-client", "chirp-workflow", "chrono", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-datacenter-resolve-for-name-id", - "cluster-get", - "cluster-list", - "cluster-server-destroy-with-filter", - "cluster-server-get", - "cluster-server-list", + "cluster", "http 0.2.12", "hyper", "lazy_static", @@ -125,7 +118,6 @@ dependencies = [ "rivet-matchmaker", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "rivet-util-mm", "s3-util", "serde", @@ -253,7 +245,7 @@ dependencies = [ "cloud-namespace-token-public-create", "cloud-version-get", "cloud-version-publish", - "cluster-datacenter-list", + "cluster", "custom-user-avatar-list-for-game", "custom-user-avatar-upload-complete", "faker-region", @@ -307,7 +299,6 @@ dependencies = [ "rivet-health-checks", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "rivet-util-job", "rivet-util-mm", "rivet-util-nsfw", @@ -777,10 +768,7 @@ dependencies = [ "async-trait", "chirp-client", "chrono", - "cluster-datacenter-get", - "cluster-datacenter-tls-get", - "cluster-server-get", - "cluster-server-resolve-for-ip", + "cluster", "http 0.2.12", "hyper", "lazy_static", @@ -791,7 +779,6 @@ dependencies = [ "rivet-health-checks", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "serde", "serde_json", "thiserror", @@ -859,7 +846,7 @@ dependencies = [ "cdn-namespace-domain-create", "chirp-client", "chrono", - "cluster-server-list", + "cluster", "faker-cdn-site", "faker-game", "faker-game-namespace", @@ -2400,58 +2387,25 @@ dependencies = [ ] [[package]] -name = "cluster-datacenter-get" +name = "cluster" version = "0.0.1" dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-datacenter-list" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-datacenter-location-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", + "anyhow", + "chirp-workflow", + "hex", "ip-info", + "lazy_static", + "linode", + "merkle_hash", + "nomad-util", + "nomad_client", + "rivet-metrics", "rivet-operation", + "rivet-runtime", + "serde", "sqlx", -] - -[[package]] -name = 
"cluster-datacenter-resolve-for-name-id" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-datacenter-tls-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", + "thiserror", + "tokio", ] [[package]] @@ -2460,7 +2414,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "rivet-connection", "rivet-health-checks", "rivet-metrics", @@ -2472,35 +2426,18 @@ dependencies = [ "tracing-subscriber", ] -[[package]] -name = "cluster-datacenter-topology-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "lazy_static", - "nomad-util", - "nomad_client", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - [[package]] name = "cluster-default-update" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-get", + "cluster", "prost 0.10.4", "reqwest", "rivet-connection", "rivet-operation", "rivet-pools", - "rivet-util-cluster", "serde", "serde_json", "tokio", @@ -2519,16 +2456,12 @@ dependencies = [ "chirp-worker", "chrono", "cloudflare", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-datacenter-topology-get", + "cluster", "http 0.2.12", "include_dir", "indoc 1.0.9", "lazy_static", - "linode-instance-type-get", - "linode-server-destroy", - "linode-server-provision", + "linode", "maplit", "nomad-util", "openssl", @@ -2538,7 +2471,6 @@ dependencies = [ "rivet-metrics", "rivet-operation", "rivet-runtime", - "rivet-util-cluster", "s3-util", "serde_yaml", "ssh2", @@ -2556,165 +2488,36 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "rivet-connection", "rivet-health-checks", "rivet-metrics", "rivet-operation", "rivet-runtime", - "rivet-util-cluster", "sqlx", "tokio", "tracing", "tracing-subscriber", ] -[[package]] -name = "cluster-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-get-for-game" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "rivet-util-cluster", - "sqlx", -] - -[[package]] -name = "cluster-list" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - [[package]] name = "cluster-metrics-publish" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "rivet-connection", "rivet-health-checks", "rivet-metrics", "rivet-operation", "rivet-runtime", - "rivet-util-cluster", "sqlx", "tokio", "tracing", "tracing-subscriber", ] -[[package]] -name = "cluster-resolve-for-name-id" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-destroy-with-filter" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "cluster-server-list", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-list" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 
0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-server-resolve-for-ip" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "prost 0.10.4", - "rivet-operation", - "sqlx", -] - -[[package]] -name = "cluster-worker" -version = "0.0.1" -dependencies = [ - "acme-lib", - "anyhow", - "chirp-client", - "chirp-worker", - "chrono", - "cloudflare", - "cluster-datacenter-get", - "cluster-datacenter-list", - "cluster-datacenter-topology-get", - "http 0.2.12", - "include_dir", - "indoc 1.0.9", - "lazy_static", - "linode-instance-type-get", - "linode-server-destroy", - "linode-server-provision", - "maplit", - "nomad-util", - "nomad_client", - "openssl", - "rivet-convert", - "rivet-health-checks", - "rivet-metrics", - "rivet-runtime", - "rivet-util-cluster", - "s3-util", - "serde_yaml", - "sqlx", - "ssh2", - "thiserror", - "token-create", - "trust-dns-resolver", -] - [[package]] name = "combine" version = "4.6.6" @@ -5102,84 +4905,40 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" [[package]] -name = "linode-gc" +name = "linode" version = "0.0.1" dependencies = [ - "chirp-client", - "chirp-worker", + "chirp-workflow", "chrono", + "cluster", + "rand", "reqwest", - "rivet-connection", - "rivet-health-checks", - "rivet-metrics", - "rivet-operation", - "rivet-runtime", - "rivet-util-cluster", - "rivet-util-linode", "serde", "serde_json", "sqlx", - "tokio", - "tracing", - "tracing-subscriber", -] - -[[package]] -name = "linode-instance-type-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "rivet-operation", - "rivet-util-cluster", - "rivet-util-linode", - "sqlx", -] - -[[package]] -name = "linode-server-destroy" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "cluster-datacenter-get", - "linode-server-provision", - "reqwest", - "rivet-operation", - "rivet-util-cluster", - "rivet-util-linode", - "sqlx", + "ssh-key", ] [[package]] -name = "linode-server-provision" +name = "linode-gc" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "linode-server-destroy", + "chrono", + "linode", "reqwest", - "rivet-operation", - "rivet-util-cluster", - "rivet-util-linode", - "sqlx", -] - -[[package]] -name = "linode-worker" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "cluster-datacenter-get", - "rivet-convert", + "rivet-connection", "rivet-health-checks", "rivet-metrics", + "rivet-operation", "rivet-runtime", - "rivet-util-cluster", - "rivet-util-linode", + "serde", + "serde_json", "sqlx", + "tokio", + "tracing", + "tracing-subscriber", ] [[package]] @@ -5915,13 +5674,13 @@ dependencies = [ "cf-custom-hostname-worker", "chirp-client", "cloud-worker", - "cluster-worker", + "cluster", "external-worker", "game-user-worker", "job-log-worker", "job-run-worker", "kv-worker", - "linode-worker", + "linode", "mm-worker", "rivet-connection", "rivet-health-checks", @@ -6938,8 +6697,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "cluster-datacenter-location-get", + "cluster", "faker-region", "prost 0.10.4", "rivet-operation", @@ -6952,11 +6710,10 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-list", + "cluster", "faker-region", "prost 0.10.4", "rivet-operation", - "rivet-util-cluster", "sqlx", ] @@ -6966,8 +6723,7 @@ version = "0.0.1" dependencies = [ 
"chirp-client", "chirp-worker", - "cluster-datacenter-list", - "cluster-get-for-game", + "cluster", "faker-region", "prost 0.10.4", "rivet-operation", @@ -6995,7 +6751,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "faker-region", "prost 0.10.4", "region-get", @@ -7010,7 +6766,7 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", + "cluster", "faker-region", "prost 0.10.4", "region-get", @@ -7567,20 +7323,6 @@ dependencies = [ name = "rivet-util-cdn" version = "0.1.0" -[[package]] -name = "rivet-util-cluster" -version = "0.1.0" -dependencies = [ - "hex", - "lazy_static", - "merkle_hash", - "rivet-metrics", - "rivet-util", - "tokio", - "types", - "uuid", -] - [[package]] name = "rivet-util-env" version = "0.1.0" @@ -7612,19 +7354,6 @@ dependencies = [ name = "rivet-util-kv" version = "0.1.0" -[[package]] -name = "rivet-util-linode" -version = "0.1.0" -dependencies = [ - "chrono", - "rand", - "reqwest", - "rivet-operation", - "serde", - "serde_json", - "ssh-key", -] - [[package]] name = "rivet-util-macros" version = "0.1.0" @@ -8934,12 +8663,10 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "cluster-datacenter-get", - "cluster-datacenter-list", - "linode-instance-type-get", + "cluster", + "linode", "prost 0.10.4", "rivet-operation", - "rivet-util-cluster", ] [[package]] diff --git a/svc/Cargo.toml b/svc/Cargo.toml index 44e05006b..0ff8959e1 100644 --- a/svc/Cargo.toml +++ b/svc/Cargo.toml @@ -59,26 +59,12 @@ members = [ "pkg/cloud/ops/version-get", "pkg/cloud/ops/version-publish", "pkg/cloud/worker", - "pkg/cluster/ops/datacenter-get", - "pkg/cluster/ops/datacenter-list", - "pkg/cluster/ops/datacenter-location-get", - "pkg/cluster/ops/datacenter-resolve-for-name-id", - "pkg/cluster/ops/datacenter-tls-get", - "pkg/cluster/ops/datacenter-topology-get", - "pkg/cluster/ops/get", - "pkg/cluster/ops/get-for-game", - "pkg/cluster/ops/list", - "pkg/cluster/ops/resolve-for-name-id", - "pkg/cluster/ops/server-destroy-with-filter", - "pkg/cluster/ops/server-get", - "pkg/cluster/ops/server-list", - "pkg/cluster/ops/server-resolve-for-ip", + "pkg/cluster", "pkg/cluster/standalone/datacenter-tls-renew", "pkg/cluster/standalone/default-update", "pkg/cluster/standalone/fix-tls", "pkg/cluster/standalone/gc", "pkg/cluster/standalone/metrics-publish", - "pkg/cluster/worker", "pkg/custom-user-avatar/ops/list-for-game", "pkg/custom-user-avatar/ops/upload-complete", "pkg/debug/ops/email-res", @@ -151,11 +137,8 @@ members = [ "pkg/kv/ops/get", "pkg/kv/ops/list", "pkg/kv/worker", - "pkg/linode/ops/instance-type-get", - "pkg/linode/ops/server-destroy", - "pkg/linode/ops/server-provision", + "pkg/linode", "pkg/linode/standalone/gc", - "pkg/linode/worker", "pkg/load-test/standalone/api-cloud", "pkg/load-test/standalone/mm", "pkg/load-test/standalone/mm-sustain", diff --git a/svc/api/admin/Cargo.toml b/svc/api/admin/Cargo.toml index ced0d77b6..697fa1c95 100644 --- a/svc/api/admin/Cargo.toml +++ b/svc/api/admin/Cargo.toml @@ -39,17 +39,9 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ ] } url = "2.2.2" uuid = { version = "1", features = ["v4"] } -util-cluster = { package = "rivet-util-cluster", path = "../../pkg/cluster/util" } util-mm = { package = "rivet-util-mm", path = "../../pkg/mm/util" } -cluster-get = { path = "../../pkg/cluster/ops/get" } -cluster-list = { path = "../../pkg/cluster/ops/list" } -cluster-server-get = { path = 
"../../pkg/cluster/ops/server-get" } -cluster-server-destroy-with-filter = { path = "../../pkg/cluster/ops/server-destroy-with-filter" } -cluster-server-list = { path = "../../pkg/cluster/ops/server-list" } -cluster-datacenter-list = { path = "../../pkg/cluster/ops/datacenter-list" } -cluster-datacenter-get = { path = "../../pkg/cluster/ops/datacenter-get" } -cluster-datacenter-resolve-for-name-id = { path = "../../pkg/cluster/ops/datacenter-resolve-for-name-id" } +cluster = { path = "../../pkg/cluster" } token-create = { path = "../../pkg/token/ops/create" } [dev-dependencies] diff --git a/svc/api/cloud/Cargo.toml b/svc/api/cloud/Cargo.toml index 5aad793ab..b7f8dc162 100644 --- a/svc/api/cloud/Cargo.toml +++ b/svc/api/cloud/Cargo.toml @@ -34,7 +34,6 @@ tokio = { version = "1.29" } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } url = "2.2.2" -util-cluster = { package = "rivet-util-cluster", path = "../../pkg/cluster/util" } util-job = { package = "rivet-util-job", path = "../../pkg/job/util" } util-mm = { package = "rivet-util-mm", path = "../../pkg/mm/util" } util-nsfw = { package = "rivet-util-nsfw", path = "../../pkg/nsfw/util" } @@ -64,7 +63,7 @@ cloud-namespace-token-development-create = { path = "../../pkg/cloud/ops/namespa cloud-namespace-token-public-create = { path = "../../pkg/cloud/ops/namespace-token-public-create" } cloud-version-get = { path = "../../pkg/cloud/ops/version-get" } cloud-version-publish = { path = "../../pkg/cloud/ops/version-publish" } -cluster-datacenter-list = { path = "../../pkg/cluster/ops/datacenter-list" } +cluster = { path = "../../pkg/cluster" } custom-user-avatar-list-for-game = { path = "../../pkg/custom-user-avatar/ops/list-for-game" } custom-user-avatar-upload-complete = { path = "../../pkg/custom-user-avatar/ops/upload-complete" } game-banner-upload-complete = { path = "../../pkg/game/ops/banner-upload-complete" } diff --git a/svc/api/provision/Cargo.toml b/svc/api/provision/Cargo.toml index c5a862320..56e4bdb2b 100644 --- a/svc/api/provision/Cargo.toml +++ b/svc/api/provision/Cargo.toml @@ -28,10 +28,6 @@ tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } url = "2.2.2" uuid = { version = "1", features = ["v4"] } -util-cluster = { package = "rivet-util-cluster", path = "../../pkg/cluster/util" } -cluster-datacenter-get = { path = "../../pkg/cluster/ops/datacenter-get" } -cluster-datacenter-tls-get = { path = "../../pkg/cluster/ops/datacenter-tls-get" } -cluster-server-get = { path = "../../pkg/cluster/ops/server-get" } -cluster-server-resolve-for-ip = { path = "../../pkg/cluster/ops/server-resolve-for-ip" } +cluster = { path = "../../pkg/cluster" } diff --git a/svc/api/traefik-provider/Cargo.toml b/svc/api/traefik-provider/Cargo.toml index 69708ac00..391695a0c 100644 --- a/svc/api/traefik-provider/Cargo.toml +++ b/svc/api/traefik-provider/Cargo.toml @@ -37,7 +37,7 @@ util-cdn = { package = "rivet-util-cdn", path = "../../pkg/cdn/util" } util-job = { package = "rivet-util-job", path = "../../pkg/job/util" } uuid = { version = "1", features = ["v4"] } -cluster-server-list = { path = "../../pkg/cluster/ops/server-list" } +cluster = { path = "../../pkg/cluster" } [dev-dependencies] rivet-connection = { path = "../../../lib/connection" } diff --git a/svc/pkg/cluster/Cargo.toml b/svc/pkg/cluster/Cargo.toml new file mode 100644 index 000000000..b1215d0a4 --- /dev/null +++ b/svc/pkg/cluster/Cargo.toml @@ -0,0 +1,35 @@ 
+[package] +name = "cluster" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +anyhow = "1.0" +chirp-workflow = { path = "../../../lib/chirp-workflow/core" } +lazy_static = "1.4" +nomad-util = { path = "../../../lib/nomad-util" } +rivet-metrics = { path = "../../../lib/metrics" } +rivet-operation = { path = "../../../lib/operation/core" } +rivet-runtime = { path = "../../../lib/runtime" } +serde = { version = "1.0.198", features = ["derive"] } +thiserror = "1.0" + +ip-info = { path = "../ip/ops/info" } +linode = { path = "../linode" } + +[dependencies.nomad_client] +git = "https://github.com/rivet-gg/nomad-client" +rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret + +[dependencies.sqlx] +git = "https://github.com/rivet-gg/sqlx" +rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" +default-features = false +features = [ "json", "ipnetwork" ] + +[build-dependencies] +merkle_hash = "3.6" +hex = "0.4" +tokio = { version = "1.29", features = ["full"] } diff --git a/svc/pkg/cluster/worker/Service.toml b/svc/pkg/cluster/Service.toml similarity index 78% rename from svc/pkg/cluster/worker/Service.toml rename to svc/pkg/cluster/Service.toml index d4fb93058..c4e73d04c 100644 --- a/svc/pkg/cluster/worker/Service.toml +++ b/svc/pkg/cluster/Service.toml @@ -1,10 +1,10 @@ [service] -name = "cluster-worker" +name = "cluster" [runtime] kind = "rust" -[consumer] +[package] [secrets] "rivet/api_traefik_provider/token" = {} @@ -12,4 +12,4 @@ kind = "rust" "ssh/server/private_key_openssh" = {} [databases] -bucket-build = {} +db-cluster = {} diff --git a/svc/pkg/cluster/util/build.rs b/svc/pkg/cluster/build.rs similarity index 81% rename from svc/pkg/cluster/util/build.rs rename to svc/pkg/cluster/build.rs index eed68bebb..d454c7c22 100644 --- a/svc/pkg/cluster/util/build.rs +++ b/svc/pkg/cluster/build.rs @@ -1,7 +1,7 @@ use std::path::PathBuf; -use tokio::fs; use merkle_hash::MerkleTree; +use tokio::fs; // NOTE: This only gets the hash of the folder. Any template variables changed in the install scripts // will not update the hash. 
@@ -10,15 +10,11 @@ use merkle_hash::MerkleTree; async fn main() { let out_dir = PathBuf::from(std::env::var("OUT_DIR").unwrap()); let current_dir = std::env::current_dir().unwrap(); - let server_install_path = { - let mut dir = current_dir.clone(); - dir.pop(); - - dir.join("worker") - .join("src") - .join("workers") - .join("server_install") - }; + let server_install_path = current_dir + .join("src") + .join("workflows") + .join("server") + .join("install"); // Add rereun statement println!("cargo:rerun-if-changed={}", server_install_path.display()); diff --git a/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.down.sql b/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.down.sql new file mode 100644 index 000000000..e69de29bb diff --git a/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.up.sql b/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.up.sql new file mode 100644 index 000000000..5c8486dec --- /dev/null +++ b/svc/pkg/cluster/db/cluster/migrations/20240701225245_add_json.up.sql @@ -0,0 +1,13 @@ +ALTER TABLE datacenters + ADD COLUMN pools2 JSONB, -- Vec + ADD COLUMN provider2 JSONB, -- cluster::types::Provider + ADD COLUMN build_delivery_method2 JSONB; -- cluster::types::BuildDeliveryMethod + +ALTER TABLE servers + ADD COLUMN pool_type2 JSONB; -- cluster::types::PoolType + +-- Moved to db-linode +DROP TABLE server_images_linode; + +ALTER TABLE datacenter_tls + ADD COLUMN state2 JSONB; -- cluster::types::TlsState diff --git a/svc/pkg/cluster/ops/datacenter-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-get/Cargo.toml deleted file mode 100644 index 7e1073ba2..000000000 --- a/svc/pkg/cluster/ops/datacenter-get/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-datacenter-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-get/Service.toml b/svc/pkg/cluster/ops/datacenter-get/Service.toml deleted file mode 100644 index a0f9d3cb5..000000000 --- a/svc/pkg/cluster/ops/datacenter-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-get/src/lib.rs deleted file mode 100644 index d0cd01826..000000000 --- a/svc/pkg/cluster/ops/datacenter-get/src/lib.rs +++ /dev/null @@ -1,107 +0,0 @@ -use std::convert::{TryFrom, TryInto}; - -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Datacenter { - datacenter_id: Uuid, - cluster_id: Uuid, - name_id: String, - display_name: String, - provider: i64, - provider_datacenter_id: String, - provider_api_token: Option, - pools: Vec, - build_delivery_method: i64, - prebakes_enabled: bool, - create_ts: i64, -} - -impl TryFrom for backend::cluster::Datacenter { - type Error = GlobalError; - - fn try_from(value: Datacenter) -> GlobalResult { - let pools = cluster::msg::datacenter_create::Pools::decode(value.pools.as_slice())?.pools; - - Ok(backend::cluster::Datacenter { 
- datacenter_id: Some(value.datacenter_id.into()), - cluster_id: Some(value.cluster_id.into()), - name_id: value.name_id, - display_name: value.display_name, - create_ts: value.create_ts, - provider: value.provider as i32, - provider_datacenter_id: value.provider_datacenter_id, - provider_api_token: value.provider_api_token, - pools, - build_delivery_method: value.build_delivery_method as i32, - prebakes_enabled: value.prebakes_enabled, - }) - } -} - -#[operation(name = "cluster-datacenter-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let datacenters = ctx - .cache() - .fetch_all_proto("cluster.datacenters", datacenter_ids, { - let ctx = ctx.base(); - move |mut cache, datacenter_ids| { - let ctx = ctx.clone(); - async move { - let dcs = get_dcs(ctx, datacenter_ids).await?; - for dc in dcs { - let dc_id = unwrap!(dc.datacenter_id).as_uuid(); - cache.resolve(&dc_id, dc); - } - - Ok(cache) - } - } - }) - .await?; - - Ok(cluster::datacenter_get::Response { datacenters }) -} - -async fn get_dcs( - ctx: OperationContext<()>, - datacenter_ids: Vec, -) -> GlobalResult> { - let configs = sql_fetch_all!( - [ctx, Datacenter] - " - SELECT - datacenter_id, - cluster_id, - name_id, - display_name, - provider, - provider_datacenter_id, - provider_api_token, - pools, - build_delivery_method, - prebakes_enabled, - create_ts - FROM db_cluster.datacenters - WHERE datacenter_id = ANY($1) - ", - datacenter_ids, - ) - .await?; - - let datacenters = configs - .into_iter() - .map(TryInto::try_into) - .collect::>>()?; - - Ok(datacenters) -} diff --git a/svc/pkg/cluster/ops/datacenter-list/Cargo.toml b/svc/pkg/cluster/ops/datacenter-list/Cargo.toml deleted file mode 100644 index 9d8912e10..000000000 --- a/svc/pkg/cluster/ops/datacenter-list/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-datacenter-list" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-list/Service.toml b/svc/pkg/cluster/ops/datacenter-list/Service.toml deleted file mode 100644 index ebad6361d..000000000 --- a/svc/pkg/cluster/ops/datacenter-list/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-list" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-list/src/lib.rs b/svc/pkg/cluster/ops/datacenter-list/src/lib.rs deleted file mode 100644 index 674e76562..000000000 --- a/svc/pkg/cluster/ops/datacenter-list/src/lib.rs +++ /dev/null @@ -1,62 +0,0 @@ -use std::collections::HashMap; - -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Datacenter { - cluster_id: Uuid, - datacenter_id: Uuid, -} - -#[operation(name = "cluster-datacenter-list")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let cluster_ids = ctx - .cluster_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let datacenters = sql_fetch_all!( - [ctx, Datacenter] - " - SELECT - cluster_id, 
- datacenter_id - FROM db_cluster.datacenters - WHERE cluster_id = ANY($1) - ", - &cluster_ids - ) - .await?; - - // Fill in empty clusters - let mut dcs_by_cluster_id = cluster_ids - .iter() - .map(|cluster_id| (*cluster_id, Vec::new())) - .collect::>>(); - - for dc in datacenters { - dcs_by_cluster_id - .entry(dc.cluster_id) - .or_default() - .push(dc.datacenter_id); - } - - Ok(cluster::datacenter_list::Response { - clusters: dcs_by_cluster_id - .into_iter() - .map( - |(cluster_id, datacenter_ids)| cluster::datacenter_list::response::Cluster { - cluster_id: Some(cluster_id.into()), - datacenter_ids: datacenter_ids - .into_iter() - .map(Into::into) - .collect::>(), - }, - ) - .collect::>(), - }) -} diff --git a/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml deleted file mode 100644 index b8121e63d..000000000 --- a/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "cluster-datacenter-location-get" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } - -ip-info = { path = "../../../ip/ops/info" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-location-get/Service.toml b/svc/pkg/cluster/ops/datacenter-location-get/Service.toml deleted file mode 100644 index f6c3656b9..000000000 --- a/svc/pkg/cluster/ops/datacenter-location-get/Service.toml +++ /dev/null @@ -1,7 +0,0 @@ -[service] -name = "cluster-datacenter-location-get" - -[runtime] -kind = "rust" - -[operation] diff --git a/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs deleted file mode 100644 index 86ebbca11..000000000 --- a/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs +++ /dev/null @@ -1,100 +0,0 @@ -use std::net::IpAddr; - -use futures_util::{StreamExt, TryStreamExt}; -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[operation(name = "cluster-datacenter-location-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let datacenters = ctx - .cache() - .fetch_all_proto("cluster.datacenters.location", datacenter_ids, { - let ctx = ctx.base(); - move |mut cache, datacenter_ids| { - let ctx = ctx.clone(); - async move { - let dcs = query_dcs(ctx, datacenter_ids).await?; - for dc in dcs { - let dc_id = unwrap!(dc.datacenter_id).as_uuid(); - cache.resolve(&dc_id, dc); - } - - Ok(cache) - } - } - }) - .await?; - - Ok(cluster::datacenter_location_get::Response { datacenters }) -} - -async fn query_dcs( - ctx: OperationContext<()>, - datacenter_ids: Vec, -) -> GlobalResult> { - // NOTE: if there is no active GG node in a datacenter, we cannot retrieve its location - // Fetch the gg node public ip for each datacenter (there may be more than one, hence `DISTINCT`) - let server_rows = sql_fetch_all!( - [ctx, (Uuid, Option,)] - " - SELECT DISTINCT - datacenter_id, public_ip - FROM db_cluster.servers - WHERE - datacenter_id = ANY($1) AND - pool_type = $2 AND - cloud_destroy_ts IS NULL - -- For consistency - 
ORDER BY public_ip DESC - ", - &datacenter_ids, - backend::cluster::PoolType::Gg as i64, - ) - .await?; - - let coords_res = futures_util::stream::iter(server_rows) - .map(|(datacenter_id, public_ip)| { - let ctx = ctx.base(); - - async move { - if let Some(public_ip) = public_ip { - // Fetch IP info of GG node (this is cached inside `ip_info`) - let ip_info_res = op!([ctx] ip_info { - ip: public_ip.to_string(), - provider: ip::info::Provider::IpInfoIo as i32, - }) - .await?; - GlobalResult::Ok(( - datacenter_id, - ip_info_res - .ip_info - .as_ref() - .and_then(|info| info.coords.clone()), - )) - } else { - GlobalResult::Ok((datacenter_id, None)) - } - } - }) - .buffer_unordered(8) - .try_collect::>() - .await?; - - Ok(coords_res - .into_iter() - .map( - |(datacenter_id, coords)| cluster::datacenter_location_get::response::Datacenter { - datacenter_id: Some(datacenter_id.into()), - coords, - }, - ) - .collect::>()) -} diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml deleted file mode 100644 index aa845fc9a..000000000 --- a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-resolve-for-name-id" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs deleted file mode 100644 index ff2e9e3ec..000000000 --- a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs +++ /dev/null @@ -1,41 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Datacenter { - datacenter_id: Uuid, - name_id: String, -} - -#[operation(name = "cluster-datacenter-resolve-for-name-id")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - - let datacenters = sql_fetch_all!( - [ctx, Datacenter] - " - SELECT - datacenter_id, - name_id - FROM db_cluster.datacenters - WHERE - cluster_id = $1 AND - name_id = ANY($2) - ", - &cluster_id, - &ctx.name_ids, - ) - .await? 
- .into_iter() - .map( - |dc| cluster::datacenter_resolve_for_name_id::response::Datacenter { - datacenter_id: Some(dc.datacenter_id.into()), - name_id: dc.name_id, - }, - ) - .collect::>(); - - Ok(cluster::datacenter_resolve_for_name_id::Response { datacenters }) -} diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-tls-get/Cargo.toml deleted file mode 100644 index 9b3cd70b5..000000000 --- a/svc/pkg/cluster/ops/datacenter-tls-get/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-datacenter-tls-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/Service.toml b/svc/pkg/cluster/ops/datacenter-tls-get/Service.toml deleted file mode 100644 index a09426cfc..000000000 --- a/svc/pkg/cluster/ops/datacenter-tls-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-tls-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-tls-get/src/lib.rs deleted file mode 100644 index 3aaa8d074..000000000 --- a/svc/pkg/cluster/ops/datacenter-tls-get/src/lib.rs +++ /dev/null @@ -1,60 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct DatacenterTls { - datacenter_id: Uuid, - gg_cert_pem: Option, - gg_private_key_pem: Option, - job_cert_pem: Option, - job_private_key_pem: Option, - state: i64, - expire_ts: i64, -} - -impl From for cluster::datacenter_tls_get::response::Datacenter { - fn from(value: DatacenterTls) -> Self { - cluster::datacenter_tls_get::response::Datacenter { - datacenter_id: Some(value.datacenter_id.into()), - gg_cert_pem: value.gg_cert_pem, - gg_private_key_pem: value.gg_private_key_pem, - job_cert_pem: value.job_cert_pem, - job_private_key_pem: value.job_private_key_pem, - state: value.state as i32, - expire_ts: value.expire_ts, - } - } -} - -#[operation(name = "cluster-datacenter-tls-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let rows = sql_fetch_all!( - [ctx, DatacenterTls] - " - SELECT - datacenter_id, - gg_cert_pem, - gg_private_key_pem, - job_cert_pem, - job_private_key_pem, - state, - expire_ts - FROM db_cluster.datacenter_tls - WHERE datacenter_id = ANY($1) - ", - datacenter_ids, - ) - .await?; - - Ok(cluster::datacenter_tls_get::Response { - datacenters: rows.into_iter().map(Into::into).collect::>(), - }) -} diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml deleted file mode 100644 index 51d851aa8..000000000 --- a/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "cluster-datacenter-topology-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -lazy_static 
= "1.4" -nomad-util = { path = "../../../../../lib/nomad-util" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.nomad_client] -git = "https://github.com/rivet-gg/nomad-client" -rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/README.md b/svc/pkg/cluster/ops/datacenter-topology-get/README.md deleted file mode 100644 index b24df2068..000000000 --- a/svc/pkg/cluster/ops/datacenter-topology-get/README.md +++ /dev/null @@ -1,3 +0,0 @@ -# datacenter-topology-get - -Fetch the nomad topology for all job servers in a datacenter diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml b/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml deleted file mode 100644 index 3c31348cf..000000000 --- a/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-datacenter-topology-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/get-for-game/Cargo.toml b/svc/pkg/cluster/ops/get-for-game/Cargo.toml deleted file mode 100644 index 5ac4fb817..000000000 --- a/svc/pkg/cluster/ops/get-for-game/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "cluster-get-for-game" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/get-for-game/Service.toml b/svc/pkg/cluster/ops/get-for-game/Service.toml deleted file mode 100644 index c6b8f6f34..000000000 --- a/svc/pkg/cluster/ops/get-for-game/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-get-for-game" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/get-for-game/src/lib.rs b/svc/pkg/cluster/ops/get-for-game/src/lib.rs deleted file mode 100644 index 053a09d0b..000000000 --- a/svc/pkg/cluster/ops/get-for-game/src/lib.rs +++ /dev/null @@ -1,42 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[operation(name = "cluster-get-for-game")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let game_ids = ctx - .game_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let rows = sql_fetch_optional!( - [ctx, (Uuid, Option)] - " - SELECT - g.game_id, gc.cluster_id - FROM unnest($1) AS g(game_id) - LEFT JOIN db_cluster.games AS gc - ON g.game_id = gc.game_id - ", - game_ids, - ) - .await?; - - Ok(cluster::get_for_game::Response { - games: rows - .into_iter() - .map( - |(game_id, cluster_id)| cluster::get_for_game::response::Game { - game_id: Some(game_id.into()), - cluster_id: Some( - cluster_id - .unwrap_or_else(util_cluster::default_cluster_id) - .into(), - ), - }, - ) - .collect::>(), - }) -} diff --git 
a/svc/pkg/cluster/ops/get/Cargo.toml b/svc/pkg/cluster/ops/get/Cargo.toml deleted file mode 100644 index 7f62318ae..000000000 --- a/svc/pkg/cluster/ops/get/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/get/Service.toml b/svc/pkg/cluster/ops/get/Service.toml deleted file mode 100644 index 06f53f69b..000000000 --- a/svc/pkg/cluster/ops/get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/get/src/lib.rs b/svc/pkg/cluster/ops/get/src/lib.rs deleted file mode 100644 index e4892bc35..000000000 --- a/svc/pkg/cluster/ops/get/src/lib.rs +++ /dev/null @@ -1,53 +0,0 @@ -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Cluster { - cluster_id: Uuid, - name_id: String, - owner_team_id: Option, - create_ts: i64, -} - -impl From for backend::cluster::Cluster { - fn from(value: Cluster) -> Self { - backend::cluster::Cluster { - cluster_id: Some(value.cluster_id.into()), - name_id: value.name_id, - owner_team_id: value.owner_team_id.map(Into::into), - create_ts: value.create_ts, - } - } -} - -#[operation(name = "cluster-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let crdb = ctx.crdb().await?; - let cluster_ids = ctx - .cluster_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let clusters = sql_fetch_all!( - [ctx, Cluster, &crdb] - " - SELECT - cluster_id, - name_id, - owner_team_id, - create_ts - FROM db_cluster.clusters - WHERE cluster_id = ANY($1) - ", - cluster_ids - ) - .await? 
- .into_iter() - .map(Into::into) - .collect::>(); - - Ok(cluster::get::Response { clusters }) -} diff --git a/svc/pkg/cluster/ops/list/Cargo.toml b/svc/pkg/cluster/ops/list/Cargo.toml deleted file mode 100644 index 99f578e4a..000000000 --- a/svc/pkg/cluster/ops/list/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-list" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/list/Service.toml b/svc/pkg/cluster/ops/list/Service.toml deleted file mode 100644 index a41334f3f..000000000 --- a/svc/pkg/cluster/ops/list/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-list" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/list/src/lib.rs b/svc/pkg/cluster/ops/list/src/lib.rs deleted file mode 100644 index 62cca800f..000000000 --- a/svc/pkg/cluster/ops/list/src/lib.rs +++ /dev/null @@ -1,46 +0,0 @@ -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Cluster { - cluster_id: Uuid, - name_id: String, - owner_team_id: Option, - create_ts: i64, -} - -impl From for backend::cluster::Cluster { - fn from(value: Cluster) -> Self { - backend::cluster::Cluster { - cluster_id: Some(value.cluster_id.into()), - name_id: value.name_id, - owner_team_id: value.owner_team_id.map(Into::into), - create_ts: value.create_ts, - } - } -} - -#[operation(name = "cluster-list")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let crdb = ctx.crdb().await?; - - let cluster_ids = sql_fetch_all!( - [ctx, Cluster, &crdb] - " - SELECT - cluster_id, - name_id, - owner_team_id, - create_ts - FROM db_cluster.clusters - ", - ) - .await? 
- .into_iter() - .map(|cluster| cluster.cluster_id.into()) - .collect::>(); - - Ok(cluster::list::Response { cluster_ids }) -} diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/Cargo.toml b/svc/pkg/cluster/ops/resolve-for-name-id/Cargo.toml deleted file mode 100644 index 2fdac5937..000000000 --- a/svc/pkg/cluster/ops/resolve-for-name-id/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-resolve-for-name-id" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/Service.toml b/svc/pkg/cluster/ops/resolve-for-name-id/Service.toml deleted file mode 100644 index e944c42f5..000000000 --- a/svc/pkg/cluster/ops/resolve-for-name-id/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-resolve-for-name-id" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/src/lib.rs b/svc/pkg/cluster/ops/resolve-for-name-id/src/lib.rs deleted file mode 100644 index 8cafcf105..000000000 --- a/svc/pkg/cluster/ops/resolve-for-name-id/src/lib.rs +++ /dev/null @@ -1,35 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Cluster { - cluster_id: Uuid, - name_id: String, -} - -#[operation(name = "cluster-resolve-for-name-id")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let clusters = sql_fetch_all!( - [ctx, Cluster] - " - SELECT - cluster_id, - name_id - FROM db_cluster.clusters - WHERE - name_id = ANY($1) - ", - &ctx.name_ids, - ) - .await? 
- .into_iter() - .map(|dc| cluster::resolve_for_name_id::response::Cluster { - cluster_id: Some(dc.cluster_id.into()), - name_id: dc.name_id, - }) - .collect::>(); - - Ok(cluster::resolve_for_name_id::Response { clusters }) -} diff --git a/svc/pkg/cluster/ops/server-destroy-with-filter/Cargo.toml b/svc/pkg/cluster/ops/server-destroy-with-filter/Cargo.toml deleted file mode 100644 index d2e39e60b..000000000 --- a/svc/pkg/cluster/ops/server-destroy-with-filter/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-server-destroy-with-filter" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -cluster-server-list = { path = "../server-list" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-destroy-with-filter/Service.toml b/svc/pkg/cluster/ops/server-destroy-with-filter/Service.toml deleted file mode 100644 index 12aba4dd1..000000000 --- a/svc/pkg/cluster/ops/server-destroy-with-filter/Service.toml +++ /dev/null @@ -1,7 +0,0 @@ -[service] -name = "cluster-server-destroy-with-filter" - -[runtime] -kind = "rust" - -[operation] diff --git a/svc/pkg/cluster/ops/server-get/Cargo.toml b/svc/pkg/cluster/ops/server-get/Cargo.toml deleted file mode 100644 index 5861543d2..000000000 --- a/svc/pkg/cluster/ops/server-get/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "cluster-server-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false -features = [ "ipnetwork" ] - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-get/Service.toml b/svc/pkg/cluster/ops/server-get/Service.toml deleted file mode 100644 index 496afacad..000000000 --- a/svc/pkg/cluster/ops/server-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-server-get" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-get/src/lib.rs b/svc/pkg/cluster/ops/server-get/src/lib.rs deleted file mode 100644 index ff7297cfb..000000000 --- a/svc/pkg/cluster/ops/server-get/src/lib.rs +++ /dev/null @@ -1,71 +0,0 @@ -use std::{ - convert::{TryFrom, TryInto}, - net::IpAddr, -}; - -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Server { - server_id: Uuid, - cluster_id: Uuid, - datacenter_id: Uuid, - pool_type: i64, - vlan_ip: Option, - public_ip: Option, - cloud_destroy_ts: Option, -} - -impl TryFrom for backend::cluster::Server { - type Error = GlobalError; - - fn try_from(value: Server) -> GlobalResult { - Ok(backend::cluster::Server { - server_id: Some(value.server_id.into()), - cluster_id: Some(value.cluster_id.into()), - datacenter_id: Some(value.datacenter_id.into()), - pool_type: value.pool_type.try_into()?, - vlan_ip: value.vlan_ip.map(|ip| ip.to_string()), - public_ip: 
value.public_ip.map(|ip| ip.to_string()), - cloud_destroy_ts: value.cloud_destroy_ts, - }) - } -} - -#[operation(name = "cluster-server-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let server_ids = ctx - .server_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let servers = sql_fetch_all!( - [ctx, Server] - " - SELECT - server_id, - d.cluster_id, - s.datacenter_id, - pool_type, - vlan_ip, - public_ip, - cloud_destroy_ts - FROM db_cluster.servers AS s - LEFT JOIN db_cluster.datacenters AS d ON s.datacenter_id = d.datacenter_id - WHERE server_id = ANY($1) - ", - server_ids - ) - .await?; - - Ok(cluster::server_get::Response { - servers: servers - .into_iter() - .map(TryInto::try_into) - .collect::>>()?, - }) -} diff --git a/svc/pkg/cluster/ops/server-list/Cargo.toml b/svc/pkg/cluster/ops/server-list/Cargo.toml deleted file mode 100644 index e79c7aabf..000000000 --- a/svc/pkg/cluster/ops/server-list/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-server-list" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-list/Service.toml b/svc/pkg/cluster/ops/server-list/Service.toml deleted file mode 100644 index f0def326f..000000000 --- a/svc/pkg/cluster/ops/server-list/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-server-list" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-list/src/lib.rs b/svc/pkg/cluster/ops/server-list/src/lib.rs deleted file mode 100644 index cc9b50d91..000000000 --- a/svc/pkg/cluster/ops/server-list/src/lib.rs +++ /dev/null @@ -1,123 +0,0 @@ -use std::{ - convert::{TryFrom, TryInto}, - net::IpAddr, -}; - -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Server { - server_id: Uuid, - cluster_id: Uuid, - datacenter_id: Uuid, - pool_type: i64, - vlan_ip: Option, - public_ip: Option, - cloud_destroy_ts: Option, -} - -impl TryFrom for backend::cluster::Server { - type Error = GlobalError; - - fn try_from(value: Server) -> GlobalResult { - Ok(backend::cluster::Server { - server_id: Some(value.server_id.into()), - cluster_id: Some(value.cluster_id.into()), - datacenter_id: Some(value.datacenter_id.into()), - pool_type: value.pool_type.try_into()?, - vlan_ip: value.vlan_ip.map(|ip| ip.to_string()), - public_ip: value.public_ip.map(|ip| ip.to_string()), - cloud_destroy_ts: value.cloud_destroy_ts, - }) - } -} - -#[operation(name = "cluster-server-list")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let filter = unwrap_ref!(ctx.filter); - - let server_ids = if filter.filter_server_ids { - Some( - filter - .server_ids - .iter() - .map(|&x| x.into()) - .collect::>(), - ) - } else { - None - }; - let cluster_ids = if filter.filter_cluster_ids { - Some( - filter - .cluster_ids - .iter() - .map(|&x| x.into()) - .collect::>(), - ) - } else { - None - }; - let datacenter_ids = if filter.filter_datacenter_ids { - Some( - filter - .datacenter_ids - .iter() - .map(|&x| x.into()) - .collect::>(), - 
) - } else { - None - }; - let pool_types = if filter.filter_pool_types { - Some(&filter.pool_types) - } else { - None - }; - let public_ips = if filter.filter_public_ips { - Some(&filter.public_ips) - } else { - None - }; - - let servers = sql_fetch_all!( - [ctx, Server] - " - SELECT - s.server_id, - d.cluster_id, - s.datacenter_id, - s.pool_type, - s.vlan_ip, - s.public_ip, - s.cloud_destroy_ts - FROM db_cluster.servers AS s - JOIN db_cluster.datacenters AS d - ON s.datacenter_id = d.datacenter_id - WHERE - ($1 OR s.cloud_destroy_ts IS NULL) - AND ($2 IS NULL OR s.server_id = ANY($2)) - AND ($3 IS NULL OR d.cluster_id = ANY($3)) - AND ($4 IS NULL OR s.datacenter_id = ANY($4)) - AND ($5 IS NULL OR s.pool_type = ANY($5)) - AND ($6 IS NULL OR s.public_ip = ANY($6::inet[])) - ", - ctx.include_destroyed, - &server_ids, - &cluster_ids, - &datacenter_ids, - &pool_types, - &public_ips, - ) - .await?; - - Ok(cluster::server_list::Response { - servers: servers - .into_iter() - .map(TryInto::try_into) - .collect::>>()?, - }) -} diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml b/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml deleted file mode 100644 index 43e8d63f7..000000000 --- a/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "cluster-server-resolve-for-ip" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml b/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml deleted file mode 100644 index 0ad9fa42d..000000000 --- a/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "cluster-server-resolve-for-ip" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs b/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs deleted file mode 100644 index d28efd5a2..000000000 --- a/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs +++ /dev/null @@ -1,42 +0,0 @@ -use std::net::IpAddr; - -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[derive(sqlx::FromRow)] -struct Server { - server_id: Uuid, - public_ip: IpAddr, -} - -impl From for cluster::server_resolve_for_ip::response::Server { - fn from(value: Server) -> Self { - cluster::server_resolve_for_ip::response::Server { - server_id: Some(value.server_id.into()), - public_ip: value.public_ip.to_string(), - } - } -} - -#[operation(name = "cluster-server-resolve-for-ip")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let servers = sql_fetch_all!( - [ctx, Server] - " - SELECT - server_id, public_ip - FROM db_cluster.servers - WHERE - public_ip = ANY($1) AND - cloud_destroy_ts IS NULL - ", - &ctx.ips - ) - .await?; - - Ok(cluster::server_resolve_for_ip::Response { - servers: servers.into_iter().map(Into::into).collect::>(), - }) -} diff --git a/svc/pkg/cluster/src/lib.rs b/svc/pkg/cluster/src/lib.rs new file mode 100644 index 000000000..c84805708 --- /dev/null +++ b/svc/pkg/cluster/src/lib.rs @@ -0,0 +1,15 @@ +use 
chirp_workflow::prelude::*; + +pub mod ops; +pub mod types; +pub mod util; +pub mod workflows; + +// pub fn registry() -> Registry { +// use workflows::*; + +// let mut registry = Registry::new(); +// registry.register_workflow::(); + +// registry +// } diff --git a/svc/pkg/cluster/src/ops/datacenter/get.rs b/svc/pkg/cluster/src/ops/datacenter/get.rs new file mode 100644 index 000000000..1fbe9e650 --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/get.rs @@ -0,0 +1,129 @@ +use std::convert::{TryFrom, TryInto}; + +use chirp_workflow::prelude::*; +use rivet_operation::prelude::{proto::backend, Message}; + +use crate::types::{BuildDeliveryMethod, Datacenter, Pool, Provider}; + +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +#[derive(sqlx::FromRow)] +struct DatacenterRow { + datacenter_id: Uuid, + cluster_id: Uuid, + name_id: String, + display_name: String, + provider2: Option>, + provider: i64, + provider_datacenter_id: String, + provider_api_token: Option, + pools2: Option>>, + pools: Vec, + build_delivery_method2: Option>, + build_delivery_method: i64, + prebakes_enabled: bool, + create_ts: i64, +} + +impl TryFrom for Datacenter { + type Error = GlobalError; + + fn try_from(value: DatacenterRow) -> GlobalResult { + Ok(Datacenter { + datacenter_id: value.datacenter_id, + cluster_id: value.cluster_id, + name_id: value.name_id, + display_name: value.display_name, + create_ts: value.create_ts, + // Handle backwards compatibility + provider: if let Some(provider) = value.provider2 { + provider.0 + } else { + value.provider.try_into()? + }, + provider_datacenter_id: value.provider_datacenter_id, + provider_api_token: value.provider_api_token, + // Handle backwards compatibility + pools: if let Some(pools) = value.pools2 { + pools.0 + } else { + let proto = backend::cluster::Pools::decode(value.pools.as_slice())?.pools; + + proto + .into_iter() + .map(TryInto::try_into) + .collect::>>()? + }, + // Handle backwards compatibility + build_delivery_method: if let Some(build_delivery_method) = value.build_delivery_method2 + { + build_delivery_method.0 + } else { + value.build_delivery_method.try_into()? 
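
The `provider2`/`pools2`/`build_delivery_method2` columns above implement a dual-read for backwards compatibility: the new column wins when present, otherwise the legacy value is decoded. A standalone sketch of that fallback, assuming the stripped wrapper type is `sqlx::types::Json` (consistent with the `.0` field access above); the helper name is hypothetical:

	use sqlx::types::Json;

	// Prefer the new JSONB column; fall back to decoding the legacy int column.
	fn resolve_provider(provider2: Option<Json<Provider>>, provider: i64) -> GlobalResult<Provider> {
		match provider2 {
			Some(json) => Ok(json.0),
			None => provider.try_into(),
		}
	}
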
+ }, + prebakes_enabled: value.prebakes_enabled, + }) + } +} + +#[operation] +pub async fn cluster_datacenter_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let datacenters = ctx + .cache() + .fetch_all_json("cluster.datacenters", input.datacenter_ids.clone(), { + let ctx = ctx.clone(); + move |mut cache, datacenter_ids| { + let ctx = ctx.clone(); + async move { + let dcs = get_dcs(ctx, datacenter_ids).await?; + for dc in dcs { + let dc_id = dc.datacenter_id; + cache.resolve(&dc_id, dc); + } + + Ok(cache) + } + } + }) + .await?; + + Ok(Output { datacenters }) +} + +async fn get_dcs(ctx: OperationCtx, datacenter_ids: Vec) -> GlobalResult> { + let dc_rows = sql_fetch_all!( + [ctx, DatacenterRow] + " + SELECT + datacenter_id, + cluster_id, + name_id, + display_name, + provider, + provider2, + provider_datacenter_id, + provider_api_token, + pools, + pools2, + build_delivery_method, + build_delivery_method2, + prebakes_enabled, + create_ts + FROM db_cluster.datacenters + WHERE datacenter_id = ANY($1) + ", + datacenter_ids, + ) + .await?; + + dc_rows + .into_iter() + .map(TryInto::try_into) + .collect::>>() +} diff --git a/svc/pkg/cluster/src/ops/datacenter/list.rs b/svc/pkg/cluster/src/ops/datacenter/list.rs new file mode 100644 index 000000000..1ca195ac8 --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/list.rs @@ -0,0 +1,56 @@ +use std::collections::HashMap; + +use chirp_workflow::prelude::*; + +pub struct Input { + pub cluster_ids: Vec, +} + +pub struct Output { + pub clusters: Vec, +} + +pub struct Cluster { + pub cluster_id: Uuid, + pub datacenter_ids: Vec, +} + +#[operation] +pub async fn cluster_datacenter_list(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let rows = sql_fetch_all!( + [ctx, (Uuid, Uuid)] + " + SELECT + cluster_id, + datacenter_id + FROM db_cluster.datacenters + WHERE cluster_id = ANY($1) + ", + &input.cluster_ids, + ) + .await?; + + // Fill in empty clusters + let mut dcs_by_cluster_id = input + .cluster_ids + .iter() + .map(|cluster_id| (*cluster_id, Vec::new())) + .collect::>>(); + + for (cluster_id, datacenter_id) in rows { + dcs_by_cluster_id + .entry(cluster_id) + .or_default() + .push(datacenter_id); + } + + Ok(Output { + clusters: dcs_by_cluster_id + .into_iter() + .map(|(cluster_id, datacenter_ids)| Cluster { + cluster_id, + datacenter_ids, + }) + .collect::>(), + }) +} diff --git a/svc/pkg/cluster/src/ops/datacenter/location_get.rs b/svc/pkg/cluster/src/ops/datacenter/location_get.rs new file mode 100644 index 000000000..a0da752ce --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/location_get.rs @@ -0,0 +1,121 @@ +use std::net::IpAddr; + +use chirp_workflow::prelude::*; +use futures_util::{StreamExt, TryStreamExt}; +use rivet_operation::prelude::proto::backend::pkg::*; + +use crate::types::PoolType; + +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Datacenter { + datacenter_id: Uuid, + coords: Coordinates, +} + +// TODO: Move to a common types lib +#[derive(Debug, Serialize, Deserialize)] +pub struct Coordinates { + longitude: f64, + latitude: f64, +} + +#[operation] +pub async fn cluster_datacenter_location_get( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let datacenters = ctx + .cache() + .fetch_all_json( + "cluster.datacenters.location", + input.datacenter_ids.clone(), + { + let ctx = ctx.clone(); + move |mut cache, datacenter_ids| { + let ctx = ctx.clone(); + async move { + let dcs = query_dcs(ctx, 
datacenter_ids).await?; + for dc in dcs { + let dc_id = dc.datacenter_id; + cache.resolve(&dc_id, dc); + } + + Ok(cache) + } + } + }, + ) + .await?; + + Ok(Output { datacenters }) +} + +async fn query_dcs(ctx: OperationCtx, datacenter_ids: Vec) -> GlobalResult> { + // NOTE: if there is no active GG node in a datacenter, we cannot retrieve its location + // Fetch the gg node public ip for each datacenter (there may be more than one, hence `DISTINCT`) + let server_rows = sql_fetch_all!( + [ctx, (Uuid, IpAddr)] + " + SELECT DISTINCT + datacenter_id, public_ip + FROM db_cluster.servers + WHERE + datacenter_id = ANY($1) AND + pool_type2 = $2 AND + public_ip IS NOT NULL AND + cloud_destroy_ts IS NULL + -- For consistency + ORDER BY public_ip DESC + ", + &datacenter_ids, + serde_json::to_string(&PoolType::Gg)?, + ) + .await?; + + let coords_res = futures_util::stream::iter(server_rows) + .map(|(datacenter_id, public_ip)| { + let ctx = ctx.clone(); + + async move { + // Fetch IP info of GG node (this is cached inside `ip_info`) + let ip_info_res = op!([ctx] ip_info { + ip: public_ip.to_string(), + provider: ip::info::Provider::IpInfoIo as i32, + }) + .await?; + + GlobalResult::Ok(( + datacenter_id, + ip_info_res + .ip_info + .as_ref() + .and_then(|info| info.coords.as_ref()) + .map(|coords| Coordinates { + longitude: coords.longitude, + latitude: coords.latitude, + }), + )) + } + }) + .buffer_unordered(8) + .try_collect::>() + .await?; + + Ok(coords_res + .into_iter() + .filter_map(|(datacenter_id, coords)| { + coords.map(|coords| Datacenter { + datacenter_id, + coords, + }) + }) + .collect::>()) +} diff --git a/svc/pkg/cluster/src/ops/datacenter/mod.rs b/svc/pkg/cluster/src/ops/datacenter/mod.rs new file mode 100644 index 000000000..dce3767e2 --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/mod.rs @@ -0,0 +1,6 @@ +pub mod get; +pub mod list; +pub mod location_get; +pub mod resolve_for_name_id; +pub mod tls_get; +pub mod topology_get; diff --git a/svc/pkg/cluster/src/ops/datacenter/resolve_for_name_id.rs b/svc/pkg/cluster/src/ops/datacenter/resolve_for_name_id.rs new file mode 100644 index 000000000..a47d8f27e --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/resolve_for_name_id.rs @@ -0,0 +1,40 @@ +use chirp_workflow::prelude::*; + +pub struct Input { + pub cluster_id: Uuid, + pub name_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +#[derive(sqlx::FromRow)] +pub struct Datacenter { + pub datacenter_id: Uuid, + pub name_id: String, +} + +#[operation] +pub async fn cluster_datacenter_resolve_for_name_id( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let datacenters = sql_fetch_all!( + [ctx, Datacenter] + " + SELECT + datacenter_id, + name_id + FROM db_cluster.datacenters + WHERE + cluster_id = $1 AND + name_id = ANY($2) + ", + &input.cluster_id, + &input.name_ids, + ) + .await?; + + Ok(Output { datacenters }) +} diff --git a/svc/pkg/cluster/src/ops/datacenter/tls_get.rs b/svc/pkg/cluster/src/ops/datacenter/tls_get.rs new file mode 100644 index 000000000..8f28e7dbf --- /dev/null +++ b/svc/pkg/cluster/src/ops/datacenter/tls_get.rs @@ -0,0 +1,85 @@ +use std::convert::{TryFrom, TryInto}; + +use chirp_workflow::prelude::*; + +use crate::types::TlsState; + +#[derive(sqlx::FromRow)] +struct DatacenterTlsRow { + datacenter_id: Uuid, + gg_cert_pem: Option, + gg_private_key_pem: Option, + job_cert_pem: Option, + job_private_key_pem: Option, + state: i64, + state2: Option>, + expire_ts: i64, +} + +impl TryFrom for DatacenterTls { + type Error = GlobalError; + + fn 
try_from(value: DatacenterTlsRow) -> GlobalResult { + Ok(DatacenterTls { + datacenter_id: value.datacenter_id, + gg_cert_pem: value.gg_cert_pem, + gg_private_key_pem: value.gg_private_key_pem, + job_cert_pem: value.job_cert_pem, + job_private_key_pem: value.job_private_key_pem, + // Handle backwards compatibility + state: if let Some(state) = value.state2 { + state.0 + } else { + value.state.try_into()? + }, + expire_ts: value.expire_ts, + }) + } +} + +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +pub struct DatacenterTls { + pub datacenter_id: Uuid, + pub gg_cert_pem: Option, + pub gg_private_key_pem: Option, + pub job_cert_pem: Option, + pub job_private_key_pem: Option, + pub state: TlsState, + pub expire_ts: i64, +} + +#[operation] +pub async fn cluster_datacenter_tls_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let rows = sql_fetch_all!( + [ctx, DatacenterTlsRow] + " + SELECT + datacenter_id, + gg_cert_pem, + gg_private_key_pem, + job_cert_pem, + job_private_key_pem, + state, + state2, + expire_ts + FROM db_cluster.datacenter_tls + WHERE datacenter_id = ANY($1) + ", + &input.datacenter_ids, + ) + .await?; + + Ok(Output { + datacenters: rows + .into_iter() + .map(TryInto::try_into) + .collect::>>()?, + }) +} diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs b/svc/pkg/cluster/src/ops/datacenter/topology_get.rs similarity index 75% rename from svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs rename to svc/pkg/cluster/src/ops/datacenter/topology_get.rs index 582a11ec1..c97dc7d33 100644 --- a/svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs +++ b/svc/pkg/cluster/src/ops/datacenter/topology_get.rs @@ -1,32 +1,52 @@ use std::collections::HashMap; +use chirp_workflow::prelude::*; use nomad_client::apis::{allocations_api, configuration::Configuration, nodes_api}; -use proto::backend::pkg::*; -use rivet_operation::prelude::*; lazy_static::lazy_static! 
{ static ref NOMAD_CONFIG: Configuration = nomad_util::new_config_from_env().unwrap(); } #[derive(sqlx::FromRow)] -struct Server { +struct ServerRow { server_id: Uuid, datacenter_id: Uuid, nomad_node_id: String, } -#[operation(name = "cluster-datacenter-topology-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let datacenter_ids = ctx - .datacenter_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); +pub struct Input { + pub datacenter_ids: Vec, +} + +pub struct Output { + pub datacenters: Vec, +} + +pub struct Datacenter { + pub datacenter_id: Uuid, + pub servers: Vec, +} + +pub struct Server { + pub server_id: Uuid, + pub node_id: String, + pub usage: Stats, + pub limits: Stats, +} +pub struct Stats { + pub cpu: u64, + pub memory: u64, + pub disk: u64, +} + +#[operation] +pub async fn cluster_datacenter_topology_get( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { let servers = sql_fetch_all!( - [ctx, Server] + [ctx, ServerRow] " SELECT server_id, datacenter_id, nomad_node_id @@ -37,7 +57,7 @@ pub async fn handle( cloud_destroy_ts IS NULL AND taint_ts IS NULL ", - &datacenter_ids, + &input.datacenter_ids, ) .await?; @@ -83,13 +103,14 @@ pub async fn handle( )?; // Preempt datacenters - let mut datacenters = datacenter_ids + let mut datacenters = input + .datacenter_ids .iter() .map(|datacenter_id| { ( *datacenter_id, - cluster::datacenter_topology_get::response::Datacenter { - datacenter_id: Some((*datacenter_id).into()), + Datacenter { + datacenter_id: *datacenter_id, servers: Vec::new(), }, ) @@ -97,7 +118,7 @@ pub async fn handle( .collect::>(); for server in servers { - let mut usage = cluster::datacenter_topology_get::response::Stats { + let mut usage = Stats { cpu: 0, memory: 0, disk: 0, @@ -146,7 +167,7 @@ pub async fn handle( format!("node not found {}", server.nomad_node_id) ); let resources = unwrap_ref!(node.node_resources); - let limits = cluster::datacenter_topology_get::response::Stats { + let limits = Stats { cpu: unwrap!(unwrap_ref!(resources.cpu).cpu_shares) as u64, memory: unwrap!(unwrap_ref!(resources.memory).memory_mb) as u64, disk: unwrap!(unwrap_ref!(resources.disk).disk_mb) as u64, @@ -154,17 +175,15 @@ pub async fn handle( let datacenter = unwrap!(datacenters.get_mut(&server.datacenter_id)); - datacenter - .servers - .push(cluster::datacenter_topology_get::response::Server { - server_id: Some(server.server_id.into()), - node_id: server.nomad_node_id, - usage: Some(usage), - limits: Some(limits), - }); + datacenter.servers.push(Server { + server_id: server.server_id, + node_id: server.nomad_node_id, + usage, + limits, + }); } - Ok(cluster::datacenter_topology_get::Response { - datacenters: datacenters.into_values().collect::>(), + Ok(Output { + datacenters: datacenters.into_values().collect(), }) } diff --git a/svc/pkg/cluster/src/ops/get.rs b/svc/pkg/cluster/src/ops/get.rs new file mode 100644 index 000000000..166b76afb --- /dev/null +++ b/svc/pkg/cluster/src/ops/get.rs @@ -0,0 +1,31 @@ +use chirp_workflow::prelude::*; + +use crate::types::Cluster; + +pub struct Input { + pub cluster_ids: Vec, +} + +pub struct Output { + pub clusters: Vec, +} + +#[operation] +pub async fn cluster_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let clusters = sql_fetch_all!( + [ctx, Cluster] + " + SELECT + cluster_id, + name_id, + owner_team_id, + create_ts + FROM db_cluster.clusters + WHERE cluster_id = ANY($1) + ", + &input.cluster_ids, + ) + .await?; + + Ok(Output { clusters }) +} diff --git 
a/svc/pkg/cluster/src/ops/get_for_game.rs b/svc/pkg/cluster/src/ops/get_for_game.rs
new file mode 100644
index 000000000..355eade9b
--- /dev/null
+++ b/svc/pkg/cluster/src/ops/get_for_game.rs
@@ -0,0 +1,40 @@
+use chirp_workflow::prelude::*;
+
+pub struct Input {
+	pub game_ids: Vec<Uuid>,
+}
+
+pub struct Output {
+	pub games: Vec<Game>,
+}
+
+pub struct Game {
+	pub game_id: Uuid,
+	pub cluster_id: Uuid,
+}
+
+#[operation]
+pub async fn cluster_get_for_game(ctx: &OperationCtx, input: &Input) -> GlobalResult<Output> {
+	let rows = sql_fetch_all!(
+		[ctx, (Uuid, Option<Uuid>)]
+		"
+		SELECT
+			g.game_id, gc.cluster_id
+		FROM unnest($1) AS g(game_id)
+		LEFT JOIN db_cluster.games AS gc
+		ON g.game_id = gc.game_id
+		",
+		&input.game_ids,
+	)
+	.await?;
+
+	Ok(Output {
+		games: rows
+			.into_iter()
+			.map(|(game_id, cluster_id)| Game {
+				game_id,
+				cluster_id: cluster_id.unwrap_or_else(crate::util::default_cluster_id),
+			})
+			.collect::<Vec<_>>(),
+	})
+}
diff --git a/svc/pkg/cluster/src/ops/list.rs b/svc/pkg/cluster/src/ops/list.rs
new file mode 100644
index 000000000..91744c570
--- /dev/null
+++ b/svc/pkg/cluster/src/ops/list.rs
@@ -0,0 +1,24 @@
+use chirp_workflow::prelude::*;
+
+pub struct Input {}
+
+pub struct Output {
+	pub cluster_ids: Vec<Uuid>,
+}
+
+#[operation]
+pub async fn cluster_list(ctx: &OperationCtx, input: &Input) -> GlobalResult<Output> {
+	let cluster_ids = sql_fetch_all!(
+		[ctx, (Uuid,)]
+		"
+		SELECT cluster_id
+		FROM db_cluster.clusters
+		",
+	)
+	.await?
+	.into_iter()
+	.map(|(cluster_id,)| cluster_id)
+	.collect::<Vec<_>>();
+
+	Ok(Output { cluster_ids })
+}
diff --git a/svc/pkg/cluster/src/ops/mod.rs b/svc/pkg/cluster/src/ops/mod.rs
new file mode 100644
index 000000000..d69e65b3d
--- /dev/null
+++ b/svc/pkg/cluster/src/ops/mod.rs
@@ -0,0 +1,6 @@
+pub mod datacenter;
+pub mod get;
+pub mod get_for_game;
+pub mod list;
+pub mod resolve_for_name_id;
+pub mod server;
diff --git a/svc/pkg/cluster/src/ops/resolve_for_name_id.rs b/svc/pkg/cluster/src/ops/resolve_for_name_id.rs
new file mode 100644
index 000000000..19923d682
--- /dev/null
+++ b/svc/pkg/cluster/src/ops/resolve_for_name_id.rs
@@ -0,0 +1,36 @@
+use chirp_workflow::prelude::*;
+
+pub struct Input {
+	pub name_ids: Vec<String>,
+}
+
+pub struct Output {
+	pub clusters: Vec<Cluster>,
+}
+
+#[derive(sqlx::FromRow)]
+pub struct Cluster {
+	pub cluster_id: Uuid,
+	pub name_id: String,
+}
+
+#[operation]
+pub async fn cluster_resolve_for_name_id(
+	ctx: &OperationCtx,
+	input: &Input,
+) -> GlobalResult<Output> {
+	let clusters = sql_fetch_all!(
+		[ctx, Cluster]
+		"
+		SELECT
+			cluster_id,
+			name_id
+		FROM db_cluster.clusters
+		WHERE
+			cluster_id = $1 AND
+			name_id = ANY($2)
+		",
+		&input.cluster_id,
+		&input.name_ids,
+	)
+	.await?;

	Wait
crate::types::Filter; + +pub struct Input { + pub filter: Filter, +} + +pub struct Output {} + +#[operation] +pub async fn cluster_server_destroy_with_filter( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let servers_res = ctx + .op(crate::ops::server::list::Input { + filter: input.filter.clone(), + include_destroyed: false, + }) + .await?; // Flag as destroyed let server_ids = servers_res .servers .iter() - .filter_map(|x| x.server_id) - .map(|x| x.as_uuid()) + .map(|x| x.server_id) .collect::>(); sql_execute!( [ctx] @@ -31,9 +42,9 @@ pub async fn handle( .await?; // Destroy server - for server_id in &server_ids { + for server_id in server_ids { msg!([ctx] cluster::msg::server_destroy(server_id) { - server_id: Some(server_id.clone().into()), + server_id: Some(server_id.into()), force: false, }) .await?; @@ -43,8 +54,7 @@ pub async fn handle( let dc_ids = servers_res .servers .iter() - .filter_map(|x| x.datacenter_id) - .map(|x| x.as_uuid()) + .map(|x| x.datacenter_id) .collect::>(); for dc_id in dc_ids { msg!([ctx] cluster::msg::datacenter_scale(dc_id) { @@ -53,5 +63,5 @@ pub async fn handle( .await?; } - Ok(cluster::server_destroy_with_filter::Response {}) + Ok(Output {}) } diff --git a/svc/pkg/cluster/src/ops/server/get.rs b/svc/pkg/cluster/src/ops/server/get.rs new file mode 100644 index 000000000..1f23e0294 --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/get.rs @@ -0,0 +1,73 @@ +use std::{ + convert::{TryFrom, TryInto}, + net::IpAddr, +}; + +use chirp_workflow::prelude::*; + +use crate::types::{PoolType, Server}; + +pub struct Input { + pub server_ids: Vec, +} + +pub struct Output { + pub servers: Vec, +} + +#[derive(sqlx::FromRow)] +pub(crate) struct ServerRow { + server_id: Uuid, + datacenter_id: Uuid, + pool_type2: Option>, + pool_type: i64, + vlan_ip: Option, + public_ip: Option, + cloud_destroy_ts: Option, +} + +impl TryFrom for Server { + type Error = GlobalError; + + fn try_from(value: ServerRow) -> GlobalResult { + Ok(Server { + server_id: value.server_id, + datacenter_id: value.datacenter_id, + // Handle backwards compatibility + pool_type: if let Some(pool_type) = value.pool_type2 { + pool_type.0 + } else { + value.pool_type.try_into()? + }, + vlan_ip: value.vlan_ip, + public_ip: value.public_ip, + cloud_destroy_ts: value.cloud_destroy_ts, + }) + } +} + +#[operation] +pub async fn cluster_server_get(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let servers = sql_fetch_all!( + [ctx, ServerRow] + " + SELECT + server_id, + datacenter_id, + pool_type, + pool_type2, + vlan_ip, + public_ip, + cloud_destroy_ts + FROM db_cluster.servers + WHERE server_id = ANY($1) + ", + &input.server_ids, + ) + .await? 
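
For reference, a hypothetical call site for the filter-based destroy above; `Filter` and `PoolType` are defined in `types.rs` later in this diff, and the `datacenter_id` binding is illustrative:

	// Destroy every GG server in one datacenter (sketch).
	ctx.op(cluster::ops::server::destroy_with_filter::Input {
		filter: cluster::types::Filter {
			server_ids: None,
			datacenter_ids: Some(vec![datacenter_id]),
			cluster_ids: None,
			pool_types: Some(vec![cluster::types::PoolType::Gg]),
			public_ips: None,
		},
	})
	.await?;
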
+ .into_iter() + .map(TryInto::try_into) + .collect::>>()?; + + Ok(Output { servers }) +} diff --git a/svc/pkg/cluster/src/ops/server/list.rs b/svc/pkg/cluster/src/ops/server/list.rs new file mode 100644 index 000000000..2189bb0b7 --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/list.rs @@ -0,0 +1,66 @@ +use std::{convert::TryInto, net::IpAddr}; + +use chirp_workflow::prelude::*; + +use super::get::ServerRow; +use crate::types::{Filter, Server}; + +pub struct Input { + pub filter: Filter, + pub include_destroyed: bool, +} + +pub struct Output { + pub servers: Vec, +} + +#[operation] +pub async fn cluster_server_list(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let servers = sql_fetch_all!( + [ctx, ServerRow] + " + SELECT + s.server_id, + d.cluster_id, + s.datacenter_id, + s.pool_type, + s.vlan_ip, + s.public_ip, + s.cloud_destroy_ts + FROM db_cluster.servers AS s + JOIN db_cluster.datacenters AS d + ON s.datacenter_id = d.datacenter_id + WHERE + ($1 OR s.cloud_destroy_ts IS NULL) + AND ($2 IS NULL OR s.server_id = ANY($2)) + AND ($3 IS NULL OR s.datacenter_id = ANY($4)) + AND ($4 IS NULL OR d.cluster_id = ANY($3)) + AND ($5 IS NULL OR s.pool_type = ANY($5)) + AND ($6 IS NULL OR s.public_ip = ANY($6)) + ", + input.include_destroyed, + &input.filter.server_ids, + &input.filter.datacenter_ids, + &input.filter.cluster_ids, + input.filter.pool_types + .as_ref() + .map(|x| x.iter() + .cloned() + .map(Into::::into) + .collect::>() + ), + input.filter.public_ips + .as_ref() + .map(|x| x.iter() + .cloned() + .map(IpAddr::V4) + .collect::>() + ), + ) + .await? + .into_iter() + .map(TryInto::try_into) + .collect::>>()?; + + Ok(Output { servers }) +} diff --git a/svc/pkg/cluster/src/ops/server/mod.rs b/svc/pkg/cluster/src/ops/server/mod.rs new file mode 100644 index 000000000..c8d7718a9 --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/mod.rs @@ -0,0 +1,4 @@ +pub mod destroy_with_filter; +pub mod get; +pub mod list; +pub mod resolve_for_ip; diff --git a/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs b/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs new file mode 100644 index 000000000..b48c2fe7d --- /dev/null +++ b/svc/pkg/cluster/src/ops/server/resolve_for_ip.rs @@ -0,0 +1,42 @@ +use std::net::{IpAddr, Ipv4Addr}; + +use chirp_workflow::prelude::*; + +pub struct Input { + pub ips: Vec, +} + +pub struct Output { + pub servers: Vec, +} + +#[derive(sqlx::FromRow)] +pub struct Server { + pub server_id: Uuid, + pub public_ip: IpAddr, +} + +#[operation] +pub async fn cluster_server_resolve_for_ip( + ctx: &OperationCtx, + input: &Input, +) -> GlobalResult { + let servers = sql_fetch_all!( + [ctx, Server] + " + SELECT + server_id, + public_ip + FROM db_cluster.servers + WHERE server_id = ANY($1) + ", + input.ips + .iter() + .cloned() + .map(IpAddr::V4) + .collect::>(), + ) + .await?; + + Ok(Output { servers }) +} diff --git a/svc/pkg/cluster/src/types.rs b/svc/pkg/cluster/src/types.rs new file mode 100644 index 000000000..7c5209eb0 --- /dev/null +++ b/svc/pkg/cluster/src/types.rs @@ -0,0 +1,183 @@ +use std::{ + convert::{TryFrom, TryInto}, + net::{IpAddr, Ipv4Addr}, +}; + +use chirp_workflow::prelude::*; +use rivet_operation::prelude::proto::backend; +use serde::{Deserialize, Serialize}; + +#[derive(sqlx::FromRow)] +pub struct Cluster { + pub cluster_id: Uuid, + pub name_id: String, + pub owner_team_id: Option, + pub create_ts: i64, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Datacenter { + pub datacenter_id: Uuid, + pub cluster_id: Uuid, + pub name_id: String, + pub 
display_name: String, + pub provider: Provider, + pub provider_datacenter_id: String, + pub provider_api_token: Option, + pub pools: Vec, + pub build_delivery_method: BuildDeliveryMethod, + pub prebakes_enabled: bool, + pub create_ts: i64, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum Provider { + Linode, +} + +// Backwards compatibility +impl TryFrom for Provider { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(Provider::Linode), + _ => bail!("unexpected Provider variant"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Pool { + pub pool_type: PoolType, + pub hardware: Vec, + pub desired_count: u32, + pub min_count: u32, + pub max_count: u32, + pub drain_timeout: u64, +} + +// Backwards compatibility +impl TryFrom for Pool { + type Error = GlobalError; + + fn try_from(value: backend::cluster::Pool) -> GlobalResult { + Ok(Pool { + pool_type: (value.pool_type as i64).try_into()?, + hardware: value + .hardware + .iter() + .map(|h| Hardware { + provider_hardware: h.provider_hardware.clone(), + }) + .collect(), + desired_count: value.desired_count, + min_count: value.min_count, + max_count: value.max_count, + drain_timeout: value.drain_timeout, + }) + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum PoolType { + Job, + Gg, + Ats, +} + +// Backwards compatibility +impl TryFrom for PoolType { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(PoolType::Job), + 1 => Ok(PoolType::Gg), + 2 => Ok(PoolType::Ats), + _ => bail!("unexpected PoolType variant"), + } + } +} +impl From for i64 { + fn from(value: PoolType) -> i64 { + match value { + PoolType::Job => 0, + PoolType::Gg => 1, + PoolType::Ats => 2, + } + } +} + +impl std::fmt::Display for PoolType { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + PoolType::Job => write!(f, "job"), + PoolType::Gg => write!(f, "gg"), + PoolType::Ats => write!(f, "ats"), + } + } +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct Hardware { + pub provider_hardware: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum BuildDeliveryMethod { + TrafficServer, + S3Direct, +} + +// Backwards compatibility +impl TryFrom for BuildDeliveryMethod { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(BuildDeliveryMethod::TrafficServer), + 1 => Ok(BuildDeliveryMethod::S3Direct), + _ => bail!("unexpected BuildDeliveryMethod variant"), + } + } +} + +pub struct Server { + pub server_id: Uuid, + pub datacenter_id: Uuid, + pub pool_type: PoolType, + pub vlan_ip: Option, + pub public_ip: Option, + pub cloud_destroy_ts: Option, +} + +#[derive(Clone)] +pub struct Filter { + pub server_ids: Option>, + pub datacenter_ids: Option>, + pub cluster_ids: Option>, + pub pool_types: Option>, + pub public_ips: Option>, +} + +#[derive(Debug, Clone, Serialize, Deserialize)] +pub enum TlsState { + Creating, + Active, + Renewing, +} + +// Backwards compatibility +impl TryFrom for TlsState { + type Error = GlobalError; + + fn try_from(value: i64) -> GlobalResult { + match value { + 0 => Ok(TlsState::Creating), + 1 => Ok(TlsState::Active), + 2 => Ok(TlsState::Renewing), + _ => bail!("unexpected TlsState variant"), + } + } +} diff --git a/svc/pkg/cluster/util/src/metrics.rs b/svc/pkg/cluster/src/util/metrics.rs similarity index 100% rename from svc/pkg/cluster/util/src/metrics.rs rename to 
svc/pkg/cluster/src/util/metrics.rs diff --git a/svc/pkg/cluster/src/util/mod.rs b/svc/pkg/cluster/src/util/mod.rs new file mode 100644 index 000000000..d277920e9 --- /dev/null +++ b/svc/pkg/cluster/src/util/mod.rs @@ -0,0 +1,36 @@ +use chirp_workflow::prelude::*; + +use crate::types::PoolType; + +pub mod metrics; +pub mod test; + +// Use the hash of the server install script in the image variant so that if the install scripts are updated +// we won't be using the old image anymore +pub const INSTALL_SCRIPT_HASH: &str = include_str!(concat!(env!("OUT_DIR"), "/hash.txt")); + +// TTL of the token written to prebake images. Prebake images are renewed before the token would expire +pub const SERVER_TOKEN_TTL: i64 = util::duration::days(30 * 6); + +#[derive(thiserror::Error, Debug)] +#[error("cloudflare: {source}")] +pub(crate) struct CloudflareError { + #[from] + source: anyhow::Error, +} + +// Cluster id for provisioning servers +pub fn default_cluster_id() -> Uuid { + Uuid::nil() +} + +pub fn server_name(provider_datacenter_id: &str, pool_type: PoolType, server_id: Uuid) -> String { + let ns = util::env::namespace(); + let pool_type_str = match pool_type { + PoolType::Job => "job", + PoolType::Gg => "gg", + PoolType::Ats => "ats", + }; + + format!("{ns}-{provider_datacenter_id}-{pool_type_str}-{server_id}",) +} diff --git a/svc/pkg/cluster/util/src/test.rs b/svc/pkg/cluster/src/util/test.rs similarity index 100% rename from svc/pkg/cluster/util/src/test.rs rename to svc/pkg/cluster/src/util/test.rs diff --git a/svc/pkg/cluster/src/workflows/cluster.rs b/svc/pkg/cluster/src/workflows/cluster.rs new file mode 100644 index 000000000..27205c6af --- /dev/null +++ b/svc/pkg/cluster/src/workflows/cluster.rs @@ -0,0 +1,142 @@ +use chirp_workflow::prelude::*; +use futures_util::FutureExt; +use serde_json::json; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Input { + pub cluster_id: Uuid, + pub name_id: String, + pub owner_team_id: Option, +} + +#[workflow] +pub async fn cluster(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + ctx.activity(UpdateDbInput { + cluster_id: input.cluster_id, + name_id: input.name_id.clone(), + owner_team_id: input.owner_team_id, + }) + .await?; + + // For use in spawned threads + let cluster_id = input.cluster_id; + + loop { + match ctx.listen::
().await? { + Main::GameLink(sig) => { + ctx.spawn(move |ctx| { + async move { + ctx.activity(GameLinkInput { + cluster_id, + game_id: sig.game_id, + }) + .await?; + + Ok(()) + } + .boxed() + }); + } + Main::DatacenterCreate(sig) => { + // ctx.spawn(move |ctx| async move { + // ctx.activity(GameLinkInput { + // cluster_id, + // game_id: sig.game_id, + // }) + // .await?; + + // Ok(()) + // }); + } + } + } + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +pub struct UpdateDbInput { + pub cluster_id: Uuid, + pub name_id: String, + pub owner_team_id: Option, +} + +#[activity(UpdateDb)] +pub async fn update_db(ctx: &ActivityCtx, input: &UpdateDbInput) -> GlobalResult<()> { + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.clusters ( + cluster_id, + name_id, + owner_team_id, + create_ts + ) + VALUES ($1, $2, $3, $4) + ", + input.cluster_id, + &input.name_id, + input.owner_team_id, + util::timestamp::now(), + ) + .await?; + + ctx.msg( + json!({ + "cluster_id": input.cluster_id, + }), + CreateComplete {}, + ) + .await?; + + Ok(()) +} + +#[message("cluster-create-complete")] +pub struct CreateComplete {} + +#[signal("cluster-game-link")] +pub struct GameLink { + pub game_id: Uuid, +} + +#[signal("cluster-datacenter-create")] +pub struct DatacenterCreate {} + +join_signal!(Main, [GameLink, DatacenterCreate]); + +#[message("cluster-game-link-complete")] +pub struct GameLinkComplete {} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct GameLinkInput { + cluster_id: Uuid, + game_id: Uuid, +} + +#[activity(GameLinkActivity)] +async fn game_link(ctx: &ActivityCtx, input: &GameLinkInput) -> GlobalResult<()> { + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.games ( + game_id, + cluster_id + ) + VALUES ($1, $2) + ", + input.game_id, + input.cluster_id, + ) + .await?; + + ctx.msg( + json!({ + "cluster_id": input.cluster_id, + }), + GameLinkComplete {}, + ) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_create.rs b/svc/pkg/cluster/src/workflows/datacenter/datacenter_create.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/datacenter_create.rs rename to svc/pkg/cluster/src/workflows/datacenter/datacenter_create.rs diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_scale.rs b/svc/pkg/cluster/src/workflows/datacenter/datacenter_scale.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/datacenter_scale.rs rename to svc/pkg/cluster/src/workflows/datacenter/datacenter_scale.rs diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_tls_issue.rs b/svc/pkg/cluster/src/workflows/datacenter/datacenter_tls_issue.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/datacenter_tls_issue.rs rename to svc/pkg/cluster/src/workflows/datacenter/datacenter_tls_issue.rs diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_update.rs b/svc/pkg/cluster/src/workflows/datacenter/datacenter_update.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/datacenter_update.rs rename to svc/pkg/cluster/src/workflows/datacenter/datacenter_update.rs diff --git a/svc/pkg/cluster/src/workflows/mod.rs b/svc/pkg/cluster/src/workflows/mod.rs new file mode 100644 index 000000000..b0cc125e3 --- /dev/null +++ b/svc/pkg/cluster/src/workflows/mod.rs @@ -0,0 +1,3 @@ +pub mod cluster; +// pub mod server; +// pub mod datacenter; diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/mod.rs 
b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/mod.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/mod.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/mod.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/nomad.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/nomad.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/nomad.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/nomad.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/ok_server.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/ok_server.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/ok_server.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/ok_server.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/rivet.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/rivet.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/rivet.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/rivet.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/s3.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/s3.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/s3.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/s3.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traefik.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traefik.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traefik.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traefik.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traffic_server.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traffic_server.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/traffic_server.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/traffic_server.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/vector.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/components/vector.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components/vector.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/components/vector.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/cni_plugins.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/cni_plugins.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/cni_plugins.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/cni_plugins.sh diff --git 
a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/docker.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/docker.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/docker.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/docker.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/node_exporter.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/node_exporter.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/node_exporter.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_configure.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_configure.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_configure.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_configure.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_install.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_install.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_install.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/nomad_install.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/ok_server.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/ok_server.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/ok_server.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/ok_server.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_create_hook.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_create_hook.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_create_hook.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_create_hook.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_info.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_info.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_info.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_info.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_tls.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_tls.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_tls.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/rivet_fetch_tls.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/sysctl.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/sysctl.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/sysctl.sh rename to 
svc/pkg/cluster/src/workflows/server/install/install_scripts/files/sysctl.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik_instance.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik_instance.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik_instance.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traefik_instance.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/cache.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/cache.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/cache.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/cache.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/hosting.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/hosting.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/hosting.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/hosting.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ip_allow.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ip_allow.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ip_allow.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ip_allow.yaml diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/logging.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/logging.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/logging.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/logging.yaml diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/parent.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/parent.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/parent.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/parent.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/plugin.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/plugin.config similarity index 100% rename from 
svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/plugin.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/plugin.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/records.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/records.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/records.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/records.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/sni.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/sni.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/sni.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/sni.yaml diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/socks.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/socks.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/socks.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/socks.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/splitdns.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/splitdns.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/splitdns.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/splitdns.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ssl_multicert.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ssl_multicert.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ssl_multicert.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/ssl_multicert.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strategies.yaml b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strategies.yaml similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strategies.yaml rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strategies.yaml diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strip_headers.lua b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strip_headers.lua similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strip_headers.lua rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/strip_headers.lua diff --git 
a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/trafficserver-release b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/trafficserver-release similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/trafficserver-release rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/trafficserver-release diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/volume.config b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/volume.config similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/volume.config rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server/etc/volume.config diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_configure.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_configure.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_configure.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_configure.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_install.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_install.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_install.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/traffic_server_install.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_configure.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_configure.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_configure.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_configure.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_install.sh b/svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_install.sh similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_install.sh rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/files/vector_install.sh diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/mod.rs b/svc/pkg/cluster/src/workflows/server/install/install_scripts/mod.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/install_scripts/mod.rs rename to svc/pkg/cluster/src/workflows/server/install/install_scripts/mod.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install/mod.rs b/svc/pkg/cluster/src/workflows/server/install/mod.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install/mod.rs rename to svc/pkg/cluster/src/workflows/server/install/mod.rs diff --git a/svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs b/svc/pkg/cluster/src/workflows/server/nomad_node_drain_complete.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs rename to 
svc/pkg/cluster/src/workflows/server/nomad_node_drain_complete.rs diff --git a/svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs b/svc/pkg/cluster/src/workflows/server/nomad_node_registered.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs rename to svc/pkg/cluster/src/workflows/server/nomad_node_registered.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_destroy.rs b/svc/pkg/cluster/src/workflows/server/server_destroy.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_destroy.rs rename to svc/pkg/cluster/src/workflows/server/server_destroy.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_dns_create.rs b/svc/pkg/cluster/src/workflows/server/server_dns_create.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_dns_create.rs rename to svc/pkg/cluster/src/workflows/server/server_dns_create.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_dns_delete.rs b/svc/pkg/cluster/src/workflows/server/server_dns_delete.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_dns_delete.rs rename to svc/pkg/cluster/src/workflows/server/server_dns_delete.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_drain.rs b/svc/pkg/cluster/src/workflows/server/server_drain.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_drain.rs rename to svc/pkg/cluster/src/workflows/server/server_drain.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_install_complete.rs b/svc/pkg/cluster/src/workflows/server/server_install_complete.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_install_complete.rs rename to svc/pkg/cluster/src/workflows/server/server_install_complete.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_provision.rs b/svc/pkg/cluster/src/workflows/server/server_provision.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_provision.rs rename to svc/pkg/cluster/src/workflows/server/server_provision.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_taint.rs b/svc/pkg/cluster/src/workflows/server/server_taint.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_taint.rs rename to svc/pkg/cluster/src/workflows/server/server_taint.rs diff --git a/svc/pkg/cluster/worker/src/workers/server_undrain.rs b/svc/pkg/cluster/src/workflows/server/server_undrain.rs similarity index 100% rename from svc/pkg/cluster/worker/src/workers/server_undrain.rs rename to svc/pkg/cluster/src/workflows/server/server_undrain.rs diff --git a/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml b/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml index 61b2cf47d..40ea0a6ca 100644 --- a/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml +++ b/svc/pkg/cluster/standalone/datacenter-tls-renew/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } +cluster = { path = "../.." 
} [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/cluster/standalone/default-update/Cargo.toml b/svc/pkg/cluster/standalone/default-update/Cargo.toml index 4fb14ac06..e8bbde600 100644 --- a/svc/pkg/cluster/standalone/default-update/Cargo.toml +++ b/svc/pkg/cluster/standalone/default-update/Cargo.toml @@ -18,11 +18,8 @@ tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } uuid = { version = "1", features = ["v4"] } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } -cluster-get = { path = "../../ops/get" } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } -cluster-datacenter-list = { path = "../../ops/datacenter-list" } +cluster = { path = "../.." } [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/standalone/fix-tls/Cargo.toml b/svc/pkg/cluster/standalone/fix-tls/Cargo.toml index 7bea53a34..2832cbe0e 100644 --- a/svc/pkg/cluster/standalone/fix-tls/Cargo.toml +++ b/svc/pkg/cluster/standalone/fix-tls/Cargo.toml @@ -33,14 +33,9 @@ serde_yaml = "0.9" ssh2 = "0.9.4" thiserror = "1.0" trust-dns-resolver = { version = "0.23.2", features = ["dns-over-native-tls"] } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } -cluster-datacenter-list = { path = "../../ops/datacenter-list" } -cluster-datacenter-topology-get = { path = "../../ops/datacenter-topology-get" } -linode-instance-type-get = { path = "../../../linode/ops/instance-type-get" } -linode-server-destroy = { path = "../../../linode/ops/server-destroy" } -linode-server-provision = { path = "../../../linode/ops/server-provision" } +cluster = { path = "../.." } +linode = { path = "../../../linode" } token-create = { path = "../../../token/ops/create" } [dev-dependencies] diff --git a/svc/pkg/cluster/standalone/gc/Cargo.toml b/svc/pkg/cluster/standalone/gc/Cargo.toml index 242b1b67f..c30da8cd9 100644 --- a/svc/pkg/cluster/standalone/gc/Cargo.toml +++ b/svc/pkg/cluster/standalone/gc/Cargo.toml @@ -16,7 +16,7 @@ tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } +cluster = { path = "../.." } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" @@ -25,4 +25,3 @@ default-features = false [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } diff --git a/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml b/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml index bc0073f00..fc2f121a4 100644 --- a/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml +++ b/svc/pkg/cluster/standalone/metrics-publish/Cargo.toml @@ -15,9 +15,8 @@ rivet-runtime = { path = "../../../../../lib/runtime" } tokio = { version = "1.29", features = ["full"] } tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } -cluster-datacenter-get = { path = "../../ops/datacenter-get" } +cluster = { path = "../.." 
} [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" @@ -26,4 +25,3 @@ default-features = false [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } -util-cluster = { package = "rivet-util-cluster", path = "../../util" } diff --git a/svc/pkg/cluster/worker/tests/common.rs b/svc/pkg/cluster/testsTMP/common.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/common.rs rename to svc/pkg/cluster/testsTMP/common.rs diff --git a/svc/pkg/cluster/worker/tests/create.rs b/svc/pkg/cluster/testsTMP/create.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/create.rs rename to svc/pkg/cluster/testsTMP/create.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_create.rs b/svc/pkg/cluster/testsTMP/datacenter_create.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_create.rs rename to svc/pkg/cluster/testsTMP/datacenter_create.rs diff --git a/svc/pkg/cluster/ops/datacenter-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_get.rs diff --git a/svc/pkg/cluster/ops/datacenter-list/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_list.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-list/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_list.rs diff --git a/svc/pkg/cluster/ops/datacenter-location-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_location_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-location-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_location_get.rs diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_resolve_for_name_id.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-resolve-for-name-id/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_resolve_for_name_id.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_scale.rs b/svc/pkg/cluster/testsTMP/datacenter_scale.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_scale.rs rename to svc/pkg/cluster/testsTMP/datacenter_scale.rs diff --git a/svc/pkg/cluster/ops/datacenter-tls-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_tls_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-tls-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_tls_get.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_tls_issue.rs b/svc/pkg/cluster/testsTMP/datacenter_tls_issue.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_tls_issue.rs rename to svc/pkg/cluster/testsTMP/datacenter_tls_issue.rs diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/datacenter_topology_get.rs similarity index 100% rename from svc/pkg/cluster/ops/datacenter-topology-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/datacenter_topology_get.rs diff --git a/svc/pkg/cluster/worker/tests/datacenter_update.rs b/svc/pkg/cluster/testsTMP/datacenter_update.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/datacenter_update.rs rename to svc/pkg/cluster/testsTMP/datacenter_update.rs diff --git a/svc/pkg/cluster/worker/tests/game_link.rs b/svc/pkg/cluster/testsTMP/game_link.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/game_link.rs 
rename to svc/pkg/cluster/testsTMP/game_link.rs diff --git a/svc/pkg/cluster/ops/get/tests/integration.rs b/svc/pkg/cluster/testsTMP/get.rs similarity index 100% rename from svc/pkg/cluster/ops/get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/get.rs diff --git a/svc/pkg/cluster/ops/get-for-game/tests/integration.rs b/svc/pkg/cluster/testsTMP/get_for_game.rs similarity index 100% rename from svc/pkg/cluster/ops/get-for-game/tests/integration.rs rename to svc/pkg/cluster/testsTMP/get_for_game.rs diff --git a/svc/pkg/cluster/ops/list/tests/integration.rs b/svc/pkg/cluster/testsTMP/list.rs similarity index 100% rename from svc/pkg/cluster/ops/list/tests/integration.rs rename to svc/pkg/cluster/testsTMP/list.rs diff --git a/svc/pkg/cluster/worker/tests/nomad_node_drain_complete.rs b/svc/pkg/cluster/testsTMP/nomad_node_drain_complete.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/nomad_node_drain_complete.rs rename to svc/pkg/cluster/testsTMP/nomad_node_drain_complete.rs diff --git a/svc/pkg/cluster/worker/tests/nomad_node_registered.rs b/svc/pkg/cluster/testsTMP/nomad_node_registered.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/nomad_node_registered.rs rename to svc/pkg/cluster/testsTMP/nomad_node_registered.rs diff --git a/svc/pkg/cluster/ops/resolve-for-name-id/tests/integration.rs b/svc/pkg/cluster/testsTMP/resolve_for_name_id.rs similarity index 100% rename from svc/pkg/cluster/ops/resolve-for-name-id/tests/integration.rs rename to svc/pkg/cluster/testsTMP/resolve_for_name_id.rs diff --git a/svc/pkg/cluster/worker/tests/server_destroy.rs b/svc/pkg/cluster/testsTMP/server_destroy.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_destroy.rs rename to svc/pkg/cluster/testsTMP/server_destroy.rs diff --git a/svc/pkg/cluster/ops/server-destroy-with-filter/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_destroy_with_filter.rs similarity index 100% rename from svc/pkg/cluster/ops/server-destroy-with-filter/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_destroy_with_filter.rs diff --git a/svc/pkg/cluster/worker/tests/server_dns_create.rs b/svc/pkg/cluster/testsTMP/server_dns_create.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_dns_create.rs rename to svc/pkg/cluster/testsTMP/server_dns_create.rs diff --git a/svc/pkg/cluster/worker/tests/server_dns_delete.rs b/svc/pkg/cluster/testsTMP/server_dns_delete.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_dns_delete.rs rename to svc/pkg/cluster/testsTMP/server_dns_delete.rs diff --git a/svc/pkg/cluster/worker/tests/server_drain.rs b/svc/pkg/cluster/testsTMP/server_drain.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_drain.rs rename to svc/pkg/cluster/testsTMP/server_drain.rs diff --git a/svc/pkg/cluster/ops/server-get/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_get.rs similarity index 100% rename from svc/pkg/cluster/ops/server-get/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_get.rs diff --git a/svc/pkg/cluster/worker/tests/server_install.rs b/svc/pkg/cluster/testsTMP/server_install.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_install.rs rename to svc/pkg/cluster/testsTMP/server_install.rs diff --git a/svc/pkg/cluster/worker/tests/server_install_complete.rs b/svc/pkg/cluster/testsTMP/server_install_complete.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_install_complete.rs rename to 
svc/pkg/cluster/testsTMP/server_install_complete.rs diff --git a/svc/pkg/cluster/ops/server-list/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_list.rs similarity index 100% rename from svc/pkg/cluster/ops/server-list/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_list.rs diff --git a/svc/pkg/cluster/worker/tests/server_provision.rs b/svc/pkg/cluster/testsTMP/server_provision.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_provision.rs rename to svc/pkg/cluster/testsTMP/server_provision.rs diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/tests/integration.rs b/svc/pkg/cluster/testsTMP/server_resolve_for_ip.rs similarity index 100% rename from svc/pkg/cluster/ops/server-resolve-for-ip/tests/integration.rs rename to svc/pkg/cluster/testsTMP/server_resolve_for_ip.rs diff --git a/svc/pkg/cluster/worker/tests/server_taint.rs b/svc/pkg/cluster/testsTMP/server_taint.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_taint.rs rename to svc/pkg/cluster/testsTMP/server_taint.rs diff --git a/svc/pkg/cluster/worker/tests/server_undrain.rs b/svc/pkg/cluster/testsTMP/server_undrain.rs similarity index 100% rename from svc/pkg/cluster/worker/tests/server_undrain.rs rename to svc/pkg/cluster/testsTMP/server_undrain.rs diff --git a/svc/pkg/cluster/util/Cargo.toml b/svc/pkg/cluster/util/Cargo.toml deleted file mode 100644 index b1065186b..000000000 --- a/svc/pkg/cluster/util/Cargo.toml +++ /dev/null @@ -1,18 +0,0 @@ -[package] -name = "rivet-util-cluster" -version = "0.1.0" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -lazy_static = "1.4" -rivet-metrics = { path = "../../../../lib/metrics" } -rivet-util = { path = "../../../../lib/util/core" } -types = { path = "../../../../lib/types/core" } -uuid = { version = "1", features = ["v4", "serde"] } - -[build-dependencies] -merkle_hash = "3.6" -hex = "0.4" -tokio = { version = "1.29", features = ["full"] } diff --git a/svc/pkg/cluster/worker/Cargo.toml b/svc/pkg/cluster/worker/Cargo.toml index 7fefd6abd..0ed9bcc1a 100644 --- a/svc/pkg/cluster/worker/Cargo.toml +++ b/svc/pkg/cluster/worker/Cargo.toml @@ -33,9 +33,7 @@ util-cluster = { package = "rivet-util-cluster", path = "../util" } cluster-datacenter-get = { path = "../ops/datacenter-get" } cluster-datacenter-list = { path = "../ops/datacenter-list" } cluster-datacenter-topology-get = { path = "../ops/datacenter-topology-get" } -linode-instance-type-get = { path = "../../linode/ops/instance-type-get" } -linode-server-destroy = { path = "../../linode/ops/server-destroy" } -linode-server-provision = { path = "../../linode/ops/server-provision" } +linode = { path = "../../linode" } token-create = { path = "../../token/ops/create" } [dependencies.nomad_client] diff --git a/svc/pkg/cluster/worker/src/lib.rs b/svc/pkg/cluster/worker/src/lib.rs deleted file mode 100644 index beb1874f4..000000000 --- a/svc/pkg/cluster/worker/src/lib.rs +++ /dev/null @@ -1,2 +0,0 @@ -pub mod util; -pub mod workers; diff --git a/svc/pkg/cluster/worker/src/util.rs b/svc/pkg/cluster/worker/src/util.rs deleted file mode 100644 index 9cc49fea5..000000000 --- a/svc/pkg/cluster/worker/src/util.rs +++ /dev/null @@ -1,6 +0,0 @@ -#[derive(thiserror::Error, Debug)] -#[error("cloudflare: {source}")] -pub struct CloudflareError { - #[from] - source: anyhow::Error, -} diff --git a/svc/pkg/cluster/worker/src/workers/create.rs b/svc/pkg/cluster/worker/src/workers/create.rs deleted file mode 100644 index 8f6aee608..000000000 --- 
a/svc/pkg/cluster/worker/src/workers/create.rs +++ /dev/null @@ -1,33 +0,0 @@ -use chirp_worker::prelude::*; -use proto::backend::pkg::*; - -#[worker(name = "cluster-create")] -async fn worker(ctx: &OperationContext) -> GlobalResult<()> { - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - let owner_team_id = ctx.owner_team_id.map(|id| id.as_uuid()); - - sql_execute!( - [ctx] - " - INSERT INTO db_cluster.clusters ( - cluster_id, - name_id, - owner_team_id, - create_ts - ) - VALUES ($1, $2, $3, $4) - ", - cluster_id, - &ctx.name_id, - owner_team_id, - util::timestamp::now(), - ) - .await?; - - msg!([ctx] cluster::msg::create_complete(cluster_id) { - cluster_id: ctx.cluster_id - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/cluster/worker/src/workers/game_link.rs b/svc/pkg/cluster/worker/src/workers/game_link.rs deleted file mode 100644 index 64f241cbb..000000000 --- a/svc/pkg/cluster/worker/src/workers/game_link.rs +++ /dev/null @@ -1,30 +0,0 @@ -use chirp_worker::prelude::*; -use proto::backend::pkg::*; - -#[worker(name = "cluster-game-link")] -async fn worker(ctx: &OperationContext) -> GlobalResult<()> { - let game_id = unwrap_ref!(ctx.game_id).as_uuid(); - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - - sql_execute!( - [ctx] - " - INSERT INTO db_cluster.games ( - game_id, - cluster_id - ) - VALUES ($1, $2) - ", - game_id, - cluster_id, - ) - .await?; - - msg!([ctx] cluster::msg::game_link_complete(game_id, cluster_id) { - game_id: ctx.game_id, - cluster_id: ctx.cluster_id, - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/cluster/worker/src/workers/mod.rs b/svc/pkg/cluster/worker/src/workers/mod.rs deleted file mode 100644 index a943ba64d..000000000 --- a/svc/pkg/cluster/worker/src/workers/mod.rs +++ /dev/null @@ -1,37 +0,0 @@ -pub mod create; -pub mod datacenter_create; -pub mod datacenter_scale; -pub mod datacenter_tls_issue; -pub mod datacenter_update; -pub mod game_link; -pub mod nomad_node_drain_complete; -pub mod nomad_node_registered; -pub mod server_destroy; -pub mod server_dns_create; -pub mod server_dns_delete; -pub mod server_drain; -pub mod server_install; -pub mod server_install_complete; -pub mod server_provision; -pub mod server_taint; -pub mod server_undrain; - -chirp_worker::workers![ - server_taint, - create, - datacenter_create, - datacenter_scale, - datacenter_tls_issue, - datacenter_update, - game_link, - nomad_node_drain_complete, - nomad_node_registered, - server_destroy, - server_dns_create, - server_dns_delete, - server_drain, - server_install_complete, - server_install, - server_provision, - server_undrain, -]; diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml b/svc/pkg/linode/Cargo.toml similarity index 51% rename from svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml rename to svc/pkg/linode/Cargo.toml index 0ff168c36..d1f1d82e7 100644 --- a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml +++ b/svc/pkg/linode/Cargo.toml @@ -1,14 +1,18 @@ [package] -name = "cluster-datacenter-resolve-for-name-id" +name = "linode" version = "0.0.1" edition = "2018" authors = ["Rivet Gaming, LLC "] license = "Apache-2.0" [dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -prost = "0.10" -rivet-operation = { path = "../../../../../lib/operation/core" } +chirp-workflow = { path = "../../../lib/chirp-workflow/core" } +chrono = "0.4" +rand = "0.8" +reqwest = { version = "0.11", features = ["json"] } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +ssh-key = 
"0.6.3" [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" @@ -16,4 +20,4 @@ rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" default-features = false [dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } +cluster = { path = "../cluster" } diff --git a/svc/pkg/linode/ops/server-provision/Service.toml b/svc/pkg/linode/Service.toml similarity index 69% rename from svc/pkg/linode/ops/server-provision/Service.toml rename to svc/pkg/linode/Service.toml index 40485ec54..86f8b71ab 100644 --- a/svc/pkg/linode/ops/server-provision/Service.toml +++ b/svc/pkg/linode/Service.toml @@ -1,11 +1,14 @@ [service] -name = "linode-server-provision" +name = "linode" [runtime] kind = "rust" -[operation] +[package] [secrets] "linode/token" = { optional = true } "ssh/server/private_key_openssh" = {} + +[databases] +db-linode = {} diff --git a/svc/pkg/linode/db/linode/Service.toml b/svc/pkg/linode/db/linode/Service.toml new file mode 100644 index 000000000..4c08d1950 --- /dev/null +++ b/svc/pkg/linode/db/linode/Service.toml @@ -0,0 +1,7 @@ +[service] +name = "db-linode" + +[runtime] +kind = "crdb" + +[database] diff --git a/svc/pkg/linode/db/linode/migrations/20240705194302_init.down.sql b/svc/pkg/linode/db/linode/migrations/20240705194302_init.down.sql new file mode 100644 index 000000000..e69de29bb diff --git a/svc/pkg/linode/db/linode/migrations/20240705194302_init.up.sql b/svc/pkg/linode/db/linode/migrations/20240705194302_init.up.sql new file mode 100644 index 000000000..888ecbf43 --- /dev/null +++ b/svc/pkg/linode/db/linode/migrations/20240705194302_init.up.sql @@ -0,0 +1,28 @@ +CREATE TABLE server_images ( + id UUID NOT NULL, + + create_ts INT NOT NULL, + destroy_ts INT, + + ssh_key_id INT NOT NULL, + linode_id INT, + firewall_id INT, + disk_id INT, + public_ip INET, + image_id TEXT, + + PRIMARY KEY (install_hash, datacenter_id, firewall_preset) +); + +-- Effectively a conditional primary key +CREATE UNIQUE INDEX idx_server_images_pkey +ON server_images (id) +WHERE destroy_ts IS NULL; + +CREATE INDEX idx_server_images_public_ip +ON server_images (public_ip) +WHERE destroy_ts IS NULL; + +CREATE INDEX idx_server_images_image_id +ON server_images (image_id) +WHERE destroy_ts IS NULL; diff --git a/svc/pkg/linode/ops/instance-type-get/Cargo.toml b/svc/pkg/linode/ops/instance-type-get/Cargo.toml deleted file mode 100644 index fb8fa5f83..000000000 --- a/svc/pkg/linode/ops/instance-type-get/Cargo.toml +++ /dev/null @@ -1,20 +0,0 @@ -[package] -name = "linode-instance-type-get" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -util-linode = { package = "rivet-util-linode", path = "../../util" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } diff --git a/svc/pkg/linode/ops/instance-type-get/Service.toml b/svc/pkg/linode/ops/instance-type-get/Service.toml deleted file mode 100644 index 1d9736733..000000000 --- a/svc/pkg/linode/ops/instance-type-get/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "linode-instance-type-get" - -[runtime] -kind = "rust" - -[operation] - -[secrets] -"linode/token" = { optional = true } diff --git 
a/svc/pkg/linode/ops/instance-type-get/src/lib.rs b/svc/pkg/linode/ops/instance-type-get/src/lib.rs deleted file mode 100644 index 4a83d386e..000000000 --- a/svc/pkg/linode/ops/instance-type-get/src/lib.rs +++ /dev/null @@ -1,43 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; -use util_linode::api; - -#[operation(name = "linode-instance-type-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - // Build HTTP client - let client = util_linode::Client::new(None).await?; - - // Get hardware stats from linode and cache - let instance_types_res = ctx - .cache() - .ttl(util::duration::days(1)) - .fetch_one_proto("instance_types", "linode", { - let client = client.clone(); - move |mut cache, key| { - let client = client.clone(); - async move { - let api_res = api::list_instance_types(&client).await?; - - cache.resolve( - &key, - linode::instance_type_get::CacheInstanceTypes { - instance_types: api_res.into_iter().map(Into::into).collect::>(), - }, - ); - - Ok(cache) - } - } - }) - .await?; - - let instance_types = unwrap!(instance_types_res) - .instance_types - .into_iter() - .filter(|ty| ctx.hardware_ids.iter().any(|h| h == &ty.hardware_id)) - .collect::>(); - - Ok(linode::instance_type_get::Response { instance_types }) -} diff --git a/svc/pkg/linode/ops/server-destroy/Cargo.toml b/svc/pkg/linode/ops/server-destroy/Cargo.toml deleted file mode 100644 index 23c155ed3..000000000 --- a/svc/pkg/linode/ops/server-destroy/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "linode-server-destroy" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -reqwest = { version = "0.11", features = ["json"] } -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } -util-linode = { package = "rivet-util-linode", path = "../../util" } - -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } - -linode-server-provision = { path = "../server-provision" } diff --git a/svc/pkg/linode/ops/server-destroy/Service.toml b/svc/pkg/linode/ops/server-destroy/Service.toml deleted file mode 100644 index be0e245fc..000000000 --- a/svc/pkg/linode/ops/server-destroy/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "linode-server-destroy" - -[runtime] -kind = "rust" - -[operation] - -[secrets] -"linode/token" = { optional = true } diff --git a/svc/pkg/linode/ops/server-destroy/src/lib.rs b/svc/pkg/linode/ops/server-destroy/src/lib.rs deleted file mode 100644 index e90978052..000000000 --- a/svc/pkg/linode/ops/server-destroy/src/lib.rs +++ /dev/null @@ -1,72 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; -use util_linode::api; - -#[derive(sqlx::FromRow)] -struct LinodeData { - ssh_key_id: i64, - linode_id: Option, - firewall_id: Option, -} - -#[operation(name = "linode-server-destroy")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let server_id = unwrap_ref!(ctx.server_id).as_uuid(); - let datacenter_id = unwrap!(ctx.datacenter_id); - - let datacenter_res = op!([ctx] cluster_datacenter_get { - datacenter_ids: vec![datacenter_id], - }) - .await?; - let datacenter = 
unwrap!(datacenter_res.datacenters.first()); - - let data = sql_fetch_optional!( - [ctx, LinodeData] - " - SELECT ssh_key_id, linode_id, firewall_id - FROM db_cluster.servers_linode - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - ) - .await?; - - let Some(data) = data else { - tracing::warn!("deleting server that doesn't exist"); - return Ok(linode::server_destroy::Response {}); - }; - - // Build HTTP client - let client = util_linode::Client::new(datacenter.provider_api_token.clone()).await?; - - if let Some(linode_id) = data.linode_id { - api::delete_instance(&client, linode_id).await?; - } - - api::delete_ssh_key(&client, data.ssh_key_id).await?; - - if let Some(firewall_id) = data.firewall_id { - api::delete_firewall(&client, firewall_id).await?; - } - - // Remove record - sql_execute!( - [ctx] - " - UPDATE db_cluster.servers_linode - SET destroy_ts = $2 - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - util::timestamp::now(), - ) - .await?; - - Ok(linode::server_destroy::Response {}) -} diff --git a/svc/pkg/linode/ops/server-provision/Cargo.toml b/svc/pkg/linode/ops/server-provision/Cargo.toml deleted file mode 100644 index a7e25cafb..000000000 --- a/svc/pkg/linode/ops/server-provision/Cargo.toml +++ /dev/null @@ -1,25 +0,0 @@ -[package] -name = "linode-server-provision" -version = "0.0.1" -edition = "2018" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -reqwest = { version = "0.11", features = ["json"] } -util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } -util-linode = { package = "rivet-util-linode", path = "../../util" } - -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } - -linode-server-destroy = { path = "../server-destroy" } diff --git a/svc/pkg/linode/ops/server-provision/README.md b/svc/pkg/linode/ops/server-provision/README.md deleted file mode 100644 index 7b0f7f2d1..000000000 --- a/svc/pkg/linode/ops/server-provision/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# linode-server-provision - -This was meant to be agnostic to all other packages and simply create a server on Linode, but because of -custom API keys and prebake images we need to include a `datacenter_id` in the request. In the future and if -needed this can be made optional so that this endpoint does not require a `datacenter_id`. 
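The README deleted above records a constraint that carries over to the workflow rewrite: provisioning cannot be made fully provider-agnostic, because a datacenter may supply its own Linode API key, with the optional `linode/token` secret apparently acting as the shared fallback (which is why `client::Client::new` takes an optional token later in this diff). Below is a minimal, self-contained sketch of that token-selection rule; the `Datacenter` struct and `resolve_token` helper are illustrative stand-ins for the real lookup in the cluster package, not part of this patch.

```rust
// Sketch only. `Datacenter` is a hypothetical stand-in; in the real codebase
// the record comes from the cluster package's datacenter lookup.
struct Datacenter {
    provider_api_token: Option<String>,
}

/// Prefer the datacenter-scoped token, falling back to the shared secret.
fn resolve_token(dc: &Datacenter, shared_secret: Option<String>) -> Option<String> {
    dc.provider_api_token.clone().or(shared_secret)
}

fn main() {
    // No custom token on this datacenter, so the shared secret is used.
    let dc = Datacenter { provider_api_token: None };
    let token = resolve_token(&dc, Some("shared-linode-token".to_string()));
    assert_eq!(token.as_deref(), Some("shared-linode-token"));
}
```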
diff --git a/svc/pkg/linode/ops/server-provision/src/lib.rs b/svc/pkg/linode/ops/server-provision/src/lib.rs deleted file mode 100644 index d08dd1c2f..000000000 --- a/svc/pkg/linode/ops/server-provision/src/lib.rs +++ /dev/null @@ -1,266 +0,0 @@ -use proto::backend::{self, cluster::PoolType, pkg::*}; -use rivet_operation::prelude::*; -use util_linode::api; - -// Less than the timeout in cluster-server-provision -#[operation(name = "linode-server-provision", timeout = 245)] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let crdb = ctx.crdb().await?; - let server_id = unwrap_ref!(ctx.server_id).as_uuid(); - let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); - let provider_datacenter_id = ctx.provider_datacenter_id.clone(); - let pool_type = unwrap!(PoolType::from_i32(ctx.pool_type)); - let provider_hardware = unwrap_ref!(ctx.hardware).provider_hardware.clone(); - - let datacenter_res = op!([ctx] cluster_datacenter_get { - datacenter_ids: vec![datacenter_id.into()], - }) - .await?; - let datacenter = unwrap!(datacenter_res.datacenters.first()); - - let ns = util::env::namespace(); - let pool_type_str = match pool_type { - PoolType::Job => "job", - PoolType::Gg => "gg", - PoolType::Ats => "ats", - }; - // Linode label must be 3-64 characters, UUID's are 36 - let name = format!("{ns}-{server_id}"); - - let tags = ctx - .tags - .iter() - .cloned() - .chain([ - // HACK: Linode requires tags to be > 3 characters. We extend the namespace to make sure it - // meets the minimum length requirement. - format!("rivet-{ns}"), - format!("{ns}-{provider_datacenter_id}"), - format!("{ns}-{pool_type_str}"), - format!("{ns}-{provider_datacenter_id}-{pool_type_str}"), - ]) - .collect::>(); - - let firewall_inbound = match pool_type { - PoolType::Job => util::net::job::firewall(), - PoolType::Gg => util::net::gg::firewall(), - PoolType::Ats => util::net::ats::firewall(), - }; - - // Build context - let server = api::ProvisionCtx { - datacenter: provider_datacenter_id, - name, - hardware: provider_hardware, - vlan_ip: Some(ctx.vlan_ip.clone()), - tags, - firewall_inbound, - }; - - // Build HTTP client - let client = util_linode::Client::new(datacenter.provider_api_token.clone()).await?; - - // Create SSH key - let ssh_key_label = format!("{ns}-{server_id}"); - let ssh_key_res = api::create_ssh_key( - &client, - &ssh_key_label, - ctx.tags.iter().any(|tag| tag == "test"), - ) - .await?; - - // Write SSH key id - sql_execute!( - [ctx, &crdb] - " - INSERT INTO db_cluster.servers_linode ( - server_id, - ssh_key_id - ) - VALUES ($1, $2) - ", - server_id, - ssh_key_res.id as i64, - ) - .await?; - - let create_instance_res = - api::create_instance(&client, &server, &ssh_key_res.public_key).await?; - let linode_id = create_instance_res.id; - - // Write linode id - sql_execute!( - [ctx, &crdb] - " - UPDATE db_cluster.servers_linode - SET linode_id = $2 - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - linode_id as i64, - ) - .await?; - - api::wait_instance_ready(&client, linode_id).await?; - - let (create_disks_res, used_custom_image) = create_disks( - &ctx, - &crdb, - &client, - CreateDisks { - provider_datacenter_id: &server.datacenter, - datacenter_id, - pool_type, - ssh_key: &ssh_key_res.public_key, - linode_id, - server_disk_size: create_instance_res.specs.disk, - }, - ) - .await?; - - api::create_instance_config(&client, &server, linode_id, &create_disks_res).await?; - - let firewall_res = api::create_firewall(&client, &server, linode_id).await?; - - // Write 
firewall id - sql_execute!( - [ctx, &crdb] - " - UPDATE db_cluster.servers_linode - SET firewall_id = $2 - WHERE - server_id = $1 AND - destroy_ts IS NULL - ", - server_id, - firewall_res.id as i64, - ) - .await?; - - api::boot_instance(&client, linode_id).await?; - - let public_ip = api::get_public_ip(&client, linode_id).await?; - - Ok(linode::server_provision::Response { - provider_server_id: linode_id.to_string(), - public_ip: public_ip.to_string(), - already_installed: used_custom_image, - }) -} - -struct CreateDisks<'a> { - provider_datacenter_id: &'a str, - datacenter_id: Uuid, - pool_type: PoolType, - ssh_key: &'a str, - linode_id: u64, - server_disk_size: u64, -} - -async fn create_disks( - ctx: &OperationContext, - crdb: &CrdbPool, - client: &util_linode::Client, - opts: CreateDisks<'_>, -) -> GlobalResult<(api::CreateDisksResponse, bool)> { - // Try to get custom image (if exists) - let (custom_image, updated) = if ctx.use_prebakes { - get_custom_image(ctx, crdb, opts.datacenter_id, opts.pool_type).await? - } else { - (None, false) - }; - - // Default image - let used_custom_image = custom_image.is_some(); - let image = if let Some(custom_image) = custom_image { - tracing::info!("using custom image {}", custom_image); - - custom_image - } else { - tracing::info!("custom image not ready yet, continuing normally"); - - "linode/debian11".to_string() - }; - - // Start custom image creation process - if updated { - msg!([ctx] linode::msg::prebake_provision(opts.datacenter_id, opts.pool_type as i32) { - datacenter_id: ctx.datacenter_id, - pool_type: opts.pool_type as i32, - provider_datacenter_id: opts.provider_datacenter_id.to_string(), - tags: Vec::new(), - }) - .await?; - } - - let create_disks_res = api::create_disks( - client, - opts.ssh_key, - opts.linode_id, - &image, - opts.server_disk_size, - ) - .await?; - - Ok((create_disks_res, used_custom_image)) -} - -async fn get_custom_image( - ctx: &OperationContext, - crdb: &CrdbPool, - datacenter_id: Uuid, - pool_type: PoolType, -) -> GlobalResult<(Option, bool)> { - let provider = backend::cluster::Provider::Linode; - - // Get the custom image id for this server, or insert a record and start creating one - let (image_id, updated) = sql_fetch_one!( - [ctx, (Option, bool), &crdb] - " - WITH - updated AS ( - INSERT INTO db_cluster.server_images AS s ( - provider, install_hash, datacenter_id, pool_type, create_ts - ) - VALUES ($1, $2, $3, $4, $5) - ON CONFLICT (provider, install_hash, datacenter_id, pool_type) DO UPDATE - SET - provider_image_id = NULL, - create_ts = $5 - WHERE s.create_ts < $6 - RETURNING provider, install_hash, datacenter_id, pool_type - ), - selected AS ( - SELECT provider, install_hash, datacenter_id, pool_type, provider_image_id - FROM db_cluster.server_images - WHERE - provider = $1 AND - install_hash = $2 AND - datacenter_id = $3 AND - pool_type = $4 - ) - SELECT - selected.provider_image_id, - -- Primary key is not null - (updated.provider IS NOT NULL) AS updated - FROM selected - FULL OUTER JOIN updated - ON true - ", - provider as i64, - util_cluster::INSTALL_SCRIPT_HASH, - datacenter_id, - pool_type as i64, - util::timestamp::now(), - // 5 month expiration - util::timestamp::now() - util::duration::days(5 * 30), - ) - .await?; - - // Updated is true if this specific sql call either reset (if expired) or inserted the row - Ok((if updated { None } else { image_id }, updated)) -} diff --git a/svc/pkg/linode/src/lib.rs b/svc/pkg/linode/src/lib.rs new file mode 100644 index 000000000..1ebf758e3 --- /dev/null +++ 
b/svc/pkg/linode/src/lib.rs @@ -0,0 +1,15 @@ +use chirp_workflow::prelude::*; + +pub mod ops; +pub mod types; +pub mod util; +pub mod workflows; + +pub fn registry() -> Registry { + use workflows::*; + + let mut registry = Registry::new(); + registry.register_workflow::<server::Workflow>(); + + registry +} diff --git a/svc/pkg/linode/src/ops/instance_type_get.rs b/svc/pkg/linode/src/ops/instance_type_get.rs new file mode 100644 index 000000000..69eeed103 --- /dev/null +++ b/svc/pkg/linode/src/ops/instance_type_get.rs @@ -0,0 +1,53 @@ +use chirp_workflow::prelude::*; + +use crate::{ + types::InstanceType, + util::{api, client}, +}; + +pub struct Input { + pub hardware_ids: Vec<String>, +} + +pub struct Output { + pub instance_types: Vec<InstanceType>, +} + +#[operation] +pub async fn linode_instance_type_get(ctx: &OperationCtx, input: &Input) -> GlobalResult<Output> { + // Build HTTP client + let client = client::Client::new(None).await?; + + // Get hardware stats from linode and cache + let instance_types_res = ctx + .cache() + .ttl(util::duration::days(1)) + .fetch_one_json("instance_types", "linode", { + let client = client.clone(); + move |mut cache, key| { + let client = client.clone(); + async move { + let api_res = api::list_instance_types(&client).await?; + + cache.resolve( + &key, + api_res + .into_iter() + .map(Into::<InstanceType>::into) + .collect::<Vec<_>>(), + ); + + Ok(cache) + } + } + }) + .await?; + + // Filter by hardware + let instance_types = unwrap!(instance_types_res) + .into_iter() + .filter(|ty| input.hardware_ids.iter().any(|h| h == &ty.hardware_id)) + .collect::<Vec<_>>(); + + Ok(Output { instance_types }) +} diff --git a/svc/pkg/linode/src/ops/mod.rs b/svc/pkg/linode/src/ops/mod.rs new file mode 100644 index 000000000..ff2e5a372 --- /dev/null +++ b/svc/pkg/linode/src/ops/mod.rs @@ -0,0 +1 @@ +pub mod instance_type_get; diff --git a/svc/pkg/linode/src/types.rs b/svc/pkg/linode/src/types.rs new file mode 100644 index 000000000..12f57d922 --- /dev/null +++ b/svc/pkg/linode/src/types.rs @@ -0,0 +1,38 @@ +use chirp_workflow::prelude::*; +use serde::{Deserialize, Serialize}; + +#[derive(Debug, Serialize, Deserialize)] +pub struct InstanceType { + pub hardware_id: String, + pub memory: u64, + pub disk: u64, + pub vcpus: u64, + pub transfer: u64, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub enum FirewallPreset { + Job, + Gg, + Ats, +} + +impl FirewallPreset { + pub fn rules(&self) -> Vec<util::net::FirewallRule> { + match self { + FirewallPreset::Job => util::net::job::firewall(), + FirewallPreset::Gg => util::net::gg::firewall(), + FirewallPreset::Ats => util::net::ats::firewall(), + } + } +} + +impl std::fmt::Display for FirewallPreset { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + FirewallPreset::Job => write!(f, "job"), + FirewallPreset::Gg => write!(f, "gg"), + FirewallPreset::Ats => write!(f, "ats"), + } + } +} diff --git a/svc/pkg/linode/util/src/api.rs b/svc/pkg/linode/src/util/api.rs similarity index 89% rename from svc/pkg/linode/util/src/api.rs rename to svc/pkg/linode/src/util/api.rs index 6f7f59754..857327595 100644 --- a/svc/pkg/linode/util/src/api.rs +++ b/svc/pkg/linode/src/util/api.rs @@ -1,22 +1,18 @@ use std::{net::Ipv4Addr, str, time::Duration}; +use chirp_workflow::prelude::*; use chrono::{DateTime, Utc}; -use proto::backend::pkg::*; -use rivet_operation::prelude::*; use serde::{Deserialize, Deserializer}; use serde_json::json; use ssh_key::PrivateKey; -use crate::{generate_password, ApiErrorResponse, Client}; - -pub struct ProvisionCtx { - pub datacenter: String, - 
pub hardware: String, - pub vlan_ip: Option, - pub tags: Vec, - pub firewall_inbound: Vec, -} +use crate::{ + types::FirewallPreset, + util::{ + client::{ApiErrorResponse, Client}, + generate_password, + }, +}; #[derive(Deserialize)] struct CreateSshKeyResponse { @@ -79,7 +75,10 @@ pub struct InstanceSpec { pub async fn create_instance( client: &Client, - server: &ProvisionCtx, + name: &str, + datacenter: &str, + hardware: &str, + tags: &[String], ssh_key: &str, ) -> GlobalResult { let ns = util::env::namespace(); @@ -90,12 +89,12 @@ pub async fn create_instance( .post( "/linode/instances", json!({ - "label": server.name, + "label": name, "group": ns, - "region": server.datacenter, - "type": server.hardware, + "region": datacenter, + "type": hardware, "authorized_keys": vec![ssh_key], - "tags": server.tags, + "tags": tags, "private_ip": true, "backups_enabled": false, }), @@ -158,15 +157,16 @@ pub async fn create_disks( pub async fn create_instance_config( client: &Client, - server: &ProvisionCtx, + vlan_ip: Option<&Ipv4Addr>, linode_id: u64, - disks: &CreateDisksResponse, + boot_disk_id: u64, + swap_disk_id: u64, ) -> GlobalResult<()> { tracing::info!("creating instance config"); let ns = util::env::namespace(); - let interfaces = if let Some(vlan_ip) = &server.vlan_ip { + let interfaces = if let Some(vlan_ip) = vlan_ip { let region_vlan = util::net::region::vlan_ip_net(); let ipam_address = format!("{}/{}", vlan_ip, region_vlan.prefix_len()); @@ -196,10 +196,10 @@ pub async fn create_instance_config( "root_device": "/dev/sda", "devices": { "sda": { - "disk_id": disks.boot_id, + "disk_id": boot_disk_id, }, "sdb": { - "disk_id": disks.swap_id, + "disk_id": swap_disk_id }, }, "interfaces": interfaces, @@ -215,15 +215,16 @@ pub struct CreateFirewallResponse { pub async fn create_firewall( client: &Client, - server: &ProvisionCtx, + firewall: &FirewallPreset, + tags: &[String], linode_id: u64, ) -> GlobalResult { tracing::info!("creating firewall"); let ns = util::env::namespace(); - let firewall_inbound = server - .firewall_inbound + let firewall_inbound = firewall + .rules() .iter() .map(|rule| { json!({ @@ -254,7 +255,7 @@ pub async fn create_firewall( "devices": { "linodes": [linode_id], }, - "tags": server.tags, + "tags": tags, }), ) .await @@ -364,7 +365,7 @@ pub async fn get_public_ip(client: &Client, linode_id: u64) -> GlobalResult GlobalResult<()> { +pub async fn delete_ssh_key(client: &Client, ssh_key_id: u64) -> GlobalResult<()> { tracing::info!("deleting linode ssh key"); client @@ -372,7 +373,7 @@ pub async fn delete_ssh_key(client: &Client, ssh_key_id: i64) -> GlobalResult<() .await } -pub async fn delete_instance(client: &Client, linode_id: i64) -> GlobalResult<()> { +pub async fn delete_instance(client: &Client, linode_id: u64) -> GlobalResult<()> { tracing::info!(?linode_id, "deleting linode instance"); client @@ -380,7 +381,7 @@ pub async fn delete_instance(client: &Client, linode_id: i64) -> GlobalResult<() .await } -pub async fn delete_firewall(client: &Client, firewall_id: i64) -> GlobalResult<()> { +pub async fn delete_firewall(client: &Client, firewall_id: u64) -> GlobalResult<()> { tracing::info!("deleting firewall"); client @@ -388,7 +389,7 @@ pub async fn delete_firewall(client: &Client, firewall_id: i64) -> GlobalResult< .await } -pub async fn shut_down(client: &Client, linode_id: i64) -> GlobalResult<()> { +pub async fn shut_down(client: &Client, linode_id: u64) -> GlobalResult<()> { tracing::info!("shutting down instance"); client @@ -446,10 +447,17 @@ pub struct 
CustomImage { pub async fn list_custom_images(client: &Client) -> GlobalResult> { tracing::info!("listing custom images"); + let ns = util::env::namespace(); + let req = client .inner() .get("https://api.linode.com/v4/images") - .query(&[("page_size", CUSTOM_IMAGE_LIST_SIZE)]); + .query(&[("page_size", CUSTOM_IMAGE_LIST_SIZE)]) + // Filter this namespace only + .header( + "X-Filter", + format!(r#"{{ "label": {{ "+contains": "{ns}-" }} }}"#), + ); let res = client .request(req, None, false) @@ -485,9 +493,9 @@ pub struct InstanceType { pub network_out: u64, } -impl From for linode::instance_type_get::response::InstanceType { +impl From for crate::types::InstanceType { fn from(value: InstanceType) -> Self { - linode::instance_type_get::response::InstanceType { + crate::types::InstanceType { hardware_id: value.id, memory: value.memory, disk: value.disk, diff --git a/svc/pkg/linode/util/src/lib.rs b/svc/pkg/linode/src/util/client.rs similarity index 93% rename from svc/pkg/linode/util/src/lib.rs rename to svc/pkg/linode/src/util/client.rs index 9ee833285..51900cf5e 100644 --- a/svc/pkg/linode/util/src/lib.rs +++ b/svc/pkg/linode/src/util/client.rs @@ -1,13 +1,9 @@ use std::{fmt, time::Duration}; -use rand::{distributions::Alphanumeric, Rng}; +use chirp_workflow::prelude::*; use reqwest::header; -use rivet_operation::prelude::*; use serde::{de::DeserializeOwned, Deserialize}; -pub mod api; -pub mod consts; - #[derive(Clone)] pub struct Client { // Safe to clone, has inner Arc @@ -201,12 +197,3 @@ struct ApiError { field: Option, reason: String, } - -/// Generates a random string for a secret. -pub(crate) fn generate_password(length: usize) -> String { - rand::thread_rng() - .sample_iter(&Alphanumeric) - .take(length) - .map(char::from) - .collect() -} diff --git a/svc/pkg/linode/util/src/consts.rs b/svc/pkg/linode/src/util/consts.rs similarity index 100% rename from svc/pkg/linode/util/src/consts.rs rename to svc/pkg/linode/src/util/consts.rs diff --git a/svc/pkg/cluster/util/src/lib.rs b/svc/pkg/linode/src/util/mod.rs similarity index 53% rename from svc/pkg/cluster/util/src/lib.rs rename to svc/pkg/linode/src/util/mod.rs index 16817b893..4f44d66e7 100644 --- a/svc/pkg/cluster/util/src/lib.rs +++ b/svc/pkg/linode/src/util/mod.rs @@ -1,12 +1,8 @@ -use types::rivet::backend::{self, pkg::*}; -use uuid::Uuid; +use rand::{distributions::Alphanumeric, Rng}; -pub mod metrics; -pub mod test; - -// Use the hash of the server install script in the image variant so that if the install scripts are updated -// we won't be using the old image anymore -pub const INSTALL_SCRIPT_HASH: &str = include_str!(concat!(env!("OUT_DIR"), "/hash.txt")); +pub mod api; +pub mod client; +pub mod consts; // NOTE: We don't reserve CPU because Nomad is running as a higher priority process than the rest and // shouldn't be doing much heavy lifting. @@ -17,9 +13,6 @@ const RESERVE_MEMORY: u64 = RESERVE_SYSTEM_MEMORY + RESERVE_LB_MEMORY; const CPU_PER_CORE: u64 = 1999; -// TTL of the token written to prebake images. Prebake images are renewed before the token would expire -pub const SERVER_TOKEN_TTL: i64 = rivet_util::duration::days(30 * 6); - /// Provider agnostic hardware specs. 
#[derive(Debug)] pub struct JobNodeConfig { @@ -31,9 +24,7 @@ } impl JobNodeConfig { - pub fn from_linode( - instance_type: &linode::instance_type_get::response::InstanceType, - ) -> JobNodeConfig { + pub fn from_linode(instance_type: &crate::types::InstanceType) -> JobNodeConfig { // Account for kernel memory overhead // https://www.linode.com/community/questions/17791/why-doesnt-free-m-match-the-full-amount-of-ram-of-my-nanode-plan let memory = instance_type.memory * 96 / 100; @@ -66,22 +57,11 @@ } } -// Cluster id for provisioning servers -pub fn default_cluster_id() -> Uuid { - Uuid::nil() -} - -pub fn server_name( - provider_datacenter_id: &str, - pool_type: backend::cluster::PoolType, - server_id: Uuid, -) -> String { - let ns = rivet_util::env::namespace(); - let pool_type_str = match pool_type { - backend::cluster::PoolType::Job => "job", - backend::cluster::PoolType::Gg => "gg", - backend::cluster::PoolType::Ats => "ats", - }; - - format!("{ns}-{provider_datacenter_id}-{pool_type_str}-{server_id}",) +/// Generates a random string for a secret. +pub(crate) fn generate_password(length: usize) -> String { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect() } diff --git a/svc/pkg/linode/src/workflows/image.rs b/svc/pkg/linode/src/workflows/image.rs new file mode 100644 index 000000000..e69de29bb diff --git a/svc/pkg/linode/src/workflows/mod.rs b/svc/pkg/linode/src/workflows/mod.rs new file mode 100644 index 000000000..74f47ad34 --- /dev/null +++ b/svc/pkg/linode/src/workflows/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/svc/pkg/linode/worker/src/workers/prebake_install_complete.rs b/svc/pkg/linode/src/workflows/prebake_install_complete.rs similarity index 100% rename from svc/pkg/linode/worker/src/workers/prebake_install_complete.rs rename to svc/pkg/linode/src/workflows/prebake_install_complete.rs diff --git a/svc/pkg/linode/worker/src/workers/prebake_provision.rs b/svc/pkg/linode/src/workflows/prebake_provision.rs similarity index 100% rename from svc/pkg/linode/worker/src/workers/prebake_provision.rs rename to svc/pkg/linode/src/workflows/prebake_provision.rs diff --git a/svc/pkg/linode/src/workflows/server.rs b/svc/pkg/linode/src/workflows/server.rs new file mode 100644 index 000000000..4e42850cb --- /dev/null +++ b/svc/pkg/linode/src/workflows/server.rs @@ -0,0 +1,488 @@ +use std::net::Ipv4Addr; + +use chirp_workflow::prelude::*; +use serde_json::json; + +use crate::{ + types::FirewallPreset, + util::{api, client}, +}; + +const DEFAULT_IMAGE: &str = "linode/debian11"; + +#[derive(Debug, Serialize, Deserialize)] +pub struct Input { + pub server_id: Uuid, + pub custom_image: Option<String>, + pub provider_datacenter_id: String, + pub provider_api_token: String, + pub provider_hardware: String, + pub firewall_preset: FirewallPreset, + pub vlan_ip: Ipv4Addr, + pub tags: Vec<String>, +} + +#[workflow] +pub async fn linode_server(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + let is_test = input.tags.iter().any(|tag| tag == "test"); + let ns = util::env::namespace(); + // Linode label must be 3-64 characters, UUID's are 36 + let name = format!("{ns}-{}", input.server_id); + + let tags = input + .tags + .iter() + .cloned() + .chain([ + // HACK: Linode requires tags to be > 3 characters. We extend the namespace to make sure it + // meets the minimum length requirement. 
+ format!("rivet-{ns}"), + format!("{ns}-{}", input.provider_datacenter_id), + format!("{ns}-{}", input.firewall_preset), + format!( + "{ns}-{}-{}", + input.provider_datacenter_id, input.firewall_preset + ), + ]) + .collect::>(); + + let ssh_key_res = ctx + .activity(CreateSshKeyInput { + server_id: input.server_id, + provider_api_token: input.provider_api_token.clone(), + is_test, + }) + .await?; + + let create_instance_res = ctx + .activity(CreateInstanceInput { + server_id: input.server_id, + provider_api_token: input.provider_api_token.clone(), + ssh_public_key: ssh_key_res.public_key.clone(), + name, + datacenter: input.provider_datacenter_id.clone(), + hardware: input.provider_hardware.clone(), + tags: tags.clone(), + }) + .await?; + + ctx.activity(WaitInstanceReadyInput { + provider_api_token: input.provider_api_token.clone(), + linode_id: create_instance_res.linode_id, + }) + .await?; + + // let image_res = ctx + // .activity(GetImageInput { + // provider_api_token: input.provider_api_token.clone(), + // datacenter_id: input.datacenter_id, + // pool_type: input.pool_type, + // }) + // .await?; + + // // Start custom image creation process + // if image_res.updated { + // msg!([ctx] linode::msg::prebake_provision(opts.datacenter_id, opts.pool_type as i32) { + // datacenter_id: ctx.datacenter_id, + // pool_type: opts.pool_type as i32, + // provider_datacenter_id: opts.provider_datacenter_id.to_string(), + // tags: Vec::new(), + // }) + // .await?; + // } + + let disks_res = ctx + .activity(CreateDisksInput { + provider_api_token: input.provider_api_token.clone(), + image: input + .custom_image + .clone() + .unwrap_or_else(|| DEFAULT_IMAGE.to_string()), + ssh_public_key: ssh_key_res.public_key.clone(), + linode_id: create_instance_res.linode_id, + disk_size: create_instance_res.server_disk_size, + }) + .await?; + + ctx.activity(CreateInstanceConfigInput { + provider_api_token: input.provider_api_token.clone(), + vlan_ip: input.vlan_ip, + linode_id: create_instance_res.linode_id, + disks: disks_res, + }) + .await?; + + let firewall_id = ctx + .activity(CreateFirewallInput { + server_id: input.server_id, + provider_api_token: input.provider_api_token.clone(), + firewall_preset: input.firewall_preset.clone(), + tags, + linode_id: create_instance_res.linode_id, + }) + .await?; + + ctx.activity(BootInstanceInput { + provider_api_token: input.provider_api_token.clone(), + linode_id: create_instance_res.linode_id, + }) + .await?; + + let public_ip = ctx + .activity(GetPublicIpInput { + provider_api_token: input.provider_api_token.clone(), + linode_id: create_instance_res.linode_id, + }) + .await?; + + ctx.tagged_signal( + &json!({ + "server_id": input.server_id, + }), + ProvisionComplete { + linode_id: create_instance_res.linode_id, + public_ip, + }, + ) + .await?; + + ctx.listen::().await?; + + ctx.activity(DestroyInstanceInput { + provider_api_token: input.provider_api_token.clone(), + ssh_key_id: ssh_key_res.ssh_key_id, + linode_id: create_instance_res.linode_id, + firewall_id, + }) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateSshKeyInput { + server_id: Uuid, + provider_api_token: String, + is_test: bool, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateSshKeyOutput { + ssh_key_id: u64, + public_key: String, +} + +#[activity(CreateSshKey)] +async fn create_ssh_key( + ctx: &ActivityCtx, + input: &CreateSshKeyInput, +) -> GlobalResult { + // Build HTTP client + let client = 
client::Client::new(Some(input.provider_api_token.clone())).await?; + + let ns = util::env::namespace(); + + let ssh_key_label = format!("{ns}-{}", input.server_id); + let ssh_key_res = api::create_ssh_key(&client, &ssh_key_label, input.is_test).await?; + + Ok(CreateSshKeyOutput { + ssh_key_id: ssh_key_res.id, + public_key: ssh_key_res.public_key, + }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateInstanceInput { + server_id: Uuid, + provider_api_token: String, + ssh_public_key: String, + name: String, + datacenter: String, + hardware: String, + tags: Vec<String>, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct CreateInstanceOutput { + linode_id: u64, + server_disk_size: u64, +} + +#[activity(CreateInstance)] +async fn create_instance( + ctx: &ActivityCtx, + input: &CreateInstanceInput, +) -> GlobalResult<CreateInstanceOutput> { + // Build HTTP client + let client = client::Client::new(Some(input.provider_api_token.clone())).await?; + + let create_instance_res = api::create_instance( + &client, + &input.name, + &input.datacenter, + &input.hardware, + &input.tags, + &input.ssh_public_key, + ) + .await?; + let linode_id = create_instance_res.id; + + Ok(CreateInstanceOutput { + linode_id, + server_disk_size: create_instance_res.specs.disk, + }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct WaitInstanceReadyInput { + provider_api_token: String, + linode_id: u64, +} + +#[activity(WaitInstanceReady)] +async fn wait_instance_ready( + ctx: &ActivityCtx, + input: &WaitInstanceReadyInput, +) -> GlobalResult<()> { + // Build HTTP client + let client = client::Client::new(Some(input.provider_api_token.clone())).await?; + + api::wait_instance_ready(&client, input.linode_id).await +} + +// #[derive(Debug, Serialize, Deserialize, Hash)] +// struct GetImageInput { +// server_id: Uuid, +// provider_api_token: String, +// provider_datacenter_id: String, +// datacenter_id: Uuid, +// pool_type: PoolType, +// ssh_key: String, +// linode_id: u64, +// server_disk_size: u64, +// } + +// #[derive(Debug, Serialize, Deserialize, Hash)] +// struct GetImageOutput { +// custom_image: Option<String>, +// updated: bool, +// } + +// #[activity(GetImage)] +// async fn get_image(ctx: &ActivityCtx, input: &GetImageInput) -> GlobalResult<GetImageOutput> { +// // Try to get custom image (if exists) +// let (custom_image, updated) = if input.use_prebakes { +// let provider = Provider::Linode; + +// // Get the custom image id for this server, or insert a record and start creating one +// let (image_id, updated) = sql_fetch_one!( +// [ctx, (Option<String>, bool)] +// " +// WITH +// updated AS ( +// INSERT INTO db_cluster.server_images2 AS s ( +// provider, install_hash, datacenter_id, pool_type, create_ts +// ) +// VALUES ($1, $2, $3, $4, $5) +// ON CONFLICT (provider, install_hash, datacenter_id, pool_type) DO UPDATE +// SET +// provider_image_id = NULL, +// create_ts = $5 +// WHERE s.create_ts < $6 +// RETURNING provider, install_hash, datacenter_id, pool_type +// ), +// selected AS ( +// SELECT provider, install_hash, datacenter_id, pool_type, provider_image_id +// FROM db_cluster.server_images2 +// WHERE +// provider = $1 AND +// install_hash = $2 AND +// datacenter_id = $3 AND +// pool_type = $4 +// ) +// SELECT +// selected.provider_image_id, +// -- Primary key is not null +// (updated.provider IS NOT NULL) AS updated +// FROM selected +// FULL OUTER JOIN updated +// ON true +// ", +// provider as i64, +// crate::util::INSTALL_SCRIPT_HASH, +// input.datacenter_id, +// input.pool_type as i64, +// util::timestamp::now(), +// // 5 month 
+// #[derive(Debug, Serialize, Deserialize, Hash)]
+// struct GetImageInput {
+//     server_id: Uuid,
+//     provider_api_token: String,
+//     provider_datacenter_id: String,
+//     datacenter_id: Uuid,
+//     pool_type: PoolType,
+//     ssh_key: String,
+//     linode_id: u64,
+//     server_disk_size: u64,
+// }
+
+// #[derive(Debug, Serialize, Deserialize, Hash)]
+// struct GetImageOutput {
+//     custom_image: Option<String>,
+//     updated: bool,
+// }
+
+// #[activity(GetImage)]
+// async fn get_image(ctx: &ActivityCtx, input: &GetImageInput) -> GlobalResult<GetImageOutput> {
+//     // Try to get custom image (if exists)
+//     let (custom_image, updated) = if input.use_prebakes {
+//         let provider = Provider::Linode;
+
+//         // Get the custom image id for this server, or insert a record and start creating one
+//         let (image_id, updated) = sql_fetch_one!(
+//             [ctx, (Option<String>, bool)]
+//             "
+//             WITH
+//                 updated AS (
+//                     INSERT INTO db_cluster.server_images2 AS s (
+//                         provider, install_hash, datacenter_id, pool_type, create_ts
+//                     )
+//                     VALUES ($1, $2, $3, $4, $5)
+//                     ON CONFLICT (provider, install_hash, datacenter_id, pool_type) DO UPDATE
+//                     SET
+//                         provider_image_id = NULL,
+//                         create_ts = $5
+//                     WHERE s.create_ts < $6
+//                     RETURNING provider, install_hash, datacenter_id, pool_type
+//                 ),
+//                 selected AS (
+//                     SELECT provider, install_hash, datacenter_id, pool_type, provider_image_id
+//                     FROM db_cluster.server_images2
+//                     WHERE
+//                         provider = $1 AND
+//                         install_hash = $2 AND
+//                         datacenter_id = $3 AND
+//                         pool_type = $4
+//                 )
+//             SELECT
+//                 selected.provider_image_id,
+//                 -- Primary key is not null
+//                 (updated.provider IS NOT NULL) AS updated
+//             FROM selected
+//             FULL OUTER JOIN updated
+//             ON true
+//             ",
+//             provider as i64,
+//             crate::util::INSTALL_SCRIPT_HASH,
+//             input.datacenter_id,
+//             input.pool_type as i64,
+//             util::timestamp::now(),
+//             // 5 month expiration
+//             util::timestamp::now() - util::duration::days(5 * 30),
+//         )
+//         .await?;
+
+//         // Updated is true if this specific sql call either reset (if expired) or inserted the row
+//         Ok((if updated { None } else { image_id }, updated))
+//     } else {
+//         Ok((None, false))
+//     };
+
+//     // Default image
+//     let used_custom_image = custom_image.is_some();
+//     let image = if let Some(custom_image) = custom_image {
+//         tracing::info!("using custom image {}", custom_image);
+
+//         custom_image
+//     } else {
+//         tracing::info!("custom image not ready yet, continuing normally");
+
+//         "linode/debian11".to_string()
+//     };
+
+//     Ok(GetImageOutput { image, updated })
+// }
+
+#[derive(Debug, Serialize, Deserialize, Hash)]
+struct CreateDisksInput {
+    provider_api_token: String,
+    image: String,
+    ssh_public_key: String,
+    linode_id: u64,
+    disk_size: u64,
+}
+
+#[derive(Debug, Serialize, Deserialize, Hash)]
+struct CreateDisksOutput {
+    boot_id: u64,
+    swap_id: u64,
+}
+
+#[activity(CreateDisks)]
+async fn create_disks(
+    ctx: &ActivityCtx,
+    input: &CreateDisksInput,
+) -> GlobalResult<CreateDisksOutput> {
+    // Build HTTP client
+    let client = client::Client::new(Some(input.provider_api_token.clone())).await?;
+
+    let create_disks_res = api::create_disks(
+        &client,
+        &input.ssh_public_key,
+        input.linode_id,
+        &input.image,
+        input.disk_size,
+    )
+    .await?;
+
+    Ok(CreateDisksOutput {
+        boot_id: create_disks_res.boot_id,
+        swap_id: create_disks_res.swap_id,
+    })
+}
+
+#[derive(Debug, Serialize, Deserialize, Hash)]
+struct CreateInstanceConfigInput {
+    provider_api_token: String,
+    vlan_ip: Ipv4Addr,
+    linode_id: u64,
+    disks: CreateDisksOutput,
+}
+
+#[activity(CreateInstanceConfig)]
+async fn create_instance_config(
+    ctx: &ActivityCtx,
+    input: &CreateInstanceConfigInput,
+) -> GlobalResult<()> {
+    // Build HTTP client
+    let client = client::Client::new(Some(input.provider_api_token.clone())).await?;
+
+    api::create_instance_config(
+        &client,
+        Some(&input.vlan_ip),
+        input.linode_id,
+        input.disks.boot_id,
+        input.disks.swap_id,
+    )
+    .await
+}
+
+#[derive(Debug, Serialize, Deserialize, Hash)]
+struct CreateFirewallInput {
+    server_id: Uuid,
+    provider_api_token: String,
+    firewall_preset: FirewallPreset,
+    tags: Vec<String>,
+    linode_id: u64,
+}
+
+#[activity(CreateFirewall)]
+async fn create_firewall(ctx: &ActivityCtx, input: &CreateFirewallInput) -> GlobalResult<u64> {
+    // Build HTTP client
+    let client = client::Client::new(Some(input.provider_api_token.clone())).await?;
+
+    let firewall_res = api::create_firewall(
+        &client,
+        &input.firewall_preset,
+        &input.tags,
+        input.linode_id,
+    )
+    .await?;
+
+    Ok(firewall_res.id)
+}
+
+#[derive(Debug, Serialize, Deserialize, Hash)]
+struct BootInstanceInput {
+    provider_api_token: String,
+    linode_id: u64,
+}
+
+#[activity(BootInstance)]
+async fn boot_instance(ctx: &ActivityCtx, input: &BootInstanceInput) -> GlobalResult<()> {
+    // Build HTTP client
+    let client = client::Client::new(Some(input.provider_api_token.clone())).await?;
+
+    api::boot_instance(&client, input.linode_id).await?;
+
+    Ok(())
+}
+
+#[derive(Debug, Serialize, Deserialize, Hash)]
+struct GetPublicIpInput {
+    provider_api_token: String,
+    linode_id: u64,
+}
+
+#[activity(GetPublicIp)]
+async fn get_public_ip(ctx: &ActivityCtx, input: &GetPublicIpInput) -> GlobalResult<Ipv4Addr> {
+    // Build HTTP client
+    let client = client::Client::new(Some(input.provider_api_token.clone())).await?;
+
+    api::get_public_ip(&client, input.linode_id).await
+}
+
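+// Signals consumed and emitted by this workflow. As a sketch (hypothetical
+// call site, mirroring the `tagged_signal` usage above), another service
+// could trigger teardown of this server with:
+//
+//     ctx.tagged_signal(&json!({ "server_id": server_id }), Destroy {}).await?;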
+#[signal("linode-server-provision-complete")]
+pub struct ProvisionComplete {
+    linode_id: u64,
+    public_ip: Ipv4Addr,
+}
+
+#[signal("linode-server-destroy")]
+pub struct Destroy {}
+
+#[derive(Debug, Serialize, Deserialize, Hash)]
+struct DestroyInstanceInput {
+    provider_api_token: String,
+    linode_id: u64,
+    ssh_key_id: u64,
+    firewall_id: u64,
+}
+
+#[activity(DestroyInstance)]
+async fn destroy_instance(ctx: &ActivityCtx, input: &DestroyInstanceInput) -> GlobalResult<()> {
+    // Build HTTP client
+    let client = client::Client::new(Some(input.provider_api_token.clone())).await?;
+
+    api::delete_instance(&client, input.linode_id).await?;
+    api::delete_ssh_key(&client, input.ssh_key_id).await?;
+    api::delete_firewall(&client, input.firewall_id).await?;
+
+    Ok(())
+}
diff --git a/svc/pkg/linode/standalone/gc/Cargo.toml b/svc/pkg/linode/standalone/gc/Cargo.toml
index 4a2cd1ce5..3a775d3c2 100644
--- a/svc/pkg/linode/standalone/gc/Cargo.toml
+++ b/svc/pkg/linode/standalone/gc/Cargo.toml
@@ -19,8 +19,8 @@ serde_json = "1.0"
tokio = { version = "1.29", features = ["full"] }
tracing = "0.1"
tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] }
-util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" }
-util-linode = { package = "rivet-util-linode", path = "../../util" }
+
+linode = { path = "../.." }

[dependencies.sqlx]
git = "https://github.com/rivet-gg/sqlx"
diff --git a/svc/pkg/linode/ops/instance-type-get/tests/integration.rs b/svc/pkg/linode/tests/instance_type_get.rs
similarity index 100%
rename from svc/pkg/linode/ops/instance-type-get/tests/integration.rs
rename to svc/pkg/linode/tests/instance_type_get.rs
diff --git a/svc/pkg/linode/worker/tests/prebake_install_complete.rs b/svc/pkg/linode/tests/prebake_install_complete.rs
similarity index 100%
rename from svc/pkg/linode/worker/tests/prebake_install_complete.rs
rename to svc/pkg/linode/tests/prebake_install_complete.rs
diff --git a/svc/pkg/linode/worker/tests/prebake_provision.rs b/svc/pkg/linode/tests/prebake_provision.rs
similarity index 100%
rename from svc/pkg/linode/worker/tests/prebake_provision.rs
rename to svc/pkg/linode/tests/prebake_provision.rs
diff --git a/svc/pkg/linode/ops/server-destroy/tests/integration.rs b/svc/pkg/linode/tests/server_destroy.rs
similarity index 100%
rename from svc/pkg/linode/ops/server-destroy/tests/integration.rs
rename to svc/pkg/linode/tests/server_destroy.rs
diff --git a/svc/pkg/linode/ops/server-provision/tests/integration.rs b/svc/pkg/linode/tests/server_provision.rs
similarity index 100%
rename from svc/pkg/linode/ops/server-provision/tests/integration.rs
rename to svc/pkg/linode/tests/server_provision.rs
diff --git a/svc/pkg/linode/util/Cargo.toml b/svc/pkg/linode/util/Cargo.toml
deleted file mode 100644
index 4f385e77d..000000000
--- a/svc/pkg/linode/util/Cargo.toml
+++ /dev/null
@@ -1,15 +0,0 @@
-[package]
-name = "rivet-util-linode"
-version = "0.1.0"
-edition = "2021"
-authors = ["Rivet Gaming, LLC "]
-license = "Apache-2.0"
-
-[dependencies]
-chrono = "0.4"
-rand = "0.8"
-reqwest = { version = "0.11", features = ["json"] }
-rivet-operation = { path = "../../../../lib/operation/core" }
-serde = { version = "1.0", features = ["derive"] }
-serde_json = "1.0"
-ssh-key = "0.6.3"
diff --git a/svc/pkg/linode/worker/Cargo.toml b/svc/pkg/linode/worker/Cargo.toml
index 4ac5b98f4..ced034a61 100644
--- a/svc/pkg/linode/worker/Cargo.toml
+++ b/svc/pkg/linode/worker/Cargo.toml
@@ -12,10 +12,9 @@ chirp-worker = { path = "../../../../lib/chirp/worker" }
"../../../../lib/health-checks" } rivet-metrics = { path = "../../../../lib/metrics" } rivet-runtime = { path = "../../../../lib/runtime" } -util-cluster = { package = "rivet-util-cluster", path = "../../cluster/util" } util-linode = { package = "rivet-util-linode", path = "../util" } -cluster-datacenter-get = { path = "../../cluster/ops/datacenter-get" } +cluster = { path = "../../cluster" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/linode/worker/src/lib.rs b/svc/pkg/linode/worker/src/lib.rs deleted file mode 100644 index 3719b10aa..000000000 --- a/svc/pkg/linode/worker/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod workers; diff --git a/svc/pkg/linode/worker/src/workers/mod.rs b/svc/pkg/linode/worker/src/workers/mod.rs deleted file mode 100644 index d54e70dc6..000000000 --- a/svc/pkg/linode/worker/src/workers/mod.rs +++ /dev/null @@ -1,4 +0,0 @@ -pub mod prebake_install_complete; -pub mod prebake_provision; - -chirp_worker::workers![prebake_install_complete, prebake_provision,]; diff --git a/svc/pkg/monolith/standalone/worker/Cargo.toml b/svc/pkg/monolith/standalone/worker/Cargo.toml index 047dd4ead..5e026e03a 100644 --- a/svc/pkg/monolith/standalone/worker/Cargo.toml +++ b/svc/pkg/monolith/standalone/worker/Cargo.toml @@ -23,13 +23,13 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ cdn-worker = { path = "../../../cdn/worker" } cf-custom-hostname-worker = { path = "../../../cf-custom-hostname/worker" } cloud-worker = { path = "../../../cloud/worker" } -cluster-worker = { path = "../../../cluster/worker" } +cluster = { path = "../../../cluster" } external-worker = { path = "../../../external/worker" } game-user-worker = { path = "../../../game-user/worker" } job-log-worker = { path = "../../../job-log/worker" } job-run-worker = { path = "../../../job-run/worker" } kv-worker = { path = "../../../kv/worker" } -linode-worker = { path = "../../../linode/worker" } +linode = { path = "../../../linode" } mm-worker = { path = "../../../mm/worker" } team-invite-worker = { path = "../../../team-invite/worker" } team-worker = { path = "../../../team/worker" } diff --git a/svc/pkg/region/ops/get/Cargo.toml b/svc/pkg/region/ops/get/Cargo.toml index 2520092cc..824356593 100644 --- a/svc/pkg/region/ops/get/Cargo.toml +++ b/svc/pkg/region/ops/get/Cargo.toml @@ -10,8 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } -cluster-datacenter-location-get = { path = "../../../cluster/ops/datacenter-location-get" } +cluster = { path = "../../../cluster" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/region/ops/list-for-game/Cargo.toml b/svc/pkg/region/ops/list-for-game/Cargo.toml index a7b72c1dc..87c107aef 100644 --- a/svc/pkg/region/ops/list-for-game/Cargo.toml +++ b/svc/pkg/region/ops/list-for-game/Cargo.toml @@ -10,8 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -cluster-get-for-game = { path = "../../../cluster/ops/get-for-game" } -cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" } +cluster = { path = "../../../cluster" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/pkg/region/ops/list/Cargo.toml b/svc/pkg/region/ops/list/Cargo.toml index 7be1a8d5a..5debef519 100644 --- 
--- a/svc/pkg/region/ops/list/Cargo.toml
+++ b/svc/pkg/region/ops/list/Cargo.toml
@@ -9,9 +9,8 @@ license = "Apache-2.0"
rivet-operation = { path = "../../../../../lib/operation/core" }
chirp-client = { path = "../../../../../lib/chirp/client" }
prost = "0.10"
-util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" }

-cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" }
+cluster = { path = "../../../cluster" }

[dependencies.sqlx]
git = "https://github.com/rivet-gg/sqlx"
diff --git a/svc/pkg/region/ops/resolve-for-game/Cargo.toml b/svc/pkg/region/ops/resolve-for-game/Cargo.toml
index 59548df97..49602f8be 100644
--- a/svc/pkg/region/ops/resolve-for-game/Cargo.toml
+++ b/svc/pkg/region/ops/resolve-for-game/Cargo.toml
@@ -10,7 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" }
chirp-client = { path = "../../../../../lib/chirp/client" }
prost = "0.10"

-cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" }
+cluster = { path = "../../../cluster" }
region-list-for-game = { path = "../list-for-game" }

[dependencies.sqlx]
diff --git a/svc/pkg/region/ops/resolve/Cargo.toml b/svc/pkg/region/ops/resolve/Cargo.toml
index 6dbcda2f7..07ac553d3 100644
--- a/svc/pkg/region/ops/resolve/Cargo.toml
+++ b/svc/pkg/region/ops/resolve/Cargo.toml
@@ -10,7 +10,7 @@ rivet-operation = { path = "../../../../../lib/operation/core" }
chirp-client = { path = "../../../../../lib/chirp/client" }
prost = "0.10"

-cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" }
+cluster = { path = "../../../cluster" }
region-list = { path = "../list" }

[dependencies.sqlx]
diff --git a/svc/pkg/tier/ops/list/Cargo.toml b/svc/pkg/tier/ops/list/Cargo.toml
index c335edf0b..fdb64f282 100644
--- a/svc/pkg/tier/ops/list/Cargo.toml
+++ b/svc/pkg/tier/ops/list/Cargo.toml
@@ -9,12 +9,9 @@ rivet-operation = { path = "../../../../../lib/operation/core" }
chirp-client = { path = "../../../../../lib/chirp/client" }
prost = "0.10"
-util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" }
-cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" }
-linode-instance-type-get = { path = "../../../linode/ops/instance-type-get" }
+cluster = { path = "../../../cluster" }
+linode = { path = "../../../linode" }

[dev-dependencies]
chirp-worker = { path = "../../../../../lib/chirp/worker" }
-
-cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" }