diff --git a/docs/libraries/workflow/GOTCHAS.md b/docs/libraries/workflow/GOTCHAS.md index 1d81bca877..ab87e69b8a 100644 --- a/docs/libraries/workflow/GOTCHAS.md +++ b/docs/libraries/workflow/GOTCHAS.md @@ -108,3 +108,17 @@ the internal location. > **\*** Even if they did know about each other via atomics, there is no guarantee of consistency from > `buffer_unordered`. Preemptively incrementing the location ensures consistency regardless of the order or > completion time of the futures. + +## Hashmaps in activity inputs/outputs + +`std::collections::HashMap` does not implement `Hash`. To get around this, use `util::HashableMap`: + +```rust +use util::AsHashableExt; + +ctx + .activity(MyActivityInput { + map: input.map.as_hashable(), + }) + .await?; +``` diff --git a/fern/definition/servers/__package__.yml b/fern/definition/servers/__package__.yml index ea7ed3cb21..40b8beaa19 100644 --- a/fern/definition/servers/__package__.yml +++ b/fern/definition/servers/__package__.yml @@ -91,7 +91,7 @@ types: protocol: commons.PortProtocol internal_port: optional routing: optional - + CreateServerResponse: properties: server: diff --git a/fern/definition/servers/common.yml b/fern/definition/servers/common.yml index 85ff88be18..9410abbbf3 100644 --- a/fern/definition/servers/common.yml +++ b/fern/definition/servers/common.yml @@ -17,6 +17,7 @@ types: lifecycle: Lifecycle created_at: long started_at: optional + connectable_at: optional destroyed_at: optional Runtime: diff --git a/lib/chirp-workflow/macros/src/lib.rs b/lib/chirp-workflow/macros/src/lib.rs index 1bffe673c6..91297bbb47 100644 --- a/lib/chirp-workflow/macros/src/lib.rs +++ b/lib/chirp-workflow/macros/src/lib.rs @@ -49,7 +49,10 @@ pub fn workflow(attr: TokenStream, item: TokenStream) -> TokenStream { input_ident, input_type, output_type, - } = parse_trait_fn(&ctx_ty, "Workflow", &item_fn); + } = match parse_trait_fn(&ctx_ty, "Workflow", &item_fn) { + Ok(x) => x, + Err(err) => return err, + }; let struct_ident = Ident::new(&name, proc_macro2::Span::call_site()); let fn_name = item_fn.sig.ident.to_string(); @@ -97,7 +100,10 @@ pub fn activity(attr: TokenStream, item: TokenStream) -> TokenStream { input_ident, input_type, output_type, - } = parse_trait_fn(&ctx_ty, "Activity", &item_fn); + } = match parse_trait_fn(&ctx_ty, "Activity", &item_fn) { + Ok(x) => x, + Err(err) => return err, + }; let struct_ident = Ident::new(&name, proc_macro2::Span::call_site()); let fn_name = item_fn.sig.ident.to_string(); @@ -165,7 +171,10 @@ pub fn operation(attr: TokenStream, item: TokenStream) -> TokenStream { input_ident, input_type, output_type, - } = parse_trait_fn(&ctx_ty, "Operation", &item_fn); + } = match parse_trait_fn(&ctx_ty, "Operation", &item_fn) { + Ok(x) => x, + Err(err) => return err, + }; let struct_ident = Ident::new(&name, proc_macro2::Span::call_site()); let fn_name = item_fn.sig.ident.to_string(); @@ -207,10 +216,14 @@ struct TraitFnOutput { output_type: syn::Type, } -fn parse_trait_fn(ctx_ty: &syn::Type, trait_name: &str, item_fn: &syn::ItemFn) -> TraitFnOutput { +fn parse_trait_fn( + ctx_ty: &syn::Type, + trait_name: &str, + item_fn: &syn::ItemFn, +) -> Result { // Check if is async if item_fn.sig.asyncness.is_none() { - panic!("the async keyword is missing from the function declaration"); + return Err(error(item_fn.sig.span(), "function must be async")); } let mut arg_names = vec![]; @@ -224,25 +237,30 @@ fn parse_trait_fn(ctx_ty: &syn::Type, trait_name: &str, item_fn: &syn::ItemFn) - arg_names.push(arg_name); 
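// Illustration only (not part of this diff): the rough shape parse_trait_fn expects from an
// annotated function. The names MyActivityInput, MyActivityOutput and the ActivityCtx context
// type are assumptions for this sketch, not taken from the change itself:
//
//     #[activity(MyActivity)]
//     async fn my_activity(
//         ctx: &ActivityCtx,
//         input: &MyActivityInput,
//     ) -> GlobalResult<MyActivityOutput> {
//         Ok(MyActivityOutput {})
//     }
//
// That is: an async fn with exactly two parameters (the context type, then a reference to the
// input type) returning GlobalResult, whose Ok type is extracted as output_type below.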
arg_types.push((*arg.ty).clone()); } - _ => panic!("Unsupported input parameter pattern"), + _ => { + return Err(error(arg.pat.span(), "unsupported input parameter pattern")); + } } } else { - panic!("Unsupported input parameter type"); + return Err(error(input.span(), "unsupported input parameter type")); } } if arg_types.len() != 2 || &arg_types[0] != ctx_ty { - panic!( - "{} function must have exactly two parameters: ctx: {:?} and input: YourInputType", - trait_name, - ctx_ty.to_token_stream().to_string(), - ); + return Err(error( + item_fn.sig.span(), + &format!( + "{} function must have exactly two parameters: ctx: {:?} and input: YourInputType", + trait_name, + ctx_ty.to_token_stream().to_string() + ), + )); } let input_type = if let syn::Type::Reference(syn::TypeReference { elem, .. }) = &arg_types[1] { elem.clone() } else { - panic!("Input type must be a reference"); + return Err(error(arg_types[1].span(), "input type must be a reference")); }; let output_type = match &item_fn.sig.output { @@ -255,31 +273,49 @@ fn parse_trait_fn(ctx_ty: &syn::Type, trait_name: &str, item_fn: &syn::ItemFn) - if let Some(GenericArgument::Type(ty)) = args.args.first() { ty.clone() } else { - panic!("Unsupported Result type"); + return Err(error(args.span(), "unsupported Result type")); } } - _ => panic!("Unsupported Result type"), + _ => { + return Err(error(segment.arguments.span(), "unsupported Result type")) + } } } else { - panic!("{} function must return a GlobalResult type", trait_name,); + return Err(error( + path.span(), + &format!("{} function must return a GlobalResult type", trait_name), + )); } } - _ => panic!("Unsupported output type"), + _ => return Err(error(ty.span(), "unsupported output type")), }, - _ => panic!("{} function must have a return type", trait_name), + _ => { + return Err(error( + item_fn.sig.output.span(), + &format!("{} function must have a return type", trait_name), + )); + } }; - TraitFnOutput { + Ok(TraitFnOutput { ctx_ident: Ident::new(&arg_names[0], proc_macro2::Span::call_site()), input_ident: Ident::new(&arg_names[1], proc_macro2::Span::call_site()), input_type, output_type, - } + }) } #[proc_macro_attribute] pub fn signal(attr: TokenStream, item: TokenStream) -> TokenStream { let name = parse_macro_input!(attr as LitStr); + if !name + .value() + .chars() + .all(|c| c.is_alphanumeric() || c == '_') + { + return error(name.span(), "invalid signal name, must be [A-Za-z_]"); + } + let item_struct = parse_macro_input!(item as ItemStruct); let struct_ident = &item_struct.ident; @@ -323,6 +359,14 @@ pub fn signal(attr: TokenStream, item: TokenStream) -> TokenStream { #[proc_macro_attribute] pub fn message(attr: TokenStream, item: TokenStream) -> TokenStream { let name = parse_macro_input!(attr as LitStr); + if !name + .value() + .chars() + .all(|c| c.is_alphanumeric() || c == '_') + { + return error(name.span(), "invalid message name, must be [A-Za-z_]"); + } + let item_struct = parse_macro_input!(item as ItemStruct); // If also a signal, don't derive serde traits @@ -421,7 +465,7 @@ pub fn workflow_test(_attr: TokenStream, item: TokenStream) -> TokenStream { result.into() } -fn error(span: proc_macro2::Span, msg: &str) -> proc_macro::TokenStream { +fn error(span: proc_macro2::Span, msg: &str) -> TokenStream { syn::Error::new(span, msg).to_compile_error().into() } diff --git a/lib/convert/src/impls/ds.rs b/lib/convert/src/impls/ds.rs deleted file mode 100644 index fe15e38c06..0000000000 --- a/lib/convert/src/impls/ds.rs +++ /dev/null @@ -1,160 +0,0 @@ -use 
std::collections::HashMap; - -use proto::backend; -use rivet_api::models; -use rivet_operation::prelude::*; -use util::timestamp; - -use crate::{ApiFrom, ApiInto, ApiTryFrom, ApiTryInto}; -use serde_json::{json, to_value}; - -impl ApiTryFrom for models::ServersServer { - type Error = GlobalError; - fn api_try_from(value: backend::ds::Server) -> GlobalResult { - Ok(models::ServersServer { - id: unwrap!(value.server_id).as_uuid(), - environment: unwrap!(value.env_id).as_uuid(), - datacenter: unwrap!(value.datacenter_id).as_uuid(), - cluster: unwrap!(value.cluster_id).as_uuid(), - created_at: value.create_ts, - started_at: value.connectable_ts, - destroyed_at: value.destroy_ts, - tags: Some(to_value(value.tags).unwrap()), - runtime: Box::new(models::ServersRuntime { - build: unwrap!(value.image_id).as_uuid(), - arguments: Some(value.args), - environment: Some(value.environment), - }), - network: Box::new(models::ServersNetwork { - mode: Some( - unwrap!(backend::ds::NetworkMode::from_i32(value.network_mode)).api_into(), - ), - ports: value - .network_ports - .into_iter() - .map(|(s, p)| Ok((s, p.api_try_into()?))) - .collect::>>()?, - }), - lifecycle: Box::new(models::ServersLifecycle { - kill_timeout: Some(value.kill_timeout_ms), - }), - resources: Box::new(unwrap!(value.resources).api_into()), - }) - } -} - -impl ApiFrom for backend::ds::ServerResources { - fn api_from(value: models::ServersResources) -> backend::ds::ServerResources { - backend::ds::ServerResources { - cpu_millicores: value.cpu, - memory_mib: value.memory, - } - } -} - -impl ApiFrom for models::ServersResources { - fn api_from(value: backend::ds::ServerResources) -> models::ServersResources { - models::ServersResources { - cpu: value.cpu_millicores, - memory: value.memory_mib, - } - } -} - -impl ApiFrom for backend::ds::NetworkMode { - fn api_from(value: models::ServersNetworkMode) -> backend::ds::NetworkMode { - match value { - models::ServersNetworkMode::Bridge => backend::ds::NetworkMode::Bridge, - models::ServersNetworkMode::Host => backend::ds::NetworkMode::Host, - } - } -} - -impl ApiFrom for models::ServersNetworkMode { - fn api_from(value: backend::ds::NetworkMode) -> models::ServersNetworkMode { - match value { - backend::ds::NetworkMode::Bridge => models::ServersNetworkMode::Bridge, - backend::ds::NetworkMode::Host => models::ServersNetworkMode::Host, - } - } -} - -impl ApiTryFrom for models::ServersPort { - type Error = GlobalError; - - fn api_try_from(value: backend::ds::Port) -> GlobalResult { - let protocol = match unwrap!(&value.routing) { - backend::ds::port::Routing::GameGuard(x) => { - unwrap!(backend::ds::GameGuardProtocol::from_i32(x.protocol)).api_into() - } - backend::ds::port::Routing::Host(x) => { - unwrap!(backend::ds::HostProtocol::from_i32(x.protocol)).api_into() - } - }; - - let routing = models::ServersPortRouting { - game_guard: if let Some(backend::ds::port::Routing::GameGuard(_)) = &value.routing { - Some(json!({})) - } else { - None - }, - host: if let Some(backend::ds::port::Routing::Host(_)) = &value.routing { - Some(json!({})) - } else { - None - }, - }; - - Ok(models::ServersPort { - protocol, - internal_port: value.internal_port, - public_hostname: value.public_hostname, - public_port: value.public_port, - routing: Box::new(routing), - }) - } -} - -impl ApiFrom for backend::ds::GameGuardProtocol { - fn api_from(value: models::ServersPortProtocol) -> backend::ds::GameGuardProtocol { - match value { - models::ServersPortProtocol::Udp => backend::ds::GameGuardProtocol::Udp, - 
models::ServersPortProtocol::Tcp => backend::ds::GameGuardProtocol::Tcp, - models::ServersPortProtocol::Http => backend::ds::GameGuardProtocol::Http, - models::ServersPortProtocol::Https => backend::ds::GameGuardProtocol::Https, - models::ServersPortProtocol::TcpTls => backend::ds::GameGuardProtocol::TcpTls, - } - } -} - -impl ApiFrom for models::ServersPortProtocol { - fn api_from(value: backend::ds::GameGuardProtocol) -> models::ServersPortProtocol { - match value { - backend::ds::GameGuardProtocol::Udp => models::ServersPortProtocol::Udp, - backend::ds::GameGuardProtocol::Tcp => models::ServersPortProtocol::Tcp, - backend::ds::GameGuardProtocol::Http => models::ServersPortProtocol::Http, - backend::ds::GameGuardProtocol::Https => models::ServersPortProtocol::Https, - backend::ds::GameGuardProtocol::TcpTls => models::ServersPortProtocol::TcpTls, - } - } -} - -impl ApiTryFrom for backend::ds::HostProtocol { - type Error = GlobalError; - fn api_try_from(value: models::ServersPortProtocol) -> GlobalResult { - Ok(match value { - models::ServersPortProtocol::Udp => backend::ds::HostProtocol::HostUdp, - models::ServersPortProtocol::Tcp => backend::ds::HostProtocol::HostTcp, - _ => bail_with!(SERVERS_UNSUPPORTED_HOST_PROTOCOL), - }) - } -} - -impl ApiFrom for models::ServersPortProtocol { - fn api_from(value: backend::ds::HostProtocol) -> models::ServersPortProtocol { - match value { - backend::ds::HostProtocol::HostUdp => models::ServersPortProtocol::Udp, - backend::ds::HostProtocol::HostTcp => models::ServersPortProtocol::Tcp, - } - } -} diff --git a/lib/convert/src/impls/mod.rs b/lib/convert/src/impls/mod.rs index f2fc7172c6..9c981bf62c 100644 --- a/lib/convert/src/impls/mod.rs +++ b/lib/convert/src/impls/mod.rs @@ -12,7 +12,6 @@ pub mod group; pub mod identity; pub mod kv; pub mod portal; -pub mod ds; pub mod user; impl ApiFrom for new_models::ValidationError { diff --git a/lib/util/core/Cargo.toml b/lib/util/core/Cargo.toml index a2a089a509..163cd4f187 100644 --- a/lib/util/core/Cargo.toml +++ b/lib/util/core/Cargo.toml @@ -8,7 +8,6 @@ license = "Apache-2.0" [features] default = ["macros"] macros = [] -serde = [] [dependencies] async-trait = "0.1" @@ -17,6 +16,7 @@ chrono = "0.4" formatted-error = { path = "../../formatted-error", optional = true } futures-util = "0.3" global-error = { path = "../../global-error" } +indexmap = { version = "2.0", features = ["serde"] } ipnet = { version = "2.7", features = ["serde"] } lazy_static = "1.4" rand = "0.8" @@ -25,7 +25,7 @@ reqwest = "0.11" rivet-util-env = { path = "../env" } rivet-util-macros = { path = "../macros" } serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" +serde_json = { version = "1.0" } thiserror = "1.0" tokio = { version = "1.29", default-features = false, features = [ "time", @@ -37,6 +37,3 @@ tokio = { version = "1.29", default-features = false, features = [ ] } types-proto = { path = "../../types-proto/core" } uuid = { version = "1", features = ["v4", "serde"] } - -aws-smithy-client = "^0.41.0" -aws-smithy-types = "^0.41.0" diff --git a/lib/util/core/src/lib.rs b/lib/util/core/src/lib.rs index d0c6dd4fa4..ff56f91605 100644 --- a/lib/util/core/src/lib.rs +++ b/lib/util/core/src/lib.rs @@ -1,6 +1,15 @@ +use std::{ + collections::HashMap, + fmt, + hash::{Hash, Hasher}, + ops::Deref, +}; + +use indexmap::IndexMap; use rand::Rng; pub use rivet_util_env as env; pub use rivet_util_macros as macros; +use serde::{Deserialize, Serialize}; use tokio::time::{Duration, Instant}; pub mod billing; @@ -29,64 +38,6 @@ pub 
mod watch { pub const DEFAULT_TIMEOUT: u64 = 40 * 1000; } -#[cfg(feature = "serde")] -pub mod serde { - use aws_smithy_types::Document; - use serde_json::Value; - - #[derive(thiserror::Error, Debug)] - #[error("Number could not be decoded by serde_json")] - pub struct NumberDecodeError; - - pub fn as_serde(value: &Document) -> Result { - let val = match value { - Document::Object(map) => Value::Object( - map.iter() - .map(|(k, v)| Ok((k.clone(), as_serde(v)?))) - .collect::>()?, - ), - Document::Array(arr) => { - Value::Array(arr.iter().map(as_serde).collect::>()?) - } - Document::Number(n) => match n { - aws_smithy_types::Number::PosInt(n) => Value::Number(Into::into(*n)), - aws_smithy_types::Number::NegInt(n) => Value::Number(Into::into(*n)), - aws_smithy_types::Number::Float(n) => { - Value::Number(serde_json::Number::from_f64(*n).ok_or(NumberDecodeError)?) - } - }, - Document::String(s) => Value::String(s.clone()), - Document::Bool(b) => Value::Bool(*b), - Document::Null => Value::Null, - }; - - Ok(val) - } - - pub fn as_smithy(value: Value) -> Document { - match value { - Value::Object(map) => { - Document::Object(map.into_iter().map(|(k, v)| (k, as_smithy(v))).collect()) - } - Value::Array(arr) => Document::Array(arr.into_iter().map(as_smithy).collect()), - Value::Number(n) => { - if let Some(n) = n.as_i64() { - Document::Number(aws_smithy_types::Number::NegInt(n)) - } else if let Some(n) = n.as_u64() { - Document::Number(aws_smithy_types::Number::PosInt(n)) - } else if let Some(n) = n.as_f64() { - Document::Number(aws_smithy_types::Number::Float(n)) - } else { - unreachable!() - } - } - Value::String(s) => Document::String(s), - Value::Bool(b) => Document::Bool(b), - Value::Null => Document::Null, - } - } -} - #[cfg(feature = "macros")] #[macro_export] macro_rules! err_path { @@ -150,7 +101,7 @@ impl Backoff { sleep_until: Instant::now(), } } - + pub fn new_at( max_exponent: usize, max_retries: Option, @@ -219,6 +170,60 @@ impl Default for Backoff { } } +/// Used in workflow activity inputs/outputs. Using this over BTreeMap is preferred because this does not +/// reorder keys, providing faster insert and lookup. +#[derive(Serialize, Deserialize)] +pub struct HashableMap { + map: IndexMap, +} + +impl Deref for HashableMap { + type Target = IndexMap; + + fn deref(&self) -> &Self::Target { + &self.map + } +} + +impl Hash for HashableMap { + fn hash(&self, state: &mut H) { + let mut kv = Vec::from_iter(&self.map); + kv.sort_unstable_by(|a, b| a.0.cmp(b.0)); + kv.hash(state); + } +} + +impl fmt::Debug for HashableMap { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + f.debug_map().entries(self.iter()).finish() + } +} + +impl Clone for HashableMap { + fn clone(&self) -> Self { + HashableMap { + map: self.map.clone(), + } + } + + fn clone_from(&mut self, other: &Self) { + self.map.clone_from(&other.map); + } +} + +pub trait AsHashableExt { + /// Converts the iterable to a `HashableMap` via cloning. 
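// Usage sketch (assumed, not part of this change): because the Hash impl above sorts entries
// by key before hashing, two HashableMaps with equal contents hash the same regardless of
// insertion order, which is what lets a workflow activity input derive Hash:
//
//     #[derive(Debug, Serialize, Deserialize, Hash)]
//     struct MyActivityInput {
//         map: util::HashableMap<String, String>,
//     }
//
//     // `tags` is assumed here to be a std::collections::HashMap<String, String>
//     let input = MyActivityInput { map: tags.as_hashable() };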
+ fn as_hashable(&self) -> HashableMap; +} + +impl AsHashableExt for HashMap { + fn as_hashable(&self) -> HashableMap { + HashableMap { + map: self.iter().map(|(k, v)| (k.clone(), v.clone())).collect(), + } + } +} + #[cfg(test)] mod tests { use std::time::Instant; diff --git a/sdks/full/go/servers/types.go b/sdks/full/go/servers/types.go index 8d51bb788c..2ef73615a0 100644 --- a/sdks/full/go/servers/types.go +++ b/sdks/full/go/servers/types.go @@ -431,18 +431,19 @@ func (r *Runtime) String() string { } type Server struct { - Id uuid.UUID `json:"id"` - Environment uuid.UUID `json:"environment"` - Datacenter uuid.UUID `json:"datacenter"` - Cluster uuid.UUID `json:"cluster"` - Tags interface{} `json:"tags,omitempty"` - Runtime *Runtime `json:"runtime,omitempty"` - Network *Network `json:"network,omitempty"` - Resources *Resources `json:"resources,omitempty"` - Lifecycle *Lifecycle `json:"lifecycle,omitempty"` - CreatedAt int64 `json:"created_at"` - StartedAt *int64 `json:"started_at,omitempty"` - DestroyedAt *int64 `json:"destroyed_at,omitempty"` + Id uuid.UUID `json:"id"` + Environment uuid.UUID `json:"environment"` + Datacenter uuid.UUID `json:"datacenter"` + Cluster uuid.UUID `json:"cluster"` + Tags interface{} `json:"tags,omitempty"` + Runtime *Runtime `json:"runtime,omitempty"` + Network *Network `json:"network,omitempty"` + Resources *Resources `json:"resources,omitempty"` + Lifecycle *Lifecycle `json:"lifecycle,omitempty"` + CreatedAt int64 `json:"created_at"` + StartedAt *int64 `json:"started_at,omitempty"` + ConnectableAt *int64 `json:"connectable_at,omitempty"` + DestroyedAt *int64 `json:"destroyed_at,omitempty"` _rawJSON json.RawMessage } diff --git a/sdks/full/openapi/openapi.yml b/sdks/full/openapi/openapi.yml index 190ba5f285..e1c96cca97 100644 --- a/sdks/full/openapi/openapi.yml +++ b/sdks/full/openapi/openapi.yml @@ -14426,6 +14426,9 @@ components: started_at: type: integer format: int64 + connectable_at: + type: integer + format: int64 destroyed_at: type: integer format: int64 diff --git a/sdks/full/openapi_compat/openapi.yml b/sdks/full/openapi_compat/openapi.yml index 32370a6cec..b2266ab8bc 100644 --- a/sdks/full/openapi_compat/openapi.yml +++ b/sdks/full/openapi_compat/openapi.yml @@ -4538,20 +4538,6 @@ components: required: - build type: object - ServersDatacenter: - properties: - id: - format: uuid - type: string - name: - type: string - slug: - type: string - required: - - id - - slug - - name - type: object ServersDestroyServerResponse: properties: {} type: object @@ -4736,6 +4722,9 @@ components: cluster: format: uuid type: string + connectable_at: + format: int64 + type: integer created_at: format: int64 type: integer diff --git a/sdks/full/rust-cli/docs/ServersServer.md b/sdks/full/rust-cli/docs/ServersServer.md index eeeba4fff2..b78be0ce99 100644 --- a/sdks/full/rust-cli/docs/ServersServer.md +++ b/sdks/full/rust-cli/docs/ServersServer.md @@ -5,6 +5,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **cluster** | [**uuid::Uuid**](uuid::Uuid.md) | | +**connectable_at** | Option<**i64**> | | [optional] **created_at** | **i64** | | **datacenter** | [**uuid::Uuid**](uuid::Uuid.md) | | **destroyed_at** | Option<**i64**> | | [optional] diff --git a/sdks/full/rust-cli/src/models/servers_server.rs b/sdks/full/rust-cli/src/models/servers_server.rs index 6f0faf2c77..249a878178 100644 --- a/sdks/full/rust-cli/src/models/servers_server.rs +++ b/sdks/full/rust-cli/src/models/servers_server.rs @@ -15,6 +15,8 @@ pub struct 
ServersServer { #[serde(rename = "cluster")] pub cluster: uuid::Uuid, + #[serde(rename = "connectable_at", skip_serializing_if = "Option::is_none")] + pub connectable_at: Option, #[serde(rename = "created_at")] pub created_at: i64, #[serde(rename = "datacenter")] @@ -43,6 +45,7 @@ impl ServersServer { pub fn new(cluster: uuid::Uuid, created_at: i64, datacenter: uuid::Uuid, environment: uuid::Uuid, id: uuid::Uuid, lifecycle: crate::models::ServersLifecycle, network: crate::models::ServersNetwork, resources: crate::models::ServersResources, runtime: crate::models::ServersRuntime, tags: Option) -> ServersServer { ServersServer { cluster, + connectable_at: None, created_at, datacenter, destroyed_at: None, diff --git a/sdks/full/rust/docs/ServersServer.md b/sdks/full/rust/docs/ServersServer.md index eeeba4fff2..b78be0ce99 100644 --- a/sdks/full/rust/docs/ServersServer.md +++ b/sdks/full/rust/docs/ServersServer.md @@ -5,6 +5,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **cluster** | [**uuid::Uuid**](uuid::Uuid.md) | | +**connectable_at** | Option<**i64**> | | [optional] **created_at** | **i64** | | **datacenter** | [**uuid::Uuid**](uuid::Uuid.md) | | **destroyed_at** | Option<**i64**> | | [optional] diff --git a/sdks/full/rust/src/models/servers_server.rs b/sdks/full/rust/src/models/servers_server.rs index 6f0faf2c77..249a878178 100644 --- a/sdks/full/rust/src/models/servers_server.rs +++ b/sdks/full/rust/src/models/servers_server.rs @@ -15,6 +15,8 @@ pub struct ServersServer { #[serde(rename = "cluster")] pub cluster: uuid::Uuid, + #[serde(rename = "connectable_at", skip_serializing_if = "Option::is_none")] + pub connectable_at: Option, #[serde(rename = "created_at")] pub created_at: i64, #[serde(rename = "datacenter")] @@ -43,6 +45,7 @@ impl ServersServer { pub fn new(cluster: uuid::Uuid, created_at: i64, datacenter: uuid::Uuid, environment: uuid::Uuid, id: uuid::Uuid, lifecycle: crate::models::ServersLifecycle, network: crate::models::ServersNetwork, resources: crate::models::ServersResources, runtime: crate::models::ServersRuntime, tags: Option) -> ServersServer { ServersServer { cluster, + connectable_at: None, created_at, datacenter, destroyed_at: None, diff --git a/sdks/full/typescript/archive.tgz b/sdks/full/typescript/archive.tgz index 1da1d389ce..77e5b7e156 100644 --- a/sdks/full/typescript/archive.tgz +++ b/sdks/full/typescript/archive.tgz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:9389c1649c6f50b08c9ceecb6d9639b0b42725d359e4622afadb74e50458066d -size 550005 +oid sha256:82df2a905701c5bcb5fb8e50c58ea44b77cfe8092a4f380a557de09ecfdd2e81 +size 541152 diff --git a/sdks/runtime/typescript/archive.tgz b/sdks/runtime/typescript/archive.tgz index 757461da8e..58389ef567 100644 --- a/sdks/runtime/typescript/archive.tgz +++ b/sdks/runtime/typescript/archive.tgz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:c6b9a2ec5104ef4300ab8c2010a8cb334592269ac9a2e183279771aa0a36a5be -size 282615 +oid sha256:51ae717b57567145b72c5466391cb5f491828b9e3067b9760efb5d3f88c42b7b +size 282637 diff --git a/svc/Cargo.lock b/svc/Cargo.lock index 2795dc455c..aae1fade9a 100644 --- a/svc/Cargo.lock +++ b/svc/Cargo.lock @@ -344,11 +344,7 @@ dependencies = [ "cloud-namespace-token-development-create", "cloud-namespace-token-public-create", "cluster", - "ds-log-read", - "ds-server-create", - "ds-server-delete", - "ds-server-get", - "ds-server-list-for-env", + "ds", "faker-build", "faker-game", "faker-game-namespace", @@ 
-869,11 +865,9 @@ dependencies = [ "cloud-namespace-token-development-create", "cloud-namespace-token-public-create", "cluster", + "ds", + "ds-log-export", "ds-log-read", - "ds-server-create", - "ds-server-delete", - "ds-server-get", - "ds-server-list-for-env", "faker-build", "faker-game", "faker-game-namespace", @@ -973,6 +967,7 @@ dependencies = [ "chirp-client", "chrono", "cluster", + "ds", "faker-cdn-site", "faker-game", "faker-game-namespace", @@ -994,12 +989,11 @@ dependencies = [ "rivet-pools", "rivet-route", "rivet-util-cdn", - "rivet-util-ds", "rivet-util-job", "s3-util", "serde", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "thiserror", "tokio", "tracing", @@ -1661,7 +1655,7 @@ dependencies = [ "s3-util", "serde", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "ssh2", "strum 0.26.3", "token-create", @@ -1683,7 +1677,7 @@ dependencies = [ "reqwest 0.11.27", "rivet-operation", "rivet-util-build", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", "upload-prepare", @@ -1702,7 +1696,7 @@ dependencies = [ "rivet-connection", "rivet-operation", "rivet-pools", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -1722,7 +1716,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1735,7 +1729,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1748,7 +1742,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1821,7 +1815,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-captcha", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1881,7 +1875,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1893,7 +1887,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1904,7 +1898,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1918,7 +1912,7 @@ dependencies = [ "game-resolve-namespace-id", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1933,7 +1927,7 @@ dependencies = [ "game-resolve-namespace-id", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1947,7 +1941,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1960,7 +1954,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1972,7 +1966,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1984,7 +1978,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -1998,7 +1992,7 @@ dependencies = [ "prost 0.10.4", "reqwest 0.11.27", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", "upload-prepare", @@ -2014,7 +2008,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2027,7 +2021,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2040,7 +2034,7 @@ dependencies = [ "faker-game-version", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2069,7 +2063,7 @@ dependencies = [ "itertools 0.10.5", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "unzip-n", ] @@ -2108,7 +2102,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2119,7 +2113,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2130,7 +2124,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2283,7 +2277,7 @@ dependencies = [ "rivet-util", "serde", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "thiserror", "tokio", "tokio-util 0.7.11", @@ -2402,7 +2396,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2414,7 +2408,7 @@ dependencies = [ "cloud-game-config-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2426,7 +2420,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -2447,7 +2441,7 @@ dependencies = [ "mm-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2466,7 +2460,7 @@ dependencies = [ "mm-config-namespace-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2480,7 +2474,7 @@ dependencies = [ "game-token-development-validate", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -2495,7 +2489,7 @@ dependencies = [ "prost 0.10.4", "rivet-claims", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -2516,7 +2510,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2542,7 +2536,7 @@ dependencies = [ "region-list", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -2601,8 +2595,9 @@ dependencies = [ "rivet-runtime", "s3-util", "serde", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "ssh2", + "strum 0.24.1", "token-create", "tokio", "trust-dns-resolver", @@ -2619,7 +2614,7 @@ dependencies = [ "rivet-health-checks", "rivet-metrics", "rivet-runtime", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -2657,7 +2652,7 @@ dependencies = [ "rivet-health-checks", "rivet-metrics", "rivet-runtime", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -2675,7 +2670,7 @@ dependencies = [ "rivet-health-checks", "rivet-metrics", "rivet-runtime", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-subscriber", @@ -2695,7 +2690,7 @@ dependencies = [ "rivet-operation", "rivet-runtime", "serde", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", "tokio", "tracing", @@ -2965,7 +2960,7 @@ dependencies = [ "prost 0.10.4", "reqwest 0.11.27", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-prepare", ] @@ -2978,7 +2973,7 @@ dependencies = [ "prost 0.10.4", "reqwest 0.11.27", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", "upload-prepare", @@ -3049,7 +3044,7 @@ dependencies = [ "rand", "rivet-operation", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3092,34 +3087,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" [[package]] -name = "ds-log-export" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "reqwest 0.11.27", - "rivet-operation", - "serde", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", - 
"upload-complete", - "upload-prepare", -] - -[[package]] -name = "ds-log-read" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "chrono", - "clickhouse", - "prost 0.10.4", - "rivet-operation", - "serde", -] - -[[package]] -name = "ds-server-create" +name = "ds" version = "0.0.1" dependencies = [ "bit-vec", @@ -3127,6 +3095,7 @@ dependencies = [ "chirp-client", "chirp-worker", "chirp-workflow", + "chrono", "cjson", "cluster", "faker-build", @@ -3138,25 +3107,31 @@ dependencies = [ "hex", "http 0.2.12", "ip-info", + "job-run-get", "lazy_static", + "mm-config-version-get", + "mm-lobby-get", "mm-lobby-list-for-user-id", "nomad-util", "nomad_client", "rand", "regex", "region-get", - "reqwest 0.11.27", + "reqwest 0.12.5", "rivet-api", - "rivet-connection", + "rivet-convert", + "rivet-health-checks", + "rivet-metrics", "rivet-operation", + "rivet-runtime", "rivet-util", "rivet-util-build", - "rivet-util-ds", + "rivet-util-job", "s3-util", "serde", "serde_json", "sha2", - "sqlx 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", + "sqlx", "strum 0.24.1", "tier-list", "token-create", @@ -3167,72 +3142,30 @@ dependencies = [ ] [[package]] -name = "ds-server-delete" +name = "ds-log-export" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", - "nomad-util", - "nomad_client", - "region-get", "reqwest 0.11.27", "rivet-operation", - "rivet-util-job", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", -] - -[[package]] -name = "ds-server-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "chirp-workflow", - "cluster", - "ds-server-create", - "faker-build", - "faker-game", - "faker-region", - "rivet-operation", - "rivet-util-ds", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", -] - -[[package]] -name = "ds-server-list-for-env" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "rivet-operation", - "rivet-util-ds", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "serde", + "sqlx", + "upload-complete", + "upload-prepare", ] [[package]] -name = "ds-worker" +name = "ds-log-read" version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", "chrono", - "ds-server-get", - "job-run-get", - "lazy_static", - "mm-config-version-get", - "mm-lobby-get", - "nomad-util", - "nomad_client", - "region-get", - "reqwest 0.12.5", - "rivet-api", - "rivet-convert", - "rivet-health-checks", - "rivet-metrics", - "rivet-runtime", - "rivet-util-job", + "clickhouse", + "prost 0.10.4", + "rivet-operation", "serde", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3338,7 +3271,7 @@ dependencies = [ "email-verification-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3358,7 +3291,7 @@ dependencies = [ "reqwest 0.11.27", "rivet-operation", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-prepare", ] @@ -3429,7 +3362,7 @@ dependencies = [ "rivet-operation", "serde", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3597,7 +3530,7 @@ dependencies = [ "region-get", "region-list", "rivet-operation", - "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3845,7 +3778,7 @@ dependencies = [ "prost 0.10.4", "reqwest 0.11.27", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", "upload-prepare", @@ -3864,7 +3797,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-team", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "team-get", ] @@ -3878,7 +3811,7 @@ dependencies = [ "game-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-file-list", "upload-get", ] @@ -3891,7 +3824,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3903,7 +3836,7 @@ dependencies = [ "faker-game", "faker-team", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3916,7 +3849,7 @@ dependencies = [ "prost 0.10.4", "reqwest 0.11.27", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", "upload-prepare", @@ -3933,7 +3866,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3947,7 +3880,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3961,7 +3894,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -3975,7 +3908,7 @@ dependencies = [ "game-namespace-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4017,7 +3950,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4034,7 +3967,7 @@ dependencies = [ "prost 0.10.4", "region-list-for-game", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4046,7 +3979,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4059,7 +3992,7 @@ dependencies = [ "game-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4073,7 +4006,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4097,7 +4030,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-game-user", 
- "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -4111,7 +4044,7 @@ dependencies = [ "game-user-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4126,7 +4059,7 @@ dependencies = [ "game-user-link-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -4141,7 +4074,7 @@ dependencies = [ "game-user-link-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4154,7 +4087,7 @@ dependencies = [ "game-user-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4170,7 +4103,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4199,7 +4132,7 @@ dependencies = [ "rivet-metrics", "rivet-runtime", "rivet-util-game-user", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", "token-exchange", "token-revoke", @@ -4227,7 +4160,7 @@ dependencies = [ "game-version-list", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4240,7 +4173,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4253,7 +4186,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4822,7 +4755,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4834,7 +4767,7 @@ dependencies = [ "identity-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4847,7 +4780,7 @@ dependencies = [ "faker-game-version", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4871,7 +4804,7 @@ dependencies = [ "identity-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -4932,6 +4865,7 @@ checksum = "de3fc2e30ba82dd1b3911c8de1ffc143c74a914a14e99514d7637e3099df5ea0" dependencies = [ "equivalent", "hashbrown 0.14.5", + "serde", ] [[package]] @@ -4976,7 +4910,7 @@ dependencies = [ "rivet-operation", "serde", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5063,7 +4997,7 @@ dependencies = [ "rivet-pools", "rivet-runtime", "serde_json", - "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -5114,7 +5048,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-job", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5162,7 +5096,7 @@ dependencies = [ "rustls 0.20.9", "serde", "sha2", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", "webpki 0.22.4", "webpki-roots 0.22.6", @@ -5199,7 +5133,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5211,7 +5145,7 @@ dependencies = [ "kv-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5224,7 +5158,7 @@ dependencies = [ "faker-game-version", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5248,7 +5182,7 @@ dependencies = [ "kv-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5259,7 +5193,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5361,7 +5295,7 @@ dependencies = [ "reqwest 0.11.27", "serde", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "ssh-key", ] @@ -5381,7 +5315,7 @@ dependencies = [ "rivet-runtime", "serde", "serde_json", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -5490,7 +5424,7 @@ dependencies = [ "rivet-metrics", "rivet-operation", "rivet-runtime", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -5667,7 +5601,7 @@ dependencies = [ "chirp-client", "chirp-worker", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5677,7 +5611,7 @@ dependencies = [ "chirp-client", "chirp-worker", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5691,7 +5625,7 @@ dependencies = [ "mm-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5707,7 +5641,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5721,7 +5655,7 @@ dependencies = [ "mm-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5735,7 +5669,7 @@ dependencies = [ "mm-config-namespace-get", "prost 0.10.4", "rivet-operation", - 
"sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5756,7 +5690,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5768,7 +5702,7 @@ dependencies = [ "mm-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5785,7 +5719,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5809,7 +5743,7 @@ dependencies = [ "rivet-util-job", "rivet-util-mm", "s3-util", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tier-list", "upload-get", ] @@ -5827,7 +5761,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5862,7 +5796,7 @@ dependencies = [ "rivet-pools", "rivet-runtime", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -5911,7 +5845,7 @@ dependencies = [ "chirp-worker", "faker-mm-lobby", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5924,7 +5858,7 @@ dependencies = [ "faker-mm-lobby", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5937,7 +5871,7 @@ dependencies = [ "faker-mm-lobby-row", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5959,7 +5893,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5972,7 +5906,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5984,7 +5918,7 @@ dependencies = [ "faker-mm-lobby", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -5997,7 +5931,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -6017,7 +5951,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -6030,7 +5964,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -6043,7 +5977,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -6055,7 +5989,7 @@ dependencies = [ "faker-mm-lobby", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -6111,7 +6045,7 @@ dependencies = [ "rivet-util-mm", "s3-util", "serde", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "team-get", "tier-list", "token-create", @@ -6129,7 +6063,7 @@ dependencies = [ "cf-custom-hostname-worker", "chirp-client", "cloud-worker", - "ds-worker", + "ds", "external-worker", "game-user-worker", "job-log-worker", @@ -7155,7 +7089,7 @@ dependencies = [ "faker-region", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -7169,7 +7103,7 @@ dependencies = [ "faker-region", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -7183,7 +7117,7 @@ dependencies = [ "faker-region", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -7198,7 +7132,7 @@ dependencies = [ "region-get", "region-list", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -7214,7 +7148,7 @@ dependencies = [ "region-get", "region-list", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -7230,7 +7164,7 @@ dependencies = [ "region-get", "region-list-for-game", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -7702,7 +7636,7 @@ dependencies = [ "rand", "redis", "rivet-metrics", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "thiserror", "tokio", "tokio-util 0.7.11", @@ -7786,12 +7720,11 @@ name = "rivet-util" version = "0.1.0" dependencies = [ "async-trait", - "aws-smithy-client 0.41.0", - "aws-smithy-types 0.41.0", "bcrypt", "chrono", "futures-util", "global-error", + "indexmap 2.3.0", "ipnet", "lazy_static", "rand", @@ -7826,26 +7759,6 @@ dependencies = [ name = "rivet-util-cdn" version = "0.1.0" -[[package]] -name = "rivet-util-ds" -version = "0.1.0" -dependencies = [ - "bit-vec", - "chirp-client", - "heck 0.3.3", - "http 0.2.12", - "ip-info", - "mm-lobby-list-for-user-id", - "region-get", - "rivet-operation", - "rivet-util", - "serde", - "serde_json", - "strum 0.24.1", - "user-identity-get", - "uuid", -] - [[package]] name = "rivet-util-env" version = "0.1.0" @@ -8269,6 +8182,7 @@ version = "1.0.122" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "784b6203951c57ff748476b126ccb5e8e2959a5c19e5c617ab1956be3dbc68da" dependencies = [ + "indexmap 2.3.0", "itoa 1.0.11", "memchr", "ryu", @@ -8512,63 +8426,18 @@ dependencies = [ "unicode_categories", ] -[[package]] -name = "sqlx" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9a2ccff1a000a5a59cd33da541d9f2fdcd9e6e8229cc200565942bff36d0aaa" -dependencies = [ - "sqlx-core 0.7.4 
(registry+https://github.com/rust-lang/crates.io-index)", -] - [[package]] name = "sqlx" version = "0.7.4" source = "git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b#08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" dependencies = [ - "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx-core", "sqlx-macros", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", ] -[[package]] -name = "sqlx-core" -version = "0.7.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24ba59a9342a3d9bab6c56c118be528b27c9b60e490080e9711a04dccac83ef6" -dependencies = [ - "ahash 0.8.11", - "atoi", - "byteorder", - "bytes", - "crc", - "crossbeam-queue", - "either", - "event-listener", - "futures-channel", - "futures-core", - "futures-intrusive", - "futures-io", - "futures-util", - "hashlink", - "hex", - "indexmap 2.3.0", - "log", - "memchr", - "once_cell", - "paste", - "percent-encoding", - "serde", - "sha2", - "smallvec", - "sqlformat", - "thiserror", - "tracing", - "url", -] - [[package]] name = "sqlx-core" version = "0.7.4" @@ -8619,7 +8488,7 @@ source = "git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72c dependencies = [ "proc-macro2", "quote", - "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx-core", "sqlx-macros-core", "syn 1.0.109", ] @@ -8639,7 +8508,7 @@ dependencies = [ "serde", "serde_json", "sha2", - "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx-core", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", @@ -8683,7 +8552,7 @@ dependencies = [ "sha1", "sha2", "smallvec", - "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx-core", "stringprep", "thiserror", "tracing", @@ -8723,7 +8592,7 @@ dependencies = [ "serde_json", "sha2", "smallvec", - "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx-core", "stringprep", "thiserror", "tracing", @@ -8747,7 +8616,7 @@ dependencies = [ "log", "percent-encoding", "serde", - "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx-core", "tracing", "url", "urlencoding", @@ -8964,7 +8833,7 @@ dependencies = [ "prost 0.10.4", "reqwest 0.11.27", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", "upload-prepare", @@ -8978,7 +8847,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-file-list", "upload-get", ] @@ -8992,7 +8861,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9005,7 +8874,7 @@ dependencies = [ "rivet-health-checks", "rivet-metrics", "rivet-runtime", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "team-member-list", "team-user-ban-get", ] @@ -9018,7 +8887,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9029,7 +8898,7 @@ dependencies = [ 
"chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9040,7 +8909,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9051,7 +8920,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9063,7 +8932,7 @@ dependencies = [ "faker-team", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9074,7 +8943,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9085,7 +8954,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9097,7 +8966,7 @@ dependencies = [ "faker-team", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "team-get", ] @@ -9111,7 +8980,7 @@ dependencies = [ "prost 0.10.4", "regex", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9122,7 +8991,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9133,7 +9002,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9190,7 +9059,7 @@ dependencies = [ "rivet-operation", "rivet-pools", "rivet-runtime", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "team-get", "team-member-count", "tokio", @@ -9325,7 +9194,7 @@ dependencies = [ "prost 0.10.4", "rivet-claims", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9337,7 +9206,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -9350,7 +9219,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -9363,7 +9232,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -9922,7 +9791,7 @@ dependencies = [ "reqwest 0.11.27", "rivet-operation", "s3-util", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-get", "upload-prepare", "url", @@ -9937,7 +9806,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -9949,7 +9818,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-prepare", ] @@ -9962,7 +9831,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-prepare", ] @@ -9978,7 +9847,7 @@ dependencies = [ "reqwest 0.11.27", "rivet-operation", "s3-util", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", ] @@ -9997,7 +9866,7 @@ dependencies = [ "rivet-operation", "rivet-pools", "s3-util", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-logfmt", @@ -10014,7 +9883,7 @@ dependencies = [ "rivet-metrics", "rivet-runtime", "s3-util", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-get", "upload-prepare", ] @@ -10062,7 +9931,7 @@ dependencies = [ "prost 0.10.4", "reqwest 0.11.27", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-complete", "upload-get", "upload-prepare", @@ -10107,7 +9976,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-follow-toggle", ] @@ -10119,7 +9988,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-follow-toggle", ] @@ -10131,7 +10000,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-follow-toggle", ] @@ -10143,7 +10012,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-follow-toggle", ] @@ -10155,7 +10024,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-follow-toggle", ] @@ -10167,7 +10036,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -10191,7 +10060,7 @@ dependencies = [ "prost 0.10.4", "rand", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "upload-file-list", "upload-get", ] @@ -10217,7 +10086,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-identity-create", ] @@ -10230,7 +10099,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-identity-create", ] @@ -10242,7 +10111,7 @@ dependencies = [ 
"chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-follow-toggle", ] @@ -10255,7 +10124,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-identity-create", "user-identity-get", ] @@ -10320,7 +10189,7 @@ dependencies = [ "rivet-metrics", "rivet-runtime", "rivet-util-user-presence", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -10333,7 +10202,7 @@ dependencies = [ "profanity-check", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-get", ] @@ -10357,7 +10226,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-identity-create", ] @@ -10370,7 +10239,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-identity-create", ] @@ -10384,7 +10253,7 @@ dependencies = [ "prost 0.10.4", "regex", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "user-identity-create", ] @@ -10418,7 +10287,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", ] [[package]] @@ -10430,7 +10299,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "token-create", ] @@ -10918,7 +10787,7 @@ dependencies = [ "rivet-health-checks", "rivet-metrics", "rivet-runtime", - "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", + "sqlx", "tokio", "tracing", "tracing-subscriber", diff --git a/svc/Cargo.toml b/svc/Cargo.toml index efad6845ad..07c6446c42 100644 --- a/svc/Cargo.toml +++ b/svc/Cargo.toml @@ -68,13 +68,9 @@ members = [ "pkg/custom-user-avatar/ops/list-for-game", "pkg/custom-user-avatar/ops/upload-complete", "pkg/debug/ops/email-res", + "pkg/ds", "pkg/ds-log/ops/export", "pkg/ds-log/ops/read", - "pkg/ds/ops/server-create", - "pkg/ds/ops/server-delete", - "pkg/ds/ops/server-get", - "pkg/ds/ops/server-list-for-env", - "pkg/ds/worker", "pkg/email-verification/ops/complete", "pkg/email-verification/ops/create", "pkg/email/ops/send", diff --git a/svc/api/games/Cargo.toml b/svc/api/games/Cargo.toml index 2d035a11be..900c2e1d73 100644 --- a/svc/api/games/Cargo.toml +++ b/svc/api/games/Cargo.toml @@ -36,11 +36,7 @@ build-get = { path = "../../pkg/build/ops/get" } build-list-for-env = { path = "../../pkg/build/ops/list-for-env" } cluster = { path = "../../pkg/cluster" } build = { path = "../../pkg/build" } -ds-log-read = { path = "../../pkg/ds-log/ops/read" } -ds-server-create = { path = "../../pkg/ds/ops/server-create" } -ds-server-delete = { path = "../../pkg/ds/ops/server-delete" } -ds-server-get = { path = "../../pkg/ds/ops/server-get" } -ds-server-list-for-env = { path = "../../pkg/ds/ops/server-list-for-env" } +ds = { path = "../../pkg/ds" } game-get = { path = 
"../../pkg/game/ops/get" } game-namespace-get = { path = "../../pkg/game/ops/namespace-get" } game-version-get = { path = "../../pkg/game/ops/version-get" } diff --git a/svc/api/servers/Cargo.toml b/svc/api/servers/Cargo.toml index eb1f84234c..b7a880541c 100644 --- a/svc/api/servers/Cargo.toml +++ b/svc/api/servers/Cargo.toml @@ -36,11 +36,9 @@ build-get = { path = "../../pkg/build/ops/get" } build-list-for-env = { path = "../../pkg/build/ops/list-for-env" } cluster = { path = "../../pkg/cluster" } build = { path = "../../pkg/build" } +ds = { path = "../../pkg/ds" } ds-log-read = { path = "../../pkg/ds-log/ops/read" } -ds-server-create = { path = "../../pkg/ds/ops/server-create" } -ds-server-delete = { path = "../../pkg/ds/ops/server-delete" } -ds-server-get = { path = "../../pkg/ds/ops/server-get" } -ds-server-list-for-env = { path = "../../pkg/ds/ops/server-list-for-env" } +ds-log-export = { path = "../../pkg/ds-log/ops/export" } game-get = { path = "../../pkg/game/ops/get" } game-namespace-get = { path = "../../pkg/game/ops/namespace-get" } game-version-get = { path = "../../pkg/game/ops/version-get" } diff --git a/svc/api/servers/src/assert.rs b/svc/api/servers/src/assert.rs index 0b52aa2960..108d6db437 100644 --- a/svc/api/servers/src/assert.rs +++ b/svc/api/servers/src/assert.rs @@ -1,6 +1,4 @@ use api_helper::ctx::Ctx; -use rivet_api::models; -use rivet_convert::ApiTryFrom; use rivet_operation::prelude::*; use crate::auth::Auth; @@ -12,17 +10,15 @@ pub async fn server_for_env( game_id: Uuid, env_id: Uuid, ) -> GlobalResult<()> { - let get_res = op!([ctx] ds_server_get { - server_ids: vec![server_id.into()], - }) - .await?; - let server = unwrap_with!(get_res.servers.first(), SERVERS_SERVER_NOT_FOUND); + let servers_res = ctx + .op(ds::ops::server::get::Input { + server_ids: vec![server_id], + }) + .await?; + let server = unwrap_with!(servers_res.servers.first(), SERVERS_SERVER_NOT_FOUND); // Validate token can access server - ensure_with!( - unwrap!(server.env_id).as_uuid() == env_id, - SERVERS_SERVER_NOT_FOUND - ); + ensure_with!(server.env_id == env_id, SERVERS_SERVER_NOT_FOUND); Ok(()) } diff --git a/svc/api/servers/src/auth.rs b/svc/api/servers/src/auth.rs index 6e4dd0a8eb..f27b54441b 100644 --- a/svc/api/servers/src/auth.rs +++ b/svc/api/servers/src/auth.rs @@ -2,7 +2,7 @@ use api_helper::{ auth::{ApiAuth, AuthRateLimitCtx}, util::{as_auth_expired, basic_rate_limit}, }; -use proto::{backend, claims::Claims}; +use proto::claims::Claims; use rivet_claims::ClaimsDecode; use rivet_operation::prelude::*; diff --git a/svc/api/servers/src/route/builds.rs b/svc/api/servers/src/route/builds.rs index 950afd675d..b3469f0050 100644 --- a/svc/api/servers/src/route/builds.rs +++ b/svc/api/servers/src/route/builds.rs @@ -98,15 +98,11 @@ pub async fn list( .builds .iter() .filter_map(|build| { - if let Some(upload) = uploads_res + uploads_res .uploads .iter() .find(|u| u.upload_id == build.upload_id) - { - Some((build, upload)) - } else { - None - } + .map(|upload| (build, upload)) }) .map(|(build, upload)| { GlobalResult::Ok(( diff --git a/svc/api/servers/src/route/servers.rs b/svc/api/servers/src/route/servers.rs index 49c1e0fb5f..5ea19cccce 100644 --- a/svc/api/servers/src/route/servers.rs +++ b/svc/api/servers/src/route/servers.rs @@ -1,9 +1,9 @@ use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; -use proto::backend::{self, pkg::*}; use rivet_api::models; -use rivet_convert::{ApiFrom, ApiInto, ApiTryFrom, ApiTryInto}; +use rivet_convert::{ApiInto, ApiTryInto}; use 
rivet_operation::prelude::*; use serde::{Deserialize, Serialize}; +use serde_json::json; use std::collections::HashMap; use crate::{assert, auth::Auth}; @@ -21,20 +21,18 @@ pub async fn get( .await?; // Get the server - let get_res = op!([ctx] ds_server_get { - server_ids: vec![server_id.into()], - }) - .await?; - let server = unwrap_with!(get_res.servers.first(), SERVERS_SERVER_NOT_FOUND).clone(); + let servers_res = ctx + .op(ds::ops::server::get::Input { + server_ids: vec![server_id], + }) + .await?; + let server = unwrap_with!(servers_res.servers.first(), SERVERS_SERVER_NOT_FOUND); // Validate token can access server - ensure_with!( - unwrap!(server.env_id).as_uuid() == env_id && unwrap!(server.env_id).as_uuid() == env_id, - SERVERS_SERVER_NOT_FOUND - ); + ensure_with!(server.env_id == env_id, SERVERS_SERVER_NOT_FOUND); Ok(models::ServersGetServerResponse { - server: Box::new(models::ServersServer::api_try_from(server)?), + server: Box::new(server.clone().api_try_into()?), }) } @@ -72,57 +70,83 @@ pub async fn create( tracing::info!(?tags, "creating server with tags"); - let server = op!([ctx] ds_server_create { - env_id: Some(env_id.into()), - datacenter_id: Some(body.datacenter.into()), - cluster_id: Some(cluster_id.into()), - tags: tags, - resources: Some((*body.resources).api_into()), - kill_timeout_ms: body.lifecycle.as_ref().and_then(|x| x.kill_timeout).unwrap_or_default(), - image_id: Some(body.runtime.build.into()), - args: body.runtime.arguments.unwrap_or_default(), - network_mode: backend::ds::NetworkMode::api_from( - body.network.mode.unwrap_or_default(), - ) as i32, - environment: body.runtime.environment.unwrap_or_default(), - network_ports: unwrap!(body.network - .ports - .into_iter() - .map(|(s, p)| Ok((s, dynamic_servers::server_create::Port { - internal_port: p.internal_port, - routing: Some(if let Some(routing) = p.routing { - match *routing { - models::ServersPortRouting { - game_guard: Some(_), - host: None, - } => dynamic_servers::server_create::port::Routing::GameGuard( - backend::ds::GameGuardRouting { - protocol: backend::ds::GameGuardProtocol::api_from(p.protocol) as i32, - }, - ), - models::ServersPortRouting { - game_guard: None, - host: Some(_), - } => dynamic_servers::server_create::port::Routing::Host(backend::ds::HostRouting { - protocol: backend::ds::HostProtocol::api_try_from(p.protocol)? as i32, - }), - models::ServersPortRouting { .. 
} => { - bail_with!(SERVERS_MUST_SPECIFY_ROUTING_TYPE) + let server_id = Uuid::new_v4(); + + let mut sub = ctx + .subscribe::(&json!({ + "server_id": server_id, + })) + .await?; + + ctx.dispatch_tagged_workflow( + &json!({ + "server_id": server_id, + }), + ds::workflows::server::Input { + server_id, + env_id, + cluster_id, + datacenter_id: body.datacenter, + tags, + resources: (*body.resources).api_into(), + kill_timeout_ms: body + .lifecycle + .as_ref() + .and_then(|x| x.kill_timeout) + .unwrap_or_default(), + image_id: body.runtime.build, + args: body.runtime.arguments.unwrap_or_default(), + network_mode: body.network.mode.unwrap_or_default().api_into(), + environment: body.runtime.environment.unwrap_or_default(), + network_ports: unwrap!(body + .network + .ports + .into_iter() + .map(|(s, p)| Ok(( + s, + ds::workflows::server::Port { + internal_port: p.internal_port, + routing: if let Some(routing) = p.routing { + match *routing { + models::ServersPortRouting { + game_guard: Some(_), + host: None, + } => ds::types::Routing::GameGuard { + protocol: p.protocol.api_into(), + }, + models::ServersPortRouting { + game_guard: None, + host: Some(_), + } => ds::types::Routing::Host { + protocol: p.protocol.api_try_into()?, + }, + models::ServersPortRouting { .. } => { + bail_with!(SERVERS_MUST_SPECIFY_ROUTING_TYPE) + } + } + } else { + ds::types::Routing::GameGuard { + protocol: p.protocol.api_into(), + } } } - } else { - dynamic_servers::server_create::port::Routing::GameGuard(backend::ds::GameGuardRouting { - protocol: backend::ds::GameGuardProtocol::api_from(p.protocol) as i32, - }) - }) - }))) - .collect::>>()), - }) - .await? - .server; + ))) + .collect::>>()), + }, + ) + .await?; + + sub.next().await?; + + let servers_res = ctx + .op(ds::ops::server::get::Input { + server_ids: vec![server_id], + }) + .await?; + let server = unwrap_with!(servers_res.servers.first(), SERVERS_SERVER_NOT_FOUND); Ok(models::ServersCreateServerResponse { - server: Box::new(unwrap!(server).api_try_into()?), + server: Box::new(server.clone().api_try_into()?), }) } @@ -145,13 +169,25 @@ pub async fn destroy( assert::server_for_env(&ctx, server_id, game_id, env_id).await?; - op!([ctx] ds_server_delete { - server_id: Some(server_id.into()), - override_kill_timeout_ms: query.override_kill_timeout.unwrap_or_default(), - }) + let mut sub = ctx + .subscribe::(&json!({ + "server_id": server_id, + })) + .await?; + + ctx.tagged_signal( + &json!({ + "server_id": server_id, + }), + ds::workflows::server::Destroy { + override_kill_timeout_ms: query.override_kill_timeout.unwrap_or_default(), + }, + ) .await?; - Ok(serde_json::json!({})) + sub.next().await?; + + Ok(json!({})) } // MARK: GET /games/{}/environments/{}/servers @@ -173,26 +209,28 @@ pub async fn list_servers( .check_game(ctx.op_ctx(), game_id, env_id, true) .await?; - let list_res = op!([ctx] ds_server_list_for_env { - env_id: Some(env_id.into()), - tags: query.tags_json.as_deref().map_or(Ok(HashMap::new()), serde_json::from_str)?, - include_destroyed: query.include_destroyed.unwrap_or(false), - cursor: query.cursor.map(|x| x.into()), - }) - .await?; + let list_res = ctx + .op(ds::ops::server::list_for_env::Input { + env_id, + tags: query + .tags_json + .as_deref() + .map_or(Ok(HashMap::new()), serde_json::from_str)?, + include_destroyed: query.include_destroyed.unwrap_or(false), + cursor: query.cursor, + }) + .await?; - let servers_res = op!([ctx] ds_server_get { - server_ids: list_res.server_ids.clone(), - }) - .await?; + let servers_res = ctx + 
.op(ds::ops::server::get::Input { + server_ids: list_res.server_ids.clone(), + }) + .await?; let servers = servers_res .servers .into_iter() - .map(|server| { - let server = models::ServersServer::api_try_from(server)?; - Ok(server) - }) + .map(ApiTryInto::api_try_into) .collect::>>()?; Ok(models::ServersListServersResponse { servers }) diff --git a/svc/api/traefik-provider/Cargo.toml b/svc/api/traefik-provider/Cargo.toml index fb1a7a98b5..d93c106b72 100644 --- a/svc/api/traefik-provider/Cargo.toml +++ b/svc/api/traefik-provider/Cargo.toml @@ -33,12 +33,12 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ "ansi", ] } url = "2.2.2" -util-ds = { package = "rivet-util-ds", path = "../../pkg/ds/util" } util-cdn = { package = "rivet-util-cdn", path = "../../pkg/cdn/util" } util-job = { package = "rivet-util-job", path = "../../pkg/job/util" } uuid = { version = "1", features = ["v4"] } cluster = { path = "../../pkg/cluster" } +ds = { path = "../../pkg/ds" } [dependencies.sqlx] git = "https://github.com/rivet-gg/sqlx" diff --git a/svc/api/traefik-provider/src/route/game_guard/dynamic_servers.rs b/svc/api/traefik-provider/src/route/game_guard/dynamic_servers.rs index f54937aeac..6612a3b7ba 100644 --- a/svc/api/traefik-provider/src/route/game_guard/dynamic_servers.rs +++ b/svc/api/traefik-provider/src/route/game_guard/dynamic_servers.rs @@ -3,13 +3,9 @@ use std::{ hash::{Hash, Hasher}, }; -use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; -use proto::backend::{self, pkg::*}; -use redis::AsyncCommands; +use api_helper::ctx::Ctx; use rivet_operation::prelude::*; -use rivet_pools::prelude::*; use serde::{Deserialize, Serialize}; -use url::Url; use crate::{auth::Auth, types}; @@ -37,7 +33,7 @@ impl DynamicServer { } fn hostname(&self) -> GlobalResult { - util_ds::build_ds_hostname(self.server_id, &self.port_name, self.datacenter_id) + ds::util::build_ds_hostname(self.server_id, &self.port_name, self.datacenter_id) } } @@ -48,7 +44,6 @@ pub async fn build_ds( ) -> GlobalResult<()> { // TODO put in function, clean up // TODO: remove cache for now - tracing::info!(?config, "config timeeee"); // let dynamic_servers: Option> = ctx // .cache() @@ -61,29 +56,25 @@ pub async fn build_ds( [ctx, DynamicServer] " SELECT - servers.server_id, - servers.datacenter_id, - internal_ports.nomad_label AS label, - internal_ports.nomad_ip, - internal_ports.nomad_source, - docker_ports_protocol_game_guard.port_number, - docker_ports_protocol_game_guard.gg_port, - docker_ports_protocol_game_guard.port_name, - docker_ports_protocol_game_guard.protocol - FROM - db_ds.internal_ports - JOIN - db_ds.servers + s.server_id, + s.datacenter_id, + ip.nomad_label AS label, + ip.nomad_ip, + ip.nomad_source, + gg.port_number, + gg.gg_port, + gg.port_name, + gg.protocol + FROM db_ds.internal_ports AS ip + JOIN db_ds.servers AS s + ON ip.server_id = s.server_id + JOIN db_ds.docker_ports_protocol_game_guard AS gg ON - internal_ports.server_id = servers.server_id - JOIN - db_ds.docker_ports_protocol_game_guard - ON - internal_ports.server_id = docker_ports_protocol_game_guard.server_id - AND - internal_ports.nomad_label = CONCAT('ds_', docker_ports_protocol_game_guard.port_name) + ip.server_id = gg.server_id AND + ip.nomad_label = CONCAT('ds_', gg.port_name) WHERE - servers.datacenter_id = $1 AND servers.stop_ts IS NULL + s.datacenter_id = $1 AND + s.stop_ts IS NULL ", dc_id ) @@ -95,15 +86,8 @@ pub async fn build_ds( // }) // .await?; - tracing::info!(?config, "config timeeee2"); - - // let 
dynamic_servers = unwrap!(dynamic_servers); - tracing::info!(?dynamic_servers, "ds0time"); - // Process proxied ports for dynamic_server in &dynamic_servers { - tracing::info!(?dynamic_server, "ds1time"); - let server_id = dynamic_server.server_id; let register_res = ds_register_proxied_port(server_id, dynamic_server, config); match register_res { @@ -114,8 +98,6 @@ pub async fn build_ds( } } - tracing::info!(?config, "config timeeee3"); - config.http.middlewares.insert( "ds-rate-limit".to_owned(), types::TraefikMiddlewareHttp::RateLimit { @@ -153,16 +135,16 @@ fn ds_register_proxied_port( proxied_port: &DynamicServer, config: &mut types::TraefikConfigResponse, ) -> GlobalResult<()> { - let ingress_port = proxied_port.gg_port.clone(); + let ingress_port = proxied_port.gg_port; let target_nomad_port_label = proxied_port.label.clone(); let service_id = format!("ds-run:{}:{}", run_id, target_nomad_port_label); - let proxy_protocol = unwrap!(backend::ds::GameGuardProtocol::from_i32( - proxied_port.protocol as i32 + let proxy_protocol = unwrap!(ds::types::GameGuardProtocol::from_repr( + proxied_port.protocol.try_into()? )); // Insert the relevant service match proxy_protocol { - backend::ds::GameGuardProtocol::Http | backend::ds::GameGuardProtocol::Https => { + ds::types::GameGuardProtocol::Http | ds::types::GameGuardProtocol::Https => { config.http.services.insert( service_id.clone(), types::TraefikService { @@ -179,7 +161,7 @@ fn ds_register_proxied_port( }, ); } - backend::ds::GameGuardProtocol::Tcp | backend::ds::GameGuardProtocol::TcpTls => { + ds::types::GameGuardProtocol::Tcp | ds::types::GameGuardProtocol::TcpTls => { config.tcp.services.insert( service_id.clone(), types::TraefikService { @@ -196,7 +178,7 @@ fn ds_register_proxied_port( }, ); } - backend::ds::GameGuardProtocol::Udp => { + ds::types::GameGuardProtocol::Udp => { config.udp.services.insert( service_id.clone(), types::TraefikService { @@ -217,7 +199,7 @@ fn ds_register_proxied_port( // Insert the relevant router match proxy_protocol { - backend::ds::GameGuardProtocol::Http => { + ds::types::GameGuardProtocol::Http => { // Generate config let middlewares = http_router_middlewares(); let rule = format_http_rule(proxied_port)?; @@ -240,7 +222,7 @@ fn ds_register_proxied_port( }, ); } - backend::ds::GameGuardProtocol::Https => { + ds::types::GameGuardProtocol::Https => { // Generate config let middlewares = http_router_middlewares(); let rule = format_http_rule(proxied_port)?; @@ -263,7 +245,7 @@ fn ds_register_proxied_port( }, ); } - backend::ds::GameGuardProtocol::Tcp => { + ds::types::GameGuardProtocol::Tcp => { config.tcp.routers.insert( format!("ds-run:{}:{}:tcp", run_id, target_nomad_port_label), types::TraefikRouter { @@ -276,7 +258,7 @@ fn ds_register_proxied_port( }, ); } - backend::ds::GameGuardProtocol::TcpTls => { + ds::types::GameGuardProtocol::TcpTls => { config.tcp.routers.insert( format!("ds-run:{}:{}:tcp-tls", run_id, target_nomad_port_label), types::TraefikRouter { @@ -289,7 +271,7 @@ fn ds_register_proxied_port( }, ); } - backend::ds::GameGuardProtocol::Udp => { + ds::types::GameGuardProtocol::Udp => { config.udp.routers.insert( format!("ds-run:{}:{}:udp", run_id, target_nomad_port_label), types::TraefikRouter { diff --git a/svc/api/traefik-provider/src/route/game_guard/job.rs b/svc/api/traefik-provider/src/route/game_guard/job.rs index 05d9c7a39c..d95606a2b3 100644 --- a/svc/api/traefik-provider/src/route/game_guard/job.rs +++ b/svc/api/traefik-provider/src/route/game_guard/job.rs @@ -3,11 +3,10 @@ use 
std::{ hash::{Hash, Hasher}, }; -use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use api_helper::ctx::Ctx; use proto::backend::{self, pkg::*}; use redis::AsyncCommands; use rivet_operation::prelude::*; -use serde::{Deserialize, Serialize}; use url::Url; use crate::{auth::Auth, types}; diff --git a/svc/api/traefik-provider/src/route/game_guard/mod.rs b/svc/api/traefik-provider/src/route/game_guard/mod.rs index e12012dec5..be26825858 100644 --- a/svc/api/traefik-provider/src/route/game_guard/mod.rs +++ b/svc/api/traefik-provider/src/route/game_guard/mod.rs @@ -1,16 +1,8 @@ -use std::{ - collections::hash_map::DefaultHasher, - hash::{Hash, Hasher}, -}; - use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; use dynamic_servers::build_ds; use job::build_job; -use proto::backend::{self, pkg::*}; -use redis::AsyncCommands; use rivet_operation::prelude::*; use serde::{Deserialize, Serialize}; -use url::Url; use crate::{auth::Auth, types}; diff --git a/svc/pkg/cluster/Cargo.toml b/svc/pkg/cluster/Cargo.toml index 5cf84a0115..5f1f1392c4 100644 --- a/svc/pkg/cluster/Cargo.toml +++ b/svc/pkg/cluster/Cargo.toml @@ -23,6 +23,7 @@ rivet-runtime = { path = "../../../lib/runtime" } s3-util = { path = "../../../lib/s3-util" } serde = { version = "1.0.198", features = ["derive"] } ssh2 = "0.9.4" +strum = { version = "0.24", features = ["derive"] } trust-dns-resolver = { version = "0.23.2", features = ["dns-over-native-tls"] } ip-info = { path = "../ip/ops/info" } diff --git a/svc/pkg/cluster/src/ops/datacenter/get.rs b/svc/pkg/cluster/src/ops/datacenter/get.rs index a0ed09280f..9691819e69 100644 --- a/svc/pkg/cluster/src/ops/datacenter/get.rs +++ b/svc/pkg/cluster/src/ops/datacenter/get.rs @@ -67,7 +67,9 @@ impl TryFrom for Datacenter { { build_delivery_method.0 } else { - value.build_delivery_method.try_into()? + unwrap!(BuildDeliveryMethod::from_repr( + value.build_delivery_method.try_into()? 
+ )) }, prebakes_enabled: value.prebakes_enabled, }) diff --git a/svc/pkg/cluster/src/types.rs b/svc/pkg/cluster/src/types.rs index 38c8e63e8b..d22612bee9 100644 --- a/svc/pkg/cluster/src/types.rs +++ b/svc/pkg/cluster/src/types.rs @@ -6,6 +6,7 @@ use std::{ use chirp_workflow::prelude::*; use rivet_operation::prelude::proto::backend; use serde::{Deserialize, Serialize}; +use strum::FromRepr; #[derive(Debug, sqlx::FromRow)] pub struct Cluster { @@ -138,23 +139,10 @@ pub struct PoolUpdate { pub drain_timeout: Option, } -#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +#[derive(Serialize, Deserialize, Hash, Debug, Clone, Copy, PartialEq, Eq, FromRepr)] pub enum BuildDeliveryMethod { - TrafficServer, - S3Direct, -} - -// Backwards compatibility -impl TryFrom for BuildDeliveryMethod { - type Error = GlobalError; - - fn try_from(value: i64) -> GlobalResult { - match value { - 0 => Ok(BuildDeliveryMethod::TrafficServer), - 1 => Ok(BuildDeliveryMethod::S3Direct), - _ => bail!("unexpected BuildDeliveryMethod variant"), - } - } + TrafficServer = 0, + S3Direct = 1, } #[derive(Debug)] diff --git a/svc/pkg/cluster/src/workflows/cluster.rs b/svc/pkg/cluster/src/workflows/cluster.rs index e7471ca4ac..6fb94229c7 100644 --- a/svc/pkg/cluster/src/workflows/cluster.rs +++ b/svc/pkg/cluster/src/workflows/cluster.rs @@ -135,7 +135,7 @@ pub struct DatacenterCreate { } join_signal!(Main, [GameLink, DatacenterCreate]); -#[message("cluster-game-link-complete")] +#[message("cluster_game_link_complete")] pub struct GameLinkComplete {} #[derive(Debug, Serialize, Deserialize, Hash)] diff --git a/svc/pkg/cluster/src/workflows/datacenter/mod.rs b/svc/pkg/cluster/src/workflows/datacenter/mod.rs index dde860f17d..56a94a656e 100644 --- a/svc/pkg/cluster/src/workflows/datacenter/mod.rs +++ b/svc/pkg/cluster/src/workflows/datacenter/mod.rs @@ -41,7 +41,7 @@ pub(crate) async fn cluster_datacenter(ctx: &mut WorkflowCtx, input: &Input) -> pools: input.pools.clone(), - build_delivery_method: input.build_delivery_method.clone(), + build_delivery_method: input.build_delivery_method, prebakes_enabled: input.prebakes_enabled, }) .await?; diff --git a/svc/pkg/cluster/standalone/workflow-backfill/src/lib.rs b/svc/pkg/cluster/standalone/workflow-backfill/src/lib.rs index 7efd250ee1..b210553f4b 100644 --- a/svc/pkg/cluster/standalone/workflow-backfill/src/lib.rs +++ b/svc/pkg/cluster/standalone/workflow-backfill/src/lib.rs @@ -179,8 +179,9 @@ pub async fn run_from_env() -> GlobalResult<()> { .collect::>>()? }), - "build_delivery_method": - TryInto::::try_into(dc.build_delivery_method)?, + "build_delivery_method": unwrap!( + cluster::types::BuildDeliveryMethod::from_repr(dc.build_delivery_method.try_into()?) + ), "prebakes_enabled": false, }))?; wf.finalize(); @@ -223,7 +224,9 @@ pub async fn run_from_env() -> GlobalResult<()> { .collect::>>()? }, - build_delivery_method: dc.build_delivery_method.try_into()?, + build_delivery_method: unwrap!(cluster::types::BuildDeliveryMethod::from_repr( + dc.build_delivery_method.try_into()?, + )), prebakes_enabled: false, }, serde_json::Value::Null, @@ -739,7 +742,9 @@ pub async fn run_from_env() -> GlobalResult<()> { .collect::>>()? 
}, - build_delivery_method: dc.build_delivery_method.try_into()?, + build_delivery_method: unwrap!(cluster::types::BuildDeliveryMethod::from_repr( + dc.build_delivery_method.try_into()?, + )), prebakes_enabled: false, create_ts: util::timestamp::now(), }, diff --git a/svc/pkg/ds-log/ops/export/src/lib.rs b/svc/pkg/ds-log/ops/export/src/lib.rs index a9f7a1d3b8..cbcba2b16e 100644 --- a/svc/pkg/ds-log/ops/export/src/lib.rs +++ b/svc/pkg/ds-log/ops/export/src/lib.rs @@ -24,16 +24,12 @@ pub async fn handle( .await? .query(indoc!( " - SELECT - message - FROM - db_ds_log.server_logs + SELECT message + FROM db_ds_log.server_logs WHERE - server_id = ? - AND stream_type = ? - ORDER BY - ts - ASC + server_id = ? AND + stream_type = ? + ORDER BY ts ASC " )) .bind(server_id) diff --git a/svc/pkg/ds-log/ops/read/src/lib.rs b/svc/pkg/ds-log/ops/read/src/lib.rs index 7d2def992b..69dd89d3b3 100644 --- a/svc/pkg/ds-log/ops/read/src/lib.rs +++ b/svc/pkg/ds-log/ops/read/src/lib.rs @@ -91,19 +91,14 @@ async fn query_before_nts( let mut entries_cursor = clickhouse .query(&formatdoc!( " - SELECT - ts, - message - FROM - db_ds_log.server_logs + SELECT ts, message + FROM db_ds_log.server_logs WHERE - server_id = ? - AND stream_type = ? - AND ts < fromUnixTimestamp64Nano(?) - ORDER BY - ts {order_by} - LIMIT - ? + server_id = ? AND + stream_type = ? AND + ts < fromUnixTimestamp64Nano(?) + ORDER BY ts {order_by} + LIMIT ? " )) .bind(run_id) @@ -130,19 +125,14 @@ async fn query_after_nts( let mut entries_cursor = clickhouse .query(&formatdoc!( " - SELECT - ts, - message - FROM - db_ds_log.server_logs + SELECT ts, message + FROM db_ds_log.server_logs WHERE - server_id = ? - AND stream_type = ? - AND ts > fromUnixTimestamp64Nano(?) - ORDER BY - ts {order_by} - LIMIT - ? + server_id = ? AND + stream_type = ? AND + ts > fromUnixTimestamp64Nano(?) + ORDER BY ts {order_by} + LIMIT ? " )) .bind(run_id) @@ -170,20 +160,15 @@ async fn query_nts_range( let mut entries_cursor = clickhouse .query(&formatdoc!( " - SELECT - ts, - message - FROM - db_ds_log.server_logs + SELECT ts, message + FROM db_ds_log.server_logs WHERE - run_id = ? - AND stream_type = ? - AND ts > fromUnixTimestamp64Nano(?) - AND ts < fromUnixTimestamp64Nano(?) - ORDER BY - ts {order_by} - LIMIT - ? + run_id = ? AND + stream_type = ? AND + ts > fromUnixTimestamp64Nano(?) AND + ts < fromUnixTimestamp64Nano(?) + ORDER BY ts {order_by} + LIMIT ? 
" )) .bind(run_id) diff --git a/svc/pkg/ds/Cargo.toml b/svc/pkg/ds/Cargo.toml new file mode 100644 index 0000000000..ad669172e1 --- /dev/null +++ b/svc/pkg/ds/Cargo.toml @@ -0,0 +1,73 @@ +[package] +name = "ds" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +bit-vec = "0.6" +chirp-client = { path = "../../../lib/chirp/client" } +chirp-worker = { path = "../../../lib/chirp/worker" } +chirp-workflow = { path = "../../../lib/chirp-workflow/core" } +chrono = "0.4" +cjson = "0.1" +heck = "0.3" +hex = "0.4" +http = "0.2" +lazy_static = "1.4.0" +nomad-util = { path = "../../../lib/nomad-util" } +rand = "0.8" +regex = "1.10" +reqwest = { version = "0.12", features = ["json"] } +rivet-api = { path = "../../../sdks/full/rust" } +rivet-convert = { path = "../../../lib/convert" } +rivet-health-checks = { path = "../../../lib/health-checks" } +rivet-metrics = { path = "../../../lib/metrics" } +rivet-operation = { path = "../../../lib/operation/core" } +rivet-runtime = { path = "../../../lib/runtime" } +rivet-util = { path = "../../../lib/util/core" } +s3-util = { path = "../../../lib/s3-util" } +serde = { version = "1.0", features = ["derive"] } +serde_json = { version = "1.0", features = ["preserve_order"] } +sha2 = "0.10" +strum = { version = "0.24", features = ["derive"] } +tracing-subscriber = { version = "0.3", default-features = false, features = [ + "fmt", + "json", + "ansi", +] } +util-build = { package = "rivet-util-build", path = "../build/util" } +util-job = { package = "rivet-util-job", path = "../job/util" } +uuid = { version = "1", features = ["v4", "serde"] } + +build-get = { path = "../build/ops/get" } +cluster = { path = "../cluster" } +ip-info = { path = "../ip/ops/info" } +job-run-get = { path = "../job-run/ops/get" } +mm-config-version-get = { path = "../mm-config/ops/version-get" } +mm-lobby-get = { path = "../mm/ops/lobby-get" } +mm-lobby-list-for-user-id = { path = "../mm/ops/lobby-list-for-user-id" } +region-get = { path = "../region/ops/get" } +tier-list = { path = "../tier/ops/list" } +token-create = { path = "../token/ops/create" } +upload-get = { path = "../upload/ops/get" } +user-identity-get = { path = "../user-identity/ops/get" } + +[dependencies.sqlx] +git = "https://github.com/rivet-gg/sqlx" +rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" +default-features = false + +[dependencies.nomad_client] +package = "nomad_client" +git = "https://github.com/rivet-gg/nomad-client" +rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret + +[dev-dependencies] +cluster = { path = "../cluster" } +faker-build = { path = "../faker/ops/build" } +faker-game = { path = "../faker/ops/game" } +faker-region = { path = "../faker/ops/region" } +faker-user = { path = "../faker/ops/user" } +game-get = { path = "../game/ops/get" } diff --git a/svc/pkg/ds/ops/server-delete/Service.toml b/svc/pkg/ds/Service.toml similarity index 61% rename from svc/pkg/ds/ops/server-delete/Service.toml rename to svc/pkg/ds/Service.toml index d701bee942..e54ce65a24 100644 --- a/svc/pkg/ds/ops/server-delete/Service.toml +++ b/svc/pkg/ds/Service.toml @@ -1,10 +1,12 @@ [service] -name = "ds-server-delete" +name = "ds" [runtime] kind = "rust" -[operation] +[package] [databases] db-ds = {} + +[secrets] diff --git a/svc/pkg/ds/ops/server-create/Cargo.toml b/svc/pkg/ds/ops/server-create/Cargo.toml deleted file mode 100644 index 5da848b0bc..0000000000 --- a/svc/pkg/ds/ops/server-create/Cargo.toml +++ /dev/null @@ -1,67 +0,0 @@ -[package] 
-name = "ds-server-create" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -chirp-worker = { path = "../../../../../lib/chirp/worker" } -chirp-workflow = { path = "../../../../../lib/chirp-workflow/core" } -rivet-operation = { path = "../../../../../lib/operation/core" } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -lazy_static = "1.4.0" -uuid = { version = "1", features = ["v4", "serde"] } -http = "0.2" -bit-vec = "0.6" -cjson = "0.1" -nomad-util = { path = "../../../../../lib/nomad-util" } -strum = { version = "0.24", features = ["derive"] } -sha2 = "0.10" -hex = "0.4" -rivet-util = { path = "../../../../../lib/util/core" } -heck = "0.3" -s3-util = { path = "../../../../../lib/s3-util" } -util-build = { package = "rivet-util-build", path = "../../../build/util" } -util-ds = { package = "rivet-util-ds", path = "../../util" } -regex = "1.10" -rand = "0.8" -reqwest = "0.11" -rivet-api = { path = "../../../../../sdks/full/rust" } -tracing-subscriber = { version = "0.3", default-features = false, features = [ - "fmt", - "json", - "ansi", -] } - -build-get = { path = "../../../build/ops/get" } -cluster = { path = "../../../cluster" } -ip-info = { path = "../../../ip/ops/info" } -mm-lobby-list-for-user-id = { path = "../../../mm/ops/lobby-list-for-user-id" } -region-get = { path = "../../../region/ops/get" } -tier-list = { path = "../../../tier/ops/list" } -token-create = { path = "../../../token/ops/create" } -upload-get = { path = "../../../upload/ops/get" } -user-identity-get = { path = "../../../user-identity/ops/get" } - -[dependencies.nomad_client] -package = "nomad_client" -git = "https://github.com/rivet-gg/nomad-client" -rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret - -[dependencies.sqlx] -version = "0.7" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } -rivet-connection = { path = "../../../../../lib/connection" } - -faker-build = { path = "../../../faker/ops/build" } -faker-user = { path = "../../../faker/ops/user" } -faker-game = { path = "../../../faker/ops/game" } -faker-region = { path = "../../../faker/ops/region" } -game-get = { path = "../../../../pkg/game/ops/get" } - diff --git a/svc/pkg/ds/ops/server-create/Service.toml b/svc/pkg/ds/ops/server-create/Service.toml deleted file mode 100644 index 4c22e9bb5e..0000000000 --- a/svc/pkg/ds/ops/server-create/Service.toml +++ /dev/null @@ -1,10 +0,0 @@ -[service] -name = "ds-server-create" - -[runtime] -kind = "rust" - -[operation] - -[databases] -db-ds = {} diff --git a/svc/pkg/ds/ops/server-create/src/lib.rs b/svc/pkg/ds/ops/server-create/src/lib.rs deleted file mode 100644 index 409b9ac9b7..0000000000 --- a/svc/pkg/ds/ops/server-create/src/lib.rs +++ /dev/null @@ -1,1532 +0,0 @@ -use std::{ - collections::HashMap, - hash::{DefaultHasher, Hasher}, - net::IpAddr, - time::Duration, -}; - -use crate::sqlx; -use futures_util::FutureExt; -use nomad_client::models::*; -use nomad_job::{ - escape_go_template, gen_oci_bundle_config, inject_consul_env_template, nomad_host_port_env_var, - template_env_var, template_env_var_int, DecodedPort, ProxyProtocol, TransportProtocol, -}; -use proto::{ - backend::{self, pkg::*}, - chirp::response::Ok, -}; -use rand::Rng; -use regex::Regex; -use rivet_operation::prelude::*; -use serde_json::json; -use sha2::{Digest, Sha256}; -use team::member_get::request; - -mod 
nomad_job; -mod oci_config; -mod seccomp; -mod util_job; - -lazy_static::lazy_static! { - pub static ref NEW_NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = - nomad_util::new_config_from_env().unwrap(); -} - -/// Choose which port to assign for a job's ingress port. -/// -/// If not provided by `ProxiedPort`, then: -/// - HTTP: 80 -/// - HTTPS: 443 -/// - TCP/TLS: random -/// - UDP: random -/// -/// This is somewhat poorly written for TCP & UDP ports and may bite us in the ass -/// some day. See https://linear.app/rivet-gg/issue/RIV-1799 -async fn choose_ingress_port( - ctx: OperationContext, - ingress_port: i32, - protocol: i32, -) -> GlobalResult { - use backend::job::ProxyProtocol; - - let ingress_port = match unwrap!(backend::job::ProxyProtocol::from_i32(protocol)) { - ProxyProtocol::Http => 80_i32, - ProxyProtocol::Https => 443, - ProxyProtocol::Tcp | ProxyProtocol::TcpTls => { - bind_with_retries( - ctx, - protocol, - util::net::job::MIN_INGRESS_PORT_TCP..=util::net::job::MAX_INGRESS_PORT_TCP, - ) - .await? - } - ProxyProtocol::Udp => { - bind_with_retries( - ctx, - protocol, - util::net::job::MIN_INGRESS_PORT_UDP..=util::net::job::MAX_INGRESS_PORT_UDP, - ) - .await? - } - }; - - Ok(ingress_port) -} - -async fn bind_with_retries( - ctx: OperationContext, - proxy_protocol: i32, - range: std::ops::RangeInclusive, -) -> GlobalResult { - let mut attempts = 3u32; - - // Try to bind to a random port, verifying that it is not already bound - loop { - if attempts == 0 { - bail!("failed all attempts to bind to unique port"); - } - attempts -= 1; - - let port = rand::thread_rng().gen_range(range.clone()) as i32; - - let (already_exists,) = sql_fetch_one!( - [ctx, (bool,)] - " - SELECT EXISTS( - SELECT 1 - FROM db_ds.servers as r - JOIN db_ds.docker_ports_protocol_game_guard as p - ON r.server_id = p.server_id - WHERE - r.cleanup_ts IS NULL AND - p.gg_port = $1 AND - p.protocol = $2 - ) - ", - port, - proxy_protocol, - ) - .await?; - - if !already_exists { - break Ok(port); - } - - tracing::info!(?port, ?attempts, "port collision, retrying"); - } -} - -#[operation(name = "ds-server-create")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let resources = unwrap_ref!(ctx.resources).clone(); - let server_id = Uuid::new_v4(); - let env_id = unwrap_ref!(ctx.env_id).as_uuid(); - let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); - let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); - - let create_ts = ctx.ts(); - - // MARK: db insert - - #[derive(Default, Clone)] - struct GameGuardUnnest { - port_names: Vec, - port_numbers: Vec>, - gg_ports: Vec>, - protocols: Vec, - } - - #[derive(Default, Clone)] - struct HostUnnest { - port_names: Vec, - port_numbers: Vec>, - } - - let mut game_guard_unnest = GameGuardUnnest::default(); - let mut host_unnest = HostUnnest::default(); - - for (name, port) in ctx.network_ports.iter() { - let routing = unwrap!(port.routing.clone()); - match routing { - dynamic_servers::server_create::port::Routing::GameGuard(gameguard_protocol) => { - game_guard_unnest.port_names.push(name.clone()); - game_guard_unnest.port_numbers.push(port.internal_port); - game_guard_unnest.gg_ports.push(match port.internal_port { - Some(port) => Some( - choose_ingress_port(ctx.clone(), port, gameguard_protocol.protocol).await?, - ), - None => None, - }); - game_guard_unnest - .protocols - .push(gameguard_protocol.protocol); - } - dynamic_servers::server_create::port::Routing::Host(_) => { - host_unnest.port_names.push(name.clone()); - 
host_unnest.port_numbers.push(port.internal_port); - } - }; - } - - rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { - let ctx = ctx.clone(); - let host_unnest = host_unnest.clone(); - let game_guard_unnest = game_guard_unnest.clone(); - - async move { - sql_execute!( - [ctx, @tx tx] - " - WITH - servers_cte AS ( - INSERT INTO - db_ds.servers ( - server_id, - env_id, - datacenter_id, - cluster_id, - tags, - resources_cpu_millicores, - resources_memory_mib, - kill_timeout_ms, - create_ts, - image_id, - args, - network_mode, - environment - ) - VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) - RETURNING - 1 - ), - docker_ports_host_cte AS ( - INSERT INTO - db_ds.docker_ports_host ( - server_id, - port_name, - port_number - ) - SELECT - $1, - t.* - FROM - unnest($14, $15) AS t (port_name, port_number) - RETURNING - 1 - ), - docker_ports_protocol_game_guard_cte AS ( - INSERT INTO - db_ds.docker_ports_protocol_game_guard ( - server_id, - port_name, - port_number, - gg_port, - protocol - ) - SELECT - $1, - t.* - FROM - unnest($16, $17, $18, $19) AS t (port_name, port_number, protocol) - RETURNING - 1 - ) - SELECT - 1 - ", - server_id, - env_id, - datacenter_id, - cluster_id, - serde_json::value::to_raw_value(&ctx.tags.to_owned())?.to_string(), // 5 - resources.cpu_millicores, - resources.memory_mib, - ctx.kill_timeout_ms, - create_ts, - unwrap!(ctx.image_id).as_uuid(), // 10 - &ctx.args, - ctx.network_mode, - serde_json::value::to_raw_value(&ctx.environment)?.to_string(), - host_unnest.port_names, - host_unnest.port_numbers, // 15 - game_guard_unnest.port_names, - game_guard_unnest.port_numbers, - game_guard_unnest.gg_ports, - game_guard_unnest.protocols, - ) - .await - } - .boxed() - }) - .await?; - - // let ( - // (mm_game_config, namespace), - // mm_ns_config, - // (lobby_group, lobby_group_meta, version_id), - // region, - // tiers, - // ) = tokio::try_join!( - // fetch_namespace(ctx, namespace_id), - // fetch_mm_namespace_config(ctx, namespace_id), - // fetch_lobby_group_config(ctx, lobby_group_id), - // fetch_region(ctx, region_id), - // fetch_tiers(ctx, region_id), - // )?; - // let (mm_game_config, namespace) = fetch_namespace(ctx, namespace_id).await?; - // let mm_ns_config = fetch_mm_namespace_config(ctx, namespace_id).await?; - // let (lobby_group, lobby_group_meta, version_id) = fetch_lobby_group_config(ctx, lobby_group_id) - // .await?; - // let region = fetch_region(ctx, region_id).await?; - // let tiers = fetch_tiers(ctx, region_id).await?; - // let version = fetch_version(ctx, version_id).await?; - - // // Do all nomad stuff - // let namespace_id = unwrap_ref!(namespace.namespace_id).as_uuid(); - // let version_id = unwrap_ref!(version.version_id).as_uuid(); - // let lobby_group_id = unwrap_ref!(lobby_group_meta.lobby_group_id).as_uuid(); - // let region_id = unwrap_ref!(region.region_id).as_uuid(); - - // let job_runner_binary_url = resolve_job_runner_binary_url(ctx).await?; - - // let resolve_perf = ctx.perf().start("resolve-image-artifact-url").await; - // let build_id = unwrap_ref!(runtime.build_id).as_uuid(); - // let image_artifact_url = resolve_image_artifact_url(ctx, build_id, region).await?; - // resolve_perf.end(); - - // // Validate build exists and belongs to this game - // let build_id = unwrap_ref!(runtime.build_id).as_uuid(); - // let build_get = op!([ctx] build_get { - // build_ids: vec![build_id.into()], - // }) - // .await?; - // let build = unwrap!(build_get.builds.first()); - // let build_kind = 
unwrap!(backend::build::BuildKind::from_i32(build.kind)); - // let build_compression = unwrap!(backend::build::BuildCompression::from_i32( - // build.compression - // )); - - let ctx: OperationContext = ctx; - - // Generate the Docker job - - // let runtime = backend::ds::lobby_runtime::Docker { - // build_id: todo!(), - // args: docker_runtime.args, - // env_vars: todo!(), - // network_mode: todo!(), - // ports: todo!(), - // }; - // let _image_tag = &build.image_tag; - // let tier = backend::region::Tier { - // tier_name_id: todo!(), - // rivet_cores_numerator: todo!(), - // rivet_cores_denominator: todo!(), - // cpu: todo!(), - // memory: todo!(), - // memory_max: todo!(), - // disk: todo!(), - // bandwidth: todo!(), - // }; - - // let lobby_config = ctx.lobby_config_json.is_some(); - // let lobby_tags = !ctx.tags.is_empty(); - // let build_kind = backend::build::BuildKind::DockerImage; - // let build_compression = backend::build::BuildCompression::None; - - // IMPORTANT: This job spec must be deterministic. Do not pass in parameters - // that change with every run, such as the lobby ID. Ensure the - // `reuse_job_id` test passes when changing this function. - use nomad_client::models::*; - - let resources = unwrap!(ctx.resources.clone()); - - let tier_res = op!([ctx] tier_list { - region_ids: vec![datacenter_id.into()], - }) - .await?; - let tier_region = unwrap!(tier_res.regions.first()); - - // // runc-compatible resourcesd - // let cpu = resources.cpu_millicores; // Millicore (1/1000 of a core) - // let memory = resources.memory_mib * (1024 * 1024); // bytes - // // let memory_max = tier.memory_max * (1024 * 1024); // bytes - - // Find the first tier that has more CPU and memory than the requested - // resources - let mut tiers = tier_region.tiers.clone(); - - // Sort the tiers by cpu - tiers.sort_by(|a, b| a.cpu.cmp(&b.cpu)); - let tier = unwrap!(tiers.iter().find(|t| { - t.cpu as i32 >= resources.cpu_millicores && t.memory as i32 >= resources.memory_mib - })); - - // runc-compatible resources - let cpu = tier.rivet_cores_numerator as u64 * 1_000 / tier.rivet_cores_denominator as u64; // Millicore (1/1000 of a core) - let memory = tier.memory * (1024 * 1024); // bytes - let memory_max = tier.memory_max * (1024 * 1024); // bytes - - // dbg!(tier, cpu, memory, memory_max); - // panic!(); - - // Validate build exists and belongs to this game - let build_id = unwrap_ref!(ctx.image_id).as_uuid(); - let build_get = op!([ctx] build_get { - build_ids: vec![build_id.into()], - }) - .await?; - let build = unwrap!(build_get.builds.first()); - let build_kind = unwrap!(backend::build::BuildKind::from_i32(build.kind)); - let build_compression = unwrap!(backend::build::BuildCompression::from_i32( - build.compression - )); - - // // Nomad-compatible resources - // let resources = Resources { - // // TODO: Configure this per-provider - // // Nomad configures CPU based on MHz, not millicores. We havel to calculate the CPU share - // // by knowing how many MHz are on the client. - // CPU: if cpu < 1000 { - // Some((cpu - util_job::TASK_CLEANUP_CPU).try_into()?) - // } else { - // None - // }, - // cores: if cpu >= 1000 { - // Some((cpu / 1000) as i32) - // } else { - // None - // }, - // memory_mb: Some( - // (TryInto::::try_into(memory)? 
/ (1024 * 1024) - // - util_job::TASK_CLEANUP_MEMORY as i64) - // .try_into()?, - // ), - // // Allow oversubscribing memory by 50% of the reserved - // // memory if using less than the node's total memory - // memory_max_mb: Some( - // (TryInto::::try_into((memory as f64 * 1.5) as i64)? / (1024 * 1024) - // - util_job::TASK_CLEANUP_MEMORY as i64) - // .try_into()?, - // ), - // ..Resources::new() - // }; - - // Nomad-compatible resources - let nomad_resources = Resources { - // TODO: Configure this per-provider - // Nomad configures CPU based on MHz, not millicores. We havel to calculate the CPU share - // by knowing how many MHz are on the client. - CPU: if tier.rivet_cores_numerator < tier.rivet_cores_denominator { - Some((tier.cpu - util_job::TASK_CLEANUP_CPU as u64).try_into()?) - } else { - None - }, - cores: if tier.rivet_cores_numerator >= tier.rivet_cores_denominator { - Some((tier.rivet_cores_numerator / tier.rivet_cores_denominator) as i32) - } else { - None - }, - memory_mb: Some( - (TryInto::::try_into(memory)? / (1024 * 1024) - - util_job::TASK_CLEANUP_MEMORY as i64) - .try_into()?, - ), - // Allow oversubscribing memory by 50% of the reserved - // memory if using less than the node's total memory - memory_max_mb: None, - // Some( - // (TryInto::::try_into(memory_max)? / (1024 * 1024) - // - util_job::TASK_CLEANUP_MEMORY as i64) - // .try_into()?, - // ), - disk_mb: Some(tier.disk as i32), // TODO: Is this deprecated? - ..Resources::new() - }; - - // // let network_mode = unwrap!(LobbyRuntimeNetworkMode::from_i32(runtime.network_mode)); - - // Read ports - let decoded_ports = ctx - .network_ports - .clone() - .into_iter() - .map(|(port_label, port)| match port.routing { - Some(dynamic_servers::server_create::port::Routing::GameGuard(game_guard_routing)) => { - let target = unwrap!(port.internal_port) as u16; - - GlobalResult::Ok(DecodedPort { - label: port_label.clone(), - nomad_port_label: util_ds::format_nomad_port_label(&port_label), - target, - proxy_protocol: unwrap!(backend::ds::GameGuardProtocol::from_i32( - game_guard_routing.protocol - )) - .into(), - }) - } - Some(dynamic_servers::server_create::port::Routing::Host(_)) => { - todo!() - } - None => { - todo!() - } - }) - .collect::>>()?; - - // The container will set up port forwarding manually from the Nomad-defined ports on the host - // to the CNI container - let dynamic_ports = decoded_ports - .iter() - .map(|port| Port { - label: Some(port.nomad_port_label.clone()), - ..Port::new() - }) - .collect::>(); - - // Port mappings to pass to the container. Only used in bridge networking. - let cni_port_mappings = decoded_ports - .clone() - .into_iter() - .map(|port| { - json!({ - "HostPort": template_env_var_int(&nomad_host_port_env_var(&port.nomad_port_label)), - "ContainerPort": port.target, - "Protocol": TransportProtocol::from(port.proxy_protocol).as_cni_protocol(), - }) - }) - .collect::>(); - - let prepared_ports = ctx.network_ports.iter().map(|(label, port)| { - let mode = unwrap!(backend::ds::NetworkMode::from_i32(ctx.network_mode)); - let port_value = match mode { - // CNI will handle mapping the host port to the container port - backend::ds::NetworkMode::Bridge => unwrap!(port.internal_port).to_string(), - // The container needs to listen on the correct port - backend::ds::NetworkMode::Host => template_env_var(&nomad_host_port_env_var(&label)), - }; - - GlobalResult::Ok(Some(String::new())) - // TODO - // Port with the kebab case port key. Included for backward compatabiilty & for - // less confusion. 
- // Ok((format!("PORT_{}", port.label.replace('-', "_")), port_value)) - }); - - // Also see util_ds:consts::DEFAULT_ENV_KEYS - let mut env = Vec::<(String, String)>::new() - .into_iter() - // TODO - // .chain(if lobby_config { - // Some(( - // "RIVET_LOBBY_CONFIG".to_string(), - // template_env_var("NOMAD_META_LOBBY_CONFIG"), - // )) - // } else { - // None - // }) - // .chain(if lobby_tags { - // Some(( - // "RIVET_LOBBY_TAGS".to_string(), - // template_env_var("NOMAD_META_LOBBY_TAGS"), - // )) - // } else { - // None - // }) - .chain([( - "RIVET_API_ENDPOINT".to_string(), - util::env::origin_api().to_string(), - )]) - // Ports - // TODO - // .chain(prepared_ports) - // // Port ranges - // .chain( - // decoded_ports - // .iter() - // .filter_map(|port| { - // if let PortTarget::Range { min, max } = &port.target { - // let snake_port_label = port.label.replace('-', "_"); - // Some([ - // ( - // format!("PORT_RANGE_MIN_{}", snake_port_label), - // min.to_string(), - // ), - // ( - // format!("PORT_RANGE_MAX_{}", snake_port_label), - // max.to_string(), - // ), - // ]) - // } else { - // None - // } - // }) - // .flatten(), - // ) - .map(|(k, v)| format!("{k}={v}")) - .collect::>(); - env.sort(); - - let services = decoded_ports - .iter() - .map(|port| { - let service_name = format!("${{NOMAD_META_LOBBY_ID}}-{}", port.label); - GlobalResult::Ok(Some(Service { - provider: Some("nomad".into()), - name: Some(service_name), - tags: Some(vec!["game".into()]), - port_label: Some(port.nomad_port_label.clone()), - // checks: if TransportProtocol::from(port.proxy_protocol) - // == TransportProtocol::Tcp - // { - // Some(vec![ServiceCheck { - // name: Some(format!("{}-probe", port.label)), - // port_label: Some(port.nomad_port_label.clone()), - // _type: Some("tcp".into()), - // interval: Some(30_000_000_000), - // timeout: Some(2_000_000_000), - // ..ServiceCheck::new() - // }]) - // } else { - // None - // }, - ..Service::new() - })) - }) - .filter_map(|x| x.transpose()) - .collect::>>()?; - - // Generate the command to download and decompress the file - let mut download_cmd = r#"curl -Lf "$NOMAD_META_IMAGE_ARTIFACT_URL""#.to_string(); - match build_compression { - backend::build::BuildCompression::None => {} - backend::build::BuildCompression::Lz4 => { - download_cmd.push_str(" | lz4 -d -"); - } - } - - // MARK: Job spec - - let job_spec = Job { - _type: Some("batch".into()), - // constraints: Some(vec![Constraint { - // l_target: Some("${node.class}".into()), - // r_target: Some("job".into()), - // operand: Some("=".into()), - // }]), - parameterized_job: Some(Box::new(ParameterizedJobConfig { - payload: Some("forbidden".into()), - meta_required: Some(vec![ - "job_runner_binary_url".into(), - "vector_socket_addr".into(), - "image_artifact_url".into(), - "root_user_enabled".into(), - "runner".into(), - "user_env".into(), - ]), - meta_optional: Some(vec!["rivet_test_id".into()]), - })), - task_groups: Some(vec![TaskGroup { - name: Some(util_job::RUN_MAIN_TASK_NAME.into()), - constraints: None, // TODO: Use parameter meta to specify the hardware - affinities: None, // TODO: - // Allows for jobs to keep running and receiving players in the - // event of a disconnection from the Nomad server. 
- max_client_disconnect: Some(5 * 60 * 1_000_000_000), - restart_policy: Some(Box::new(RestartPolicy { - attempts: Some(0), - mode: Some("fail".into()), - ..RestartPolicy::new() - })), - reschedule_policy: Some(Box::new(ReschedulePolicy { - attempts: Some(0), - unlimited: Some(false), - ..ReschedulePolicy::new() - })), - networks: Some(vec![NetworkResource { - // The setup.sh script will set up a CNI network if using bridge networking - mode: Some("host".into()), - dynamic_ports: Some(dynamic_ports.clone()), - ..NetworkResource::new() - }]), - services: Some(services), - // Configure ephemeral disk for logs - ephemeral_disk: Some(Box::new(EphemeralDisk { - size_mb: Some(tier.disk as i32), - ..EphemeralDisk::new() - })), - tasks: Some(vec![ - // TODO - Task { - name: Some("runc-setup".into()), - lifecycle: Some(Box::new(TaskLifecycle { - hook: Some("prestart".into()), - sidecar: Some(false), - })), - driver: Some("raw_exec".into()), - config: Some({ - let mut x = HashMap::new(); - x.insert("command".into(), json!("${NOMAD_TASK_DIR}/setup.sh")); - x - }), - templates: Some(vec![ - Template { - embedded_tmpl: Some(include_str!("./scripts/setup.sh").replace( - "__HOST_NETWORK__", - match unwrap!(backend::ds::NetworkMode::from_i32(ctx.network_mode)) - { - backend::ds::NetworkMode::Bridge => "false", - backend::ds::NetworkMode::Host => "true", - }, - )), - dest_path: Some("${NOMAD_TASK_DIR}/setup.sh".into()), - perms: Some("744".into()), - ..Template::new() - }, - Template { - embedded_tmpl: Some( - include_str!("./scripts/setup_job_runner.sh").into(), - ), - dest_path: Some("${NOMAD_TASK_DIR}/setup_job_runner.sh".into()), - perms: Some("744".into()), - ..Template::new() - }, - Template { - embedded_tmpl: Some( - include_str!("./scripts/setup_oci_bundle.sh") - .replace("__DOWNLOAD_CMD__", &download_cmd) - .replace( - "__BUILD_KIND__", - match build_kind { - backend::build::BuildKind::DockerImage => { - "docker-image" - } - backend::build::BuildKind::OciBundle => "oci-bundle", - }, - ), - ), - dest_path: Some("${NOMAD_TASK_DIR}/setup_oci_bundle.sh".into()), - perms: Some("744".into()), - ..Template::new() - }, - Template { - embedded_tmpl: Some( - include_str!("./scripts/setup_cni_network.sh").into(), - ), - dest_path: Some("${NOMAD_TASK_DIR}/setup_cni_network.sh".into()), - perms: Some("744".into()), - ..Template::new() - }, - Template { - embedded_tmpl: Some(gen_oci_bundle_config( - cpu, memory, memory_max, env, - )?), - dest_path: Some( - "${NOMAD_ALLOC_DIR}/oci-bundle-config.base.json".into(), - ), - ..Template::new() - }, - Template { - embedded_tmpl: Some(inject_consul_env_template( - &serde_json::to_string(&cni_port_mappings)?, - )?), - dest_path: Some("${NOMAD_ALLOC_DIR}/cni-port-mappings.json".into()), - ..Template::new() - }, - ]), - resources: Some(Box::new(Resources { - CPU: Some(util_ds::RUNC_SETUP_CPU), - memory_mb: Some(util_ds::RUNC_SETUP_MEMORY), - ..Resources::new() - })), - log_config: Some(Box::new(LogConfig { - max_files: Some(4), - max_file_size_mb: Some(2), - disabled: None, - })), - ..Task::new() - }, - // TODO - Task { - name: Some(util_job::RUN_MAIN_TASK_NAME.into()), - driver: Some("raw_exec".into()), - config: Some({ - let mut x = HashMap::new(); - // This is downloaded in setup_job_runner.sh - x.insert("command".into(), json!("${NOMAD_ALLOC_DIR}/job-runner")); - x - }), - resources: Some(Box::new(nomad_resources.clone())), - // Intentionally high timeout. Killing jobs is handled manually with signals. 
- kill_timeout: Some(86400 * 1_000_000_000), - kill_signal: Some("SIGTERM".into()), - log_config: Some(Box::new(LogConfig { - max_files: Some(4), - max_file_size_mb: Some(4), - disabled: None, - })), - ..Task::new() - }, - // TODO: Remove - // Task { - // name: Some("runc-cleanup".into()), - // lifecycle: Some(Box::new(TaskLifecycle { - // hook: Some("poststop".into()), - // sidecar: Some(false), - // })), - // driver: Some("raw_exec".into()), - // config: Some({ - // let mut x = HashMap::new(); - // x.insert("command".into(), json!("${NOMAD_TASK_DIR}/cleanup.sh")); - // x - // }), - // templates: Some(vec![Template { - // embedded_tmpl: Some(include_str!("./scripts/cleanup.sh").into()), - // dest_path: Some("${NOMAD_TASK_DIR}/cleanup.sh".into()), - // perms: Some("744".into()), - // ..Template::new() - // }]), - // resources: Some(Box::new(Resources { - // CPU: Some(util_mm::RUNC_CLEANUP_CPU), - // memory_mb: Some(util_mm::RUNC_CLEANUP_MEMORY), - // ..Resources::new() - // })), - // log_config: Some(Box::new(LogConfig { - // max_files: Some(4), - // max_file_size_mb: Some(2), - // })), - // ..Task::new() - // }, - ]), - ..TaskGroup::new() - }]), - // Disables rescheduling in the event of a node drain - reschedule: Some(Box::new(ReschedulePolicy { - attempts: Some(0), - ..ReschedulePolicy::new() - })), - ..Job::new() - }; - - let job_spec_json = serde_json::to_string(&job_spec)?; - - // // Build proxied ports for each exposed port - // let proxied_ports = runtime - // .ports - // .iter() - // .filter(|port| { - // port.proxy_kind == backend::ds::lobby_runtime::ProxyKind::GameGuard as i32 - // && port.port_range.is_none() - // }) - // .flat_map(|port| { - // let mut ports = vec![direct_proxied_port(lobby_id, region_id, port)]; - // match backend::ds::lobby_runtime::ProxyProtocol::from_i32( - // port.proxy_protocol, - // ) { - // Some( - // backend::ds::lobby_runtime::ProxyProtocol::Http - // | backend::ds::lobby_runtime::ProxyProtocol::Https, - // ) => { - // ports.push(path_proxied_port(lobby_id, region_id, port)); - // } - // Some( - // backend::ds::lobby_runtime::ProxyProtocol::Udp - // | backend::ds::lobby_runtime::ProxyProtocol::Tcp - // | backend::ds::lobby_runtime::ProxyProtocol::TcpTls, - // ) - // | None => {} - // } - // ports - // }) - // .collect::>>()?; - - // submit_job(&job_spec_json, Some(region_id.into())); - - // Get the region to dispatch in - let region_res = op!([ctx] region_get { - region_ids: vec![datacenter_id.into()], - }) - .await?; - let region = unwrap!(region_res.regions.first()); - - // let region = region; - let base_job: Job = serde_json::from_str::(&job_spec_json)?; - - // Modify the job spec - let mut job = base_job; - // let region = region; - // Replace all job IDs with a placeholder value in order to create a - // deterministic job spec. 
- { - let job_id: &str = "__PLACEHOLDER__"; - let job: &mut nomad_client::models::Job = &mut job; - job.ID = Some(job_id.into()); - job.name = Some(job_id.into()); - }; - - ensure_eq!( - "batch", - unwrap_ref!(job._type).as_str(), - "only the batch job type is supported" - ); - - // Update the job's region - job.region = Some(region.nomad_region.clone()); - job.datacenters = Some(vec![region.nomad_datacenter.clone()]); - - // Validate that the job is parameterized - // TODO: clean up how stuff is put in here - let parameters = unwrap!(job.parameterized_job.as_mut(), "job not parameterized"); - - // Add run parameters - parameters.meta_required = Some({ - let mut meta_required = parameters.meta_required.clone().unwrap_or_default(); - meta_required.push("job_run_id".into()); - meta_required - }); - - // Get task group - let task_groups = unwrap!(job.task_groups.as_mut()); - ensure_eq!(1, task_groups.len(), "must have exactly 1 task group"); - let task_group = unwrap!(task_groups.first_mut()); - ensure_eq!( - task_group.name.as_deref(), - Some(RUN_MAIN_TASK_NAME), - "must have main task group" - ); - - // Ensure has main task - let main_task = unwrap!( - task_group - .tasks - .iter_mut() - .flatten() - .find(|x| x.name.as_deref() == Some(RUN_MAIN_TASK_NAME)), - "must have main task" - ); - ensure!( - main_task - .lifecycle - .as_ref() - .map_or(true, |x| x.hook.is_none()), - "main task must not have a lifecycle hook" - ); - - // Configure networks - let networks = unwrap!(task_group.networks.as_mut()); - ensure_eq!(1, networks.len(), "must have exactly 1 network"); - let network = unwrap!(networks.first_mut()); - // Disable IPv6 DNS since Docker doesn't support IPv6 yet - network.DNS = Some(Box::new(nomad_client::models::DnsConfig { - servers: Some(vec![ - // Google - "8.8.8.8".into(), - "8.8.4.4".into(), - "2001:4860:4860::8888".into(), - "2001:4860:4860::8844".into(), - ]), - // Disable default search from the host - searches: Some(Vec::new()), - options: Some(vec!["rotate".into(), "edns0".into(), "attempts:2".into()]), - ..nomad_client::models::DnsConfig::new() - })); - - // Disable rescheduling, since job-run doesn't support this at the moment - task_group.reschedule_policy = Some(Box::new(nomad_client::models::ReschedulePolicy { - attempts: Some(0), - unlimited: Some(false), - ..nomad_client::models::ReschedulePolicy::new() - })); - - // Disable restarts. Our Nomad monitoring workflow doesn't support restarts - // at the moment. 
- task_group.restart_policy = Some(Box::new(nomad_client::models::RestartPolicy { - attempts: Some(0), - // unlimited: Some(false), - ..nomad_client::models::RestartPolicy::new() - })); - - // MARK: Cleanup task - - // Add cleanup task - let tasks: &mut Vec = unwrap!(task_group.tasks.as_mut()); - tasks.push({ - Task { - name: Some(RUN_CLEANUP_TASK_NAME.into()), - lifecycle: Some(Box::new(TaskLifecycle { - hook: Some("poststop".into()), - sidecar: Some(false), - })), - driver: Some("docker".into()), - config: Some({ - let mut config = HashMap::new(); - - config.insert("image".into(), json!("python:3.10.7-alpine3.16")); - config.insert( - "args".into(), - json!([ - "/bin/sh", - "-c", - "apk add --no-cache ca-certificates && python3 /local/cleanup.py" - ]), - ); - - config - }), - templates: Some(vec![Template { - dest_path: Some("local/cleanup.py".into()), - embedded_tmpl: Some(formatdoc!( - r#" - import ssl - import urllib.request, json, os, mimetypes, sys - - BEARER = '{{{{env "NOMAD_META_JOB_RUN_TOKEN"}}}}' - - ctx = ssl.create_default_context() - - def eprint(*args, **kwargs): - print(*args, file=sys.stderr, **kwargs) - - def req(method, url, data = None, headers = {{}}): - request = urllib.request.Request( - url=url, - data=data, - method=method, - headers=headers - ) - - try: - res = urllib.request.urlopen(request, context=ctx) - assert res.status == 200, f"Received non-200 status: {{res.status}}" - return res - except urllib.error.HTTPError as err: - eprint(f"HTTP Error ({{err.code}} {{err.reason}}):\n\nBODY:\n{{err.read().decode()}}\n\nHEADERS:\n{{err.headers}}") - - raise err - - print(f'\n> Cleaning up job') - - res_json = None - with req('POST', f'{origin_api}/job/runs/cleanup', - data = json.dumps({{}}).encode(), - headers = {{ - 'Authorization': f'Bearer {{BEARER}}', - 'Content-Type': 'application/json' - }} - ) as res: - res_json = json.load(res) - - - print('\n> Finished') - "#, - origin_api = util::env::origin_api(), - )), - ..Template::new() - }]), - resources: Some(Box::new(Resources { - CPU: Some(TASK_CLEANUP_CPU), - memory_mb: Some(TASK_CLEANUP_MEMORY), - ..Resources::new() - })), - log_config: Some(Box::new(LogConfig { - max_files: Some(4), - max_file_size_mb: Some(2), - disabled: Some(false), - })), - ..Task::new() - } - }); - - // Derive jobspec hash - // - // We serialize the JSON to a canonical string then take a SHA hash of the output. 
- let job_cjson_str = match cjson::to_string(&job) { - Ok(x) => x, - Err(err) => { - tracing::error!(?err, "cjson serialization failed"); - bail!("cjson serialization failed") - } - }; - let job_hash = Sha256::digest(job_cjson_str.as_bytes()); - let job_hash_str = hex::encode(job_hash); - - // Generate new job ID - let job_id = format!( - "job-{hash}:{region}", - hash = &job_hash_str[0..12], - region = region.name_id - ); - { - let job_id: &str = &job_id; - let job: &mut nomad_client::models::Job = &mut job; - job.ID = Some(job_id.into()); - job.name = Some(job_id.into()); - }; - - // Submit the job - tracing::info!("submitting job"); - - // dbg!( - // // &NEW_NOMAD_CONFIG, - // &job_id, - // nomad_client::models::JobRegisterRequest { - // job: Some(Box::new(job.clone())), - // ..nomad_client::models::JobRegisterRequest::new() - // }, - // Some(®ion.nomad_region), - // ); - // panic!(); - - // pub struct Configuration { - // pub base_path: String, - // pub user_agent: Option, - // pub client: reqwest::Client, - // pub basic_auth: Option, - // pub oauth_access_token: Option, - // pub bearer_access_token: Option, - // pub api_key: Option, - // // TODO: take an oauth2 token source, similar to the go one - // } - - // dbg!( - // &NEW_NOMAD_CONFIG.base_path, - // &NEW_NOMAD_CONFIG.user_agent, - // &NEW_NOMAD_CONFIG.client, - // &NEW_NOMAD_CONFIG.basic_auth, - // &NEW_NOMAD_CONFIG.oauth_access_token, - // &NEW_NOMAD_CONFIG.bearer_access_token, - // &NEW_NOMAD_CONFIG.api_key, - // ); - // panic!(); - - let a = nomad_client::apis::jobs_api::post_job( - &NEW_NOMAD_CONFIG, - &job_id, - nomad_client::models::JobRegisterRequest { - job: Some(Box::new(job)), - ..nomad_client::models::JobRegisterRequest::new() - }, - Some(®ion.nomad_region), - None, - None, - None, - ) - .await?; - dbg!(a); - - // let build_res = op!([ctx] build_get { - // build_ids: vec![build_id.into()], - // }) - // .await?; - // let build = build_res.builds.first(); - // let build = unwrap_ref!(build); - // let build_kind = unwrap!(backend::build::BuildKind::from_i32(build.kind)); - // let build_compression = unwrap!(backend::build::BuildCompression::from_i32( - // build.compression - // )); - let upload_id_proto = unwrap!(build.upload_id); - - let upload_res = op!([ctx] upload_get { - upload_ids: vec![upload_id_proto], - }) - .await?; - let upload = unwrap!(upload_res.uploads.first()); - - // Get provider - let proto_provider = unwrap!( - backend::upload::Provider::from_i32(upload.provider), - "invalid upload provider" - ); - let provider = match proto_provider { - backend::upload::Provider::Minio => s3_util::Provider::Minio, - backend::upload::Provider::Backblaze => s3_util::Provider::Backblaze, - backend::upload::Provider::Aws => s3_util::Provider::Aws, - }; - - let file_name = util_build::file_name(build_kind, build_compression); - - let mm_lobby_delivery_method = unwrap!( - backend::cluster::BuildDeliveryMethod::from_i32(region.build_delivery_method), - "invalid datacenter build delivery method" - ); - let image_artifact_url = match mm_lobby_delivery_method { - backend::cluster::BuildDeliveryMethod::S3Direct => { - tracing::info!("using s3 direct delivery"); - - let bucket = "bucket-build"; - - // Build client - let s3_client = - s3_util::Client::from_env_opt(bucket, provider, s3_util::EndpointKind::External) - .await?; - - let upload_id = unwrap_ref!(upload.upload_id).as_uuid(); - let presigned_req = s3_client - .get_object() - .bucket(s3_client.bucket()) - .key(format!("{upload_id}/{file_name}")) - .presigned( - 
s3_util::aws_sdk_s3::presigning::config::PresigningConfig::builder() - .expires_in(std::time::Duration::from_secs(15 * 60)) - .build()?, - ) - .await?; - - let addr = presigned_req.uri().clone(); - - let addr_str = addr.to_string(); - tracing::info!(addr = %addr_str, "resolved artifact s3 presigned request"); - - addr_str - } - backend::cluster::BuildDeliveryMethod::TrafficServer => { - tracing::info!("using traffic server delivery"); - - let region_id = unwrap_ref!(region.region_id).as_uuid(); - - // Hash build so that the ATS server that we download the build from is always the same one. This - // improves cache hit rates and reduces download times. - let build_id = unwrap_ref!(build.build_id).as_uuid(); - let mut hasher = DefaultHasher::new(); - hasher.write(build_id.as_bytes()); - let hash = hasher.finish() as i64; - - // NOTE: The algorithm for choosing the vlan_ip from the hash should match the one in - // prewarm_ats.rs @ prewarm_ats_cache - // Get vlan ip from build id hash for consistent routing - let (ats_vlan_ip,) = sql_fetch_one!( - [ctx, (IpAddr,)] - " - WITH sel AS ( - -- Select candidate vlan ips - SELECT - vlan_ip - FROM db_cluster.servers - WHERE - datacenter_id = $1 AND - pool_type2 = $2 AND - vlan_ip IS NOT NULL AND - install_complete_ts IS NOT NULL AND - drain_ts IS NULL AND - cloud_destroy_ts IS NULL - ) - SELECT vlan_ip - FROM sel - -- Use mod to make sure the hash stays within bounds - OFFSET abs($3 % GREATEST((SELECT COUNT(*) FROM sel), 1)) - LIMIT 1 - ", - // NOTE: region_id is just the old name for datacenter_id - ®ion_id, - serde_json::to_string(&cluster::types::PoolType::Ats)?, - hash, - ) - .await?; - - let upload_id = unwrap_ref!(upload.upload_id).as_uuid(); - let addr = format!( - "http://{vlan_ip}:8080/s3-cache/{provider}/{namespace}-bucket-build/{upload_id}/{file_name}", - vlan_ip = ats_vlan_ip, - provider = heck::KebabCase::to_kebab_case(provider.as_str()), - namespace = util::env::namespace(), - upload_id = upload_id, - ); - - tracing::info!(%addr, "resolved artifact s3 url"); - - addr - } - }; - - let job_runner_binary_url = resolve_job_runner_binary_url(&ctx, region).await?; - - // MARK: Parameters - - let parameters: Vec = vec![ - backend::job::Parameter { - key: "job_runner_binary_url".into(), - value: job_runner_binary_url, - }, - backend::job::Parameter { - key: "vector_socket_addr".into(), - value: "127.0.0.1:5021".to_string(), - }, - backend::job::Parameter { - key: "image_artifact_url".into(), - value: image_artifact_url.to_string(), - }, - backend::job::Parameter { - key: "root_user_enabled".into(), - // TODO make table dynamic host, make reference so that we can find - // other locations - value: "0".into(), - }, - backend::job::Parameter { - key: "runner".into(), - value: "dynamic_servers".into(), - }, - backend::job::Parameter { - key: "user_env".into(), - // other locations - value: unwrap!(serde_json::to_string( - &ctx.environment - .iter() - .map(|(k, v)| (k.clone(), escape_go_template(v))) - .collect::>(), - )), - }, - ] - .into_iter() - // .chain(ctx.parameters.clone()) - // .chain(port_parameters) - .collect(); - - let job_params: Vec<(String, String)> = vec![("job_run_id".into(), server_id.to_string())]; - - // MARK: Insert into db - sql_execute!( - [ctx] - " - INSERT INTO - db_ds.server_nomad (server_id) - VALUES - ($1) - ", - server_id, - ) - .await?; - - // MARK: Dispatch - let dispatch_res = nomad_client::apis::jobs_api::post_job_dispatch( - &NEW_NOMAD_CONFIG, - &job_id, - nomad_client::models::JobDispatchRequest { - job_id: 
Some(job_id.to_string()), - payload: None, - meta: Some( - parameters - .iter() - .map(|p| (p.key.clone(), p.value.clone())) - .chain(job_params.into_iter()) - .collect::>(), - ), - }, - Some(®ion.nomad_region), - None, - None, - None, - ) - .await; - let nomad_dispatched_job_id: Option = match dispatch_res { - Ok(dispatch_res) => { - // We will use the dispatched job ID to identify this allocation for the future. We can't use - // eval ID, since that changes if we mutate the allocation (i.e. try to stop it). - let nomad_dispatched_job_id = unwrap_ref!(dispatch_res.dispatched_job_id); - GlobalResult::Ok(Some(nomad_dispatched_job_id.clone())) - } - Err(err) => { - tracing::error!(?err, "failed to dispatch job"); - Ok(None) - } - }?; - - // MARK: Write to db after run - sql_execute!( - [ctx] - " - UPDATE - db_ds.server_nomad - SET - nomad_dispatched_job_id = $2 - WHERE - server_id = $1 - ", - server_id, - unwrap!(nomad_dispatched_job_id), - ) - .await?; - - // Ok(job_id); - - // msg!([ctx] job_run::msg::create(run_id) { - // run_id: Some(run_id.into()), - // region_id: Some(region_id.into()), - - // job_spec_json: job_spec_json, - // proxied_ports: proxied_ports, - // ..Default::default() - // }) - // .await?; - - // Build response ports - let network_ports = ctx - .network_ports - .iter() - .map(|(port_label, port)| { - GlobalResult::Ok(( - port_label.clone(), - backend::ds::Port { - internal_port: port.internal_port, - public_hostname: None, - public_port: None, - routing: Some(match unwrap!(port.routing.clone()) { - dynamic_servers::server_create::port::Routing::GameGuard(x) => { - backend::ds::port::Routing::GameGuard(x) - } - dynamic_servers::server_create::port::Routing::Host(x) => { - backend::ds::port::Routing::Host(x) - } - }), - }, - )) - }) - .collect::>>()?; - - Ok(dynamic_servers::server_create::Response { - server: Some(backend::ds::Server { - server_id: Some(server_id.into()), - env_id: Some(env_id.into()), - datacenter_id: Some(datacenter_id.into()), - cluster_id: Some(cluster_id.into()), - tags: ctx.tags.clone(), - resources: Some(backend::ds::ServerResources { - cpu_millicores: resources.cpu_millicores, - memory_mib: resources.memory_mib, - }), - kill_timeout_ms: ctx.kill_timeout_ms, - create_ts, - start_ts: None, - connectable_ts: None, - destroy_ts: None, - args: ctx.args.clone(), - environment: ctx.environment.clone(), - image_id: ctx.image_id, - network_mode: ctx.network_mode, - network_ports, - }), - }) -} - -/// Determines if a Nomad job is dispatched from our run. -/// -/// We use this when monitoring Nomad in order to determine which events to -/// pay attention to. -pub fn is_nomad_job_run(job_id: &str) -> bool { - job_id.starts_with("job-") && job_id.contains("/dispatch-") -} - -// Timeout from when `stop_job` is called and the kill signal is sent -pub const JOB_STOP_TIMEOUT: Duration = Duration::from_secs(30); - -pub const TASK_CLEANUP_CPU: i32 = 50; - -// Query Prometheus with: -// -// ``` -// max(nomad_client_allocs_memory_max_usage{ns="prod",exported_job=~"job-.*",task="run-cleanup"}) / 1000 / 1000 -// ``` -// -// 13.5 MB baseline, 29 MB highest peak -pub const TASK_CLEANUP_MEMORY: i32 = 32; - -pub const RUN_MAIN_TASK_NAME: &str = "main"; -pub const RUN_CLEANUP_TASK_NAME: &str = "run-cleanup"; - -// dispatch, need alloc, nomad monitor stuff, lots of stuff here, means that -// jobs can't be destroyed, maybe by job id? - -/// Generates a presigned URL for the job runner binary. 
-#[tracing::instrument] -async fn resolve_job_runner_binary_url( - ctx: &OperationContext, - region: &backend::region::Region, -) -> GlobalResult { - // Get provider - let provider = s3_util::Provider::default()?; - - let file_name = std::env::var("JOB_RUNNER_BINARY_KEY")?; - - // Build URL - let mm_lobby_delivery_method = unwrap!( - backend::cluster::BuildDeliveryMethod::from_i32(region.build_delivery_method), - "invalid datacenter build delivery method" - ); - match mm_lobby_delivery_method { - backend::cluster::BuildDeliveryMethod::S3Direct => { - tracing::info!("job runner using s3 direct delivery"); - - // Build client - let s3_client = s3_util::Client::from_env_opt( - "bucket-infra-artifacts", - provider, - s3_util::EndpointKind::External, - ) - .await?; - let presigned_req = s3_client - .get_object() - .bucket(s3_client.bucket()) - .key(file_name) - .presigned( - s3_util::aws_sdk_s3::presigning::config::PresigningConfig::builder() - .expires_in(std::time::Duration::from_secs(15 * 60)) - .build()?, - ) - .await?; - - let addr = presigned_req.uri().clone(); - - let addr_str = addr.to_string(); - tracing::info!(addr = %addr_str, "resolved job runner presigned request"); - - Ok(addr_str) - } - backend::cluster::BuildDeliveryMethod::TrafficServer => { - tracing::info!("job runner using traffic server delivery"); - - let region_id = unwrap_ref!(region.region_id).as_uuid(); - - // Choose a random ATS node to pull from - let (ats_vlan_ip,) = sql_fetch_one!( - [ctx, (IpAddr,)] - " - WITH sel AS ( - -- Select candidate vlan ips - SELECT - vlan_ip - FROM db_cluster.servers - WHERE - datacenter_id = $1 AND - pool_type2 = $2 AND - vlan_ip IS NOT NULL AND - install_complete_ts IS NOT NULL AND - drain_ts IS NULL AND - cloud_destroy_ts IS NULL - ) - SELECT vlan_ip - FROM sel - ORDER BY random() - LIMIT 1 - ", - // NOTE: region_id is just the old name for datacenter_id - ®ion_id, - serde_json::to_string(&cluster::types::PoolType::Ats)?, - ) - .await?; - - let addr = format!( - "http://{vlan_ip}:8080/s3-cache/{provider}/{namespace}-bucket-infra-artifacts/{file_name}", - vlan_ip = ats_vlan_ip, - provider = heck::KebabCase::to_kebab_case(provider.as_str()), - namespace = util::env::namespace(), - ); - - tracing::info!(%addr, "resolved artifact s3 url"); - - Ok(addr) - } - } -} diff --git a/svc/pkg/ds/ops/server-create/src/nomad_job.rs b/svc/pkg/ds/ops/server-create/src/nomad_job.rs deleted file mode 100644 index bf683aaa25..0000000000 --- a/svc/pkg/ds/ops/server-create/src/nomad_job.rs +++ /dev/null @@ -1,613 +0,0 @@ -use std::{collections::HashMap, convert::TryInto}; - -use chirp_worker::prelude::*; -use proto::backend::{self, matchmaker::lobby_runtime::NetworkMode as LobbyRuntimeNetworkMode}; -use regex::Regex; -use serde_json::json; - -use crate::{oci_config, util_job}; - -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -pub enum TransportProtocol { - Tcp, - Udp, -} - -impl From for TransportProtocol { - fn from(proxy_protocol: ProxyProtocol) -> Self { - match proxy_protocol { - ProxyProtocol::Http - | ProxyProtocol::Https - | ProxyProtocol::Tcp - | ProxyProtocol::TcpTls => Self::Tcp, - ProxyProtocol::Udp => Self::Udp, - } - } -} - -impl TransportProtocol { - pub fn as_cni_protocol(&self) -> &'static str { - match self { - Self::Tcp => "tcp", - Self::Udp => "udp", - } - } -} - -#[derive(Clone)] -pub enum ProxyProtocol { - Http, - Https, - Tcp, - TcpTls, - Udp, -} - -impl From for ProxyProtocol { - fn from(protocol: backend::ds::GameGuardProtocol) -> Self { - match protocol { - 
backend::ds::GameGuardProtocol::Http => Self::Http, - backend::ds::GameGuardProtocol::Https => Self::Https, - backend::ds::GameGuardProtocol::Tcp => Self::Tcp, - backend::ds::GameGuardProtocol::TcpTls => Self::TcpTls, - backend::ds::GameGuardProtocol::Udp => Self::Udp, - } - } -} - -/// Helper structure for parsing all of the runtime's ports before building the -/// config. -#[derive(Clone)] -pub struct DecodedPort { - pub label: String, - pub nomad_port_label: String, - pub target: u16, - pub proxy_protocol: ProxyProtocol, -} - -// pub fn gen_lobby_docker_job( -// runtime: &backend::matchmaker::lobby_runtime::Docker, -// _image_tag: &str, -// tier: &backend::region::Tier, -// lobby_config: bool, -// lobby_tags: bool, -// build_kind: backend::build::BuildKind, -// build_compression: backend::build::BuildCompression, -// ) -> GlobalResult { -// // IMPORTANT: This job spec must be deterministic. Do not pass in parameters -// // that change with every run, such as the lobby ID. Ensure the -// // `reuse_job_id` test passes when changing this function. -// use nomad_client::models::*; -// -// // runc-compatible resources -// let cpu = tier.rivet_cores_numerator as u64 * 1_000 / tier.rivet_cores_denominator as u64; // Millicore (1/1000 of a core) -// let memory = tier.memory * (1024 * 1024); // bytes -// let memory_max = tier.memory_max * (1024 * 1024); // bytes -// -// // Nomad-compatible resources -// let resources = Resources { -// // TODO: Configure this per-provider -// // Nomad configures CPU based on MHz, not millicores. We havel to calculate the CPU share -// // by knowing how many MHz are on the client. -// CPU: if tier.rivet_cores_numerator < tier.rivet_cores_denominator { -// Some((tier.cpu - util_job::TASK_CLEANUP_CPU as u64).try_into()?) -// } else { -// None -// }, -// cores: if tier.rivet_cores_numerator >= tier.rivet_cores_denominator { -// Some((tier.rivet_cores_numerator / tier.rivet_cores_denominator) as i32) -// } else { -// None -// }, -// memory_mb: Some( -// (TryInto::::try_into(memory)? / (1024 * 1024) -// - util_job::TASK_CLEANUP_MEMORY as i64) -// .try_into()?, -// ), -// // Allow oversubscribing memory by 50% of the reserved -// // memory if using less than the node's total memory -// memory_max_mb: Some( -// (TryInto::::try_into(memory_max)? / (1024 * 1024) -// - util_job::TASK_CLEANUP_MEMORY as i64) -// .try_into()?, -// ), -// disk_mb: Some(tier.disk as i32), // TODO: Is this deprecated? -// ..Resources::new() -// }; -// -// let network_mode = unwrap!(LobbyRuntimeNetworkMode::from_i32(runtime.network_mode)); -// -// // Read ports -// let decoded_ports = runtime -// .ports -// .iter() -// .map(|port| { -// let target = unwrap!(port.target_port) as u16; -// -// // TODO -// // GlobalResult::Ok(DecodedPort { -// // label: port.label.clone(), -// // nomad_port_label: util_mm::format_nomad_port_label(&port.label), -// // target, -// // proxy_protocol: unwrap!(ProxyProtocol::from_i32(port.proxy_protocol)), -// // }) -// GlobalResult::Ok(DecodedPort { -// label: port.label.clone(), -// nomad_port_label: String::new(), -// target, -// proxy_protocol: ProxyProtocol::Http, -// }) -// }) -// .collect::>>()?; -// -// // The container will set up port forwarding manually from the Nomad-defined ports on the host -// // to the CNI container -// let dynamic_ports = decoded_ports -// .iter() -// .map(|port| Port { -// label: Some(port.nomad_port_label.clone()), -// ..Port::new() -// }) -// .collect::>(); -// -// // Port mappings to pass to the container. 
Only used in bridge networking. -// let cni_port_mappings = decoded_ports -// .iter() -// .map(|port| { -// json!({ -// "HostPort": template_env_var_int(&nomad_host_port_env_var(&port.nomad_port_label)), -// "ContainerPort": port.target, -// // TODO -// // "Protocol": TransportProtocol::from(port.proxy_protocol).as_cni_protocol(), -// "Protocol": TransportProtocol::Udp.as_cni_protocol(), -// }) -// }) -// .collect::>(); -// -// // Also see util_mm:consts::DEFAULT_ENV_KEYS -// let mut my_env = runtime -// .env_vars -// .iter() -// .map(|v| (v.key.clone(), escape_go_template(&v.value))) -// .chain(if lobby_config { -// Some(( -// "RIVET_LOBBY_CONFIG".to_string(), -// template_env_var("NOMAD_META_LOBBY_CONFIG"), -// )) -// } else { -// None -// }) -// .chain(if lobby_tags { -// Some(( -// "RIVET_LOBBY_TAGS".to_string(), -// template_env_var("NOMAD_META_LOBBY_TAGS"), -// )) -// } else { -// None -// }) -// .chain([( -// "RIVET_API_ENDPOINT".to_string(), -// util::env::origin_api().to_string(), -// )]) -// .chain( -// // DEPRECATED: -// [ -// ("RIVET_CHAT_API_URL", "chat"), -// ("RIVET_GROUP_API_URL", "group"), -// ("RIVET_IDENTITY_API_URL", "identity"), -// ("RIVET_KV_API_URL", "kv"), -// ("RIVET_MATCHMAKER_API_URL", "matchmaker"), -// ] -// .iter() -// .filter(|_| util::env::support_deprecated_subdomains()) -// .map(|(env, service)| { -// ( -// env.to_string(), -// util::env::origin_api().replace("://", &format!("://{}.", service)), -// ) -// }), -// ) -// .chain( -// [ -// ( -// "RIVET_NAMESPACE_NAME", -// template_env_var("NOMAD_META_NAMESPACE_NAME"), -// ), -// ( -// "RIVET_NAMESPACE_ID", -// template_env_var("NOMAD_META_NAMESPACE_ID"), -// ), -// ( -// "RIVET_VERSION_NAME", -// template_env_var("NOMAD_META_VERSION_NAME"), -// ), -// ( -// "RIVET_VERSION_ID", -// template_env_var("NOMAD_META_VERSION_ID"), -// ), -// ( -// "RIVET_GAME_MODE_ID", -// template_env_var("NOMAD_META_LOBBY_GROUP_ID"), -// ), -// ( -// "RIVET_GAME_MODE_NAME", -// template_env_var("NOMAD_META_LOBBY_GROUP_NAME"), -// ), -// ("RIVET_LOBBY_ID", template_env_var("NOMAD_META_LOBBY_ID")), -// ("RIVET_TOKEN", template_env_var("NOMAD_META_LOBBY_TOKEN")), -// ("RIVET_REGION_ID", template_env_var("NOMAD_META_REGION_ID")), -// ( -// "RIVET_REGION_NAME", -// template_env_var("NOMAD_META_REGION_NAME"), -// ), -// ( -// "RIVET_MAX_PLAYERS_NORMAL", -// template_env_var("NOMAD_META_MAX_PLAYERS_NORMAL"), -// ), -// ( -// "RIVET_MAX_PLAYERS_DIRECT", -// template_env_var("NOMAD_META_MAX_PLAYERS_DIRECT"), -// ), -// ( -// "RIVET_MAX_PLAYERS_PARTY", -// template_env_var("NOMAD_META_MAX_PLAYERS_PARTY"), -// ), -// // CPU in millicores -// // -// // < 1000 is for fractional CPU -// // > 1000 is for whole CPU, will always be 1000 increments -// ("RIVET_CPU", cpu.to_string()), -// // Memory in bytes -// ("RIVET_MEMORY", memory.to_string()), -// // Memory in bytes for oversubscription -// ("RIVET_MEMORY_OVERSUBSCRIBE", memory_max.to_string()), -// // DEPRECATED: -// ( -// "RIVET_LOBBY_TOKEN", -// template_env_var("NOMAD_META_LOBBY_TOKEN"), -// ), -// ( -// "RIVET_LOBBY_GROUP_ID", -// template_env_var("NOMAD_META_LOBBY_GROUP_ID"), -// ), -// ( -// "RIVET_LOBBY_GROUP_NAME", -// template_env_var("NOMAD_META_LOBBY_GROUP_NAME"), -// ), -// ] -// .iter() -// .map(|(k, v)| (k.to_string(), v.to_string())), -// ) -// // Ports -// .chain(decoded_ports.iter().map(|port| { -// let port_value = match network_mode { -// // CNI will handle mapping the host port to the container port -// LobbyRuntimeNetworkMode::Bridge => port.target.to_string(), -// // The 
container needs to listen on the correct port -// LobbyRuntimeNetworkMode::Host => { -// template_env_var(&nomad_host_port_env_var(&port.nomad_port_label)) -// } -// }; -// -// // Port with the kebab case port key. Included for backward compatabiilty & for -// // less confusion. -// (format!("PORT_{}", port.label.replace('-', "_")), port_value) -// })) -// .map(|(k, v)| format!("{k}={v}")) -// .collect::>(); -// my_env.sort(); -// tracing::info!(?my_env, "my env"); -// -// let services = decoded_ports -// .iter() -// .map(|port| { -// let service_name = format!("${{NOMAD_META_LOBBY_ID}}-{}", port.label); -// GlobalResult::Ok(Some(Service { -// provider: Some("nomad".into()), -// name: Some(service_name), -// tags: Some(vec!["game".into()]), -// port_label: Some(port.nomad_port_label.clone()), -// // checks: if TransportProtocol::from(port.proxy_protocol) -// // == TransportProtocol::Tcp -// // { -// // Some(vec![ServiceCheck { -// // name: Some(format!("{}-probe", port.label)), -// // port_label: Some(port.nomad_port_label.clone()), -// // _type: Some("tcp".into()), -// // interval: Some(30_000_000_000), -// // timeout: Some(2_000_000_000), -// // ..ServiceCheck::new() -// // }]) -// // } else { -// // None -// // }, -// ..Service::new() -// })) -// }) -// .filter_map(|x| x.transpose()) -// .collect::>>()?; -// -// // Generate the command to download and decompress the file -// let mut download_cmd = r#"curl -Lf "$NOMAD_META_IMAGE_ARTIFACT_URL""#.to_string(); -// match build_compression { -// backend::build::BuildCompression::None => {} -// backend::build::BuildCompression::Lz4 => { -// download_cmd.push_str(" | lz4 -d -"); -// } -// } -// -// Ok(Job { -// _type: Some("batch".into()), -// constraints: Some(vec![Constraint { -// l_target: Some("${node.class}".into()), -// r_target: Some("job".into()), -// operand: Some("=".into()), -// }]), -// parameterized_job: Some(Box::new(ParameterizedJobConfig { -// payload: Some("forbidden".into()), -// meta_required: Some(vec![ -// "job_runner_binary_url".into(), -// "vector_socket_addr".into(), -// "image_artifact_url".into(), -// "namespace_id".into(), -// "namespace_name".into(), -// "version_id".into(), -// "version_name".into(), -// "lobby_group_id".into(), -// "lobby_group_name".into(), -// "lobby_id".into(), -// "lobby_token".into(), -// "lobby_config".into(), -// "lobby_tags".into(), -// "region_id".into(), -// "region_name".into(), -// "max_players_normal".into(), -// "max_players_direct".into(), -// "max_players_party".into(), -// "root_user_enabled".into(), -// ]), -// meta_optional: Some(vec!["rivet_test_id".into()]), -// })), -// task_groups: Some(vec![TaskGroup { -// name: Some(util_job::RUN_MAIN_TASK_NAME.into()), -// constraints: None, // TODO: Use parameter meta to specify the hardware -// affinities: None, // TODO: -// // Allows for jobs to keep running and receiving players in the -// // event of a disconnection from the Nomad server. 
-// max_client_disconnect: Some(5 * 60 * 1_000_000_000), -// restart_policy: Some(Box::new(RestartPolicy { -// attempts: Some(0), -// mode: Some("fail".into()), -// ..RestartPolicy::new() -// })), -// reschedule_policy: Some(Box::new(ReschedulePolicy { -// attempts: Some(0), -// unlimited: Some(false), -// ..ReschedulePolicy::new() -// })), -// networks: Some(vec![NetworkResource { -// // The setup.sh script will set up a CNI network if using bridge networking -// mode: Some("host".into()), -// dynamic_ports: Some(dynamic_ports), -// ..NetworkResource::new() -// }]), -// services: Some(services), -// // Configure ephemeral disk for logs -// ephemeral_disk: Some(Box::new(EphemeralDisk { -// size_mb: Some(tier.disk as i32), -// ..EphemeralDisk::new() -// })), -// tasks: Some(vec![ -// Task { -// name: Some("runc-setup".into()), -// lifecycle: Some(Box::new(TaskLifecycle { -// hook: Some("prestart".into()), -// sidecar: Some(false), -// })), -// driver: Some("raw_exec".into()), -// config: Some({ -// let mut x = HashMap::new(); -// x.insert("command".into(), json!("${NOMAD_TASK_DIR}/setup.sh")); -// x -// }), -// templates: Some(vec![ -// Template { -// embedded_tmpl: Some(include_str!("./scripts/setup.sh").replace( -// "__HOST_NETWORK__", -// match network_mode { -// LobbyRuntimeNetworkMode::Bridge => "false", -// LobbyRuntimeNetworkMode::Host => "true", -// }, -// )), -// dest_path: Some("${NOMAD_TASK_DIR}/setup.sh".into()), -// perms: Some("744".into()), -// ..Template::new() -// }, -// Template { -// embedded_tmpl: Some( -// include_str!("./scripts/setup_job_runner.sh").into(), -// ), -// dest_path: Some("${NOMAD_TASK_DIR}/setup_job_runner.sh".into()), -// perms: Some("744".into()), -// ..Template::new() -// }, -// Template { -// embedded_tmpl: Some( -// include_str!("./scripts/setup_oci_bundle.sh") -// .replace("__DOWNLOAD_CMD__", &download_cmd) -// .replace( -// "__BUILD_KIND__", -// match build_kind { -// backend::build::BuildKind::DockerImage => { -// "docker-image" -// } -// backend::build::BuildKind::OciBundle => "oci-bundle", -// }, -// ), -// ), -// dest_path: Some("${NOMAD_TASK_DIR}/setup_oci_bundle.sh".into()), -// perms: Some("744".into()), -// ..Template::new() -// }, -// Template { -// embedded_tmpl: Some( -// include_str!("./scripts/setup_cni_network.sh").into(), -// ), -// dest_path: Some("${NOMAD_TASK_DIR}/setup_cni_network.sh".into()), -// perms: Some("744".into()), -// ..Template::new() -// }, -// Template { -// embedded_tmpl: Some(gen_oci_bundle_config( -// cpu, memory, memory_max, my_env, -// )?), -// dest_path: Some( -// "${NOMAD_ALLOC_DIR}/oci-bundle-config.base.json".into(), -// ), -// ..Template::new() -// }, -// Template { -// embedded_tmpl: Some(inject_consul_env_template( -// &serde_json::to_string(&cni_port_mappings)?, -// )?), -// dest_path: Some("${NOMAD_ALLOC_DIR}/cni-port-mappings.json".into()), -// ..Template::new() -// }, -// ]), -// resources: Some(Box::new(Resources { -// // TODO -// // CPU: Some(util_mm::RUNC_SETUP_CPU), -// // memory_mb: Some(util_mm::RUNC_SETUP_MEMORY), -// CPU: None, -// memory_mb: None, -// ..Resources::new() -// })), -// log_config: Some(Box::new(LogConfig { -// max_files: Some(4), -// max_file_size_mb: Some(2), -// disabled: None, -// })), -// ..Task::new() -// }, -// Task { -// name: Some(util_job::RUN_MAIN_TASK_NAME.into()), -// driver: Some("raw_exec".into()), -// config: Some({ -// let mut x = HashMap::new(); -// // This is downloaded in setup_job_runner.sh -// x.insert("command".into(), 
json!("${NOMAD_ALLOC_DIR}/job-runner")); -// x -// }), -// resources: Some(Box::new(resources.clone())), -// // Intentionally high timeout. Killing jobs is handled manually with signals. -// kill_timeout: Some(86400 * 1_000_000_000), -// kill_signal: Some("SIGTERM".into()), -// log_config: Some(Box::new(LogConfig { -// max_files: Some(4), -// max_file_size_mb: Some(4), -// disabled: None, -// })), -// ..Task::new() -// }, -// Task { -// name: Some("runc-cleanup".into()), -// lifecycle: Some(Box::new(TaskLifecycle { -// hook: Some("poststop".into()), -// sidecar: Some(false), -// })), -// driver: Some("raw_exec".into()), -// config: Some({ -// let mut x = HashMap::new(); -// x.insert("command".into(), json!("${NOMAD_TASK_DIR}/cleanup.sh")); -// x -// }), -// templates: Some(vec![Template { -// embedded_tmpl: Some(include_str!("./scripts/cleanup.sh").into()), -// dest_path: Some("${NOMAD_TASK_DIR}/cleanup.sh".into()), -// perms: Some("744".into()), -// ..Template::new() -// }]), -// resources: Some(Box::new(Resources { -// // TODO -// // CPU: Some(util_mm::RUNC_CLEANUP_CPU), -// // memory_mb: Some(util_mm::RUNC_CLEANUP_MEMORY), -// CPU: None, -// memory_mb: None, -// ..Resources::new() -// })), -// log_config: Some(Box::new(LogConfig { -// max_files: Some(4), -// max_file_size_mb: Some(2), -// disabled: None, -// })), -// ..Task::new() -// }, -// ]), -// ..TaskGroup::new() -// }]), -// ..Job::new() -// }) -// } - -/// Build base config used to generate the OCI bundle's config.json. -pub fn gen_oci_bundle_config( - cpu: u64, - memory: u64, - memory_max: u64, - env: Vec, -) -> GlobalResult { - let config_str = serde_json::to_string(&oci_config::config(cpu, memory, memory_max, env))?; - - // Escape Go template syntax - let config_str = inject_consul_env_template(&config_str)?; - - Ok(config_str) -} - -/// Makes user-generated string safe to inject in to a Go template. -pub fn escape_go_template(input: &str) -> String { - let re = Regex::new(r"(\{\{|\}\})").unwrap(); - re.replace_all(input, r#"{{"$1"}}"#) - .to_string() - // TODO: This removes exploits to inject env vars (see below) - // SVC-3307 - .replace("###", "") -} - -/// Generates a template string that we can substitute with the real environment variable -/// -/// This must be safe to inject in to a JSON string so it can be substituted after rendering the -/// JSON object. Intended to be used from within JSON. -/// -/// See inject_consul_env_template. -pub fn template_env_var(name: &str) -> String { - format!("###ENV:{name}###") -} - -/// Like template_env_var, but removes surrounding quotes. -pub fn template_env_var_int(name: &str) -> String { - format!("###ENV_INT:{name}###") -} - -/// Substitutes env vars generated from template_env_var with Consul template syntax. -/// -/// Intended to be used from within JSON. 
-pub fn inject_consul_env_template(input: &str) -> GlobalResult { - // Regular strings - let re = Regex::new(r"###ENV:(\w+)###")?; - let output = re - .replace_all(input, r#"{{ env "$1" | regexReplaceAll "\"" "\\\"" }}"#) - .to_string(); - - // Integers - let re = Regex::new(r####""###ENV_INT:(\w+)###""####)?; - let output = re - .replace_all(&output, r#"{{ env "$1" | regexReplaceAll "\"" "\\\"" }}"#) - .to_string(); - - Ok(output) -} - -pub fn nomad_host_port_env_var(port_label: &str) -> String { - format!("NOMAD_HOST_PORT_{}", port_label.replace('-', "_")) -} diff --git a/svc/pkg/ds/ops/server-create/src/util_job.rs b/svc/pkg/ds/ops/server-create/src/util_job.rs deleted file mode 100644 index b2a9b44bfd..0000000000 --- a/svc/pkg/ds/ops/server-create/src/util_job.rs +++ /dev/null @@ -1,26 +0,0 @@ -use std::time::Duration; - -/// Determines if a Nomad job is dispatched from our run. -/// -/// We use this when monitoring Nomad in order to determine which events to -/// pay attention to. -pub fn is_nomad_job_run(job_id: &str) -> bool { - job_id.starts_with("job-") && job_id.contains("/dispatch-") -} - -// Timeout from when `stop_job` is called and the kill signal is sent -pub const JOB_STOP_TIMEOUT: Duration = Duration::from_secs(30); - -pub const TASK_CLEANUP_CPU: i32 = 50; - -// Query Prometheus with: -// -// ``` -// max(nomad_client_allocs_memory_max_usage{ns="prod",exported_job=~"job-.*",task="run-cleanup"}) / 1000 / 1000 -// ``` -// -// 13.5 MB baseline, 29 MB highest peak -pub const TASK_CLEANUP_MEMORY: i32 = 32; - -pub const RUN_MAIN_TASK_NAME: &str = "main"; -pub const RUN_CLEANUP_TASK_NAME: &str = "run-cleanup"; diff --git a/svc/pkg/ds/ops/server-delete/Cargo.toml b/svc/pkg/ds/ops/server-delete/Cargo.toml deleted file mode 100644 index fcac44d68d..0000000000 --- a/svc/pkg/ds/ops/server-delete/Cargo.toml +++ /dev/null @@ -1,29 +0,0 @@ -[package] -name = "ds-server-delete" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -nomad-util = { path = "../../../../../lib/nomad-util" } -util-job = { package = "rivet-util-job", path = "../../../job/util" } -reqwest = "0.11" - -region-get = { path = "../../../region/ops/get" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dependencies.nomad_client] -package = "nomad_client" -git = "https://github.com/rivet-gg/nomad-client" -rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/ds/ops/server-delete/src/lib.rs b/svc/pkg/ds/ops/server-delete/src/lib.rs deleted file mode 100644 index 1306d13c43..0000000000 --- a/svc/pkg/ds/ops/server-delete/src/lib.rs +++ /dev/null @@ -1,351 +0,0 @@ -use chirp_worker::prelude::*; -use futures_util::FutureExt; -use proto::backend::pkg::*; -use tokio::task; - -#[derive(Debug, sqlx::FromRow)] -struct UpdatedServer { - ds_server_id: Uuid, - ds_datacenter_id: Uuid, - alloc_id: String, - dispatched_job_id: String, -} - -#[operation(name = "ds-server-delete")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let server_id = unwrap_ref!(ctx.server_id).as_uuid(); - - let dynamic_server = rivet_pools::utils::crdb::tx(&ctx.crdb().await?, 
|tx| { - let ctx = ctx.clone(); - - async move { - let dynamic_server = sql_fetch_one!( - [ctx, UpdatedServer, @tx tx] - " - UPDATE db_ds.servers - SET delete_ts = $2 - WHERE - server_id = $1 - AND delete_ts IS NULL - RETURNING - server_id, - datacenter_id - server_nomad.nomad_dispatched_job_id, - server_nomad.nomad_alloc_id, - FROM - db_ds.servers - JOIN - db_ds.server_nomad - ON - db_ds.servers.server_id = db_ds.server_nomad.server_id - ", - server_id, - ctx.ts(), - ) - .await?; - - Ok(dynamic_server) - } - .boxed() - }) - .await?; - - // // NOTE: Idempotent - - // let run_id = unwrap_ref!(ctx.run_id).as_uuid(); - - // // Cleanup the job ASAP. - // // - // // This will also be called in `job-run-cleanup`, but this is idempotent. - // // msg!([ctx] job_run::msg::cleanup(run_id) { - // // run_id: Some(run_id.into()), - // // ..Default::default() - // // }) - // // .await?; - - // let run_id = unwrap_ref!(ctx.run_id).as_uuid(); - - // #[derive(Debug, sqlx::FromRow)] - // struct RunRow { - // region_id: Uuid, - // create_ts: i64, - // cleanup_ts: Option, - // } - - // let Some((run_row, run_meta_nomad_row)) = - // rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { - // let run_row = sql_fetch_optional!( - // [ctx, RunRow, @tx tx] - // " - // SELECT region_id, create_ts, cleanup_ts - // FROM db_job_state.runs - // WHERE run_id = $1 - // FOR UPDATE - // ", - // run_id, - // ) - // .await?; - // tracing::info!(?run_row, "run row"); - - // let Some(run_row) = run_row else { - // return Ok(None); - // }; - - // let run_meta_nomad_row = sql_fetch_optional!( - // [ctx, RunMetaNomadRow, @tx tx] - // " - // SELECT dispatched_job_id, node_id - // FROM db_job_state.run_meta_nomad - // WHERE run_id = $1 - // FOR UPDATE - // ", - // run_id, - // ) - // .await?; - // tracing::info!(?run_meta_nomad_row, "run meta row"); - - // // Check if job has been dispatched already - // if let Some(run_meta_nomad) = &run_meta_nomad_row { - // if run_meta_nomad.dispatched_job_id.is_none() - // && now - run_row.create_ts < util::duration::seconds(75) - // { - // // If the job is new, then there may be a race condition with - // // submitting the job to Nomad and writing the dispatched job ID to - // // the database. - // // - // // In this case, we'll fail and retry this later. - // // - // // There is a situation where the Nomad API returns an error and the - // // job ID is never written to the database. - // retry_bail!("potential race condition with starting nomad job") - // } - // } - - // tracing::info!("deleting run"); - // if run_row.cleanup_ts.is_none() { - // sql_execute!( - // [ctx, @tx tx] - // "UPDATE db_job_state.runs SET cleanup_ts = $2 WHERE run_id = $1", - // run_id, - // now, - // ) - // .await?; - // } - // }) - // .await? - // else { - // if ctx.req_dt() > util::duration::minutes(5) { - // tracing::error!("discarding stale message"); - // return Ok(()); - // } else { - // retry_bail!("run not found, may be race condition with insertion"); - // } - // }; - - // tracing::info!("removing from cache"); - // if matches!( - // run_meta_nomad_row, - // Some(RunMetaNomadRow { - // node_id: Some(_), - // .. - // }) - // ) { - // ctx.redis_job() - // .await? 
- // .hdel( - // util_job::key::proxied_ports(run_row.region_id), - // run_id.to_string(), - // ) - // .await?; - // } - - // let Some((run_row, run_meta_nomad_row)) = - // rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { - // let run_row = sql_fetch_optional!( - // [ctx, RunRow, @tx tx] - // " - // SELECT region_id, create_ts, stop_ts - // FROM db_job_state.runs - // WHERE run_id = $1 - // FOR UPDATE - // ", - // run_id, - // ) - // .await?; - // tracing::info!(?run_row, "fetched run"); - - // let Some(run_row) = run_row else { - // return Ok(None); - // }; - - // let run_meta_nomad_row = sql_fetch_optional!( - // [ctx, RunMetaNomadRow, @tx tx] - // " - // SELECT alloc_id, dispatched_job_id - // FROM db_job_state.run_meta_nomad - // WHERE run_id = $1 - // FOR UPDATE - // ", - // run_id, - // ) - // .await?; - // tracing::info!(?run_meta_nomad_row, "fetched run meta nomad"); - - // // Check if job has been dispatched already - // if let Some(run_meta_nomad) = &run_meta_nomad_row { - // if run_meta_nomad.dispatched_job_id.is_none() - // && now - run_row.create_ts < util::duration::seconds(75) - // { - // // If the job is new, then there may be a race condition with - // // submitting the job to Nomad and writing the dispatched job ID to - // // the database. - // // - // // In this case, we'll fail and retry this later. - // // - // // There is a situation where the Nomad API returns an error and the - // // job ID is never written to the database. - // retry_bail!("potential race condition with starting nomad job") - // } - // } - - // // We can't assume that started has been called here, so we can't fetch the alloc ID. - - // if run_row.stop_ts.is_none() { - // sql_execute!( - // [ctx, @tx tx] - // "UPDATE db_job_state.runs SET stop_ts = $2 WHERE run_id = $1", - // run_id, - // now, - // ) - // .await?; - // } - // }) - // .await? - // else { - // if ctx.req_dt() > util::duration::minutes(5) { - // tracing::error!("discarding stale message"); - // return Ok(()); - // } else { - // retry_bail!("run not found, may be race condition with insertion"); - // } - // }; - - // // HACK: Remove from proxied ports early. This also gets removed in job-run-cleanup, but that - // // may not run correclty if the dispatched job id is not set correctly. - // ctx.redis_job() - // .await? - // .hdel( - // util_job::key::proxied_ports(run_row.region_id), - // run_id.to_string(), - // ) - // .await?; - - // Get the region - let region_res = op!([ctx] region_get { - region_ids: vec![dynamic_server.ds_datacenter_id.into()], - }) - .await?; - let region = unwrap!(region_res.regions.first()); - - // TODO: Handle 404 safely. See RIV-179 - // Stop the job. - // - // Setting purge to false will change the behavior of the create poll - // functionality if the job dies immediately. You can set it to false to - // debug lobbies, but it's preferred to extract metadata from the - // job-run-stop lifecycle event. - - match nomad_client::apis::jobs_api::delete_job( - &nomad_util::new_config_from_env().unwrap(), - &dynamic_server.dispatched_job_id, - Some(®ion.nomad_region), - None, - None, - None, - Some(false), // TODO: Maybe change back to true for performance? 
- None, - ) - .await - { - Ok(_) => { - tracing::info!("job stopped"); - - task::spawn(async move { - // tokio::time::sleep(util_job::JOB_STOP_TIMEOUT).await; - - // tracing::info!(?dynamic_server.alloc_id, "manually killing allocation"); - - // if let Err(err) = { - // let local_var_client = &configuration.client; - - // let local_var_uri_str = format!( - // "{}/client/allocation/{alloc_id}/signal", - // configuration.base_path, - // alloc_id = nomad_client::apis::urlencode(dynamic_server.alloc_id), - // ); - // let mut local_var_req_builder = - // local_var_client.post(local_var_uri_str.as_str()); - - // if let Some(ref local_var_str) = namespace { - // local_var_req_builder = local_var_req_builder - // .query(&[("namespace", &local_var_str.to_string())]); - // } - // if let Some(ref local_var_str) = region { - // local_var_req_builder = local_var_req_builder - // .query(&[("region", &local_var_str.to_string())]); - // } - // if let Some(ref local_var_str) = index { - // local_var_req_builder = local_var_req_builder - // .query(&[("index", &local_var_str.to_string())]); - // } - // if let Some(ref local_var_str) = wait { - // local_var_req_builder = local_var_req_builder - // .query(&[("wait", &local_var_str.to_string())]); - // } - // if let Some(ref local_var_user_agent) = configuration.user_agent { - // local_var_req_builder = local_var_req_builder - // .header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); - // } - // local_var_req_builder = local_var_req_builder.json(&alloc_signal_request); - - // let local_var_req = local_var_req_builder.build()?; - // let local_var_resp = local_var_client.execute(local_var_req).await?; - - // let local_var_status = local_var_resp.status(); - // let local_var_content = local_var_resp.text().await?; - - // if !local_var_status.is_client_error() - // && !local_var_status.is_server_error() - // { - // Ok(()) - // } else { - // let local_var_entity: Option< - // nomad_client::apis::allocations_api::SignalAllocationError, - // > = serde_json::from_str(&local_var_content).ok(); - // let local_var_error = nomad_client::apis::ResponseContent { - // status: local_var_status, - // content: local_var_content, - // entity: local_var_entity, - // }; - // Err(nomad_client::apis::Error::ResponseError(local_var_error)) - // } - // } { - // tracing::warn!( - // ?err, - // ?alloc_id, - // "error while trying to manually kill allocation" - // ); - // } - }); - } - Err(err) => { - tracing::warn!(?err, "error thrown while stopping job, probably a 404, will continue as if stopped normally"); - } - } - - Ok(dynamic_servers::server_delete::Response { - server_id: Some(server_id.into()), - }) -} diff --git a/svc/pkg/ds/ops/server-delete/tests/integration.rs b/svc/pkg/ds/ops/server-delete/tests/integration.rs deleted file mode 100644 index d7d641e21e..0000000000 --- a/svc/pkg/ds/ops/server-delete/tests/integration.rs +++ /dev/null @@ -1,6 +0,0 @@ -use chirp_worker::prelude::*; - -#[worker_test] -async fn basic(ctx: TestCtx) { - // TODO: -} diff --git a/svc/pkg/ds/ops/server-get/Cargo.toml b/svc/pkg/ds/ops/server-get/Cargo.toml deleted file mode 100644 index 821a2563ff..0000000000 --- a/svc/pkg/ds/ops/server-get/Cargo.toml +++ /dev/null @@ -1,27 +0,0 @@ -[package] -name = "ds-server-get" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -chirp-workflow = { path = "../../../../../lib/chirp-workflow/core" } -rivet-operation = { path = 
"../../../../../lib/operation/core" } -util-ds = { package = "rivet-util-ds", path = "../../util" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } - -ds-server-create = { path = "../server-create" } -cluster = { path = "../../../cluster" } -faker-build = { path = "../../../faker/ops/build" } -faker-game = { path = "../../../faker/ops/game" } -faker-region = { path = "../../../faker/ops/region" } - diff --git a/svc/pkg/ds/ops/server-get/Service.toml b/svc/pkg/ds/ops/server-get/Service.toml deleted file mode 100644 index 4ca136b6ad..0000000000 --- a/svc/pkg/ds/ops/server-get/Service.toml +++ /dev/null @@ -1,7 +0,0 @@ -[service] -name = "ds-server-get" - -[runtime] -kind = "rust" - -[operation] diff --git a/svc/pkg/ds/ops/server-get/tests/integration.rs b/svc/pkg/ds/ops/server-get/tests/integration.rs deleted file mode 100644 index 7efddf2f1d..0000000000 --- a/svc/pkg/ds/ops/server-get/tests/integration.rs +++ /dev/null @@ -1,83 +0,0 @@ -use std::collections::HashMap; - -use chirp_workflow::prelude::*; -use rivet_operation::prelude::proto::{ - self, - backend::{self, pkg::dynamic_servers}, -}; - -#[workflow_test] -async fn server_get(ctx: TestCtx) { - let game_res = op!([ctx] faker_game { - ..Default::default() - }) - .await - .unwrap(); - let env_id = game_res.prod_env_id.unwrap(); - - // Pick an existing cluster - let cluster_id = ctx - .op(cluster::ops::list::Input {}) - .await - .unwrap() - .cluster_ids - .first() - .unwrap() - .to_owned(); - - let build_res: backend::pkg::faker::build::Response = op!([ctx] faker_build { - env_id: Some(env_id), - image: backend::faker::Image::DsEcho as i32, - }) - .await - .unwrap(); - - let faker_region = op!([ctx] faker_region {}).await.unwrap(); - - let env = vec![ - ("some_envkey_test".to_string(), "2134523".to_string()), - ( - "some_other_envkey_test".to_string(), - "4325234356".to_string(), - ), - ] - .into_iter() - .collect(); - - let ports = vec![( - "testing2".to_string(), - dynamic_servers::server_create::Port { - internal_port: Some(28234), - routing: Some(dynamic_servers::server_create::port::Routing::GameGuard( - backend::ds::GameGuardRouting { protocol: 0 }, - )), - }, - )] - // Collect into hashmap - .into_iter() - .collect(); - - let server = op!([ctx] ds_server_create { - env_id: Some(env_id), - cluster_id: Some(cluster_id.into()), - datacenter_id: faker_region.region_id, - resources: Some(proto::backend::ds::ServerResources { cpu_millicores: 100, memory_mib: 200 }), - kill_timeout_ms: 0, - tags: HashMap::new(), - args: Vec::new(), - environment: env, - image_id: Some(build_res.build_id.unwrap()), - network_mode: 0, - network_ports: ports, - }) - .await - .unwrap() - .server - .unwrap(); - - let server_res = op!([ctx] ds_server_get { - server_ids: vec![server.server_id.unwrap()], - }) - .await - .unwrap(); -} diff --git a/svc/pkg/ds/ops/server-list-for-env/Cargo.toml b/svc/pkg/ds/ops/server-list-for-env/Cargo.toml deleted file mode 100644 index 9096df4030..0000000000 --- a/svc/pkg/ds/ops/server-list-for-env/Cargo.toml +++ /dev/null @@ -1,19 +0,0 @@ -[package] -name = "ds-server-list-for-env" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../../lib/chirp/client" } -rivet-operation = { path = "../../../../../lib/operation/core" } -util-ds = { package = 
"rivet-util-ds", path = "../../util" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dev-dependencies] -chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/ds/ops/server-list-for-env/Service.toml b/svc/pkg/ds/ops/server-list-for-env/Service.toml deleted file mode 100644 index 52677f16cf..0000000000 --- a/svc/pkg/ds/ops/server-list-for-env/Service.toml +++ /dev/null @@ -1,7 +0,0 @@ -[service] -name = "ds-server-list-for-env" - -[runtime] -kind = "rust" - -[operation] diff --git a/svc/pkg/ds/ops/server-list-for-env/src/lib.rs b/svc/pkg/ds/ops/server-list-for-env/src/lib.rs deleted file mode 100644 index 396e0e84f9..0000000000 --- a/svc/pkg/ds/ops/server-list-for-env/src/lib.rs +++ /dev/null @@ -1,43 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; - -#[operation(name = "server-list-for-env")] -async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let env_id = unwrap_ref!(ctx.env_id).as_uuid(); - let cursor = ctx.cursor.map(|x| x.as_uuid()); - - let server_ids = sql_fetch_all!( - [ctx, (Uuid,)] - " - WITH after_server AS ( - SELECT create_ts, server_id - FROM db_ds.servers - WHERE server_id = $4 - ) - SELECT server_id - FROM db_ds.servers - WHERE - env_id = $1 - AND tags @> $2 - AND ($3 OR destroy_ts IS NOT NULL) - AND ( - $4 IS NULL - OR (create_ts, server_id) < (SELECT create_ts, server_id FROM after_server) - ) - ORDER BY create_ts DESC, server_id DESC - LIMIT 64 - ", - env_id, - serde_json::to_value(&ctx.tags)?, - ctx.include_destroyed, - cursor, - ) - .await? - .into_iter() - .map(|(id,)| common::Uuid::from(id)) - .collect::>(); - - Ok(dynamic_servers::server_list_for_env::Response { server_ids }) -} diff --git a/svc/pkg/ds/ops/server-list-for-env/tests/integration.rs b/svc/pkg/ds/ops/server-list-for-env/tests/integration.rs deleted file mode 100644 index 6fc7435bec..0000000000 --- a/svc/pkg/ds/ops/server-list-for-env/tests/integration.rs +++ /dev/null @@ -1,4 +0,0 @@ -use std::collections::HashMap; - -// #[worker_test] -// async fn server_get(ctx: TestCtx) {} diff --git a/svc/pkg/ds/proto/msg/ds-nomad-monitor-alloc-plan.proto b/svc/pkg/ds/proto/msg/ds-nomad-monitor-alloc-plan.proto deleted file mode 100644 index 5823ca713d..0000000000 --- a/svc/pkg/ds/proto/msg/ds-nomad-monitor-alloc-plan.proto +++ /dev/null @@ -1,13 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.ds.msg.ds_nomad_monitor_alloc_plan; - -import "proto/common.proto"; - -/// name = "msg-ds-ds-nomad-monitor-alloc-plan" -/// parameters = [ -/// { name = "user_id" }, -/// ] -message Message { - -} diff --git a/svc/pkg/ds/proto/server-create.proto b/svc/pkg/ds/proto/server-create.proto deleted file mode 100644 index bd91a9d7fa..0000000000 --- a/svc/pkg/ds/proto/server-create.proto +++ /dev/null @@ -1,37 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.dynamic_servers.server_create; - -import "proto/common.proto"; -import "proto/backend/ds.proto"; - -message Request { - reserved 8; - - rivet.common.Uuid env_id = 1; - rivet.common.Uuid datacenter_id = 2; - rivet.common.Uuid cluster_id = 3; - map tags = 5; - rivet.backend.ds.ServerResources resources = 6; - int64 kill_timeout_ms = 7; - rivet.common.Uuid image_id = 9; - repeated string args = 10; - rivet.backend.ds.NetworkMode network_mode = 11; - map environment = 12; - map network_ports = 13; -} - -message Response { - rivet.backend.ds.Server server = 1; -} - -message Port { - // Null when using host 
networking since one is automatially assigned - optional int32 internal_port = 1; - - oneof routing { - rivet.backend.ds.GameGuardRouting game_guard = 101; - rivet.backend.ds.HostRouting host = 102; - } -} - diff --git a/svc/pkg/ds/proto/server-delete.proto b/svc/pkg/ds/proto/server-delete.proto deleted file mode 100644 index 9212894cb1..0000000000 --- a/svc/pkg/ds/proto/server-delete.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.dynamic_servers.server_delete; - -import "proto/common.proto"; - -message Request { - rivet.common.Uuid server_id = 1; - int64 override_kill_timeout_ms = 2; -} - -message Response { - rivet.common.Uuid server_id = 1; -} diff --git a/svc/pkg/ds/proto/server-get.proto b/svc/pkg/ds/proto/server-get.proto deleted file mode 100644 index f40b860a89..0000000000 --- a/svc/pkg/ds/proto/server-get.proto +++ /dev/null @@ -1,14 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.dynamic_servers.server_get; - -import "proto/common.proto"; -import "proto/backend/ds.proto"; - -message Request { - repeated rivet.common.Uuid server_ids = 1; -} - -message Response { - repeated rivet.backend.ds.Server servers = 1; -} diff --git a/svc/pkg/ds/proto/server-list-for-env.proto b/svc/pkg/ds/proto/server-list-for-env.proto deleted file mode 100644 index c134d9d08a..0000000000 --- a/svc/pkg/ds/proto/server-list-for-env.proto +++ /dev/null @@ -1,17 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.dynamic_servers.server_list_for_env; - -import "proto/common.proto"; -import "proto/backend/ds.proto"; - -message Request { - rivet.common.Uuid env_id = 1; - map tags = 2; // JSON - bool include_destroyed = 3; - optional rivet.common.Uuid cursor = 4; -} - -message Response { - repeated rivet.common.Uuid server_ids = 1; -} diff --git a/svc/pkg/ds/src/lib.rs b/svc/pkg/ds/src/lib.rs new file mode 100644 index 0000000000..a11c5bb0f5 --- /dev/null +++ b/svc/pkg/ds/src/lib.rs @@ -0,0 +1,17 @@ +use chirp_workflow::prelude::*; + +pub mod ops; +pub mod types; +pub mod util; +pub mod workers; +pub mod workflows; + +pub fn registry() -> WorkflowResult { + use workflows::*; + + let mut registry = Registry::new(); + registry.register_workflow::()?; + registry.register_workflow::()?; + + Ok(registry) +} diff --git a/svc/pkg/ds/src/ops/mod.rs b/svc/pkg/ds/src/ops/mod.rs new file mode 100644 index 0000000000..74f47ad347 --- /dev/null +++ b/svc/pkg/ds/src/ops/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/svc/pkg/ds/ops/server-get/src/lib.rs b/svc/pkg/ds/src/ops/server/get.rs similarity index 62% rename from svc/pkg/ds/ops/server-get/src/lib.rs rename to svc/pkg/ds/src/ops/server/get.rs index 2b4f4dfc14..3ea576446f 100644 --- a/svc/pkg/ds/ops/server-get/src/lib.rs +++ b/svc/pkg/ds/src/ops/server/get.rs @@ -1,15 +1,18 @@ -use futures_util::FutureExt; -use proto::backend::{self, pkg::*}; -use rivet_operation::prelude::*; -use std::collections::HashMap; +use std::{collections::HashMap, convert::TryInto}; + +use chirp_workflow::prelude::*; + +use crate::types::{ + GameGuardProtocol, HostProtocol, NetworkMode, Port, Routing, Server, ServerResources, +}; #[derive(sqlx::FromRow)] -struct Server { +struct ServerRow { server_id: Uuid, env_id: Uuid, datacenter_id: Uuid, cluster_id: Uuid, - tags: serde_json::Value, + tags: sqlx::types::Json>, resources_cpu_millicores: i64, resources_memory_mib: i64, kill_timeout_ms: i64, @@ -20,7 +23,7 @@ struct Server { image_id: Uuid, args: Vec, network_mode: i64, - environment: serde_json::Value, + environment: sqlx::types::Json>, } 
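The `ServerRow` struct above now decodes `tags` and `environment` as `sqlx::types::Json<HashMap<String, String>>` instead of `serde_json::Value`, so the JSONB columns come out of the database already typed and the later `serde_json::from_value` step disappears. A minimal sketch of the pattern (hypothetical table and column names, assuming sqlx's JSON support is enabled):

```rust
use std::collections::HashMap;

// Decodes a JSONB column straight into a typed map; the inner map is
// reached through `.0` on the Json wrapper.
#[derive(sqlx::FromRow)]
struct TaggedRow {
	id: i64,
	tags: sqlx::types::Json<HashMap<String, String>>,
}

fn tag<'a>(row: &'a TaggedRow, key: &str) -> Option<&'a str> {
	row.tags.0.get(key).map(String::as_str)
}
```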
#[derive(sqlx::FromRow)] @@ -43,12 +46,12 @@ struct DockerPortHost { #[derive(sqlx::FromRow)] struct ServerNomad { server_id: Uuid, - nomad_dispatched_job_id: Option, - nomad_alloc_id: Option, - nomad_node_id: Option, - nomad_node_name: Option, - nomad_node_public_ipv4: Option, - nomad_node_vlan_ipv4: Option, + // nomad_dispatched_job_id: Option, + // nomad_alloc_id: Option, + // nomad_node_id: Option, + // nomad_node_name: Option, + // nomad_node_public_ipv4: Option, + // nomad_node_vlan_ipv4: Option, nomad_alloc_plan_ts: Option, } @@ -60,19 +63,21 @@ struct ServerPort { nomad_source: i64, } -#[operation(name = "ds-server-get")] -pub async fn handle( - ctx: OperationContext, -) -> GlobalResult { - let server_ids = ctx - .server_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); +#[derive(Debug)] +pub struct Input { + pub server_ids: Vec, +} + +#[derive(Debug)] +pub struct Output { + pub servers: Vec, +} +#[operation] +pub async fn get(ctx: &OperationCtx, input: &Input) -> GlobalResult { let (server_rows, port_gg_rows, port_host_rows, server_nomad_rows, internal_port_rows) = tokio::try_join!( sql_fetch_all!( - [ctx, Server] + [ctx, ServerRow] " SELECT server_id, @@ -91,12 +96,10 @@ pub async fn handle( args, network_mode, environment - FROM - db_ds.servers - WHERE - server_id = ANY($1) + FROM db_ds.servers + WHERE server_id = ANY($1) ", - &server_ids, + &input.server_ids, ), sql_fetch_all!( [ctx, DockerPortProtocolGameGuard] @@ -107,12 +110,10 @@ pub async fn handle( port_number, gg_port, protocol - FROM - db_ds.docker_ports_protocol_game_guard - WHERE - server_id = ANY($1) + FROM db_ds.docker_ports_protocol_game_guard + WHERE server_id = ANY($1) ", - &server_ids, + &input.server_ids, ), sql_fetch_all!( [ctx, DockerPortHost] @@ -122,12 +123,10 @@ pub async fn handle( port_name, port_number, protocol - FROM - db_ds.docker_ports_host - WHERE - server_id = ANY($1) + FROM db_ds.docker_ports_host + WHERE server_id = ANY($1) ", - &server_ids, + &input.server_ids, ), sql_fetch_all!( [ctx, ServerNomad] @@ -141,12 +140,10 @@ pub async fn handle( nomad_node_public_ipv4, nomad_node_vlan_ipv4, nomad_alloc_plan_ts - FROM - db_ds.server_nomad - WHERE - server_id = ANY($1) + FROM db_ds.server_nomad + WHERE server_id = ANY($1) ", - &server_ids, + &input.server_ids, ), sql_fetch_all!( [ctx, ServerPort] @@ -156,23 +153,16 @@ pub async fn handle( nomad_label, nomad_ip, nomad_source - FROM - db_ds.internal_ports - WHERE - server_id = ANY($1) + FROM db_ds.internal_ports + WHERE server_id = ANY($1) ", - &server_ids, + &input.server_ids, ), )?; - let servers_proto = server_rows + let servers = server_rows .into_iter() .map(|server| { - let tags: std::collections::HashMap = - serde_json::from_value(server.tags)?; - let environment: std::collections::HashMap = - serde_json::from_value(server.environment)?; - let server_nomad = unwrap!(server_nomad_rows .iter() .find(|x| x.server_id == server.server_id)); @@ -184,7 +174,7 @@ pub async fn handle( .iter() .filter(|p| p.server_id == server.server_id) .map(|gg_port| { - GlobalResult::Ok(( + Ok(( gg_port.port_name.clone(), create_port_gg(is_connectable, gg_port, server.datacenter_id)?, )) @@ -197,9 +187,12 @@ pub async fn handle( let internal_port = internal_port_rows.iter().find(|x| { x.server_id == server.server_id && x.nomad_label - == util_ds::format_nomad_port_label(&host_port.port_name) + == crate::util::format_nomad_port_label( + &host_port.port_name, + ) }); - GlobalResult::Ok(( + + Ok(( host_port.port_name.clone(), create_port_host(is_connectable, 
host_port, internal_port)?, )) @@ -207,46 +200,42 @@ pub async fn handle( ) .collect::>>()?; - let server_proto = backend::ds::Server { - server_id: Some(server.server_id.into()), - env_id: Some(server.env_id.into()), - datacenter_id: Some(server.datacenter_id.into()), - cluster_id: Some(server.cluster_id.into()), - tags, - resources: Some(backend::ds::ServerResources { + Ok(Server { + server_id: server.server_id, + env_id: server.env_id, + datacenter_id: server.datacenter_id, + cluster_id: server.cluster_id, + tags: server.tags.0, + resources: ServerResources { cpu_millicores: server.resources_cpu_millicores.try_into()?, memory_mib: server.resources_memory_mib.try_into()?, - }), + }, kill_timeout_ms: server.kill_timeout_ms, args: server.args, - environment, - image_id: Some(server.image_id.into()), - network_mode: server.network_mode.try_into()?, + environment: server.environment.0, + image_id: server.image_id, + network_mode: unwrap!(NetworkMode::from_repr(server.network_mode.try_into()?)), network_ports: ports, create_ts: server.create_ts, start_ts: server.start_ts, connectable_ts: server.connectable_ts, destroy_ts: server.destroy_ts, - }; - - Ok(server_proto) + }) }) .collect::>>()?; - Ok(dynamic_servers::server_get::Response { - servers: servers_proto, - }) + Ok(Output { servers }) } fn create_port_gg( is_connectable: bool, gg_port: &DockerPortProtocolGameGuard, datacenter_id: Uuid, -) -> GlobalResult { - Ok(backend::ds::Port { +) -> GlobalResult { + Ok(Port { internal_port: Some(gg_port.port_number.try_into()?), public_hostname: if is_connectable { - Some(util_ds::build_ds_hostname( + Some(crate::util::build_ds_hostname( gg_port.server_id, &gg_port.port_name, datacenter_id, @@ -259,11 +248,9 @@ fn create_port_gg( } else { None }, - routing: Some(backend::ds::port::Routing::GameGuard( - backend::ds::GameGuardRouting { - protocol: gg_port.protocol.try_into()?, - }, - )), + routing: Routing::GameGuard { + protocol: unwrap!(GameGuardProtocol::from_repr(gg_port.protocol.try_into()?)), + }, }) } @@ -271,8 +258,8 @@ fn create_port_host( is_connectable: bool, host_port: &DockerPortHost, internal_port: Option<&ServerPort>, -) -> GlobalResult { - Ok(backend::ds::Port { +) -> GlobalResult { + Ok(Port { internal_port: Some(host_port.port_number.try_into()?), public_hostname: if is_connectable { internal_port.map(|x| x.nomad_ip.clone()) @@ -286,8 +273,8 @@ fn create_port_host( } else { None }, - routing: Some(backend::ds::port::Routing::Host(backend::ds::HostRouting { - protocol: host_port.protocol.try_into()?, - })), + routing: Routing::Host { + protocol: unwrap!(HostProtocol::from_repr(host_port.protocol.try_into()?)), + }, }) } diff --git a/svc/pkg/ds/src/ops/server/list_for_env.rs b/svc/pkg/ds/src/ops/server/list_for_env.rs new file mode 100644 index 0000000000..5065dc9299 --- /dev/null +++ b/svc/pkg/ds/src/ops/server/list_for_env.rs @@ -0,0 +1,53 @@ +use std::collections::HashMap; + +use chirp_workflow::prelude::*; + +#[derive(Debug, Default)] +pub struct Input { + pub env_id: Uuid, + pub tags: HashMap, + pub include_destroyed: bool, + pub cursor: Option, +} + +#[derive(Debug)] +pub struct Output { + pub server_ids: Vec, +} + +#[operation] +pub async fn list_for_env(ctx: &OperationCtx, input: &Input) -> GlobalResult { + let server_ids = sql_fetch_all!( + [ctx, (Uuid,)] + " + WITH + after_server AS ( + SELECT create_ts, server_id + FROM db_ds.servers + WHERE server_id = $4 + ) + SELECT server_id + FROM db_ds.servers + WHERE + env_id = $1 AND + tags @> $2 AND + ($3 OR destroy_ts IS NOT NULL) 
AND + ( + $4 IS NULL OR + (create_ts, server_id) < (SELECT create_ts, server_id FROM after_server) + ) + ORDER BY create_ts DESC, server_id DESC + LIMIT 64 + ", + input.env_id, + serde_json::to_value(&input.tags)?, + input.include_destroyed, + input.cursor, + ) + .await? + .into_iter() + .map(|(id,)| id) + .collect::>(); + + Ok(Output { server_ids }) +} diff --git a/svc/pkg/ds/src/ops/server/mod.rs b/svc/pkg/ds/src/ops/server/mod.rs new file mode 100644 index 0000000000..1bce724963 --- /dev/null +++ b/svc/pkg/ds/src/ops/server/mod.rs @@ -0,0 +1,2 @@ +pub mod get; +pub mod list_for_env; diff --git a/svc/pkg/ds/src/types.rs b/svc/pkg/ds/src/types.rs new file mode 100644 index 0000000000..b2ab043c84 --- /dev/null +++ b/svc/pkg/ds/src/types.rs @@ -0,0 +1,232 @@ +use std::collections::HashMap; + +use chirp_workflow::prelude::*; +use rivet_api::models; +use rivet_convert::{ApiFrom, ApiInto, ApiTryFrom}; +use serde::{Deserialize, Serialize}; +use serde_json::json; +use strum::FromRepr; + +#[derive(Debug, Clone)] +pub struct Server { + pub server_id: Uuid, + pub env_id: Uuid, + pub datacenter_id: Uuid, + pub cluster_id: Uuid, + pub tags: HashMap, + pub resources: ServerResources, + pub kill_timeout_ms: i64, + pub create_ts: i64, + pub start_ts: Option, + pub connectable_ts: Option, + pub destroy_ts: Option, + pub image_id: Uuid, + pub args: Vec, + pub network_mode: NetworkMode, + pub environment: HashMap, + pub network_ports: HashMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub struct ServerResources { + pub cpu_millicores: i32, + pub memory_mib: i32, +} + +#[derive(Serialize, Deserialize, Hash, Debug, Clone, Copy, PartialEq, Eq, FromRepr)] +pub enum NetworkMode { + Bridge = 0, + Host = 1, +} + +#[derive(Debug, Clone)] +pub struct Port { + // Null when using host networking since one is automatically assigned + pub internal_port: Option, + pub public_hostname: Option, + pub public_port: Option, + pub routing: Routing, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub enum Routing { + GameGuard { protocol: GameGuardProtocol }, + Host { protocol: HostProtocol }, +} + +#[derive(Serialize, Deserialize, Hash, Debug, Clone, Copy, PartialEq, Eq, FromRepr)] +pub enum GameGuardProtocol { + Http = 0, + Https = 1, + Tcp = 2, + TcpTls = 3, + Udp = 4, +} + +#[derive(Serialize, Deserialize, Hash, Debug, Clone, Copy, PartialEq, Eq, FromRepr)] +pub enum HostProtocol { + Tcp = 0, + Udp = 1, +} + +// Move to build pkg when migrated to workflows +pub mod build { + use serde::{Deserialize, Serialize}; + use strum::FromRepr; + + #[derive(Serialize, Deserialize, Hash, Debug, Clone, Copy, PartialEq, Eq, FromRepr)] + pub enum BuildKind { + DockerImage = 0, + OciBundle = 1, + } + + #[derive(Serialize, Deserialize, Hash, Debug, Clone, Copy, PartialEq, Eq, FromRepr)] + pub enum BuildCompression { + None = 0, + Lz4 = 1, + } +} + +impl ApiTryFrom for models::ServersServer { + type Error = GlobalError; + + fn api_try_from(value: Server) -> GlobalResult { + Ok(models::ServersServer { + id: value.server_id, + environment: value.env_id, + datacenter: value.datacenter_id, + cluster: value.cluster_id, + created_at: value.create_ts, + started_at: value.start_ts, + connectable_at: value.connectable_ts, + destroyed_at: value.destroy_ts, + tags: Some(serde_json::to_value(value.tags)?), + runtime: Box::new(models::ServersRuntime { + build: value.image_id, + arguments: Some(value.args), + environment: Some(value.environment), + }), + network: Box::new(models::ServersNetwork { + mode: 
Some(value.network_mode.api_into()), + ports: value + .network_ports + .into_iter() + .map(|(s, p)| (s, p.api_into())) + .collect::>(), + }), + lifecycle: Box::new(models::ServersLifecycle { + kill_timeout: Some(value.kill_timeout_ms), + }), + resources: Box::new(value.resources.api_into()), + }) + } +} + +impl ApiFrom for ServerResources { + fn api_from(value: models::ServersResources) -> ServerResources { + ServerResources { + cpu_millicores: value.cpu, + memory_mib: value.memory, + } + } +} + +impl ApiFrom for models::ServersResources { + fn api_from(value: ServerResources) -> models::ServersResources { + models::ServersResources { + cpu: value.cpu_millicores, + memory: value.memory_mib, + } + } +} + +impl ApiFrom for NetworkMode { + fn api_from(value: models::ServersNetworkMode) -> NetworkMode { + match value { + models::ServersNetworkMode::Bridge => NetworkMode::Bridge, + models::ServersNetworkMode::Host => NetworkMode::Host, + } + } +} + +impl ApiFrom for models::ServersNetworkMode { + fn api_from(value: NetworkMode) -> models::ServersNetworkMode { + match value { + NetworkMode::Bridge => models::ServersNetworkMode::Bridge, + NetworkMode::Host => models::ServersNetworkMode::Host, + } + } +} + +impl ApiFrom for models::ServersPort { + fn api_from(value: Port) -> models::ServersPort { + let (protocol, routing) = match &value.routing { + Routing::GameGuard { protocol } => ( + (*protocol).api_into(), + models::ServersPortRouting { + game_guard: Some(json!({})), + ..Default::default() + }, + ), + Routing::Host { protocol } => ( + (*protocol).api_into(), + models::ServersPortRouting { + host: Some(json!({})), + ..Default::default() + }, + ), + }; + + models::ServersPort { + protocol, + internal_port: value.internal_port, + public_hostname: value.public_hostname, + public_port: value.public_port, + routing: Box::new(routing), + } + } +} + +impl ApiFrom for GameGuardProtocol { + fn api_from(value: models::ServersPortProtocol) -> GameGuardProtocol { + match value { + models::ServersPortProtocol::Udp => GameGuardProtocol::Udp, + models::ServersPortProtocol::Tcp => GameGuardProtocol::Tcp, + models::ServersPortProtocol::Http => GameGuardProtocol::Http, + models::ServersPortProtocol::Https => GameGuardProtocol::Https, + models::ServersPortProtocol::TcpTls => GameGuardProtocol::TcpTls, + } + } +} + +impl ApiFrom for models::ServersPortProtocol { + fn api_from(value: GameGuardProtocol) -> models::ServersPortProtocol { + match value { + GameGuardProtocol::Udp => models::ServersPortProtocol::Udp, + GameGuardProtocol::Tcp => models::ServersPortProtocol::Tcp, + GameGuardProtocol::Http => models::ServersPortProtocol::Http, + GameGuardProtocol::Https => models::ServersPortProtocol::Https, + GameGuardProtocol::TcpTls => models::ServersPortProtocol::TcpTls, + } + } +} + +impl ApiTryFrom for HostProtocol { + type Error = GlobalError; + fn api_try_from(value: models::ServersPortProtocol) -> GlobalResult { + Ok(match value { + models::ServersPortProtocol::Udp => HostProtocol::Udp, + models::ServersPortProtocol::Tcp => HostProtocol::Tcp, + _ => bail_with!(SERVERS_UNSUPPORTED_HOST_PROTOCOL), + }) + } +} + +impl ApiFrom for models::ServersPortProtocol { + fn api_from(value: HostProtocol) -> models::ServersPortProtocol { + match value { + HostProtocol::Udp => models::ServersPortProtocol::Udp, + HostProtocol::Tcp => models::ServersPortProtocol::Tcp, + } + } +} diff --git a/svc/pkg/ds/util/src/consts.rs b/svc/pkg/ds/src/util/consts.rs similarity index 97% rename from svc/pkg/ds/util/src/consts.rs rename to 
svc/pkg/ds/src/util/consts.rs index 10d67ca4a0..ebc2fec604 100644 --- a/svc/pkg/ds/util/src/consts.rs +++ b/svc/pkg/ds/src/util/consts.rs @@ -1,4 +1,4 @@ -use rivet_util as util; +use chirp_workflow::prelude::*; pub const LOBBY_READY_TIMEOUT: i64 = util::duration::minutes(5); pub const PLAYER_READY_TIMEOUT: i64 = util::duration::minutes(2); diff --git a/svc/pkg/ds/util/src/lib.rs b/svc/pkg/ds/src/util/mod.rs similarity index 78% rename from svc/pkg/ds/util/src/lib.rs rename to svc/pkg/ds/src/util/mod.rs index 3437193c79..358c066a56 100644 --- a/svc/pkg/ds/util/src/lib.rs +++ b/svc/pkg/ds/src/util/mod.rs @@ -1,7 +1,15 @@ +use chirp_workflow::prelude::*; + pub mod consts; +pub mod nomad_job; +mod oci_config; +mod seccomp; pub mod test; -use rivet_operation::prelude::*; +lazy_static::lazy_static! { + pub static ref NEW_NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = + nomad_util::new_config_from_env().unwrap(); +} pub fn build_ds_hostname( server_id: Uuid, diff --git a/svc/pkg/ds/src/util/nomad_job.rs b/svc/pkg/ds/src/util/nomad_job.rs new file mode 100644 index 0000000000..0e73389777 --- /dev/null +++ b/svc/pkg/ds/src/util/nomad_job.rs @@ -0,0 +1,105 @@ +use chirp_worker::prelude::*; +use regex::Regex; + +use super::oci_config; +use crate::types::GameGuardProtocol; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TransportProtocol { + Tcp, + Udp, +} + +impl From for TransportProtocol { + fn from(proxy_protocol: GameGuardProtocol) -> Self { + match proxy_protocol { + GameGuardProtocol::Http + | GameGuardProtocol::Https + | GameGuardProtocol::Tcp + | GameGuardProtocol::TcpTls => Self::Tcp, + GameGuardProtocol::Udp => Self::Udp, + } + } +} + +impl TransportProtocol { + pub fn as_cni_protocol(&self) -> &'static str { + match self { + Self::Tcp => "tcp", + Self::Udp => "udp", + } + } +} + +/// Helper structure for parsing all of the runtime's ports before building the +/// config. +#[derive(Clone)] +pub struct DecodedPort { + pub label: String, + pub nomad_port_label: String, + pub target: u16, + pub proxy_protocol: GameGuardProtocol, +} + +/// Build base config used to generate the OCI bundle's config.json. +pub fn gen_oci_bundle_config( + cpu: u64, + memory: u64, + memory_max: u64, + env: Vec, +) -> GlobalResult { + let config_str = serde_json::to_string(&oci_config::config(cpu, memory, memory_max, env))?; + + // Escape Go template syntax + let config_str = inject_consul_env_template(&config_str)?; + + Ok(config_str) +} + +/// Makes user-generated string safe to inject in to a Go template. +pub fn escape_go_template(input: &str) -> String { + let re = Regex::new(r"(\{\{|\}\})").unwrap(); + re.replace_all(input, r#"{{"$1"}}"#) + .to_string() + // TODO: This removes exploits to inject env vars (see below) + // SVC-3307 + .replace("###", "") +} + +/// Generates a template string that we can substitute with the real environment variable +/// +/// This must be safe to inject in to a JSON string so it can be substituted after rendering the +/// JSON object. Intended to be used from within JSON. +/// +/// See inject_consul_env_template. +pub fn template_env_var(name: &str) -> String { + format!("###ENV:{name}###") +} + +/// Like template_env_var, but removes surrounding quotes. +pub fn template_env_var_int(name: &str) -> String { + format!("###ENV_INT:{name}###") +} + +/// Substitutes env vars generated from template_env_var with Consul template syntax. +/// +/// Intended to be used from within JSON. 
+pub fn inject_consul_env_template(input: &str) -> GlobalResult { + // Regular strings + let re = Regex::new(r"###ENV:(\w+)###")?; + let output = re + .replace_all(input, r#"{{ env "$1" | regexReplaceAll "\"" "\\\"" }}"#) + .to_string(); + + // Integers + let re = Regex::new(r####""###ENV_INT:(\w+)###""####)?; + let output = re + .replace_all(&output, r#"{{ env "$1" | regexReplaceAll "\"" "\\\"" }}"#) + .to_string(); + + Ok(output) +} + +pub fn nomad_host_port_env_var(port_label: &str) -> String { + format!("NOMAD_HOST_PORT_{}", port_label.replace('-', "_")) +} diff --git a/svc/pkg/ds/ops/server-create/src/oci_config.rs b/svc/pkg/ds/src/util/oci_config.rs similarity index 96% rename from svc/pkg/ds/ops/server-create/src/oci_config.rs rename to svc/pkg/ds/src/util/oci_config.rs index f03105fd13..fea2c9d732 100644 --- a/svc/pkg/ds/ops/server-create/src/oci_config.rs +++ b/svc/pkg/ds/src/util/oci_config.rs @@ -1,11 +1,7 @@ -use chirp_worker::prelude::*; +use super::seccomp; +use chirp_workflow::prelude::*; use serde_json::json; -// CPU period in microseconds. -// -// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt -const CPU_PERIOD: u64 = 100000; - /// Generates base config.json for an OCI bundle. pub fn config(cpu: u64, memory: u64, memory_max: u64, env: Vec) -> serde_json::Value { // CPU shares is a relative weight. It doesn't matter what unit we pass here as @@ -132,7 +128,7 @@ pub fn config(cpu: u64, memory: u64, memory_max: u64, env: Vec) -> serde "/proc/sys", "/proc/sysrq-trigger" ], - "seccomp": super::seccomp::seccomp() + "seccomp": seccomp::config() } }) } diff --git a/svc/pkg/ds/ops/server-create/src/seccomp.rs b/svc/pkg/ds/src/util/seccomp.rs similarity index 99% rename from svc/pkg/ds/ops/server-create/src/seccomp.rs rename to svc/pkg/ds/src/util/seccomp.rs index f3f7fcfec3..b946338faf 100644 --- a/svc/pkg/ds/ops/server-create/src/seccomp.rs +++ b/svc/pkg/ds/src/util/seccomp.rs @@ -1,7 +1,6 @@ -use chirp_worker::prelude::*; use serde_json::json; -pub fn seccomp() -> serde_json::Value { +pub fn config() -> serde_json::Value { // Copied from auto-generated containerd // // See comment in super::oci_conifg::config on how to generate this diff --git a/svc/pkg/ds/util/src/test.rs b/svc/pkg/ds/src/util/test.rs similarity index 100% rename from svc/pkg/ds/util/src/test.rs rename to svc/pkg/ds/src/util/test.rs diff --git a/svc/pkg/ds/src/workers/mod.rs b/svc/pkg/ds/src/workers/mod.rs new file mode 100644 index 0000000000..1b2542aed8 --- /dev/null +++ b/svc/pkg/ds/src/workers/mod.rs @@ -0,0 +1,10 @@ +pub mod nomad_monitor_alloc_plan; +pub mod nomad_monitor_alloc_update; +pub mod nomad_monitor_eval_update; +mod webhook; + +chirp_worker::workers![ + nomad_monitor_alloc_plan, + nomad_monitor_alloc_update, + nomad_monitor_eval_update +]; diff --git a/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_plan.rs b/svc/pkg/ds/src/workers/nomad_monitor_alloc_plan.rs similarity index 78% rename from svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_plan.rs rename to svc/pkg/ds/src/workers/nomad_monitor_alloc_plan.rs index f82c7ec86c..7fd79ec9ce 100644 --- a/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_plan.rs +++ b/svc/pkg/ds/src/workers/nomad_monitor_alloc_plan.rs @@ -1,10 +1,10 @@ +use std::time::Duration; + use chirp_worker::prelude::*; -use proto::backend::{self, pkg::*}; -use redis::AsyncCommands; +use rivet_operation::prelude::proto::backend::{self, pkg::*}; use serde::Deserialize; -use std::time::Duration; -use crate::workers::NEW_NOMAD_CONFIG; +use 
crate::util::NEW_NOMAD_CONFIG; // TODO: const TRAEFIK_GRACE_PERIOD: Duration = Duration::from_secs(3); @@ -18,20 +18,18 @@ struct PlanResult { #[derive(Debug, sqlx::FromRow)] struct RunRow { server_id: Uuid, - datacenter_id: Uuid, - stop_ts: Option, connectable_ts: Option, nomad_alloc_plan_ts: Option, // this was nomad_plan_ts } -#[derive(Debug, sqlx::FromRow)] -struct ProxiedPort { - target_nomad_port_label: Option, - ingress_port: i64, - ingress_hostnames: Vec, - proxy_protocol: i64, - ssl_domain_mode: i64, -} +// #[derive(Debug, sqlx::FromRow)] +// struct ProxiedPort { +// target_nomad_port_label: Option, +// ingress_port: i64, +// ingress_hostnames: Vec, +// proxy_protocol: i64, +// ssl_domain_mode: i64, +// } #[derive(Clone)] struct RunData { @@ -48,7 +46,7 @@ struct RunData { async fn worker( ctx: &OperationContext, ) -> GlobalResult<()> { - let PlanResult { allocation: alloc } = serde_json::from_str::(&ctx.payload_json)?; + let PlanResult { allocation: alloc } = serde_json::from_str(&ctx.payload_json)?; tracing::info!(?alloc, "from nomad"); let job_id = unwrap_ref!(alloc.job_id, "alloc has no job id"); @@ -104,7 +102,7 @@ async fn worker( // // Backoff mitigates race condition with job-run-create not having inserted // the dispatched_job_id yet. - let run_data: RunData = RunData { + let run_data = RunData { job_id: job_id.clone(), alloc_id: alloc_id.clone(), nomad_node_id: nomad_node_id.clone(), @@ -122,12 +120,7 @@ async fn worker( .await?; // Check if run found - let Some(DbOutput { - server_id, - datacenter_id, - stop_ts, - }) = db_output - else { + let Some(DbOutput { server_id }) = db_output else { if ctx.req_dt() > util::duration::minutes(5) { tracing::error!("discarding stale message"); return Ok(()); @@ -144,8 +137,6 @@ async fn worker( #[derive(Debug)] struct DbOutput { server_id: Uuid, - datacenter_id: Uuid, - stop_ts: Option, } /// Returns `None` if the run could not be found. 
@@ -164,27 +155,19 @@ async fn update_db( ports, }: RunData, ) -> GlobalResult> { - tracing::info!(?ports, "got the portdatda"); - let run_row = sql_fetch_optional!( [ctx, RunRow, @tx tx] " SELECT - servers.server_id, - servers.datacenter_id, - servers.stop_ts, - servers.connectable_ts, - server_nomad.nomad_alloc_plan_ts - FROM - db_ds.server_nomad - INNER JOIN - db_ds.servers - ON - servers.server_id = server_nomad.server_id - WHERE - server_nomad.nomad_dispatched_job_id = $1 - FOR UPDATE OF - server_nomad + s.server_id, + s.connectable_ts, + s.stop_ts, + sn.nomad_alloc_plan_ts + FROM db_ds.server_nomad AS sn + INNER JOIN db_ds.servers AS s + ON s.server_id = sn.server_id + WHERE sn.nomad_dispatched_job_id = $1 + FOR UPDATE OF sn ", &job_id, ) @@ -229,9 +212,8 @@ async fn update_db( nomad_node_id = $4, nomad_node_name = $5, nomad_node_public_ipv4 = $6, - nomad_node_vlan_ipv4 = $ - WHERE - server_id = $1 + nomad_node_vlan_ipv4 = $7 + WHERE server_id = $1 ", server_id, &alloc_id, @@ -251,15 +233,13 @@ async fn update_db( sql_execute!( [ctx, @tx tx] " - INSERT INTO - db_ds.internal_ports ( - server_id, - nomad_label, - nomad_source, - nomad_ip - ) - VALUES - ($1, $2, $3, $4) + INSERT INTO db_ds.internal_ports ( + server_id, + nomad_label, + nomad_source, + nomad_ip + ) + VALUES ($1, $2, $3, $4) ", server_id, &port.label, @@ -270,11 +250,5 @@ async fn update_db( } } - tracing::info!("ayy event2c"); - - Ok(Some(DbOutput { - server_id, - datacenter_id: run_row.datacenter_id, - stop_ts: run_row.stop_ts, - })) + Ok(Some(DbOutput { server_id })) } diff --git a/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_update.rs b/svc/pkg/ds/src/workers/nomad_monitor_alloc_update.rs similarity index 64% rename from svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_update.rs rename to svc/pkg/ds/src/workers/nomad_monitor_alloc_update.rs index 9ce99067a8..4e3d52164e 100644 --- a/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_update.rs +++ b/svc/pkg/ds/src/workers/nomad_monitor_alloc_update.rs @@ -1,7 +1,6 @@ use chirp_worker::prelude::*; -use proto::backend::pkg::*; +use rivet_operation::prelude::proto::backend::pkg::*; use serde::Deserialize; -use sqlx; #[derive(Debug, Clone, Deserialize)] #[serde(rename_all = "PascalCase")] @@ -20,10 +19,8 @@ enum TaskState { async fn worker( ctx: &OperationContext, ) -> GlobalResult<()> { - let _crdb = ctx.crdb().await?; - let AllocationUpdated { allocation: alloc } = serde_json::from_str(&ctx.payload_json)?; - let alloc_state_json = serde_json::to_value(&alloc)?; + let alloc_state_json = serde_json::value::to_raw_value(&alloc)?.to_string(); let alloc_id = unwrap_ref!(alloc.ID); let eval_id = unwrap_ref!(alloc.eval_id, "alloc has no eval"); @@ -70,12 +67,10 @@ async fn worker( let run_row = sql_fetch_optional!( [ctx, (Uuid,)] " - UPDATE - db_ds.server_nomad - SET - nomad_alloc_state = $2 - WHERE - nomad_dispatched_job_id = $1 RETURNING server_id + UPDATE db_ds.server_nomad + SET nomad_alloc_state = $2 + WHERE nomad_dispatched_job_id = $1 + RETURNING server_id ", job_id, &alloc_state_json, @@ -91,7 +86,7 @@ async fn worker( } }; - crate::workers::webhook_call(ctx, alloc_id.to_string()).await?; + crate::workers::webhook::call(ctx, alloc_id.to_string()).await?; Ok(()) } @@ -99,41 +94,31 @@ async fn worker( let run_row = sql_fetch_optional!( [ctx, (Uuid, Option)] " - WITH select_server AS ( - SELECT - servers.server_id, - servers.start_ts - FROM - db_ds.server_nomad - INNER JOIN db_ds.servers ON servers.server_id = server_nomad.server_id - WHERE - nomad_dispatched_job_id = 
$1 - ), - _update_servers AS ( - UPDATE - db_ds.servers - SET - start_ts = $2 - FROM - select_server - WHERE - servers.server_id = select_server.server_id - AND servers.start_ts IS NULL RETURNING 1 - ), - _update_server_nomad AS ( - UPDATE - db_ds.server_nomad - SET - nomad_alloc_state = $3 - FROM - select_server - WHERE - server_nomad.server_id = select_server.server_id RETURNING 1 - ) - SELECT - * - FROM - select_server + WITH + select_server AS ( + SELECT s.server_id, s.start_ts + FROM db_ds.server_nomad AS sn + INNER JOIN db_ds.servers AS s + ON s.server_id = sn.server_id + WHERE nomad_dispatched_job_id = $1 + ), + update_servers AS ( + UPDATE db_ds.servers + SET start_ts = $2 + FROM select_server + WHERE + servers.server_id = select_server.server_id AND + servers.start_ts IS NULL + RETURNING 1 + ), + update_server_nomad AS ( + UPDATE db_ds.server_nomad + SET nomad_alloc_state = $3 + FROM select_server + WHERE server_nomad.server_id = select_server.server_id + RETURNING 1 + ) + SELECT * FROM select_server ", job_id, ctx.ts(), @@ -150,7 +135,7 @@ async fn worker( } }; - crate::workers::webhook_call(ctx, alloc_id.to_string()).await?; + crate::workers::webhook::call(ctx, alloc_id.to_string()).await?; if start_ts.is_none() { tracing::info!("run started"); @@ -171,44 +156,35 @@ async fn worker( let run_row = sql_fetch_optional!( [ctx, (Uuid, Option)] r#" - WITH select_server AS ( - SELECT - servers.server_id, - servers.finish_ts - FROM - db_ds.server_nomad - INNER JOIN db_ds.servers ON servers.server_id = server_nomad.server_id - WHERE - nomad_dispatched_job_id = $1 - ), - _update_servers AS ( - UPDATE - db_ds.servers - SET - -- If the job stops immediately, the task state will never be "running" so we need to - -- make sure start_ts is set here as well - start_ts = COALESCE(start_ts, $2), - finish_ts = $2 - FROM - select_server - WHERE - servers.server_id = select_server.server_id - AND servers.finish_ts IS NULL RETURNING 1 - ), - _update_server_nomad AS ( - UPDATE - db_ds.server_nomad - SET - nomad_alloc_state = $3 - FROM - select_server - WHERE - server_nomad.server_id = select_server.server_id RETURNING 1 - ) - SELECT - * - FROM - select_server + WITH + select_server AS ( + SELECT s.server_id, s.finish_ts + FROM db_ds.server_nomad AS sn + INNER JOIN db_ds.servers AS s + ON s.server_id = sn.server_id + WHERE nomad_dispatched_job_id = $1 + ), + update_servers AS ( + UPDATE db_ds.servers + SET + -- If the job stops immediately, the task state will never be "running" so we need to + -- make sure start_ts is set here as well + start_ts = COALESCE(start_ts, $2), + finish_ts = $2 + FROM select_server + WHERE + servers.server_id = select_server.server_id AND + servers.finish_ts IS NULL + RETURNING 1 + ), + update_server_nomad AS ( + UPDATE db_ds.server_nomad + SET nomad_alloc_state = $3 + FROM select_server + WHERE server_nomad.server_id = select_server.server_id + RETURNING 1 + ) + SELECT * FROM select_server "#, job_id, ctx.ts(), @@ -225,7 +201,7 @@ async fn worker( } }; - crate::workers::webhook_call(ctx, alloc_id.to_string()).await?; + crate::workers::webhook::call(ctx, alloc_id.to_string()).await?; if finish_ts.is_none() { tracing::info!("run finished"); diff --git a/svc/pkg/ds/worker/src/workers/nomad_monitor_eval_update.rs b/svc/pkg/ds/src/workers/nomad_monitor_eval_update.rs similarity index 91% rename from svc/pkg/ds/worker/src/workers/nomad_monitor_eval_update.rs rename to svc/pkg/ds/src/workers/nomad_monitor_eval_update.rs index 0979c699c4..38c82ce382 100644 --- 
a/svc/pkg/ds/worker/src/workers/nomad_monitor_eval_update.rs +++ b/svc/pkg/ds/src/workers/nomad_monitor_eval_update.rs @@ -1,14 +1,12 @@ use chirp_worker::prelude::*; -use proto::backend::pkg::*; +use rivet_operation::prelude::proto::backend::pkg::*; use serde::Deserialize; -use crate::workers::NEW_NOMAD_CONFIG; +use crate::util::NEW_NOMAD_CONFIG; #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] -struct PlanResult { - evaluation: nomad_client::models::Evaluation, -} +struct PlanResult {} #[derive(Debug, Copy, Clone)] enum EvalStatus { @@ -27,13 +25,10 @@ struct RunRow { async fn worker( ctx: &OperationContext, ) -> GlobalResult<()> { - let _crdb = ctx.crdb().await?; - let payload_value = serde_json::from_str::(&ctx.payload_json)?; - let PlanResult { evaluation: eval } = serde_json::from_str::(&ctx.payload_json)?; - let job_id = unwrap_ref!(eval.job_id, "eval has no job id"); - let eval_status_raw = unwrap_ref!(eval.status).as_str(); + let job_id = unwrap!(unwrap!(payload_value.get("JobID"), "eval has no job id").as_str()); + let eval_status_raw = unwrap!(unwrap!(payload_value.get("Status")).as_str()); // We can't decode this with serde, so manually deserialize the response let eval_value = unwrap!(payload_value.get("Evaluation")); diff --git a/svc/pkg/ds/worker/src/workers/mod.rs b/svc/pkg/ds/src/workers/webhook.rs similarity index 72% rename from svc/pkg/ds/worker/src/workers/mod.rs rename to svc/pkg/ds/src/workers/webhook.rs index 7dfe02b161..93a22a730c 100644 --- a/svc/pkg/ds/worker/src/workers/mod.rs +++ b/svc/pkg/ds/src/workers/webhook.rs @@ -1,27 +1,13 @@ -use proto::backend::pkg::nomad; -use rivet_convert::ApiTryFrom; -use serde_json::Value; +use chirp_worker::prelude::*; +// use rivet_convert::ApiTryFrom; +use rivet_operation::prelude::proto::backend::pkg::nomad; +// use serde_json::Value; -pub mod nomad_monitor_alloc_plan; -pub mod nomad_monitor_alloc_update; -pub mod nomad_monitor_eval_update; - -chirp_worker::workers![ - nomad_monitor_alloc_plan, - nomad_monitor_alloc_update, - nomad_monitor_eval_update -]; - -lazy_static::lazy_static! { - pub static ref NEW_NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = - nomad_util::new_config_from_env().unwrap(); -} - -pub async fn webhook_call( - ctx: &OperationContext, - alloc_id: String, +pub async fn call( + _ctx: &OperationContext, + _alloc_id: String, ) -> GlobalResult<()> { - let ctx = ctx.clone(); + // let ctx = ctx.clone(); // tokio::spawn(async move { // // Get the server from the database. 
If it has a webhook_url, send all // // of the info about the server to it @@ -30,12 +16,9 @@ pub async fn webhook_call( // let server_id = match sql_fetch_optional!( // [ctx, (Uuid,)] // " - // SELECT - // server_id - // FROM - // db_ds.server_nomad - // WHERE - // nomad_alloc_id = $1 + // SELECT server_id + // FROM db_ds.server_nomad + // WHERE nomad_alloc_id = $1 // ", // alloc_id, // ) diff --git a/svc/pkg/ds/src/workflows/mod.rs b/svc/pkg/ds/src/workflows/mod.rs new file mode 100644 index 0000000000..74f47ad347 --- /dev/null +++ b/svc/pkg/ds/src/workflows/mod.rs @@ -0,0 +1 @@ +pub mod server; diff --git a/svc/pkg/ds/src/workflows/server/destroy.rs b/svc/pkg/ds/src/workflows/server/destroy.rs new file mode 100644 index 0000000000..7ce454349f --- /dev/null +++ b/svc/pkg/ds/src/workflows/server/destroy.rs @@ -0,0 +1,128 @@ +use chirp_workflow::prelude::*; +use futures_util::FutureExt; +use serde_json::json; + +use crate::util::NEW_NOMAD_CONFIG; + +#[derive(Debug, Serialize, Deserialize)] +pub(crate) struct Input { + pub server_id: Uuid, + pub override_kill_timeout_ms: i64, +} + +#[workflow] +pub(crate) async fn ds_server_destroy(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + let dynamic_server = ctx + .activity(UpdateDbInput { + server_id: input.server_id, + }) + .await?; + + ctx.activity(DeleteJobInput { + job_id: dynamic_server.dispatched_job_id.clone(), + }) + .await?; + + ctx.msg( + json!({ + "server_id": input.server_id, + }), + DestroyComplete {}, + ) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct UpdateDbInput { + server_id: Uuid, +} + +#[derive(Debug, Serialize, Deserialize, Hash, sqlx::FromRow)] +struct UpdateDbOutput { + ds_server_id: Uuid, + ds_datacenter_id: Uuid, + alloc_id: String, + dispatched_job_id: String, +} + +#[activity(UpdateDb)] +async fn update_db(ctx: &ActivityCtx, input: &UpdateDbInput) -> GlobalResult { + // Run in transaction for internal retryability + rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { + let ctx = ctx.clone(); + let server_id = input.server_id; + + async move { + sql_fetch_one!( + [ctx, UpdateDbOutput, @tx tx] + " + UPDATE db_ds.servers + SET delete_ts = $2 + WHERE + server_id = $1 AND + delete_ts IS NULL + RETURNING + server_id, + datacenter_id + server_nomad.nomad_dispatched_job_id, + server_nomad.nomad_alloc_id, + FROM db_ds.servers AS s + JOIN db_ds.server_nomad AS sn + ON s.server_id = sn.server_id + ", + server_id, + ctx.ts(), + ) + .await + } + .boxed() + }) + .await +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct DeleteJobInput { + job_id: String, +} + +#[activity(DeleteJob)] +async fn delete_job(ctx: &ActivityCtx, input: &DeleteJobInput) -> GlobalResult<()> { + // TODO: Handle 404 safely. See RIV-179 + // Stop the job. + // + // Setting purge to false will change the behavior of the create poll + // functionality if the job dies immediately. You can set it to false to + // debug lobbies, but it's preferred to extract metadata from the + // job-run-stop lifecycle event. + + match nomad_client::apis::jobs_api::delete_job( + &NEW_NOMAD_CONFIG, + &input.job_id, + Some(super::NOMAD_REGION), + None, + None, + None, + Some(false), // TODO: Maybe change back to true for performance? 
+ None, + ) + .await + { + Ok(_) => { + tracing::info!("job stopped"); + + // TODO: Manually kill the allocation after util_job::JOB_STOP_TIMEOUT + // task::spawn(async move { + // }); + } + Err(err) => { + tracing::warn!(?err, "error thrown while stopping job"); + } + } + + Ok(()) +} + +#[message("ds_server_destroy_complete")] +pub struct DestroyComplete {} diff --git a/svc/pkg/ds/src/workflows/server/mod.rs b/svc/pkg/ds/src/workflows/server/mod.rs new file mode 100644 index 0000000000..59e08c0a9f --- /dev/null +++ b/svc/pkg/ds/src/workflows/server/mod.rs @@ -0,0 +1,1335 @@ +use std::{ + collections::HashMap, + convert::TryInto, + hash::{DefaultHasher, Hasher}, + net::IpAddr, +}; + +use chirp_workflow::prelude::*; +use cluster::types::BuildDeliveryMethod; +use futures_util::FutureExt; +use nomad_client::models::*; +use rand::Rng; +use rivet_operation::prelude::proto::backend; +use serde_json::json; +use sha2::{Digest, Sha256}; +use util::AsHashableExt; + +use crate::{ + types::{ + build::{BuildCompression, BuildKind}, + GameGuardProtocol, NetworkMode, Routing, ServerResources, + }, + util::{ + nomad_job::{ + escape_go_template, gen_oci_bundle_config, inject_consul_env_template, + nomad_host_port_env_var, template_env_var_int, DecodedPort, TransportProtocol, + }, + NEW_NOMAD_CONFIG, + }, +}; + +pub mod destroy; + +const NOMAD_REGION: &str = "global"; +const SETUP_SCRIPT: &str = include_str!("./scripts/setup.sh"); +const SETUP_JOB_RUNNER_SCRIPT: &str = include_str!("./scripts/setup_job_runner.sh"); +const SETUP_OCI_BUNDLE_SCRIPT: &str = include_str!("./scripts/setup_oci_bundle.sh"); +const SETUP_CNI_NETWORK_SCRIPT: &str = include_str!("./scripts/setup_cni_network.sh"); +// const CLEANUP_SCRIPT: &str = include_str!("./scripts/cleanup.sh"); + +#[derive(Default, Clone)] +struct GameGuardUnnest { + port_names: Vec, + port_numbers: Vec>, + gg_ports: Vec>, + protocols: Vec, +} + +#[derive(Default, Clone)] +struct HostUnnest { + port_names: Vec, + port_numbers: Vec>, +} + +#[derive(Debug, Serialize, Deserialize)] +pub struct Input { + pub server_id: Uuid, + pub env_id: Uuid, + pub datacenter_id: Uuid, + pub cluster_id: Uuid, + pub tags: HashMap, + pub resources: ServerResources, + pub kill_timeout_ms: i64, + pub image_id: Uuid, + pub args: Vec, + pub network_mode: NetworkMode, + pub environment: HashMap, + pub network_ports: HashMap, +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +pub struct Port { + // Null when using host networking since one is automatically assigned + pub internal_port: Option, + pub routing: Routing, +} + +#[workflow] +pub async fn ds_server(ctx: &mut WorkflowCtx, input: &Input) -> GlobalResult<()> { + let (_, prereq) = ctx + .join(( + InsertDbInput { + server_id: input.server_id, + env_id: input.env_id, + datacenter_id: input.datacenter_id, + cluster_id: input.cluster_id, + tags: input.tags.as_hashable(), + resources: input.resources.clone(), + kill_timeout_ms: input.kill_timeout_ms, + image_id: input.image_id, + args: input.args.clone(), + network_mode: input.network_mode, + environment: input.environment.as_hashable(), + network_ports: input.network_ports.as_hashable(), + }, + GetBuildAndDcInput { + image_id: input.image_id, + datacenter_id: input.datacenter_id, + }, + )) + .await?; + + let job_id = ctx + .activity(SubmitJobInput { + datacenter_id: input.datacenter_id, + resources: input.resources.clone(), + network_mode: input.network_mode, + network_ports: input.network_ports.as_hashable(), + build_kind: prereq.build_kind, + build_compression: 
prereq.build_compression, + dc_name_id: prereq.dc_name_id, + }) + .await?; + + let artifacts = ctx + .activity(ResolveArtifactsInput { + datacenter_id: input.datacenter_id, + image_id: input.image_id, + server_id: input.server_id, + build_upload_id: prereq.build_upload_id, + build_file_name: prereq.build_file_name, + dc_build_delivery_method: prereq.dc_build_delivery_method, + }) + .await?; + + let nomad_dispatched_job_id = ctx + .activity(DispatchJobInput { + environment: input.environment.as_hashable(), + server_id: input.server_id, + build_upload_id: prereq.build_upload_id, + job_id, + image_artifact_url: artifacts.image_artifact_url, + job_runner_binary_url: artifacts.job_runner_binary_url, + }) + .await?; + + ctx.activity(UpdateDbInput { + server_id: input.server_id, + nomad_dispatched_job_id, + }) + .await?; + + ctx.msg( + json!({ + "server_id": input.server_id + }), + CreateComplete {}, + ) + .await?; + + let destroy_sig = ctx.listen::().await?; + + ctx.workflow(destroy::Input { + server_id: input.server_id, + override_kill_timeout_ms: destroy_sig.override_kill_timeout_ms, + }) + .await?; + + Ok(()) +} + +#[derive(Debug, Clone, Serialize, Deserialize, Hash)] +struct InsertDbInput { + server_id: Uuid, + env_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, + tags: util::HashableMap, + resources: ServerResources, + kill_timeout_ms: i64, + image_id: Uuid, + args: Vec, + network_mode: NetworkMode, + environment: util::HashableMap, + network_ports: util::HashableMap, +} + +#[activity(InsertDb)] +async fn insert_db(ctx: &ActivityCtx, input: &InsertDbInput) -> GlobalResult<()> { + let mut gg_unnest = GameGuardUnnest::default(); + let mut host_unnest = HostUnnest::default(); + + for (name, port) in input.network_ports.iter() { + match port.routing { + Routing::GameGuard { protocol } => { + gg_unnest.port_names.push(name.clone()); + gg_unnest.port_numbers.push(port.internal_port); + gg_unnest.gg_ports.push(if port.internal_port.is_some() { + Some(choose_ingress_port(ctx, protocol).await?) + } else { + None + }); + gg_unnest.protocols.push(protocol as i32); + } + Routing::Host { .. 
} => { + host_unnest.port_names.push(name.clone()); + host_unnest.port_numbers.push(port.internal_port); + } + }; + } + + // Run in a transaction for retryability + rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { + let ctx = ctx.clone(); + let input = input.clone(); + let host_unnest = host_unnest.clone(); + let gg_unnest = gg_unnest.clone(); + + async move { + sql_execute!( + [ctx, @tx tx] + " + WITH + server AS ( + INSERT INTO db_ds.servers ( + server_id, + env_id, + datacenter_id, + cluster_id, + tags, + resources_cpu_millicores, + resources_memory_mib, + kill_timeout_ms, + create_ts, + image_id, + args, + network_mode, + environment + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) + RETURNING 1 + ), + host_port AS ( + INSERT INTO db_ds.docker_ports_host ( + server_id, + port_name, + port_number + ) + SELECT $1, t.* + FROM unnest($14, $15) AS t(port_name, port_number) + RETURNING 1 + ), + gg_port AS ( + INSERT INTO db_ds.docker_ports_protocol_game_guard ( + server_id, + port_name, + port_number, + gg_port, + protocol + ) + SELECT $1, t.* + FROM unnest($16, $17, $18, $19) AS t(port_name, port_number, gg_port, protocol) + RETURNING 1 + ) + SELECT 1 + ", + input.server_id, + input.env_id, + input.datacenter_id, + input.cluster_id, + serde_json::to_string(&input.tags)?, // 5 + input.resources.cpu_millicores, + input.resources.memory_mib, + input.kill_timeout_ms, + ctx.ts(), + input.image_id, // 10 + &input.args, + input.network_mode as i32, + serde_json::to_string(&input.environment)?, + host_unnest.port_names, + host_unnest.port_numbers, // 15 + gg_unnest.port_names, + gg_unnest.port_numbers, + gg_unnest.gg_ports, + gg_unnest.protocols, + ) + .await + } + .boxed() + }) + .await?; + + Ok(()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct GetBuildAndDcInput { + datacenter_id: Uuid, + image_id: Uuid, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct GetBuildAndDcOutput { + build_upload_id: Uuid, + build_file_name: String, + build_kind: BuildKind, + build_compression: BuildCompression, + dc_name_id: String, + dc_build_delivery_method: BuildDeliveryMethod, +} + +#[activity(GetBuildAndDc)] +async fn get_build_and_dc( + ctx: &ActivityCtx, + input: &GetBuildAndDcInput, +) -> GlobalResult { + // Validate build exists and belongs to this game + let (build_res, dc_res) = tokio::try_join!( + op!([ctx] build_get { + build_ids: vec![input.image_id.into()], + }), + ctx.op(cluster::ops::datacenter::get::Input { + datacenter_ids: vec![input.datacenter_id], + }) + )?; + let build = unwrap!(build_res.builds.first()); + let upload_id = unwrap!(build.upload_id).as_uuid(); + let build_kind = unwrap!(backend::build::BuildKind::from_i32(build.kind)); + let build_compression = unwrap!(backend::build::BuildCompression::from_i32( + build.compression + )); + + let dc = unwrap!(dc_res.datacenters.first()); + + Ok(GetBuildAndDcOutput { + build_upload_id: upload_id, + build_file_name: util_build::file_name(build_kind, build_compression), + build_kind: unwrap!(BuildKind::from_repr(build.kind.try_into()?)), + build_compression: unwrap!(BuildCompression::from_repr(build.compression.try_into()?)), + dc_name_id: dc.name_id.clone(), + dc_build_delivery_method: dc.build_delivery_method, + }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct SubmitJobInput { + datacenter_id: Uuid, + resources: ServerResources, + network_mode: NetworkMode, + network_ports: util::HashableMap, + build_kind: BuildKind, + build_compression: BuildCompression, + dc_name_id: String, +} + 
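The `SubmitJob` activity below looks up the tiers for the datacenter, picks the smallest tier whose CPU and memory cover the requested `ServerResources`, and converts the tier's units into runc- and Nomad-compatible values. A small self-contained sketch of just that unit conversion, using made-up tier numbers (not real Rivet tiers):

```rust
// Hypothetical tier: half a core, 1024 MiB memory, 1536 MiB memory_max.
struct Tier {
	rivet_cores_numerator: u32,
	rivet_cores_denominator: u32,
	memory: u64,     // MiB
	memory_max: u64, // MiB
}

fn runc_resources(tier: &Tier) -> (u64, u64, u64) {
	// Millicores (1/1000 of a core)
	let cpu = tier.rivet_cores_numerator as u64 * 1_000 / tier.rivet_cores_denominator as u64;
	// runc expects bytes, tiers are expressed in MiB
	let memory = tier.memory * 1024 * 1024;
	let memory_max = tier.memory_max * 1024 * 1024;
	(cpu, memory, memory_max)
}

fn main() {
	let tier = Tier {
		rivet_cores_numerator: 1,
		rivet_cores_denominator: 2,
		memory: 1024,
		memory_max: 1536,
	};
	assert_eq!(runc_resources(&tier), (500, 1_073_741_824, 1_610_612_736));
}
```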
+#[activity(SubmitJob)] +async fn submit_job(ctx: &ActivityCtx, input: &SubmitJobInput) -> GlobalResult { + let tier_res = op!([ctx] tier_list { + region_ids: vec![input.datacenter_id.into()], + }) + .await?; + let tier_region = unwrap!(tier_res.regions.first()); + + // Find the first tier that has more CPU and memory than the requested + // resources + let mut tiers = tier_region.tiers.clone(); + + // Sort the tiers by cpu + tiers.sort_by(|a, b| a.cpu.cmp(&b.cpu)); + let tier = unwrap!(tiers.iter().find(|t| { + t.cpu as i32 >= input.resources.cpu_millicores + && t.memory as i32 >= input.resources.memory_mib + })); + + // runc-compatible resources + let cpu = tier.rivet_cores_numerator as u64 * 1_000 / tier.rivet_cores_denominator as u64; // Millicore (1/1000 of a core) + let memory = tier.memory * (1024 * 1024); // bytes + let memory_max = tier.memory_max * (1024 * 1024); // bytes + + // Nomad-compatible resources + let nomad_resources = Resources { + // TODO: Configure this per-provider + // Nomad configures CPU based on MHz, not millicores. We havel to calculate the CPU share + // by knowing how many MHz are on the client. + CPU: if tier.rivet_cores_numerator < tier.rivet_cores_denominator { + Some((tier.cpu - util_job::TASK_CLEANUP_CPU as u64).try_into()?) + } else { + None + }, + cores: if tier.rivet_cores_numerator >= tier.rivet_cores_denominator { + Some((tier.rivet_cores_numerator / tier.rivet_cores_denominator) as i32) + } else { + None + }, + memory_mb: Some( + (TryInto::::try_into(memory)? / (1024 * 1024) + - util_job::TASK_CLEANUP_MEMORY as i64) + .try_into()?, + ), + // Allow oversubscribing memory by 50% of the reserved + // memory if using less than the node's total memory + memory_max_mb: None, + // Some( + // (TryInto::::try_into(memory_max)? / (1024 * 1024) + // - util_job::TASK_CLEANUP_MEMORY as i64) + // .try_into()?, + // ), + disk_mb: Some(tier.disk as i32), // TODO: Is this deprecated? + ..Resources::new() + }; + + // Read ports + let decoded_ports = input + .network_ports + .iter() + .map(|(port_label, port)| match port.routing { + Routing::GameGuard { protocol } => { + let target = unwrap!(port.internal_port) as u16; + + Ok(DecodedPort { + label: port_label.clone(), + nomad_port_label: crate::util::format_nomad_port_label(port_label), + target, + proxy_protocol: protocol, + }) + } + Routing::Host { .. } => { + todo!() + } + }) + .collect::>>()?; + + // The container will set up port forwarding manually from the Nomad-defined ports on the host + // to the CNI container + let dynamic_ports = decoded_ports + .iter() + .map(|port| nomad_client::models::Port { + label: Some(port.nomad_port_label.clone()), + ..nomad_client::models::Port::new() + }) + .collect::>(); + + // Port mappings to pass to the container. Only used in bridge networking. 
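	// As an illustration only (hypothetical label and port number), a single
	// rendered entry looks roughly like:
	//   { "HostPort": "###ENV_INT:NOMAD_HOST_PORT_<nomad_label>###", "ContainerPort": 27015, "Protocol": "tcp" }
	// The ###ENV_INT:...### placeholder is rewritten into Consul template
	// syntax by inject_consul_env_template before the cni-port-mappings.json
	// template below is written out.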
+ let cni_port_mappings = decoded_ports + .clone() + .into_iter() + .map(|port| { + json!({ + "HostPort": template_env_var_int(&nomad_host_port_env_var(&port.nomad_port_label)), + "ContainerPort": port.target, + "Protocol": TransportProtocol::from(port.proxy_protocol).as_cni_protocol(), + }) + }) + .collect::>(); + + // TODO: + // let prepared_ports = input.network_ports.iter().map(|(label, port)| { + // let port_value = match input.network_mode { + // // CNI will handle mapping the host port to the container port + // NetworkMode::Bridge => unwrap!(port.internal_port).to_string(), + // // The container needs to listen on the correct port + // NetworkMode::Host => template_env_var(&nomad_host_port_env_var(&label)), + // }; + + // GlobalResult::Ok(Some(String::new())) + // // TODO + // // Port with the kebab case port key. Included for backward compatibility & for + // // less confusion. + // // Ok((format!("PORT_{}", port.label.replace('-', "_")), port_value)) + // }); + + // Also see util_ds:consts::DEFAULT_ENV_KEYS + let mut env = Vec::<(String, String)>::new() + .into_iter() + // TODO + // .chain(if lobby_config { + // Some(( + // "RIVET_LOBBY_CONFIG".to_string(), + // template_env_var("NOMAD_META_LOBBY_CONFIG"), + // )) + // } else { + // None + // }) + // .chain(if lobby_tags { + // Some(( + // "RIVET_LOBBY_TAGS".to_string(), + // template_env_var("NOMAD_META_LOBBY_TAGS"), + // )) + // } else { + // None + // }) + .chain([( + "RIVET_API_ENDPOINT".to_string(), + util::env::origin_api().to_string(), + )]) + // Ports + // TODO + // .chain(prepared_ports) + // // Port ranges + // .chain( + // decoded_ports + // .iter() + // .filter_map(|port| { + // if let PortTarget::Range { min, max } = &port.target { + // let snake_port_label = port.label.replace('-', "_"); + // Some([ + // ( + // format!("PORT_RANGE_MIN_{}", snake_port_label), + // min.to_string(), + // ), + // ( + // format!("PORT_RANGE_MAX_{}", snake_port_label), + // max.to_string(), + // ), + // ]) + // } else { + // None + // } + // }) + // .flatten(), + // ) + .map(|(k, v)| format!("{k}={v}")) + .collect::>(); + env.sort(); + + let services = decoded_ports + .iter() + .map(|port| { + let service_name = format!("${{NOMAD_META_LOBBY_ID}}-{}", port.label); + Ok(Some(Service { + provider: Some("nomad".into()), + name: Some(service_name), + tags: Some(vec!["game".into()]), + port_label: Some(port.nomad_port_label.clone()), + // checks: if TransportProtocol::from(port.proxy_protocol) + // == TransportProtocol::Tcp + // { + // Some(vec![ServiceCheck { + // name: Some(format!("{}-probe", port.label)), + // port_label: Some(port.nomad_port_label.clone()), + // _type: Some("tcp".into()), + // interval: Some(30_000_000_000), + // timeout: Some(2_000_000_000), + // ..ServiceCheck::new() + // }]) + // } else { + // None + // }, + ..Service::new() + })) + }) + .filter_map(|x| x.transpose()) + .collect::>>()?; + + // Generate the command to download and decompress the file + let mut download_cmd = r#"curl -Lf "$NOMAD_META_IMAGE_ARTIFACT_URL""#.to_string(); + match input.build_compression { + BuildCompression::None => {} + BuildCompression::Lz4 => { + download_cmd.push_str(" | lz4 -d -"); + } + } + + // IMPORTANT: This job spec must be deterministic. Do not pass in parameters + // that change with every run, such as the lobby ID. Ensure the + // `reuse_job_id` test passes when changing this function. 
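	// (The serialized spec is used to generate a hash — hence the
	// __PLACEHOLDER__ job ID/name below — so a field that changes per run
	// would produce a new hash and break the reuse behavior exercised by the
	// `reuse_job_id` test mentioned above.)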
+ let mut job_spec = Job { + _type: Some("batch".into()), + // Replace all job IDs with a placeholder value in order to create a + // deterministic job spec for generating a hash + ID: Some("__PLACEHOLDER__".into()), + name: Some("__PLACEHOLDER__".into()), + region: Some(NOMAD_REGION.into()), + datacenters: Some(vec![input.datacenter_id.to_string()]), + // constraints: Some(vec![Constraint { + // l_target: Some("${node.class}".into()), + // operand: Some("=".into()), + // r_target: Some("job".into()), + // }]), + parameterized_job: Some(Box::new(ParameterizedJobConfig { + payload: Some("forbidden".into()), + meta_required: Some(vec![ + "job_runner_binary_url".into(), + "vector_socket_addr".into(), + "image_artifact_url".into(), + "root_user_enabled".into(), + "runner".into(), + "user_env".into(), + "job_run_id".into(), + ]), + meta_optional: Some(vec!["rivet_test_id".into()]), + })), + task_groups: Some(vec![TaskGroup { + name: Some(util_job::RUN_MAIN_TASK_NAME.into()), + constraints: None, // TODO: Use parameter meta to specify the hardware + affinities: None, // TODO: + // Allows for jobs to keep running and receiving players in the + // event of a disconnection from the Nomad server. + max_client_disconnect: Some(5 * 60 * 1_000_000_000), + restart_policy: Some(Box::new(RestartPolicy { + attempts: Some(0), + mode: Some("fail".into()), + ..RestartPolicy::new() + })), + reschedule_policy: Some(Box::new(ReschedulePolicy { + attempts: Some(0), + unlimited: Some(false), + ..ReschedulePolicy::new() + })), + networks: Some(vec![NetworkResource { + // The setup.sh script will set up a CNI network if using bridge networking + mode: Some("host".into()), + dynamic_ports: Some(dynamic_ports.clone()), + // Disable IPv6 DNS since Docker doesn't support IPv6 yet + DNS: Some(Box::new(nomad_client::models::DnsConfig { + servers: Some(vec![ + // Google + "8.8.8.8".into(), + "8.8.4.4".into(), + "2001:4860:4860::8888".into(), + "2001:4860:4860::8844".into(), + ]), + // Disable default search from the host + searches: Some(Vec::new()), + options: Some(vec!["rotate".into(), "edns0".into(), "attempts:2".into()]), + })), + ..NetworkResource::new() + }]), + services: Some(services), + // Configure ephemeral disk for logs + ephemeral_disk: Some(Box::new(EphemeralDisk { + size_mb: Some(tier.disk as i32), + ..EphemeralDisk::new() + })), + tasks: Some(vec![ + // TODO + Task { + name: Some("runc-setup".into()), + lifecycle: Some(Box::new(TaskLifecycle { + hook: Some("prestart".into()), + sidecar: Some(false), + })), + driver: Some("raw_exec".into()), + config: Some({ + let mut x = HashMap::new(); + x.insert("command".into(), json!("${NOMAD_TASK_DIR}/setup.sh")); + x + }), + templates: Some(vec![ + Template { + embedded_tmpl: Some(SETUP_SCRIPT.replace( + "__HOST_NETWORK__", + match input.network_mode { + NetworkMode::Bridge => "false", + NetworkMode::Host => "true", + }, + )), + dest_path: Some("${NOMAD_TASK_DIR}/setup.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some(SETUP_JOB_RUNNER_SCRIPT.into()), + dest_path: Some("${NOMAD_TASK_DIR}/setup_job_runner.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some( + SETUP_OCI_BUNDLE_SCRIPT + .replace("__DOWNLOAD_CMD__", &download_cmd) + .replace( + "__BUILD_KIND__", + match input.build_kind { + BuildKind::DockerImage => "docker-image", + BuildKind::OciBundle => "oci-bundle", + }, + ), + ), + dest_path: Some("${NOMAD_TASK_DIR}/setup_oci_bundle.sh".into()), + perms: 
Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some(SETUP_CNI_NETWORK_SCRIPT.into()), + dest_path: Some("${NOMAD_TASK_DIR}/setup_cni_network.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some(gen_oci_bundle_config( + cpu, memory, memory_max, env, + )?), + dest_path: Some( + "${NOMAD_ALLOC_DIR}/oci-bundle-config.base.json".into(), + ), + ..Template::new() + }, + Template { + embedded_tmpl: Some(inject_consul_env_template( + &serde_json::to_string(&cni_port_mappings)?, + )?), + dest_path: Some("${NOMAD_ALLOC_DIR}/cni-port-mappings.json".into()), + ..Template::new() + }, + ]), + resources: Some(Box::new(Resources { + CPU: Some(crate::util::RUNC_SETUP_CPU), + memory_mb: Some(crate::util::RUNC_SETUP_MEMORY), + ..Resources::new() + })), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(2), + disabled: None, + })), + ..Task::new() + }, + // TODO + Task { + name: Some(util_job::RUN_MAIN_TASK_NAME.into()), + driver: Some("raw_exec".into()), + config: Some({ + let mut x = HashMap::new(); + // This is downloaded in setup_job_runner.sh + x.insert("command".into(), json!("${NOMAD_ALLOC_DIR}/job-runner")); + x + }), + resources: Some(Box::new(nomad_resources.clone())), + // Intentionally high timeout. Killing jobs is handled manually with signals. + kill_timeout: Some(86400 * 1_000_000_000), + kill_signal: Some("SIGTERM".into()), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(4), + disabled: None, + })), + ..Task::new() + }, + // TODO: Remove + // Task { + // name: Some("runc-cleanup".into()), + // lifecycle: Some(Box::new(TaskLifecycle { + // hook: Some("poststop".into()), + // sidecar: Some(false), + // })), + // driver: Some("raw_exec".into()), + // config: Some({ + // let mut x = HashMap::new(); + // x.insert("command".into(), json!("${NOMAD_TASK_DIR}/cleanup.sh")); + // x + // }), + // templates: Some(vec![Template { + // embedded_tmpl: Some(CLEANUP_SCRIPT.into()), + // dest_path: Some("${NOMAD_TASK_DIR}/cleanup.sh".into()), + // perms: Some("744".into()), + // ..Template::new() + // }]), + // resources: Some(Box::new(Resources { + // CPU: Some(util_mm::RUNC_CLEANUP_CPU), + // memory_mb: Some(util_mm::RUNC_CLEANUP_MEMORY), + // ..Resources::new() + // })), + // log_config: Some(Box::new(LogConfig { + // max_files: Some(4), + // max_file_size_mb: Some(2), + // })), + // ..Task::new() + // }, + // Run cleanup task + Task { + name: Some(util_job::RUN_CLEANUP_TASK_NAME.into()), + lifecycle: Some(Box::new(TaskLifecycle { + hook: Some("poststop".into()), + sidecar: Some(false), + })), + driver: Some("docker".into()), + config: Some({ + let mut config = HashMap::new(); + + config.insert("image".into(), json!("python:3.10.7-alpine3.16")); + config.insert( + "args".into(), + json!([ + "/bin/sh", + "-c", + "apk add --no-cache ca-certificates && python3 /local/cleanup.py" + ]), + ); + + config + }), + templates: Some(vec![Template { + dest_path: Some("local/cleanup.py".into()), + embedded_tmpl: Some(formatdoc!( + r#" + import ssl + import urllib.request, json, os, mimetypes, sys + + BEARER = '{{{{env "NOMAD_META_JOB_RUN_TOKEN"}}}}' + + ctx = ssl.create_default_context() + + def eprint(*args, **kwargs): + print(*args, file=sys.stderr, **kwargs) + + def req(method, url, data = None, headers = {{}}): + request = urllib.request.Request( + url=url, + data=data, + method=method, + headers=headers + ) + + try: + res = urllib.request.urlopen(request, context=ctx) 
+ assert res.status == 200, f"Received non-200 status: {{res.status}}" + return res + except urllib.error.HTTPError as err: + eprint(f"HTTP Error ({{err.code}} {{err.reason}}):\n\nBODY:\n{{err.read().decode()}}\n\nHEADERS:\n{{err.headers}}") + + raise err + + print(f'\n> Cleaning up job') + + res_json = None + with req('POST', f'{origin_api}/job/runs/cleanup', + data = json.dumps({{}}).encode(), + headers = {{ + 'Authorization': f'Bearer {{BEARER}}', + 'Content-Type': 'application/json' + }} + ) as res: + res_json = json.load(res) + + + print('\n> Finished') + "#, + origin_api = util::env::origin_api(), + )), + ..Template::new() + }]), + resources: Some(Box::new(Resources { + CPU: Some(util_job::TASK_CLEANUP_CPU), + memory_mb: Some(util_job::TASK_CLEANUP_MEMORY), + ..Resources::new() + })), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(2), + disabled: Some(false), + })), + ..Task::new() + }, + ]), + ..TaskGroup::new() + }]), + // Disables rescheduling in the event of a node drain + reschedule: Some(Box::new(ReschedulePolicy { + attempts: Some(0), + ..ReschedulePolicy::new() + })), + ..Job::new() + }; + + // Derive jobspec hash + // + // We serialize the JSON to a canonical string then take a SHA hash of the output. + let job_cjson_str = match cjson::to_string(&job_spec) { + Ok(x) => x, + Err(err) => { + tracing::error!(?err, "cjson serialization failed"); + bail!("cjson serialization failed") + } + }; + let job_hash = Sha256::digest(job_cjson_str.as_bytes()); + let job_hash_str = hex::encode(job_hash); + + // Generate new job ID + let job_id = format!( + "job-{hash}:{dc}", + hash = &job_hash_str[0..12], + dc = input.dc_name_id, + ); + job_spec.ID = Some(job_id.clone()); + job_spec.name = Some(job_id.clone()); + + tracing::info!("submitting job"); + + nomad_client::apis::jobs_api::post_job( + &NEW_NOMAD_CONFIG, + &job_id, + nomad_client::models::JobRegisterRequest { + job: Some(Box::new(job_spec)), + ..nomad_client::models::JobRegisterRequest::new() + }, + Some(NOMAD_REGION), + None, + None, + None, + ) + .await?; + + Ok(job_id) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ResolveArtifactsInput { + datacenter_id: Uuid, + image_id: Uuid, + server_id: Uuid, + build_upload_id: Uuid, + build_file_name: String, + dc_build_delivery_method: BuildDeliveryMethod, +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct ResolveArtifactsOutput { + image_artifact_url: String, + job_runner_binary_url: String, +} + +#[activity(ResolveArtifacts)] +async fn resolve_artifacts( + ctx: &ActivityCtx, + input: &ResolveArtifactsInput, +) -> GlobalResult { + let upload_res = op!([ctx] upload_get { + upload_ids: vec![input.build_upload_id.into()], + }) + .await?; + let upload = unwrap!(upload_res.uploads.first()); + let upload_id = unwrap_ref!(upload.upload_id).as_uuid(); + + // Get provider + let proto_provider = unwrap!( + backend::upload::Provider::from_i32(upload.provider), + "invalid upload provider" + ); + let provider = match proto_provider { + backend::upload::Provider::Minio => s3_util::Provider::Minio, + backend::upload::Provider::Backblaze => s3_util::Provider::Backblaze, + backend::upload::Provider::Aws => s3_util::Provider::Aws, + }; + + let image_artifact_url = resolve_image_artifact_url( + ctx, + input.datacenter_id, + input.build_file_name.clone(), + input.dc_build_delivery_method, + input.image_id, + upload_id, + provider, + ) + .await?; + let job_runner_binary_url = + resolve_job_runner_binary_url(ctx, input.datacenter_id, 
input.dc_build_delivery_method) + .await?; + + // MARK: Insert into db + sql_execute!( + [ctx] + " + INSERT INTO db_ds.server_nomad (server_id) + VALUES ($1) + ", + input.server_id, + ) + .await?; + + Ok(ResolveArtifactsOutput { + image_artifact_url, + job_runner_binary_url, + }) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct DispatchJobInput { + environment: util::HashableMap, + server_id: Uuid, + job_id: String, + build_upload_id: Uuid, + image_artifact_url: String, + job_runner_binary_url: String, +} + +#[activity(DispatchJob)] +async fn dispatch_job(ctx: &ActivityCtx, input: &DispatchJobInput) -> GlobalResult { + let parameters = vec![ + backend::job::Parameter { + key: "job_runner_binary_url".into(), + value: input.job_runner_binary_url.clone(), + }, + backend::job::Parameter { + key: "vector_socket_addr".into(), + value: "127.0.0.1:5021".to_string(), + }, + backend::job::Parameter { + key: "image_artifact_url".into(), + value: input.image_artifact_url.clone(), + }, + backend::job::Parameter { + key: "root_user_enabled".into(), + // TODO make table dynamic host, make reference so that we can find + // other locations + value: "0".into(), + }, + backend::job::Parameter { + key: "runner".into(), + value: "dynamic_servers".into(), + }, + backend::job::Parameter { + key: "user_env".into(), + // other locations + value: unwrap!(serde_json::to_string( + &input + .environment + .iter() + .map(|(k, v)| (k.clone(), escape_go_template(v))) + .collect::>(), + )), + }, + ] + .into_iter() + .collect::>(); + + let job_params = vec![("job_run_id".to_string(), input.server_id.to_string())]; + + // MARK: Dispatch job + let dispatch_res = nomad_client::apis::jobs_api::post_job_dispatch( + &NEW_NOMAD_CONFIG, + &input.job_id, + nomad_client::models::JobDispatchRequest { + job_id: Some(input.job_id.clone()), + payload: None, + meta: Some( + parameters + .iter() + .map(|p| (p.key.clone(), p.value.clone())) + .chain(job_params.into_iter()) + .collect::>(), + ), + }, + Some(NOMAD_REGION), + None, + None, + None, + ) + .await?; + + // We will use the dispatched job ID to identify this allocation for the future. We can't use + // eval ID, since that changes if we mutate the allocation (i.e. try to stop it). + let nomad_dispatched_job_id = unwrap_ref!(dispatch_res.dispatched_job_id); + + Ok(nomad_dispatched_job_id.clone()) +} + +#[derive(Debug, Serialize, Deserialize, Hash)] +struct UpdateDbInput { + server_id: Uuid, + nomad_dispatched_job_id: String, +} + +#[activity(UpdateDb)] +async fn update_db(ctx: &ActivityCtx, input: &UpdateDbInput) -> GlobalResult<()> { + // MARK: Write to db after run + sql_execute!( + [ctx] + " + UPDATE db_ds.server_nomad + SET nomad_dispatched_job_id = $2 + WHERE server_id = $1 + ", + input.server_id, + &input.nomad_dispatched_job_id, + ) + .await?; + + Ok(()) +} + +#[message("ds_server_create_complete")] +pub struct CreateComplete {} + +#[signal("ds_server_destroy")] +pub struct Destroy { + pub override_kill_timeout_ms: i64, +} + +/// Choose which port to assign for a job's ingress port. +/// +/// If not provided by `ProxiedPort`, then: +/// - HTTP: 80 +/// - HTTPS: 443 +/// - TCP/TLS: random +/// - UDP: random +/// +/// This is somewhat poorly written for TCP & UDP ports and may bite us in the ass +/// some day. 
See https://linear.app/rivet-gg/issue/RIV-1799 +async fn choose_ingress_port(ctx: &ActivityCtx, protocol: GameGuardProtocol) -> GlobalResult { + match protocol { + GameGuardProtocol::Http => Ok(80), + GameGuardProtocol::Https => Ok(443), + GameGuardProtocol::Tcp | GameGuardProtocol::TcpTls => { + bind_with_retries( + ctx, + protocol, + util::net::job::MIN_INGRESS_PORT_TCP..=util::net::job::MAX_INGRESS_PORT_TCP, + ) + .await + } + GameGuardProtocol::Udp => { + bind_with_retries( + ctx, + protocol, + util::net::job::MIN_INGRESS_PORT_UDP..=util::net::job::MAX_INGRESS_PORT_UDP, + ) + .await + } + } +} + +async fn bind_with_retries( + ctx: &ActivityCtx, + proxy_protocol: GameGuardProtocol, + range: std::ops::RangeInclusive, +) -> GlobalResult { + let mut attempts = 3u32; + + // Try to bind to a random port, verifying that it is not already bound + loop { + if attempts == 0 { + bail!("failed all attempts to bind to unique port"); + } + attempts -= 1; + + let port = rand::thread_rng().gen_range(range.clone()) as i32; + + let (already_exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS( + SELECT 1 + FROM db_ds.servers AS r + JOIN db_ds.docker_ports_protocol_game_guard AS p + ON r.server_id = p.server_id + WHERE + r.cleanup_ts IS NULL AND + p.gg_port = $1 AND + p.protocol = $2 + ) + ", + port, + proxy_protocol as i32, + ) + .await?; + + if !already_exists { + break Ok(port); + } + + tracing::info!(?port, ?attempts, "port collision, retrying"); + } +} + +/// Generates a presigned URL for the build image. +async fn resolve_image_artifact_url( + ctx: &ActivityCtx, + datacenter_id: Uuid, + build_file_name: String, + build_delivery_method: BuildDeliveryMethod, + build_id: Uuid, + upload_id: Uuid, + provider: s3_util::Provider, +) -> GlobalResult { + // Build URL + match build_delivery_method { + BuildDeliveryMethod::S3Direct => { + tracing::info!("using s3 direct delivery"); + + // Build client + let s3_client = s3_util::Client::from_env_opt( + "bucket-build", + provider, + s3_util::EndpointKind::External, + ) + .await?; + + let presigned_req = s3_client + .get_object() + .bucket(s3_client.bucket()) + .key(format!("{upload_id}/{build_file_name}")) + .presigned( + s3_util::aws_sdk_s3::presigning::config::PresigningConfig::builder() + .expires_in(std::time::Duration::from_secs(15 * 60)) + .build()?, + ) + .await?; + + let addr = presigned_req.uri().clone(); + + let addr_str = addr.to_string(); + tracing::info!(addr = %addr_str, "resolved artifact s3 presigned request"); + + Ok(addr_str) + } + BuildDeliveryMethod::TrafficServer => { + tracing::info!("using traffic server delivery"); + + // Hash build so that the ATS server that we download the build from is always the same one. This + // improves cache hit rates and reduces download times. 
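The consistent-routing approach described in the comment above amounts to hashing the build ID and taking the hash modulo the number of candidate ATS nodes (the SQL just below does this with `OFFSET abs($3 % GREATEST(...))`), so repeated pulls of the same build land on the same cache. A simplified sketch with hypothetical names, assuming the candidate vlan IPs have already been fetched:

```rust
use std::collections::hash_map::DefaultHasher;
use std::hash::Hasher;
use std::net::IpAddr;

// Illustrative only: route the same build to the same ATS node so its
// cache stays warm. `candidates` stands in for the vlan IPs selected by
// the SQL query in `resolve_image_artifact_url`.
fn pick_ats_node(build_id: uuid::Uuid, candidates: &[IpAddr]) -> Option<IpAddr> {
    let mut hasher = DefaultHasher::new();
    hasher.write(build_id.as_bytes());
    let hash = hasher.finish();

    candidates
        .get((hash % candidates.len().max(1) as u64) as usize)
        .copied()
}
```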
+ let mut hasher = DefaultHasher::new(); + hasher.write(build_id.as_bytes()); + let hash = hasher.finish() as i64; + + // NOTE: The algorithm for choosing the vlan_ip from the hash should match the one in + // prewarm_ats.rs @ prewarm_ats_cache + // Get vlan ip from build id hash for consistent routing + let (ats_vlan_ip,) = sql_fetch_one!( + [ctx, (IpAddr,)] + " + WITH sel AS ( + -- Select candidate vlan ips + SELECT + vlan_ip + FROM db_cluster.servers + WHERE + datacenter_id = $1 AND + pool_type2 = $2 AND + vlan_ip IS NOT NULL AND + install_complete_ts IS NOT NULL AND + drain_ts IS NULL AND + cloud_destroy_ts IS NULL + ) + SELECT vlan_ip + FROM sel + -- Use mod to make sure the hash stays within bounds + OFFSET abs($3 % GREATEST((SELECT COUNT(*) FROM sel), 1)) + LIMIT 1 + ", + &datacenter_id, + serde_json::to_string(&cluster::types::PoolType::Ats)?, + hash, + ) + .await?; + + let addr = format!( + "http://{vlan_ip}:8080/s3-cache/{provider}/{namespace}-bucket-build/{upload_id}/{build_file_name}", + vlan_ip = ats_vlan_ip, + provider = heck::KebabCase::to_kebab_case(provider.as_str()), + namespace = util::env::namespace(), + upload_id = upload_id, + ); + + tracing::info!(%addr, "resolved artifact s3 url"); + + Ok(addr) + } + } +} + +/// Generates a presigned URL for the job runner binary. +async fn resolve_job_runner_binary_url( + ctx: &ActivityCtx, + datacenter_id: Uuid, + build_delivery_method: BuildDeliveryMethod, +) -> GlobalResult { + // Get provider + let provider = s3_util::Provider::default()?; + + let file_name = std::env::var("JOB_RUNNER_BINARY_KEY")?; + + // Build URL + match build_delivery_method { + BuildDeliveryMethod::S3Direct => { + tracing::info!("job runner using s3 direct delivery"); + + // Build client + let s3_client = s3_util::Client::from_env_opt( + "bucket-infra-artifacts", + provider, + s3_util::EndpointKind::External, + ) + .await?; + let presigned_req = s3_client + .get_object() + .bucket(s3_client.bucket()) + .key(file_name) + .presigned( + s3_util::aws_sdk_s3::presigning::config::PresigningConfig::builder() + .expires_in(std::time::Duration::from_secs(15 * 60)) + .build()?, + ) + .await?; + + let addr = presigned_req.uri().clone(); + + let addr_str = addr.to_string(); + tracing::info!(addr = %addr_str, "resolved job runner presigned request"); + + Ok(addr_str) + } + BuildDeliveryMethod::TrafficServer => { + tracing::info!("job runner using traffic server delivery"); + + // Choose a random ATS node to pull from + let (ats_vlan_ip,) = sql_fetch_one!( + [ctx, (IpAddr,)] + " + WITH sel AS ( + -- Select candidate vlan ips + SELECT + vlan_ip + FROM db_cluster.servers + WHERE + datacenter_id = $1 AND + pool_type2 = $2 AND + vlan_ip IS NOT NULL AND + install_complete_ts IS NOT NULL AND + drain_ts IS NULL AND + cloud_destroy_ts IS NULL + ) + SELECT vlan_ip + FROM sel + ORDER BY random() + LIMIT 1 + ", + &datacenter_id, + serde_json::to_string(&cluster::types::PoolType::Ats)?, + ) + .await?; + + let addr = format!( + "http://{vlan_ip}:8080/s3-cache/{provider}/{namespace}-bucket-infra-artifacts/{file_name}", + vlan_ip = ats_vlan_ip, + provider = heck::KebabCase::to_kebab_case(provider.as_str()), + namespace = util::env::namespace(), + ); + + tracing::info!(%addr, "resolved artifact s3 url"); + + Ok(addr) + } + } +} diff --git a/svc/pkg/ds/ops/server-create/src/scripts/cleanup.sh b/svc/pkg/ds/src/workflows/server/scripts/cleanup.sh similarity index 100% rename from svc/pkg/ds/ops/server-create/src/scripts/cleanup.sh rename to 
svc/pkg/ds/src/workflows/server/scripts/cleanup.sh diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup.sh b/svc/pkg/ds/src/workflows/server/scripts/setup.sh similarity index 100% rename from svc/pkg/ds/ops/server-create/src/scripts/setup.sh rename to svc/pkg/ds/src/workflows/server/scripts/setup.sh diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup_cni_network.sh b/svc/pkg/ds/src/workflows/server/scripts/setup_cni_network.sh similarity index 100% rename from svc/pkg/ds/ops/server-create/src/scripts/setup_cni_network.sh rename to svc/pkg/ds/src/workflows/server/scripts/setup_cni_network.sh diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup_job_runner.sh b/svc/pkg/ds/src/workflows/server/scripts/setup_job_runner.sh similarity index 100% rename from svc/pkg/ds/ops/server-create/src/scripts/setup_job_runner.sh rename to svc/pkg/ds/src/workflows/server/scripts/setup_job_runner.sh diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup_oci_bundle.sh b/svc/pkg/ds/src/workflows/server/scripts/setup_oci_bundle.sh similarity index 100% rename from svc/pkg/ds/ops/server-create/src/scripts/setup_oci_bundle.sh rename to svc/pkg/ds/src/workflows/server/scripts/setup_oci_bundle.sh diff --git a/svc/pkg/ds/worker/tests/common.rs b/svc/pkg/ds/tests/common.rs similarity index 100% rename from svc/pkg/ds/worker/tests/common.rs rename to svc/pkg/ds/tests/common.rs diff --git a/svc/pkg/ds/worker/tests/lobby_connectivity.rs b/svc/pkg/ds/tests/lobby_connectivity.rs similarity index 100% rename from svc/pkg/ds/worker/tests/lobby_connectivity.rs rename to svc/pkg/ds/tests/lobby_connectivity.rs diff --git a/svc/pkg/ds/ops/server-create/tests/print_test_data.rs b/svc/pkg/ds/tests/print_test_data.rs similarity index 95% rename from svc/pkg/ds/ops/server-create/tests/print_test_data.rs rename to svc/pkg/ds/tests/print_test_data.rs index 6aeaba18aa..4e11f9d704 100644 --- a/svc/pkg/ds/ops/server-create/tests/print_test_data.rs +++ b/svc/pkg/ds/tests/print_test_data.rs @@ -62,7 +62,7 @@ async fn print_test_data(ctx: TestCtx) { token::create::request::KindNew { entitlements: vec![proto::claims::Entitlement { kind: Some(proto::claims::entitlement::Kind::EnvService( proto::claims::entitlement::EnvService { - env_id: Some(env_id.clone()), + env_id: Some(env_id), } )), }]}, @@ -81,9 +81,9 @@ async fn print_test_data(ctx: TestCtx) { issuer: "test".to_owned(), kind: Some(token::create::request::Kind::New( token::create::request::KindNew { entitlements: vec![proto::claims::Entitlement { - kind: Some(proto::claims::entitlement::Kind::GameCloud( - proto::claims::entitlement::GameCloud { - game_id: game_res.game_id.clone(), + kind: Some(proto::claims::entitlement::Kind::EnvService( + proto::claims::entitlement::EnvService { + env_id: Some(env_id), } )), },proto::claims::Entitlement { @@ -94,7 +94,7 @@ async fn print_test_data(ctx: TestCtx) { )), }]}, )), - label: Some("cloud".to_owned()), + label: Some("env_service".to_owned()), ..Default::default() }) .await @@ -115,7 +115,7 @@ async fn print_test_data(ctx: TestCtx) { )), }]}, )), - label: Some("env".to_owned()), + label: Some("user".to_owned()), ..Default::default() }) .await diff --git a/svc/pkg/ds/ops/server-create/tests/integration.rs b/svc/pkg/ds/tests/server_create.rs similarity index 65% rename from svc/pkg/ds/ops/server-create/tests/integration.rs rename to svc/pkg/ds/tests/server_create.rs index b2f6c631b7..ccf2235ed2 100644 --- a/svc/pkg/ds/ops/server-create/tests/integration.rs +++ b/svc/pkg/ds/tests/server_create.rs @@ -1,14 +1,11 @@ -use 
std::collections::HashMap; - use chirp_workflow::prelude::*; -use rivet_api::{apis::*, models}; +use ds::types; +// use rivet_api::{apis::*, models}; use rivet_operation::prelude::proto::{ self, - backend::{ - self, - pkg::{dynamic_servers, token}, - }, + backend::{self, pkg::token}, }; +use serde_json::json; #[workflow_test] async fn create(ctx: TestCtx) { @@ -29,7 +26,7 @@ async fn create(ctx: TestCtx) { token::create::request::KindNew { entitlements: vec![proto::claims::Entitlement { kind: Some(proto::claims::entitlement::Kind::EnvService( proto::claims::entitlement::EnvService { - env_id: Some(env_id.clone()) + env_id: Some(env_id), } )), }]}, @@ -52,22 +49,7 @@ async fn create(ctx: TestCtx) { .unwrap() .to_owned(); - // Pick an existing datacenter - let datacenter_id = ctx - .op(cluster::ops::datacenter::list::Input { - cluster_ids: vec![cluster_id], - }) - .await - .unwrap() - .clusters - .first() - .unwrap() - .datacenter_ids - .first() - .unwrap() - .to_owned(); - - let build_res: backend::pkg::faker::build::Response = op!([ctx] faker_build { + let build_res = op!([ctx] faker_build { env_id: Some(env_id), image: backend::faker::Image::DsEcho as i32, }) @@ -88,38 +70,56 @@ async fn create(ctx: TestCtx) { let ports = vec![( "testing2".to_string(), - dynamic_servers::server_create::Port { + ds::workflows::server::Port { internal_port: Some(28234), - routing: Some(dynamic_servers::server_create::port::Routing::GameGuard( - backend::ds::GameGuardRouting { protocol: 0 }, - )), + routing: types::Routing::GameGuard { + protocol: types::GameGuardProtocol::Http, + }, }, )] // Collect into hashmap .into_iter() .collect(); - let server = op!([ctx] ds_server_create { - env_id: Some(env_id), - cluster_id: Some(cluster_id.into()), - datacenter_id: faker_region.region_id, - resources: Some(proto::backend::ds::ServerResources { cpu_millicores: 100, memory_mib: 200 }), - kill_timeout_ms: 0, - // webhook_url: Some("https://rivettest.free.beeceptor.com".to_string()), - tags: vec![(String::from("test"), String::from("123"))] - .into_iter() - .collect(), - args: Vec::new(), - environment: env, - image_id: Some(build_res.build_id.unwrap()), - network_mode: 0, - network_ports: ports, - }) + let server_id = Uuid::new_v4(); + + let mut sub = ctx + .subscribe::(&json!({ + "server_id": server_id, + })) + .await + .unwrap(); + + ctx.dispatch_tagged_workflow( + &json!({ + "server_id": server_id, + }), + ds::workflows::server::Input { + server_id, + env_id: *env_id, + cluster_id, + datacenter_id: faker_region.region_id.unwrap().as_uuid(), + resources: ds::types::ServerResources { + cpu_millicores: 100, + memory_mib: 200, + }, + kill_timeout_ms: 0, + // webhook_url: Some("https://rivettest.free.beeceptor.com".to_string()), + tags: vec![(String::from("test"), String::from("123"))] + .into_iter() + .collect(), + args: Vec::new(), + environment: env, + image_id: build_res.build_id.unwrap().as_uuid(), + network_mode: types::NetworkMode::Bridge, + network_ports: ports, + }, + ) .await - .unwrap() - .server .unwrap(); + sub.next().await.unwrap(); + // TODO: Switch this // let hostname = format!( // "{}-{}.server.{}.rivet.run", @@ -130,7 +130,7 @@ async fn create(ctx: TestCtx) { let hostname = format!( "{}-{}.lobby.{}.{}", - server.server_id.unwrap(), + server_id, "testing2", faker_region.region_id.unwrap(), util::env::domain_job().unwrap(), diff --git a/svc/pkg/ds/tests/server_get.rs b/svc/pkg/ds/tests/server_get.rs new file mode 100644 index 0000000000..1b7083b954 --- /dev/null +++ b/svc/pkg/ds/tests/server_get.rs @@ 
-0,0 +1,102 @@ +use std::collections::HashMap; + +use chirp_workflow::prelude::*; +use ds::types; +use rivet_operation::prelude::proto::backend; +use serde_json::json; + +#[workflow_test] +async fn server_get(ctx: TestCtx) { + let game_res = op!([ctx] faker_game { + ..Default::default() + }) + .await + .unwrap(); + let env_id = game_res.prod_env_id.unwrap(); + + // Pick an existing cluster + let cluster_id = ctx + .op(cluster::ops::list::Input {}) + .await + .unwrap() + .cluster_ids + .first() + .unwrap() + .to_owned(); + + let build_res: backend::pkg::faker::build::Response = op!([ctx] faker_build { + env_id: Some(env_id), + image: backend::faker::Image::DsEcho as i32, + }) + .await + .unwrap(); + + let faker_region = op!([ctx] faker_region {}).await.unwrap(); + + let env = vec![ + ("some_envkey_test".to_string(), "2134523".to_string()), + ( + "some_other_envkey_test".to_string(), + "4325234356".to_string(), + ), + ] + .into_iter() + .collect(); + + let ports = vec![( + "testing2".to_string(), + ds::workflows::server::Port { + internal_port: Some(28234), + routing: types::Routing::GameGuard { + protocol: types::GameGuardProtocol::Http, + }, + }, + )] + .into_iter() + .collect(); + + let server_id = Uuid::new_v4(); + + let mut sub = ctx + .subscribe::(&json!({ + "server_id": server_id, + })) + .await + .unwrap(); + + ctx.dispatch_tagged_workflow( + &json!({ + "server_id": server_id, + }), + ds::workflows::server::Input { + server_id, + env_id: *env_id, + cluster_id, + datacenter_id: faker_region.region_id.unwrap().as_uuid(), + resources: ds::types::ServerResources { + cpu_millicores: 100, + memory_mib: 200, + }, + kill_timeout_ms: 0, + tags: HashMap::new(), + args: Vec::new(), + environment: env, + image_id: build_res.build_id.unwrap().as_uuid(), + network_mode: types::NetworkMode::Bridge, + network_ports: ports, + }, + ) + .await + .unwrap(); + + sub.next().await.unwrap(); + + ctx.op(ds::ops::server::get::Input { + server_ids: vec![server_id], + }) + .await + .unwrap() + .servers + .first() + .unwrap(); +} diff --git a/svc/pkg/ds/util/Cargo.toml b/svc/pkg/ds/util/Cargo.toml deleted file mode 100644 index 57949c5e44..0000000000 --- a/svc/pkg/ds/util/Cargo.toml +++ /dev/null @@ -1,23 +0,0 @@ -[package] -name = "rivet-util-ds" -version = "0.1.0" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -bit-vec = "0.6" -chirp-client = { path = "../../../../lib/chirp/client" } -heck = "0.3" -http = "0.2" -rivet-operation = { path = "../../../../lib/operation/core" } -rivet-util = { path = "../../../../lib/util/core" } -serde = { version = "1.0", features = ["derive"] } -serde_json = "1.0" -strum = { version = "0.24", features = ["derive"] } -uuid = { version = "1", features = ["v4", "serde"] } - -ip-info = { path = "../../ip/ops/info" } -mm-lobby-list-for-user-id = { path = "../../mm/ops/lobby-list-for-user-id" } -region-get = { path = "../../region/ops/get" } -user-identity-get = { path = "../../user-identity/ops/get" } diff --git a/svc/pkg/ds/worker/Cargo.toml b/svc/pkg/ds/worker/Cargo.toml deleted file mode 100644 index c6e0272a81..0000000000 --- a/svc/pkg/ds/worker/Cargo.toml +++ /dev/null @@ -1,42 +0,0 @@ -[package] -name = "ds-worker" -version = "0.0.1" -edition = "2021" -authors = ["Rivet Gaming, LLC "] -license = "Apache-2.0" - -[dependencies] -chirp-client = { path = "../../../../lib/chirp/client" } -chirp-worker = { path = "../../../../lib/chirp/worker" } -chrono = "0.4" -lazy_static = "1.4.0" -nomad-util = { path = "../../../../lib/nomad-util" } 
-rivet-convert = { path = "../../../../lib/convert" } -rivet-health-checks = { path = "../../../../lib/health-checks" } -rivet-metrics = { path = "../../../../lib/metrics" } -rivet-runtime = { path = "../../../../lib/runtime" } -serde = { version = "1.0", features = ["derive"] } -util-job = { package = "rivet-util-job", path = "../../job/util" } -reqwest = { version = "0.12", features = ["json"] } -region-get = { path = "../../region/ops/get" } -rivet-api = { path = "../../../../sdks/full/rust" } - - - -mm-lobby-get = { path = "../../mm/ops/lobby-get" } -job-run-get = { path = "../../job-run/ops/get" } -ds-server-get = { path = "../ops/server-get" } -mm-config-version-get = { path = "../../mm-config/ops/version-get" } - -[dev-dependencies] -chirp-worker = { path = "../../../../lib/chirp/worker" } - -[dependencies.sqlx] -git = "https://github.com/rivet-gg/sqlx" -rev = "08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" -default-features = false - -[dependencies.nomad_client] -package = "nomad_client" -git = "https://github.com/rivet-gg/nomad-client" -rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret diff --git a/svc/pkg/ds/worker/Service.toml b/svc/pkg/ds/worker/Service.toml deleted file mode 100644 index 1ac4bc9454..0000000000 --- a/svc/pkg/ds/worker/Service.toml +++ /dev/null @@ -1,8 +0,0 @@ -[service] -name = "ds-worker" - -[runtime] -kind = "rust" - -[consumer] - diff --git a/svc/pkg/ds/worker/src/lib.rs b/svc/pkg/ds/worker/src/lib.rs deleted file mode 100644 index 3719b10aa8..0000000000 --- a/svc/pkg/ds/worker/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod workers; diff --git a/svc/pkg/monolith/standalone/worker/Cargo.toml b/svc/pkg/monolith/standalone/worker/Cargo.toml index b52d35a096..c8bdc51772 100644 --- a/svc/pkg/monolith/standalone/worker/Cargo.toml +++ b/svc/pkg/monolith/standalone/worker/Cargo.toml @@ -23,7 +23,7 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ cdn-worker = { path = "../../../cdn/worker" } cf-custom-hostname-worker = { path = "../../../cf-custom-hostname/worker" } cloud-worker = { path = "../../../cloud/worker" } -ds-worker = { path = "../../../ds/worker" } +ds = { path = "../../../ds" } external-worker = { path = "../../../external/worker" } game-user-worker = { path = "../../../game-user/worker" } job-log-worker = { path = "../../../job-log/worker" } diff --git a/svc/pkg/monolith/standalone/worker/src/lib.rs b/svc/pkg/monolith/standalone/worker/src/lib.rs index 6321ceb62d..1e861f2152 100644 --- a/svc/pkg/monolith/standalone/worker/src/lib.rs +++ b/svc/pkg/monolith/standalone/worker/src/lib.rs @@ -1,9 +1,9 @@ use rivet_operation::prelude::*; macro_rules! spawn_workers { - ([$shared_client:ident, $pools:ident, $cache:ident, $join_set:ident] $($worker:ident),* $(,)?) => { + ([$shared_client:ident, $pools:ident, $cache:ident, $join_set:ident] $($pkg:ident),* $(,)?) => { $( - $worker::workers::spawn_workers( + $pkg::workers::spawn_workers( $shared_client.clone(), $pools.clone(), $cache.clone(), @@ -25,7 +25,7 @@ pub async fn run_from_env(pools: rivet_pools::Pools) -> GlobalResult<()> { cdn_worker, cf_custom_hostname_worker, cloud_worker, - ds_worker, + ds, external_worker, game_user_worker, job_log_worker,