diff --git a/.vscode/rivet.code-workspace b/.vscode/rivet.code-workspace
index 445d8457b..34801add3 100644
--- a/.vscode/rivet.code-workspace
+++ b/.vscode/rivet.code-workspace
@@ -6,5 +6,8 @@
 		{
 			"path": "../lib/bolt",
 		},
+		{
+			"path": "../svc",
+		}
 	],
 }
diff --git a/fern/definition/dynamic-servers/common.yml b/fern/definition/dynamic-servers/common.yml
index 81dc5f6d1..982843830 100644
--- a/fern/definition/dynamic-servers/common.yml
+++ b/fern/definition/dynamic-servers/common.yml
@@ -75,4 +75,10 @@ types:
       - udp
 
   DockerHostRouting:
-    properties: {}
\ No newline at end of file
+    properties:
+      protocol: optional<HostProtocol>
+
+  HostProtocol:
+    enum:
+      - tcp
+      - udp
diff --git a/infra/tf/modules/secrets/main.tf b/infra/tf/modules/secrets/main.tf
index 412412700..b42ba22d0 100644
--- a/infra/tf/modules/secrets/main.tf
+++ b/infra/tf/modules/secrets/main.tf
@@ -2,7 +2,7 @@ terraform {
 	required_providers {
 		external = {
 			source = "hashicorp/external"
-			version = "2.3.1"
+			version = "2.3.3"
 		}
 	}
 }
diff --git a/lib/convert/src/impls/ds.rs b/lib/convert/src/impls/ds.rs
index 801836c92..526c7b55e 100644
--- a/lib/convert/src/impls/ds.rs
+++ b/lib/convert/src/impls/ds.rs
@@ -9,7 +9,9 @@ use serde_json::to_value;
 impl ApiTryFrom<backend::dynamic_servers::Server> for models::DynamicServersServer {
 	type Error = GlobalError;
 
-	fn api_try_from(value: backend::dynamic_servers::Server) -> GlobalResult<models::DynamicServersServer> {
+	fn api_try_from(
+		value: backend::dynamic_servers::Server,
+	) -> GlobalResult<models::DynamicServersServer> {
 		Ok(models::DynamicServersServer {
 			cluster_id: unwrap!(value.cluster_id).as_uuid(),
 			create_ts: value.create_ts,
@@ -26,7 +28,9 @@ impl ApiTryFrom for models::DynamicServersServ
 }
 
 impl ApiFrom<models::DynamicServersResources> for backend::dynamic_servers::ServerResources {
-	fn api_from(value: models::DynamicServersResources) -> backend::dynamic_servers::ServerResources {
+	fn api_from(
+		value: models::DynamicServersResources,
+	) -> backend::dynamic_servers::ServerResources {
 		backend::dynamic_servers::ServerResources {
 			cpu_millicores: value.cpu,
 			memory_mib: value.memory,
@@ -35,7 +39,9 @@ impl ApiFrom for backend::dynamic_servers::Serv
 }
 
 impl ApiFrom<backend::dynamic_servers::ServerResources> for models::DynamicServersResources {
-	fn api_from(value: backend::dynamic_servers::ServerResources) -> models::DynamicServersResources {
+	fn api_from(
+		value: backend::dynamic_servers::ServerResources,
+	) -> models::DynamicServersResources {
 		models::DynamicServersResources {
 			cpu: value.cpu_millicores,
 			memory: value.memory_mib,
@@ -43,7 +49,9 @@ impl ApiFrom for models::DynamicServe
 	}
 }
 
-impl ApiTryFrom<models::DynamicServersRuntime> for backend::pkg::dynamic_servers::server_create::request::Runtime {
+impl ApiTryFrom<models::DynamicServersRuntime>
+	for backend::pkg::dynamic_servers::server_create::request::Runtime
+{
 	type Error = GlobalError;
 
 	fn api_try_from(
@@ -97,8 +105,9 @@ impl ApiTryFrom for backend::dynamic_server
 		value: models::DynamicServersDockerNetwork,
 	) -> GlobalResult<backend::dynamic_servers::DockerNetwork> {
 		Ok(backend::dynamic_servers::DockerNetwork {
-			mode: backend::dynamic_servers::DockerNetworkMode::api_from(value.mode.unwrap_or_default())
-				as i32,
+			mode: backend::dynamic_servers::DockerNetworkMode::api_from(
+				value.mode.unwrap_or_default(),
+			) as i32,
 			ports: unwrap!(value
 				.ports
 				.into_iter()
@@ -114,7 +123,10 @@ impl ApiTryInto for backend::dynamic_server
 	fn api_try_into(self) -> GlobalResult<models::DynamicServersDockerNetwork> {
 		Ok(models::DynamicServersDockerNetwork {
 			mode: Some(
-				unwrap!(backend::dynamic_servers::DockerNetworkMode::from_i32(self.mode)).api_into(),
+				unwrap!(backend::dynamic_servers::DockerNetworkMode::from_i32(
+					self.mode
+				))
+				.api_into(),
 			),
 			ports: self
 				.ports
@@ -125,20 +137,36 @@ impl ApiTryInto for backend::dynamic_server
 	}
 }
 
-impl ApiFrom<models::DynamicServersDockerNetworkMode> for backend::dynamic_servers::DockerNetworkMode {
-	fn api_from(value: models::DynamicServersDockerNetworkMode) -> backend::dynamic_servers::DockerNetworkMode {
+impl ApiFrom<models::DynamicServersDockerNetworkMode>
+	for backend::dynamic_servers::DockerNetworkMode
+{
+	fn api_from(
+		value: models::DynamicServersDockerNetworkMode,
+	) -> backend::dynamic_servers::DockerNetworkMode {
 		match value {
-			models::DynamicServersDockerNetworkMode::Bridge => backend::dynamic_servers::DockerNetworkMode::Bridge,
-			models::DynamicServersDockerNetworkMode::Host => backend::dynamic_servers::DockerNetworkMode::Host,
+			models::DynamicServersDockerNetworkMode::Bridge => {
+				backend::dynamic_servers::DockerNetworkMode::Bridge
+			}
+			models::DynamicServersDockerNetworkMode::Host => {
+				backend::dynamic_servers::DockerNetworkMode::Host
+			}
 		}
 	}
 }
 
-impl ApiFrom<backend::dynamic_servers::DockerNetworkMode> for models::DynamicServersDockerNetworkMode {
-	fn api_from(value: backend::dynamic_servers::DockerNetworkMode) -> models::DynamicServersDockerNetworkMode {
+impl ApiFrom<backend::dynamic_servers::DockerNetworkMode>
+	for models::DynamicServersDockerNetworkMode
+{
+	fn api_from(
+		value: backend::dynamic_servers::DockerNetworkMode,
+	) -> models::DynamicServersDockerNetworkMode {
 		match value {
-			backend::dynamic_servers::DockerNetworkMode::Bridge => models::DynamicServersDockerNetworkMode::Bridge,
-			backend::dynamic_servers::DockerNetworkMode::Host => models::DynamicServersDockerNetworkMode::Host,
+			backend::dynamic_servers::DockerNetworkMode::Bridge => {
+				models::DynamicServersDockerNetworkMode::Bridge
+			}
+			backend::dynamic_servers::DockerNetworkMode::Host => {
+				models::DynamicServersDockerNetworkMode::Host
+			}
 		}
 	}
 }
@@ -169,18 +197,20 @@ impl ApiTryFrom for models::DynamicServers
 	}
 }
 
-impl ApiTryFrom<models::DynamicServersDockerPortRouting> for backend::dynamic_servers::docker_port::Routing {
+impl ApiTryFrom<models::DynamicServersDockerPortRouting>
+	for backend::dynamic_servers::docker_port::Routing
+{
 	type Error = GlobalError;
 
 	fn api_try_from(
 		value: models::DynamicServersDockerPortRouting,
 	) -> GlobalResult<backend::dynamic_servers::docker_port::Routing> {
 		match (value.game_guard, value.host) {
-			(Some(game_guard), None) => Ok(backend::dynamic_servers::docker_port::Routing::GameGuard(
-				(*game_guard).api_into(),
-			)),
-			(None, Some(_)) => Ok(backend::dynamic_servers::docker_port::Routing::Host(
-				backend::dynamic_servers::DockerHostRouting {},
+			(Some(game_guard), None) => Ok(
+				backend::dynamic_servers::docker_port::Routing::GameGuard((*game_guard).api_into()),
+			),
+			(None, Some(host)) => Ok(backend::dynamic_servers::docker_port::Routing::Host(
+				(*host).api_into(),
 			)),
 			(None, None) => bail_with!(SERVERS_NO_PORT_ROUTERS),
 			_ => bail_with!(SERVERS_MULTIPLE_PORT_ROUTERS),
@@ -188,7 +218,9 @@ impl ApiTryFrom for backend::dynamic_se
 	}
 }
 
-impl ApiTryFrom<backend::dynamic_servers::docker_port::Routing> for models::DynamicServersDockerPortRouting {
+impl ApiTryFrom<backend::dynamic_servers::docker_port::Routing>
+	for models::DynamicServersDockerPortRouting
+{
 	type Error = GlobalError;
 
 	fn api_try_from(
@@ -201,17 +233,19 @@ impl ApiTryFrom for models::Dyna
 					host: None,
 				})
 			}
-			backend::dynamic_servers::docker_port::Routing::Host(_) => {
+			backend::dynamic_servers::docker_port::Routing::Host(host) => {
 				Ok(models::DynamicServersDockerPortRouting {
 					game_guard: None,
-					host: Some(to_value({})?),
+					host: Some(Box::new(host.api_try_into()?)),
 				})
 			}
 		}
 	}
 }
 
-impl ApiFrom<models::DynamicServersDockerGameGuardRouting> for backend::dynamic_servers::DockerGameGuardRouting {
+impl ApiFrom<models::DynamicServersDockerGameGuardRouting>
+	for backend::dynamic_servers::DockerGameGuardRouting
+{
 	fn api_from(
 		value: models::DynamicServersDockerGameGuardRouting,
 	) -> backend::dynamic_servers::DockerGameGuardRouting {
@@ -242,26 +276,117 @@ impl ApiTryFrom
 	}
 }
 
-impl ApiFrom<models::DynamicServersGameGuardProtocol> for backend::dynamic_servers::GameGuardProtocol {
-	fn api_from(value: models::DynamicServersGameGuardProtocol) -> backend::dynamic_servers::GameGuardProtocol {
+impl ApiFrom<models::DynamicServersDockerHostRouting>
+	for backend::dynamic_servers::DockerHostRouting
+{
+	fn api_from(
+		value: models::DynamicServersDockerHostRouting,
+	) -> backend::dynamic_servers::DockerHostRouting {
+		backend::dynamic_servers::DockerHostRouting {
+			protocol: backend::dynamic_servers::HostProtocol::api_from(
+				value.protocol.unwrap_or_default().into(),
+			) as i32,
+		}
+	}
+}
+
+impl ApiTryFrom<backend::dynamic_servers::DockerHostRouting>
+	for models::DynamicServersDockerHostRouting
+{
+	type Error = GlobalError;
+
+	fn api_try_from(
+		value: backend::dynamic_servers::DockerHostRouting,
+	) -> GlobalResult<models::DynamicServersDockerHostRouting> {
+		Ok(models::DynamicServersDockerHostRouting {
+			protocol: Some(
+				unwrap!(backend::dynamic_servers::HostProtocol::from_i32(
+					value.protocol
+				))
+				.api_into(),
+			),
+		})
+	}
+}
+
+impl ApiFrom<models::DynamicServersGameGuardProtocol>
+	for backend::dynamic_servers::GameGuardProtocol
+{
+	fn api_from(
+		value: models::DynamicServersGameGuardProtocol,
+	) -> backend::dynamic_servers::GameGuardProtocol {
 		match value {
-			models::DynamicServersGameGuardProtocol::Udp => backend::dynamic_servers::GameGuardProtocol::Udp,
-			models::DynamicServersGameGuardProtocol::Tcp => backend::dynamic_servers::GameGuardProtocol::Tcp,
-			models::DynamicServersGameGuardProtocol::Http => backend::dynamic_servers::GameGuardProtocol::Http,
-			models::DynamicServersGameGuardProtocol::Https => backend::dynamic_servers::GameGuardProtocol::Https,
-			models::DynamicServersGameGuardProtocol::TcpTls => backend::dynamic_servers::GameGuardProtocol::TcpTls,
+			models::DynamicServersGameGuardProtocol::Udp => {
+				backend::dynamic_servers::GameGuardProtocol::Udp
+			}
+			models::DynamicServersGameGuardProtocol::Tcp => {
+				backend::dynamic_servers::GameGuardProtocol::Tcp
+			}
+			models::DynamicServersGameGuardProtocol::Http => {
+				backend::dynamic_servers::GameGuardProtocol::Http
+			}
+			models::DynamicServersGameGuardProtocol::Https => {
+				backend::dynamic_servers::GameGuardProtocol::Https
+			}
+			models::DynamicServersGameGuardProtocol::TcpTls => {
+				backend::dynamic_servers::GameGuardProtocol::TcpTls
+			}
 		}
 	}
 }
 
-impl ApiFrom<backend::dynamic_servers::GameGuardProtocol> for models::DynamicServersGameGuardProtocol {
-	fn api_from(value: backend::dynamic_servers::GameGuardProtocol) -> models::DynamicServersGameGuardProtocol {
+impl ApiFrom<backend::dynamic_servers::GameGuardProtocol>
+	for models::DynamicServersGameGuardProtocol
+{
+	fn api_from(
+		value: backend::dynamic_servers::GameGuardProtocol,
+	) -> models::DynamicServersGameGuardProtocol {
 		match value {
-			backend::dynamic_servers::GameGuardProtocol::Udp => models::DynamicServersGameGuardProtocol::Udp,
-			backend::dynamic_servers::GameGuardProtocol::Tcp => models::DynamicServersGameGuardProtocol::Tcp,
-			backend::dynamic_servers::GameGuardProtocol::Http => models::DynamicServersGameGuardProtocol::Http,
-			backend::dynamic_servers::GameGuardProtocol::Https => models::DynamicServersGameGuardProtocol::Https,
-			backend::dynamic_servers::GameGuardProtocol::TcpTls => models::DynamicServersGameGuardProtocol::TcpTls,
+			backend::dynamic_servers::GameGuardProtocol::Udp => {
+				models::DynamicServersGameGuardProtocol::Udp
+			}
+			backend::dynamic_servers::GameGuardProtocol::Tcp => {
+				models::DynamicServersGameGuardProtocol::Tcp
+			}
+			backend::dynamic_servers::GameGuardProtocol::Http => {
+				models::DynamicServersGameGuardProtocol::Http
+			}
+			backend::dynamic_servers::GameGuardProtocol::Https => {
+				models::DynamicServersGameGuardProtocol::Https
+			}
+			backend::dynamic_servers::GameGuardProtocol::TcpTls => {
+				models::DynamicServersGameGuardProtocol::TcpTls
+			}
 		}
 	}
 }
+
+impl ApiFrom<models::DynamicServersHostProtocol> for backend::dynamic_servers::HostProtocol {
+	fn api_from(
+		value: models::DynamicServersHostProtocol,
+	) -> backend::dynamic_servers::HostProtocol {
+		match value {
+			models::DynamicServersHostProtocol::Udp => {
+				backend::dynamic_servers::HostProtocol::HostUdp
+			}
+			models::DynamicServersHostProtocol::Tcp => {
+				backend::dynamic_servers::HostProtocol::HostTcp
+			}
+		}
+	}
+}
+
+impl ApiFrom<backend::dynamic_servers::HostProtocol> for models::DynamicServersHostProtocol {
+	fn api_from(
+		value: backend::dynamic_servers::HostProtocol,
+	) -> models::DynamicServersHostProtocol {
+		match value {
+			backend::dynamic_servers::HostProtocol::HostUdp => {
+				models::DynamicServersHostProtocol::Udp
+			}
+			backend::dynamic_servers::HostProtocol::HostTcp => {
+				models::DynamicServersHostProtocol::Tcp
+			}
+		}
+	}
+}
diff --git a/proto/backend/dynamic_servers.proto b/proto/backend/dynamic_servers.proto
index 47ca82c95..3949b8e64 100644
--- a/proto/backend/dynamic_servers.proto
+++ b/proto/backend/dynamic_servers.proto
@@ -3,6 +3,8 @@ syntax = "proto3";
 package rivet.backend.dynamic_servers;
 
 import "proto/common.proto";
+import "proto/backend/captcha.proto";
+import "proto/backend/region.proto";
 
 message Server {
 	rivet.common.Uuid server_id = 1;
@@ -65,4 +67,247 @@ enum GameGuardProtocol {
 	UDP = 4;
 }
 
-message DockerHostRouting {}
+message DockerHostRouting {
+	HostProtocol protocol = 1;
+}
+
+enum HostProtocol {
+	HOST_TCP = 0;
+	HOST_UDP = 1;
+}
+
+
+
+
+
+// MARK: Game Config
+message GameConfig {
+	bool host_networking_enabled = 1;
+	bool root_user_enabled = 2;
+}
+
+// MARK: Game Namespace Config
+message NamespaceConfig {
+	uint32 lobby_count_max = 1;
+	uint32 max_players_per_client = 2;
+	uint32 max_players_per_client_vpn = 3;
+	uint32 max_players_per_client_proxy = 4;
+	uint32 max_players_per_client_tor = 5;
+	uint32 max_players_per_client_hosting = 6;
+}
+
+// MARK: Game Version Config
+message VersionConfig {
+	repeated LobbyGroup lobby_groups = 1;
+
+	optional rivet.backend.captcha.CaptchaConfig captcha = 2;
+}
+
+message LobbyGroup {
+	message Region {
+		rivet.common.Uuid region_id = 1;
+		string tier_name_id = 2;
+		IdleLobbies idle_lobbies = 3;
+	}
+
+	message IdleLobbies {
+		uint32 min_idle_lobbies = 1;
+		uint32 max_idle_lobbies = 2;
+	}
+
+	message Actions {
+		optional FindConfig find = 1;
+		optional JoinConfig join = 2;
+		optional CreateConfig create = 3;
+	}
+
+	string name_id = 1;
+
+	repeated Region regions = 101;
+	uint32 max_players_normal = 102;
+	uint32 max_players_direct = 103;
+	uint32 max_players_party = 104;
+	bool listable = 105;
+	bool taggable = 106;
+	bool allow_dynamic_max_players = 107;
+
+	LobbyRuntime runtime = 201;
+
+	optional Actions actions = 301;
+}
+
+message LobbyRuntime {
+	enum NetworkMode {
+		BRIDGE = 0;
+		HOST = 1;
+	}
+
+	// Should be named "PortProtocol"
+	enum ProxyProtocol {
+		HTTP = 0;
+		HTTPS = 1;
+		TCP = 3;
+		TCP_TLS = 4;
+		UDP = 2;
+	}
+
+	enum ProxyKind {
+		GAME_GUARD = 0;
+		NONE = 1;
+	}
+
+	message PortRange {
+		uint32 min = 1;
+		uint32 max = 2;
+	}
+
+	message Port {
+		string label = 1;
+
+		// Only applicable to `ProxyProtocol::HTTP` and `ProxyProtocol::HTTPS`.
+		optional uint32 target_port = 2;
+
+		// Only applicable to `ProxyProtocol::UDP` and `ProxyProtocol::TCP` when `proxy_kind` is `ProxyKind::GameGuard`.
+		optional PortRange port_range = 4;
+
+		ProxyProtocol proxy_protocol = 3;
+
+		ProxyKind proxy_kind = 5;
+	}
+
+	message EnvVar {
+		string key = 1;
+		string value = 2;
+	}
+
+	message Docker {
+		rivet.common.Uuid build_id = 1;
+		repeated string args = 2;
+		repeated EnvVar env_vars = 4;
+		NetworkMode network_mode = 5;
+		repeated Port ports = 3;
+	}
+
+	oneof runtime {
+		Docker docker = 201;
+	};
+}
+
+enum IdentityRequirement {
+	NONE = 0;
+	GUEST = 1;
+	REGISTERED = 2;
+}
+
+message VerificationConfig {
+	string url = 1;
+	map<string, string> headers = 2;
+}
+
+message FindConfig {
+	bool enabled = 1;
+	IdentityRequirement identity_requirement = 2;
+	optional VerificationConfig verification = 3;
+}
+
+message JoinConfig {
+	bool enabled = 1;
+	IdentityRequirement identity_requirement = 2;
+	optional VerificationConfig verification = 3;
+}
+
+message CreateConfig {
+	bool enabled = 1;
+	IdentityRequirement identity_requirement = 2;
+	optional VerificationConfig verification = 3;
+
+	bool enable_public = 4;
+	bool enable_private = 5;
+
+	optional uint64 max_lobbies_per_identity = 6;
+}
+
+// MARK: Game Version Config Context
+// Context required to publish a new version.
+message VersionConfigCtx {
+	repeated LobbyGroupCtx lobby_groups = 1;
+}
+
+message LobbyGroupCtx {
+	LobbyRuntimeCtx runtime = 101;
+}
+
+message LobbyRuntimeCtx {
+	message Docker {
+		optional rivet.common.Uuid job_template_id = 1 [deprecated = true];
+	}
+
+	oneof runtime {
+		Docker docker = 1;
+	};
+}
+
+// MARK: Game Version Config Meta
+// Metadata about a given configuration generated after publishing.
+message VersionConfigMeta {
+	repeated LobbyGroupMeta lobby_groups = 1;
+}
+
+message LobbyGroupMeta {
+	// The indexes of `LobbyGroupMeta` and `LobbyGroupConfig` returned by `game-version-get` line up, so
+	// fetching lobby group config via `lobby_group_id` is done via zipping.
+ rivet.common.Uuid lobby_group_id = 1; + + LobbyRuntimeMeta runtime = 101; +} + +message LobbyRuntimeMeta { + message Docker { + optional rivet.common.Uuid job_template_id = 1 [deprecated = true]; + } + + oneof runtime { + Docker docker = 201; + }; +} + +// MARK: Lobby State +message Lobby { + enum Publicity { + PUBLIC = 0; + PRIVATE = 1; + } + + reserved 10; + + rivet.common.Uuid lobby_id = 1; + rivet.common.Uuid lobby_group_id = 2; + rivet.common.Uuid region_id = 3; + rivet.common.Uuid token_session_id = 4; + int64 create_ts = 5; + optional int64 ready_ts = 14; + optional int64 stop_ts = 13; + optional rivet.common.Uuid run_id = 6; + bool is_closed = 11; + rivet.common.Uuid namespace_id = 9; + optional rivet.common.Uuid create_ray_id = 12; + optional rivet.common.Uuid creator_user_id = 15; + bool is_custom = 16; + Publicity publicity = 17; + + uint32 max_players_normal = 101; + uint32 max_players_direct = 102; + uint32 max_players_party = 103; +} + +// MARK: Player State +message Player { + rivet.common.Uuid player_id = 1; + rivet.common.Uuid lobby_id = 2; + int64 create_ts = 3; + optional int64 register_ts = 4; + optional int64 remove_ts = 5; + rivet.common.Uuid token_session_id = 6; + rivet.common.Uuid create_ray_id = 7; +} + diff --git a/sdks/full/go/dynamicservers/types.go b/sdks/full/go/dynamicservers/types.go index 776db2a45..095082e8d 100644 --- a/sdks/full/go/dynamicservers/types.go +++ b/sdks/full/go/dynamicservers/types.go @@ -39,6 +39,8 @@ func (d *DockerGameGuardRouting) String() string { } type DockerHostRouting struct { + Protocol *HostProtocol `json:"protocol,omitempty"` + _rawJSON json.RawMessage } @@ -240,6 +242,28 @@ func (g GameGuardProtocol) Ptr() *GameGuardProtocol { return &g } +type HostProtocol string + +const ( + HostProtocolTcp HostProtocol = "tcp" + HostProtocolUdp HostProtocol = "udp" +) + +func NewHostProtocolFromString(s string) (HostProtocol, error) { + switch s { + case "tcp": + return HostProtocolTcp, nil + case "udp": + return HostProtocolUdp, nil + } + var t HostProtocol + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (h HostProtocol) Ptr() *HostProtocol { + return &h +} + type Resources struct { // The number of CPU cores in millicores, or 1/1000 of a core. 
For example, // 1/8 of a core would be 125 millicores, and 1 core would be 1000 diff --git a/sdks/full/openapi/openapi.yml b/sdks/full/openapi/openapi.yml index 719239570..2cf09cb31 100644 --- a/sdks/full/openapi/openapi.yml +++ b/sdks/full/openapi/openapi.yml @@ -12416,7 +12416,14 @@ components: - udp DynamicServersDockerHostRouting: type: object - properties: {} + properties: + protocol: + $ref: '#/components/schemas/DynamicServersHostProtocol' + DynamicServersHostProtocol: + type: string + enum: + - tcp + - udp DynamicServersCreateServerRequest: type: object properties: diff --git a/sdks/full/openapi_compat/openapi.yml b/sdks/full/openapi_compat/openapi.yml index 10eb93560..bec3de598 100644 --- a/sdks/full/openapi_compat/openapi.yml +++ b/sdks/full/openapi_compat/openapi.yml @@ -2710,7 +2710,9 @@ components: $ref: '#/components/schemas/DynamicServersGameGuardProtocol' type: object DynamicServersDockerHostRouting: - properties: {} + properties: + protocol: + $ref: '#/components/schemas/DynamicServersHostProtocol' type: object DynamicServersDockerNetwork: properties: @@ -2771,6 +2773,11 @@ components: - tcp_tls - udp type: string + DynamicServersHostProtocol: + enum: + - tcp + - udp + type: string DynamicServersResources: properties: cpu: diff --git a/sdks/full/rust-cli/.openapi-generator/FILES b/sdks/full/rust-cli/.openapi-generator/FILES index 5c455304f..0d61f8298 100644 --- a/sdks/full/rust-cli/.openapi-generator/FILES +++ b/sdks/full/rust-cli/.openapi-generator/FILES @@ -211,12 +211,14 @@ docs/DynamicServersCreateServerRequest.md docs/DynamicServersCreateServerResponse.md docs/DynamicServersDestroyServerResponse.md docs/DynamicServersDockerGameGuardRouting.md +docs/DynamicServersDockerHostRouting.md docs/DynamicServersDockerNetwork.md docs/DynamicServersDockerNetworkMode.md docs/DynamicServersDockerPort.md docs/DynamicServersDockerPortRouting.md docs/DynamicServersDockerRuntime.md docs/DynamicServersGameGuardProtocol.md +docs/DynamicServersHostProtocol.md docs/DynamicServersResources.md docs/DynamicServersRuntime.md docs/DynamicServersServer.md @@ -592,12 +594,14 @@ src/models/dynamic_servers_create_server_request.rs src/models/dynamic_servers_create_server_response.rs src/models/dynamic_servers_destroy_server_response.rs src/models/dynamic_servers_docker_game_guard_routing.rs +src/models/dynamic_servers_docker_host_routing.rs src/models/dynamic_servers_docker_network.rs src/models/dynamic_servers_docker_network_mode.rs src/models/dynamic_servers_docker_port.rs src/models/dynamic_servers_docker_port_routing.rs src/models/dynamic_servers_docker_runtime.rs src/models/dynamic_servers_game_guard_protocol.rs +src/models/dynamic_servers_host_protocol.rs src/models/dynamic_servers_resources.rs src/models/dynamic_servers_runtime.rs src/models/dynamic_servers_server.rs diff --git a/sdks/full/rust-cli/README.md b/sdks/full/rust-cli/README.md index 971ef9d74..465b7efee 100644 --- a/sdks/full/rust-cli/README.md +++ b/sdks/full/rust-cli/README.md @@ -357,12 +357,14 @@ Class | Method | HTTP request | Description - [DynamicServersCreateServerResponse](docs/DynamicServersCreateServerResponse.md) - [DynamicServersDestroyServerResponse](docs/DynamicServersDestroyServerResponse.md) - [DynamicServersDockerGameGuardRouting](docs/DynamicServersDockerGameGuardRouting.md) + - [DynamicServersDockerHostRouting](docs/DynamicServersDockerHostRouting.md) - [DynamicServersDockerNetwork](docs/DynamicServersDockerNetwork.md) - [DynamicServersDockerNetworkMode](docs/DynamicServersDockerNetworkMode.md) - 
[DynamicServersDockerPort](docs/DynamicServersDockerPort.md) - [DynamicServersDockerPortRouting](docs/DynamicServersDockerPortRouting.md) - [DynamicServersDockerRuntime](docs/DynamicServersDockerRuntime.md) - [DynamicServersGameGuardProtocol](docs/DynamicServersGameGuardProtocol.md) + - [DynamicServersHostProtocol](docs/DynamicServersHostProtocol.md) - [DynamicServersResources](docs/DynamicServersResources.md) - [DynamicServersRuntime](docs/DynamicServersRuntime.md) - [DynamicServersServer](docs/DynamicServersServer.md) diff --git a/sdks/full/rust-cli/docs/DynamicServersDockerHostRouting.md b/sdks/full/rust-cli/docs/DynamicServersDockerHostRouting.md new file mode 100644 index 000000000..972be1d30 --- /dev/null +++ b/sdks/full/rust-cli/docs/DynamicServersDockerHostRouting.md @@ -0,0 +1,11 @@ +# DynamicServersDockerHostRouting + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | Option<[**crate::models::DynamicServersHostProtocol**](DynamicServersHostProtocol.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust-cli/docs/DynamicServersDockerPortRouting.md b/sdks/full/rust-cli/docs/DynamicServersDockerPortRouting.md index cde3e79c6..609cfedb6 100644 --- a/sdks/full/rust-cli/docs/DynamicServersDockerPortRouting.md +++ b/sdks/full/rust-cli/docs/DynamicServersDockerPortRouting.md @@ -5,7 +5,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **game_guard** | Option<[**crate::models::DynamicServersDockerGameGuardRouting**](DynamicServersDockerGameGuardRouting.md)> | | [optional] -**host** | Option<[**serde_json::Value**](.md)> | | [optional] +**host** | Option<[**crate::models::DynamicServersDockerHostRouting**](DynamicServersDockerHostRouting.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/full/rust-cli/docs/DynamicServersHostProtocol.md b/sdks/full/rust-cli/docs/DynamicServersHostProtocol.md new file mode 100644 index 000000000..afdcaa1f4 --- /dev/null +++ b/sdks/full/rust-cli/docs/DynamicServersHostProtocol.md @@ -0,0 +1,10 @@ +# DynamicServersHostProtocol + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust-cli/src/models/dynamic_servers_docker_host_routing.rs b/sdks/full/rust-cli/src/models/dynamic_servers_docker_host_routing.rs new file mode 100644 index 000000000..c4f23ee69 --- /dev/null +++ b/sdks/full/rust-cli/src/models/dynamic_servers_docker_host_routing.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct DynamicServersDockerHostRouting { + #[serde(rename = "protocol", skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +impl DynamicServersDockerHostRouting { + pub fn new() -> 
DynamicServersDockerHostRouting { + DynamicServersDockerHostRouting { + protocol: None, + } + } +} + + diff --git a/sdks/full/rust-cli/src/models/dynamic_servers_docker_port_routing.rs b/sdks/full/rust-cli/src/models/dynamic_servers_docker_port_routing.rs index 6940c69f4..30e815a2f 100644 --- a/sdks/full/rust-cli/src/models/dynamic_servers_docker_port_routing.rs +++ b/sdks/full/rust-cli/src/models/dynamic_servers_docker_port_routing.rs @@ -16,7 +16,7 @@ pub struct DynamicServersDockerPortRouting { #[serde(rename = "game_guard", skip_serializing_if = "Option::is_none")] pub game_guard: Option>, #[serde(rename = "host", skip_serializing_if = "Option::is_none")] - pub host: Option, + pub host: Option>, } impl DynamicServersDockerPortRouting { diff --git a/sdks/full/rust-cli/src/models/dynamic_servers_host_protocol.rs b/sdks/full/rust-cli/src/models/dynamic_servers_host_protocol.rs new file mode 100644 index 000000000..0c34ad63f --- /dev/null +++ b/sdks/full/rust-cli/src/models/dynamic_servers_host_protocol.rs @@ -0,0 +1,39 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum DynamicServersHostProtocol { + #[serde(rename = "tcp")] + Tcp, + #[serde(rename = "udp")] + Udp, + +} + +impl ToString for DynamicServersHostProtocol { + fn to_string(&self) -> String { + match self { + Self::Tcp => String::from("tcp"), + Self::Udp => String::from("udp"), + } + } +} + +impl Default for DynamicServersHostProtocol { + fn default() -> DynamicServersHostProtocol { + Self::Tcp + } +} + + + + diff --git a/sdks/full/rust-cli/src/models/mod.rs b/sdks/full/rust-cli/src/models/mod.rs index 8ba520de0..6c3348aef 100644 --- a/sdks/full/rust-cli/src/models/mod.rs +++ b/sdks/full/rust-cli/src/models/mod.rs @@ -366,6 +366,8 @@ pub mod dynamic_servers_destroy_server_response; pub use self::dynamic_servers_destroy_server_response::DynamicServersDestroyServerResponse; pub mod dynamic_servers_docker_game_guard_routing; pub use self::dynamic_servers_docker_game_guard_routing::DynamicServersDockerGameGuardRouting; +pub mod dynamic_servers_docker_host_routing; +pub use self::dynamic_servers_docker_host_routing::DynamicServersDockerHostRouting; pub mod dynamic_servers_docker_network; pub use self::dynamic_servers_docker_network::DynamicServersDockerNetwork; pub mod dynamic_servers_docker_network_mode; @@ -378,6 +380,8 @@ pub mod dynamic_servers_docker_runtime; pub use self::dynamic_servers_docker_runtime::DynamicServersDockerRuntime; pub mod dynamic_servers_game_guard_protocol; pub use self::dynamic_servers_game_guard_protocol::DynamicServersGameGuardProtocol; +pub mod dynamic_servers_host_protocol; +pub use self::dynamic_servers_host_protocol::DynamicServersHostProtocol; pub mod dynamic_servers_resources; pub use self::dynamic_servers_resources::DynamicServersResources; pub mod dynamic_servers_runtime; diff --git a/sdks/full/rust/.openapi-generator/FILES b/sdks/full/rust/.openapi-generator/FILES index 5c455304f..0d61f8298 100644 --- a/sdks/full/rust/.openapi-generator/FILES +++ b/sdks/full/rust/.openapi-generator/FILES @@ -211,12 +211,14 @@ docs/DynamicServersCreateServerRequest.md docs/DynamicServersCreateServerResponse.md docs/DynamicServersDestroyServerResponse.md docs/DynamicServersDockerGameGuardRouting.md 
+docs/DynamicServersDockerHostRouting.md docs/DynamicServersDockerNetwork.md docs/DynamicServersDockerNetworkMode.md docs/DynamicServersDockerPort.md docs/DynamicServersDockerPortRouting.md docs/DynamicServersDockerRuntime.md docs/DynamicServersGameGuardProtocol.md +docs/DynamicServersHostProtocol.md docs/DynamicServersResources.md docs/DynamicServersRuntime.md docs/DynamicServersServer.md @@ -592,12 +594,14 @@ src/models/dynamic_servers_create_server_request.rs src/models/dynamic_servers_create_server_response.rs src/models/dynamic_servers_destroy_server_response.rs src/models/dynamic_servers_docker_game_guard_routing.rs +src/models/dynamic_servers_docker_host_routing.rs src/models/dynamic_servers_docker_network.rs src/models/dynamic_servers_docker_network_mode.rs src/models/dynamic_servers_docker_port.rs src/models/dynamic_servers_docker_port_routing.rs src/models/dynamic_servers_docker_runtime.rs src/models/dynamic_servers_game_guard_protocol.rs +src/models/dynamic_servers_host_protocol.rs src/models/dynamic_servers_resources.rs src/models/dynamic_servers_runtime.rs src/models/dynamic_servers_server.rs diff --git a/sdks/full/rust/README.md b/sdks/full/rust/README.md index 971ef9d74..465b7efee 100644 --- a/sdks/full/rust/README.md +++ b/sdks/full/rust/README.md @@ -357,12 +357,14 @@ Class | Method | HTTP request | Description - [DynamicServersCreateServerResponse](docs/DynamicServersCreateServerResponse.md) - [DynamicServersDestroyServerResponse](docs/DynamicServersDestroyServerResponse.md) - [DynamicServersDockerGameGuardRouting](docs/DynamicServersDockerGameGuardRouting.md) + - [DynamicServersDockerHostRouting](docs/DynamicServersDockerHostRouting.md) - [DynamicServersDockerNetwork](docs/DynamicServersDockerNetwork.md) - [DynamicServersDockerNetworkMode](docs/DynamicServersDockerNetworkMode.md) - [DynamicServersDockerPort](docs/DynamicServersDockerPort.md) - [DynamicServersDockerPortRouting](docs/DynamicServersDockerPortRouting.md) - [DynamicServersDockerRuntime](docs/DynamicServersDockerRuntime.md) - [DynamicServersGameGuardProtocol](docs/DynamicServersGameGuardProtocol.md) + - [DynamicServersHostProtocol](docs/DynamicServersHostProtocol.md) - [DynamicServersResources](docs/DynamicServersResources.md) - [DynamicServersRuntime](docs/DynamicServersRuntime.md) - [DynamicServersServer](docs/DynamicServersServer.md) diff --git a/sdks/full/rust/docs/DynamicServersDockerHostRouting.md b/sdks/full/rust/docs/DynamicServersDockerHostRouting.md new file mode 100644 index 000000000..972be1d30 --- /dev/null +++ b/sdks/full/rust/docs/DynamicServersDockerHostRouting.md @@ -0,0 +1,11 @@ +# DynamicServersDockerHostRouting + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**protocol** | Option<[**crate::models::DynamicServersHostProtocol**](DynamicServersHostProtocol.md)> | | [optional] + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust/docs/DynamicServersDockerPortRouting.md b/sdks/full/rust/docs/DynamicServersDockerPortRouting.md index cde3e79c6..609cfedb6 100644 --- a/sdks/full/rust/docs/DynamicServersDockerPortRouting.md +++ b/sdks/full/rust/docs/DynamicServersDockerPortRouting.md @@ -5,7 +5,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **game_guard** | 
Option<[**crate::models::DynamicServersDockerGameGuardRouting**](DynamicServersDockerGameGuardRouting.md)> | | [optional] -**host** | Option<[**serde_json::Value**](.md)> | | [optional] +**host** | Option<[**crate::models::DynamicServersDockerHostRouting**](DynamicServersDockerHostRouting.md)> | | [optional] [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/full/rust/docs/DynamicServersHostProtocol.md b/sdks/full/rust/docs/DynamicServersHostProtocol.md new file mode 100644 index 000000000..afdcaa1f4 --- /dev/null +++ b/sdks/full/rust/docs/DynamicServersHostProtocol.md @@ -0,0 +1,10 @@ +# DynamicServersHostProtocol + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust/src/models/dynamic_servers_docker_host_routing.rs b/sdks/full/rust/src/models/dynamic_servers_docker_host_routing.rs new file mode 100644 index 000000000..c4f23ee69 --- /dev/null +++ b/sdks/full/rust/src/models/dynamic_servers_docker_host_routing.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct DynamicServersDockerHostRouting { + #[serde(rename = "protocol", skip_serializing_if = "Option::is_none")] + pub protocol: Option, +} + +impl DynamicServersDockerHostRouting { + pub fn new() -> DynamicServersDockerHostRouting { + DynamicServersDockerHostRouting { + protocol: None, + } + } +} + + diff --git a/sdks/full/rust/src/models/dynamic_servers_docker_port_routing.rs b/sdks/full/rust/src/models/dynamic_servers_docker_port_routing.rs index 6940c69f4..30e815a2f 100644 --- a/sdks/full/rust/src/models/dynamic_servers_docker_port_routing.rs +++ b/sdks/full/rust/src/models/dynamic_servers_docker_port_routing.rs @@ -16,7 +16,7 @@ pub struct DynamicServersDockerPortRouting { #[serde(rename = "game_guard", skip_serializing_if = "Option::is_none")] pub game_guard: Option>, #[serde(rename = "host", skip_serializing_if = "Option::is_none")] - pub host: Option, + pub host: Option>, } impl DynamicServersDockerPortRouting { diff --git a/sdks/full/rust/src/models/dynamic_servers_host_protocol.rs b/sdks/full/rust/src/models/dynamic_servers_host_protocol.rs new file mode 100644 index 000000000..0c34ad63f --- /dev/null +++ b/sdks/full/rust/src/models/dynamic_servers_host_protocol.rs @@ -0,0 +1,39 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum DynamicServersHostProtocol { + #[serde(rename = "tcp")] + Tcp, + #[serde(rename = "udp")] + Udp, + +} + +impl ToString for DynamicServersHostProtocol { + fn to_string(&self) -> String { + match self { + Self::Tcp => String::from("tcp"), + Self::Udp => String::from("udp"), + } + } +} + +impl Default for DynamicServersHostProtocol { 
+ fn default() -> DynamicServersHostProtocol { + Self::Tcp + } +} + + + + diff --git a/sdks/full/rust/src/models/mod.rs b/sdks/full/rust/src/models/mod.rs index 8ba520de0..6c3348aef 100644 --- a/sdks/full/rust/src/models/mod.rs +++ b/sdks/full/rust/src/models/mod.rs @@ -366,6 +366,8 @@ pub mod dynamic_servers_destroy_server_response; pub use self::dynamic_servers_destroy_server_response::DynamicServersDestroyServerResponse; pub mod dynamic_servers_docker_game_guard_routing; pub use self::dynamic_servers_docker_game_guard_routing::DynamicServersDockerGameGuardRouting; +pub mod dynamic_servers_docker_host_routing; +pub use self::dynamic_servers_docker_host_routing::DynamicServersDockerHostRouting; pub mod dynamic_servers_docker_network; pub use self::dynamic_servers_docker_network::DynamicServersDockerNetwork; pub mod dynamic_servers_docker_network_mode; @@ -378,6 +380,8 @@ pub mod dynamic_servers_docker_runtime; pub use self::dynamic_servers_docker_runtime::DynamicServersDockerRuntime; pub mod dynamic_servers_game_guard_protocol; pub use self::dynamic_servers_game_guard_protocol::DynamicServersGameGuardProtocol; +pub mod dynamic_servers_host_protocol; +pub use self::dynamic_servers_host_protocol::DynamicServersHostProtocol; pub mod dynamic_servers_resources; pub use self::dynamic_servers_resources::DynamicServersResources; pub mod dynamic_servers_runtime; diff --git a/sdks/full/typescript/archive.tgz b/sdks/full/typescript/archive.tgz index 8c893d0f6..748041d08 100644 --- a/sdks/full/typescript/archive.tgz +++ b/sdks/full/typescript/archive.tgz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:ec8cefcd8cb85c2248e3b192b75f2687887e62b500de8b74594391ee1e8d36ec -size 655532 +oid sha256:0f87ca39e4f5b30a96b79aa8dac92c2e0ab172a32ef9e0b03926629be618bd41 +size 655917 diff --git a/sdks/runtime/typescript/archive.tgz b/sdks/runtime/typescript/archive.tgz index 46eef25cf..620125e78 100644 --- a/sdks/runtime/typescript/archive.tgz +++ b/sdks/runtime/typescript/archive.tgz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:4fc74b5397e25b115486586e81eff6ec5e386dfcfe2dc78a169dbfb427e36fac -size 373103 +oid sha256:7991058e28770cdb1869497b43593f970075e2019ac68a071971e0f0ec9b1dbf +size 373136 diff --git a/svc/Cargo.lock b/svc/Cargo.lock index 2da724553..444437c54 100644 --- a/svc/Cargo.lock +++ b/svc/Cargo.lock @@ -1554,7 +1554,7 @@ dependencies = [ "reqwest", "rivet-operation", "rivet-util-build", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", "upload-prepare", @@ -1573,7 +1573,7 @@ dependencies = [ "rivet-connection", "rivet-operation", "rivet-pools", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -1592,7 +1592,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1605,7 +1605,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1678,7 +1678,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-captcha", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1738,7 +1738,7 @@ dependencies = [ 
"chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1750,7 +1750,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1761,7 +1761,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1775,7 +1775,7 @@ dependencies = [ "game-resolve-namespace-id", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1790,7 +1790,7 @@ dependencies = [ "game-resolve-namespace-id", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1804,7 +1804,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1817,7 +1817,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1829,7 +1829,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1841,7 +1841,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1855,7 +1855,7 @@ dependencies = [ "prost 0.10.4", "reqwest", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", "upload-prepare", @@ -1871,7 +1871,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1884,7 +1884,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1897,7 +1897,7 @@ dependencies = [ "faker-game-version", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1926,7 +1926,7 @@ dependencies = [ "itertools 0.10.5", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "unzip-n", ] @@ -1965,7 +1965,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1976,7 +1976,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -1987,7 +1987,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 
0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2138,7 +2138,7 @@ dependencies = [ "rivet-util", "serde", "serde_json", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "thiserror", "tokio", "tracing", @@ -2256,7 +2256,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2268,7 +2268,7 @@ dependencies = [ "cloud-game-config-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2280,7 +2280,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -2301,7 +2301,7 @@ dependencies = [ "mm-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2320,7 +2320,7 @@ dependencies = [ "mm-config-namespace-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2334,7 +2334,7 @@ dependencies = [ "game-token-development-validate", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -2349,7 +2349,7 @@ dependencies = [ "prost 0.10.4", "rivet-claims", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -2370,7 +2370,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2396,7 +2396,7 @@ dependencies = [ "region-list", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2442,7 +2442,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2453,7 +2453,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2464,7 +2464,7 @@ dependencies = [ "chirp-worker", "ip-info", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2475,7 +2475,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2486,7 +2486,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2501,7 +2501,7 @@ dependencies = [ "rivet-metrics", "rivet-operation", "rivet-runtime", - "sqlx", + "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -2518,7 +2518,7 @@ dependencies = [ "nomad_client", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2598,7 +2598,7 @@ dependencies = [ "rivet-operation", "rivet-runtime", "rivet-util-cluster", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -2612,7 +2612,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2624,7 +2624,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-cluster", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2635,7 +2635,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2651,7 +2651,7 @@ dependencies = [ "rivet-operation", "rivet-runtime", "rivet-util-cluster", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -2665,7 +2665,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2676,7 +2676,7 @@ dependencies = [ "chirp-worker", "cluster-server-list", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2687,7 +2687,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2698,7 +2698,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2709,7 +2709,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -2743,7 +2743,7 @@ dependencies = [ "rivet-util-cluster", "s3-util", "serde_yaml", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "ssh2", "thiserror", "token-create", @@ -3013,7 +3013,7 @@ dependencies = [ "prost 0.10.4", "reqwest", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-prepare", ] @@ -3026,7 +3026,7 @@ dependencies = [ "prost 0.10.4", "reqwest", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", "upload-prepare", @@ -3097,7 +3097,7 @@ dependencies = [ "rand", "rivet-operation", "serde_json", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3143,13 +3143,39 @@ checksum = 
"1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" name = "ds-server-create" version = "0.0.1" dependencies = [ + "bit-vec", + "build-get", "chirp-client", "chirp-worker", + "cjson", + "cluster-datacenter-list", + "cluster-list", + "faker-build", + "faker-game", + "heck 0.3.3", + "hex", + "http 0.2.12", + "ip-info", "lazy_static", - "nomad-client", + "mm-lobby-list-for-user-id", + "nomad-util", "nomad_client", + "rand", + "regex", + "region-get", "rivet-operation", - "sqlx", + "rivet-util", + "rivet-util-build", + "s3-util", + "serde", + "serde_json", + "sha2", + "sqlx 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", + "strum 0.24.1", + "tier-list", + "upload-get", + "user-identity-get", + "uuid", ] [[package]] @@ -3159,7 +3185,26 @@ dependencies = [ "chirp-client", "chirp-worker", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + +[[package]] +name = "ds-worker" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "chrono", + "lazy_static", + "nomad-util", + "nomad_client", + "rivet-convert", + "rivet-health-checks", + "rivet-metrics", + "rivet-runtime", + "rivet-util-job", + "serde", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3265,7 +3310,7 @@ dependencies = [ "email-verification-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3285,7 +3330,7 @@ dependencies = [ "reqwest", "rivet-operation", "serde_json", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-prepare", ] @@ -3356,7 +3401,7 @@ dependencies = [ "rivet-operation", "serde", "serde_json", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3524,7 +3569,7 @@ dependencies = [ "region-get", "region-list", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3787,7 +3832,7 @@ dependencies = [ "prost 0.10.4", "reqwest", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", "upload-prepare", @@ -3804,7 +3849,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-team", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "team-get", ] @@ -3818,7 +3863,7 @@ dependencies = [ "game-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-file-list", "upload-get", ] @@ -3831,7 +3876,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3843,7 +3888,7 @@ dependencies = [ "faker-game", "faker-team", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3856,7 +3901,7 @@ dependencies = [ "prost 0.10.4", "reqwest", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", "upload-prepare", @@ 
-3873,7 +3918,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3887,7 +3932,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3901,7 +3946,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3915,7 +3960,7 @@ dependencies = [ "game-namespace-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3957,7 +4002,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3974,7 +4019,7 @@ dependencies = [ "prost 0.10.4", "region-list-for-game", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3986,7 +4031,7 @@ dependencies = [ "faker-game", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -3999,7 +4044,7 @@ dependencies = [ "game-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4013,7 +4058,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4037,7 +4082,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-game-user", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -4051,7 +4096,7 @@ dependencies = [ "game-user-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4066,7 +4111,7 @@ dependencies = [ "game-user-link-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -4081,7 +4126,7 @@ dependencies = [ "game-user-link-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4094,7 +4139,7 @@ dependencies = [ "game-user-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4110,7 +4155,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4139,7 +4184,7 @@ dependencies = [ "rivet-metrics", "rivet-runtime", "rivet-util-game-user", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", "token-exchange", "token-revoke", @@ 
-4167,7 +4212,7 @@ dependencies = [ "game-version-list", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4180,7 +4225,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4193,7 +4238,7 @@ dependencies = [ "game-version-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4641,7 +4686,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4653,7 +4698,7 @@ dependencies = [ "identity-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4666,7 +4711,7 @@ dependencies = [ "faker-game-version", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4690,7 +4735,7 @@ dependencies = [ "identity-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4795,7 +4840,7 @@ dependencies = [ "rivet-operation", "serde", "serde_json", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4882,7 +4927,7 @@ dependencies = [ "rivet-pools", "rivet-runtime", "serde_json", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -4932,7 +4977,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-job", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -4980,7 +5025,7 @@ dependencies = [ "rustls 0.20.9", "serde", "sha2", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", "webpki 0.22.4", "webpki-roots 0.22.6", @@ -5017,7 +5062,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5029,7 +5074,7 @@ dependencies = [ "kv-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5042,7 +5087,7 @@ dependencies = [ "faker-game-version", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5066,7 +5111,7 @@ dependencies = [ "kv-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5077,7 +5122,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ 
-5185,7 +5230,7 @@ dependencies = [ "rivet-util-linode", "serde", "serde_json", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -5200,7 +5245,7 @@ dependencies = [ "rivet-operation", "rivet-util-cluster", "rivet-util-linode", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5215,7 +5260,7 @@ dependencies = [ "rivet-operation", "rivet-util-cluster", "rivet-util-linode", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5230,7 +5275,7 @@ dependencies = [ "rivet-operation", "rivet-util-cluster", "rivet-util-linode", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5246,7 +5291,7 @@ dependencies = [ "rivet-runtime", "rivet-util-cluster", "rivet-util-linode", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5348,7 +5393,7 @@ dependencies = [ "rivet-metrics", "rivet-operation", "rivet-runtime", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -5523,7 +5568,7 @@ dependencies = [ "chirp-client", "chirp-worker", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5533,7 +5578,7 @@ dependencies = [ "chirp-client", "chirp-worker", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5547,7 +5592,7 @@ dependencies = [ "mm-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5563,7 +5608,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5577,7 +5622,7 @@ dependencies = [ "mm-config-version-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5591,7 +5636,7 @@ dependencies = [ "mm-config-namespace-get", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5612,7 +5657,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5624,7 +5669,7 @@ dependencies = [ "mm-config-namespace-create", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5641,7 +5686,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5664,7 +5709,7 @@ dependencies = [ "rivet-util-job", "rivet-util-mm", "s3-util", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tier-list", 
"upload-get", ] @@ -5682,7 +5727,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5717,7 +5762,7 @@ dependencies = [ "rivet-pools", "rivet-runtime", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -5765,7 +5810,7 @@ dependencies = [ "chirp-worker", "faker-mm-lobby", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5778,7 +5823,7 @@ dependencies = [ "faker-mm-lobby", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5791,7 +5836,7 @@ dependencies = [ "faker-mm-lobby-row", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5813,7 +5858,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5826,7 +5871,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5838,7 +5883,7 @@ dependencies = [ "faker-mm-lobby", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5851,7 +5896,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5871,7 +5916,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5884,7 +5929,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5897,7 +5942,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-mm", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5909,7 +5954,7 @@ dependencies = [ "faker-mm-lobby", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -5964,7 +6009,7 @@ dependencies = [ "rivet-util-mm", "s3-util", "serde", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "team-get", "tier-list", "token-create", @@ -5983,6 +6028,7 @@ dependencies = [ "chirp-client", "cloud-worker", "cluster-worker", + "ds-worker", "external-worker", "game-user-worker", "job-log-worker", @@ -7011,7 +7057,7 @@ dependencies = [ "faker-region", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -7025,7 +7071,7 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "rivet-util-cluster", - "sqlx", + 
"sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -7039,7 +7085,7 @@ dependencies = [ "faker-region", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -7054,7 +7100,7 @@ dependencies = [ "region-get", "region-list", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -7069,7 +7115,7 @@ dependencies = [ "region-get", "region-list", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -7084,7 +7130,7 @@ dependencies = [ "region-get", "region-list-for-game", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -7512,7 +7558,7 @@ dependencies = [ "rand", "redis", "rivet-metrics", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "thiserror", "tokio", "tokio-util 0.7.10", @@ -8341,18 +8387,63 @@ dependencies = [ "unicode_categories", ] +[[package]] +name = "sqlx" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9a2ccff1a000a5a59cd33da541d9f2fdcd9e6e8229cc200565942bff36d0aaa" +dependencies = [ + "sqlx-core 0.7.4 (registry+https://github.com/rust-lang/crates.io-index)", +] + [[package]] name = "sqlx" version = "0.7.4" source = "git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b#08d6e61aa0572e7ec557abbedb72cebb96e1ac5b" dependencies = [ - "sqlx-core", + "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "sqlx-macros", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", ] +[[package]] +name = "sqlx-core" +version = "0.7.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24ba59a9342a3d9bab6c56c118be528b27c9b60e490080e9711a04dccac83ef6" +dependencies = [ + "ahash 0.8.11", + "atoi", + "byteorder", + "bytes", + "crc", + "crossbeam-queue", + "either", + "event-listener", + "futures-channel", + "futures-core", + "futures-intrusive", + "futures-io", + "futures-util", + "hashlink", + "hex", + "indexmap 2.2.6", + "log", + "memchr", + "once_cell", + "paste", + "percent-encoding", + "serde", + "sha2", + "smallvec", + "sqlformat", + "thiserror", + "tracing", + "url", +] + [[package]] name = "sqlx-core" version = "0.7.4" @@ -8403,7 +8494,7 @@ source = "git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72c dependencies = [ "proc-macro2", "quote", - "sqlx-core", + "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "sqlx-macros-core", "syn 1.0.109", ] @@ -8423,7 +8514,7 @@ dependencies = [ "serde", "serde_json", "sha2", - "sqlx-core", + "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "sqlx-mysql", "sqlx-postgres", "sqlx-sqlite", @@ -8467,7 +8558,7 @@ dependencies = [ "sha1", "sha2", "smallvec", - "sqlx-core", + "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "stringprep", "thiserror", "tracing", @@ -8507,7 +8598,7 @@ dependencies = [ "serde_json", "sha2", "smallvec", - "sqlx-core", + "sqlx-core 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "stringprep", "thiserror", "tracing", @@ -8531,7 +8622,7 @@ dependencies = [ "log", "percent-encoding", "serde", - "sqlx-core", + "sqlx-core 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tracing", "url", "urlencoding", @@ -8720,7 +8811,7 @@ dependencies = [ "prost 0.10.4", "reqwest", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", "upload-prepare", @@ -8734,7 +8825,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-file-list", "upload-get", ] @@ -8748,7 +8839,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8761,7 +8852,7 @@ dependencies = [ "rivet-health-checks", "rivet-metrics", "rivet-runtime", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "team-member-list", "team-user-ban-get", ] @@ -8774,7 +8865,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8785,7 +8876,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8796,7 +8887,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8807,7 +8898,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8819,7 +8910,7 @@ dependencies = [ "faker-team", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8830,7 +8921,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8841,7 +8932,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8853,7 +8944,7 @@ dependencies = [ "faker-team", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "team-get", ] @@ -8867,7 +8958,7 @@ dependencies = [ "prost 0.10.4", "regex", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8878,7 +8969,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8889,7 +8980,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -8946,7 +9037,7 @@ dependencies = [ "rivet-operation", "rivet-pools", "rivet-runtime", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "team-get", "team-member-count", "tokio", @@ -9080,7 +9171,7 @@ dependencies = [ "prost 0.10.4", "rivet-claims", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -9092,7 +9183,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -9105,7 +9196,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -9118,7 +9209,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] @@ -9656,7 +9747,7 @@ dependencies = [ "reqwest", "rivet-operation", "s3-util", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-get", "upload-prepare", "url", @@ -9671,7 +9762,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -9683,7 +9774,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-prepare", ] @@ -9696,7 +9787,7 @@ dependencies = [ "chrono", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-prepare", ] @@ -9712,7 +9803,7 @@ dependencies = [ "reqwest", "rivet-operation", "s3-util", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", ] @@ -9731,7 +9822,7 @@ dependencies = [ "rivet-operation", "rivet-pools", "s3-util", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "tokio", "tracing", "tracing-subscriber", @@ -9747,7 +9838,7 @@ dependencies = [ "rivet-metrics", "rivet-runtime", "s3-util", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-get", "upload-prepare", ] @@ -9796,7 +9887,7 @@ dependencies = [ "prost 0.10.4", "reqwest", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-complete", "upload-get", "upload-prepare", @@ -9840,7 +9931,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-follow-toggle", ] @@ -9852,7 +9943,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-follow-toggle", ] @@ -9864,7 +9955,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 
(git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-follow-toggle", ] @@ -9876,7 +9967,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-follow-toggle", ] @@ -9888,7 +9979,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-follow-toggle", ] @@ -9900,7 +9991,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -9924,7 +10015,7 @@ dependencies = [ "prost 0.10.4", "rand", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "upload-file-list", "upload-get", ] @@ -9950,7 +10041,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-identity-create", ] @@ -9963,7 +10054,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-identity-create", ] @@ -9975,7 +10066,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-follow-toggle", ] @@ -9988,7 +10079,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-identity-create", "user-identity-get", ] @@ -10052,7 +10143,7 @@ dependencies = [ "rivet-metrics", "rivet-runtime", "rivet-util-user-presence", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -10065,7 +10156,7 @@ dependencies = [ "profanity-check", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-get", ] @@ -10089,7 +10180,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-identity-create", ] @@ -10102,7 +10193,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-identity-create", ] @@ -10116,7 +10207,7 @@ dependencies = [ "prost 0.10.4", "regex", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "user-identity-create", ] @@ -10149,7 +10240,7 @@ dependencies = [ "chirp-worker", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", ] [[package]] @@ -10161,7 +10252,7 @@ dependencies = [ "faker-user", "prost 0.10.4", "rivet-operation", - "sqlx", + "sqlx 0.7.4 (git+https://github.com/rivet-gg/sqlx?rev=08d6e61aa0572e7ec557abbedb72cebb96e1ac5b)", "token-create", ] diff --git a/svc/Cargo.toml b/svc/Cargo.toml index adbbc5a11..0febc98b1 100644 
--- a/svc/Cargo.toml +++ b/svc/Cargo.toml @@ -85,6 +85,7 @@ members = [ "pkg/debug/ops/email-res", "pkg/ds/ops/server-create", "pkg/ds/ops/server-delete", + "pkg/ds/worker", "pkg/email-verification/ops/complete", "pkg/email-verification/ops/create", "pkg/email/ops/send", diff --git a/svc/api/dynamic-servers/tests/basic.rs b/svc/api/dynamic-servers/tests/basic.rs index 9644e31d4..ed70f5ece 100644 --- a/svc/api/dynamic-servers/tests/basic.rs +++ b/svc/api/dynamic-servers/tests/basic.rs @@ -39,7 +39,6 @@ impl Ctx { util::timestamp::now(), util::timestamp::now(), (), - Vec::new(), ); Ctx { op_ctx } diff --git a/svc/api/traefik-provider/src/route/game_guard/dynamic_servers.rs b/svc/api/traefik-provider/src/route/game_guard/dynamic_servers.rs new file mode 100644 index 000000000..9a8dadb25 --- /dev/null +++ b/svc/api/traefik-provider/src/route/game_guard/dynamic_servers.rs @@ -0,0 +1,318 @@ +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, +}; + +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use proto::backend::{self, pkg::*}; +use redis::AsyncCommands; +use rivet_operation::prelude::*; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::{auth::Auth, types}; + +#[derive(Clone, Debug, Serialize, Deserialize)] +struct DynamicServer { + server_id: Uuid, + datacenter_id: Uuid, + label: String, + ip: String, + source: String, + target: String, + protocol: i32, + hostname: String, +} + +pub async fn build_ds( + ctx: &Ctx<Auth>, + dc_id: Uuid, + config: &mut types::TraefikConfigResponse, +) -> GlobalResult<()> { + // TODO put in function, clean up + // TODO: remove cache for now + let dynamic_servers: Option<Vec<DynamicServer>> = ctx + .cache() + .fetch_one_json("servers_ports", dc_id, |mut cache, dc_id| { + let ctx = ctx.clone(); + async move { + let rows = sql_fetch_all!( + [ctx, (Uuid, Uuid, String, String, String, String, i32)] + " + SELECT + servers.server_id, + servers.datacenter_id, + server_ports.nomad_label, + server_ports.nomad_ip, + server_ports.nomad_source, + server_ports.nomad_target, + docker_ports_protocol_game_guard.protocol + FROM + db_dynamic_servers.server_ports + JOIN + db_dynamic_servers.servers + ON + server_ports.server_id = servers.server_id + JOIN + db_dynamic_servers.docker_ports_protocol_game_guard + ON + server_ports.server_id = docker_ports_protocol_game_guard.server_id + AND + server_ports.nomad_label = docker_ports_protocol_game_guard.nomad_label + WHERE + servers.datacenter_id = $1 + ", + dc_id + ) + .await?
+ .into_iter() + .map( + |(server_id, datacenter_id, label, ip, source, target, protocol)| { + DynamicServer { + server_id, + datacenter_id, + label, + ip, + source: source.clone(), + target: target.clone(), + protocol, + hostname: format!( + "{}-{}.server.{}.rivet.run", + source, target, datacenter_id + ), + } + }, + ) + .collect(); + + cache.resolve(&dc_id, rows); + + Ok(cache) + } + }) + .await?; + + let dynamic_servers = unwrap!(dynamic_servers); + + // Process proxied ports + for dynamic_server in &dynamic_servers { + let server_id = dynamic_server.server_id; + let register_res = ds_register_proxied_port(server_id, dynamic_server, config); + match register_res { + Ok(_) => {} + Err(err) => { + tracing::error!(?err, "failed to register proxied port route") + } + } + } + + config.http.middlewares.insert( + "ds-rate-limit".to_owned(), + types::TraefikMiddlewareHttp::RateLimit { + average: 100, + period: "5m".into(), + burst: 256, + source_criterion: types::InFlightReqSourceCriterion::IpStrategy(types::IpStrategy { + depth: 0, + exclude_ips: None, + }), + }, + ); + config.http.middlewares.insert( + "ds-in-flight".to_owned(), + types::TraefikMiddlewareHttp::InFlightReq { + // This number needs to be high to allow for parallel requests + amount: 4, + source_criterion: types::InFlightReqSourceCriterion::IpStrategy(types::IpStrategy { + depth: 0, + exclude_ips: None, + }), + }, + ); + + // TODO: add middleware & services & ports + // TODO: same as jobs, watch out for namespaces + Ok(()) +} + +#[tracing::instrument(skip(config))] +fn ds_register_proxied_port( + run_id: Uuid, + proxied_port: &DynamicServer, + config: &mut types::TraefikConfigResponse, +) -> GlobalResult<()> { + let ingress_port = proxied_port.source.clone(); + let target_nomad_port_label = proxied_port.label.clone(); + let service_id = format!("ds-run:{}:{}", run_id, target_nomad_port_label); + let proxy_protocol = unwrap!(backend::dynamic_servers::GameGuardProtocol::from_i32( + proxied_port.protocol + )); + + // Insert the relevant service + match proxy_protocol { + backend::dynamic_servers::GameGuardProtocol::Http + | backend::dynamic_servers::GameGuardProtocol::Https => { + config.http.services.insert( + service_id.clone(), + types::TraefikService { + load_balancer: types::TraefikLoadBalancer { + servers: vec![types::TraefikServer { + url: Some(format!( + "http://{}:{}", + proxied_port.ip, proxied_port.source + )), + address: None, + }], + sticky: None, + }, + }, + ); + } + backend::dynamic_servers::GameGuardProtocol::Tcp + | backend::dynamic_servers::GameGuardProtocol::TcpTls => { + config.tcp.services.insert( + service_id.clone(), + types::TraefikService { + load_balancer: types::TraefikLoadBalancer { + servers: vec![types::TraefikServer { + url: None, + address: Some(format!("{}:{}", proxied_port.ip, proxied_port.source)), + }], + sticky: None, + }, + }, + ); + } + backend::dynamic_servers::GameGuardProtocol::Udp => { + config.udp.services.insert( + service_id.clone(), + types::TraefikService { + load_balancer: types::TraefikLoadBalancer { + servers: vec![types::TraefikServer { + url: None, + address: Some(format!("{}:{}", proxied_port.ip, proxied_port.source)), + }], + sticky: None, + }, + }, + ); + } + }; + + // Insert the relevant router + match proxy_protocol { + backend::dynamic_servers::GameGuardProtocol::Http => { + // Generate config + let middlewares = http_router_middlewares(); + let rule = format_http_rule(proxied_port); + + // Hash key + let unique_key = (&run_id, &target_nomad_port_label, &rule, 
&middlewares); + let mut hasher = DefaultHasher::new(); + unique_key.hash(&mut hasher); + let hash = hasher.finish(); + + config.http.routers.insert( + format!("ds-run:{run_id}:{hash:x}:http"), + types::TraefikRouter { + entry_points: vec![format!("lb-{ingress_port}")], + rule: Some(rule), + priority: None, + service: service_id.clone(), + middlewares, + tls: None, + }, + ); + } + backend::dynamic_servers::GameGuardProtocol::Https => { + // Generate config + let middlewares = http_router_middlewares(); + let rule = format_http_rule(proxied_port); + + // Hash key + let unique_key = (&run_id, &target_nomad_port_label, &rule, &middlewares); + let mut hasher = DefaultHasher::new(); + unique_key.hash(&mut hasher); + let hash = hasher.finish(); + + config.http.routers.insert( + format!("ds-run:{run_id}:{hash:x}:https"), + types::TraefikRouter { + entry_points: vec![format!("lb-{ingress_port}")], + rule: Some(rule), + priority: None, + service: service_id.clone(), + middlewares, + tls: Some(types::TraefikTls::build(build_tls_domains(proxied_port)?)), + }, + ); + } + backend::dynamic_servers::GameGuardProtocol::Tcp => { + config.tcp.routers.insert( + format!("ds-run:{}:{}:tcp", run_id, target_nomad_port_label), + types::TraefikRouter { + entry_points: vec![format!("lb-{ingress_port}-tcp")], + rule: Some("HostSNI(`*`)".into()), + priority: None, + service: service_id, + middlewares: vec![], + tls: None, + }, + ); + } + backend::dynamic_servers::GameGuardProtocol::TcpTls => { + config.tcp.routers.insert( + format!("ds-run:{}:{}:tcp-tls", run_id, target_nomad_port_label), + types::TraefikRouter { + entry_points: vec![format!("lb-{ingress_port}-tcp")], + rule: Some("HostSNI(`*`)".into()), + priority: None, + service: service_id, + middlewares: vec![], + tls: Some(types::TraefikTls::build(build_tls_domains(proxied_port)?)), + }, + ); + } + backend::dynamic_servers::GameGuardProtocol::Udp => { + config.udp.routers.insert( + format!("ds-run:{}:{}:udp", run_id, target_nomad_port_label), + types::TraefikRouter { + entry_points: vec![format!("lb-{ingress_port}-udp")], + rule: None, + priority: None, + service: service_id, + middlewares: vec![], + tls: None, + }, + ); + } + } + + Ok(()) +} + +fn format_http_rule(proxied_port: &DynamicServer) -> String { + format!("Host(`{}`)", proxied_port.hostname) +} + +fn build_tls_domains(proxied_port: &DynamicServer) -> GlobalResult> { + // Derive TLS config. Jobs can specify their own ingress rules, so we + // need to derive which domains to use for the job. + // + // A parent wildcard SSL mode will use the parent domain as the SSL + // name. 
+ let mut domains = Vec::new(); + let (_, parent_host) = unwrap!(proxied_port.hostname.split_once('.')); + domains.push(types::TraefikTlsDomain { + main: parent_host.to_owned(), + sans: vec![format!("*.{}", parent_host)], + }); + + Ok(domains) +} + +fn http_router_middlewares() -> Vec { + let middlewares = vec!["ds-rate-limit".to_string(), "ds-in-flight".to_string()]; + + middlewares +} diff --git a/svc/api/traefik-provider/src/route/game_guard.rs b/svc/api/traefik-provider/src/route/game_guard/job.rs similarity index 87% rename from svc/api/traefik-provider/src/route/game_guard.rs rename to svc/api/traefik-provider/src/route/game_guard/job.rs index 5f7758a63..05d9c7a39 100644 --- a/svc/api/traefik-provider/src/route/game_guard.rs +++ b/svc/api/traefik-provider/src/route/game_guard/job.rs @@ -12,52 +12,13 @@ use url::Url; use crate::{auth::Auth, types}; -#[derive(Debug, Serialize, Deserialize)] -#[serde(deny_unknown_fields)] -pub struct ConfigQuery { - token: String, - datacenter: Uuid, -} - -#[tracing::instrument(skip(ctx))] -pub async fn config( - ctx: Ctx, - _watch_index: WatchIndexQuery, - ConfigQuery { token, datacenter }: ConfigQuery, -) -> GlobalResult { - ctx.auth().token(&token).await?; - - // Fetch configs and catch any errors - let config = build_job(&ctx, datacenter).await?; - - // tracing::info!( - // http_services = ?config.http.services.len(), - // http_routers = config.http.routers.len(), - // http_middlewares = ?config.http.middlewares.len(), - // tcp_services = ?config.tcp.services.len(), - // tcp_routers = config.tcp.routers.len(), - // tcp_middlewares = ?config.tcp.middlewares.len(), - // udp_services = ?config.udp.services.len(), - // udp_routers = config.udp.routers.len(), - // udp_middlewares = ?config.udp.middlewares.len(), - // "traefik config" - // ); - - Ok(types::TraefikConfigResponseNullified { - http: config.http.nullified(), - tcp: config.tcp.nullified(), - udp: config.udp.nullified(), - }) -} - /// Builds configuration for job routes. 
#[tracing::instrument(skip(ctx))] pub async fn build_job( ctx: &Ctx<Auth>, region_id: Uuid, -) -> GlobalResult<types::TraefikConfigResponse> { - let mut config = types::TraefikConfigResponse::default(); - + config: &mut types::TraefikConfigResponse, +) -> GlobalResult<()> { let redis_job = ctx.op_ctx().redis_job().await?; let job_runs_fetch = fetch_job_runs(redis_job, region_id).await?; @@ -90,7 +51,7 @@ pub async fn build_job( let run_id = unwrap_ref!(run_proxied_ports.run_id); tracing::info!(proxied_ports_len = ?run_proxied_ports.proxied_ports.len(), "adding job run"); for proxied_port in &run_proxied_ports.proxied_ports { - let register_res = register_proxied_port(**run_id, proxied_port, &mut config); + let register_res = job_register_proxied_port(**run_id, proxied_port, config); match register_res { Ok(_) => {} Err(err) => { @@ -113,7 +74,7 @@ pub async fn build_job( "job traefik config" ); - Ok(config) + Ok(()) } #[tracing::instrument(skip(redis_job))] @@ -141,7 +102,7 @@ async fn fetch_job_runs( } #[tracing::instrument(skip(config))] -fn register_proxied_port( +fn job_register_proxied_port( run_id: Uuid, proxied_port: &job::redis_job::run_proxied_ports::ProxiedPort, config: &mut types::TraefikConfigResponse, diff --git a/svc/api/traefik-provider/src/route/game_guard/mod.rs b/svc/api/traefik-provider/src/route/game_guard/mod.rs new file mode 100644 index 000000000..1d98016e9 --- /dev/null +++ b/svc/api/traefik-provider/src/route/game_guard/mod.rs @@ -0,0 +1,59 @@ +use std::{ + collections::hash_map::DefaultHasher, + hash::{Hash, Hasher}, +}; + +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use dynamic_servers::build_ds; +use job::build_job; +use proto::backend::{self, pkg::*}; +use redis::AsyncCommands; +use rivet_operation::prelude::*; +use serde::{Deserialize, Serialize}; +use url::Url; + +use crate::{auth::Auth, types}; + +pub mod dynamic_servers; +pub mod job; + +#[derive(Debug, Serialize, Deserialize)] +#[serde(deny_unknown_fields)] +pub struct ConfigQuery { + token: String, + datacenter: Uuid, +} + +#[tracing::instrument(skip(ctx))] +pub async fn config( + ctx: Ctx<Auth>, + _watch_index: WatchIndexQuery, + ConfigQuery { token, datacenter }: ConfigQuery, +) -> GlobalResult<types::TraefikConfigResponseNullified> { + ctx.auth().token(&token).await?; + + let mut config = types::TraefikConfigResponse::default(); + + // Fetch configs and catch any errors + build_job(&ctx, datacenter, &mut config).await?; + build_ds(&ctx, datacenter, &mut config).await?; + + // tracing::info!( + // http_services = ?config.http.services.len(), + // http_routers = config.http.routers.len(), + // http_middlewares = ?config.http.middlewares.len(), + // tcp_services = ?config.tcp.services.len(), + // tcp_routers = config.tcp.routers.len(), + // tcp_middlewares = ?config.tcp.middlewares.len(), + // udp_services = ?config.udp.services.len(), + // udp_routers = config.udp.routers.len(), + // udp_middlewares = ?config.udp.middlewares.len(), + // "traefik config" + // ); + + Ok(types::TraefikConfigResponseNullified { + http: config.http.nullified(), + tcp: config.tcp.nullified(), + udp: config.udp.nullified(), + }) +} diff --git a/svc/pkg/ds/db/servers/migrations/20240501133910_init.up.sql b/svc/pkg/ds/db/servers/migrations/20240501133910_init.up.sql index d4ad485ce..3d24917e6 100644 --- a/svc/pkg/ds/db/servers/migrations/20240501133910_init.up.sql +++ b/svc/pkg/ds/db/servers/migrations/20240501133910_init.up.sql @@ -12,24 +12,25 @@ CREATE TABLE servers ( kill_timeout_ms INT NOT NULL, create_ts INT NOT NULL, + stop_ts INT, + finish_ts INT, + cleanup_ts INT, -- When the server was
marked to be deleted by Rivet destroy_ts INT, - INDEX (game_id) -); - -CREATE TABLE docker_runtimes ( - server_id UUID PRIMARY KEY REFERENCES servers(server_id), + -- Docker image_id UUID NOT NULL, args STRING[], network_mode INT NOT NULL, -- rivet.backend.dynamic_servers.DockerNetworkMode -- This is a map environment JSONB NOT NULL, + + INDEX (game_id) ); CREATE TABLE docker_ports_protocol_game_guard ( - server_id UUID NOT NULL REFERENCES docker_runtimes(server_id), - port_name string NOT NULL, + server_id UUID NOT NULL REFERENCES servers, + port_name STRING NOT NULL, port_number INT NOT NULL, protocol INT NOT NULL, -- rivet.backend.dynamic_servers.GameGuardProtocol @@ -37,9 +38,33 @@ CREATE TABLE docker_ports_protocol_game_guard ( ); CREATE TABLE docker_ports_host ( - server_id UUID NOT NULL REFERENCES docker_runtimes(server_id), - port_name string NOT NULL, + server_id UUID NOT NULL REFERENCES servers, + port_name STRING NOT NULL, port_number INT NOT NULL, + protocol INT NOT NULL, -- rivet.backend.dynamic_servers.HostProtocol PRIMARY KEY (server_id, port_name) ); + +-- TODO make all nomad stuff clear +CREATE TABLE server_nomad ( + server_id UUID PRIMARY KEY REFERENCES servers, + nomad_dispatched_job_id STRING, + nomad_alloc_id STRING, + nomad_node_id STRING, + nomad_alloc_plan_ts INT, + nomad_alloc_state JSONB, + nomad_eval_plan_ts INT, + + INDEX (nomad_dispatched_job_id) +); + +CREATE TABLE server_ports ( + server_id UUID NOT NULL REFERENCES servers, + nomad_label STRING NOT NULL, + nomad_ip STRING NOT NULL, + nomad_source INT NOT NULL, + nomad_target INT NOT NULL, + + PRIMARY KEY (server_id, nomad_label) +); \ No newline at end of file diff --git a/svc/pkg/ds/ops/server-create/Cargo.toml b/svc/pkg/ds/ops/server-create/Cargo.toml index f242969b1..34604fce2 100644 --- a/svc/pkg/ds/ops/server-create/Cargo.toml +++ b/svc/pkg/ds/ops/server-create/Cargo.toml @@ -7,11 +7,39 @@ license = "Apache-2.0" [dependencies] chirp-client = { path = "../../../../../lib/chirp/client" } +chirp-worker = { path = "../../../../../lib/chirp/worker" } rivet-operation = { path = "../../../../../lib/operation/core" } -nomad-client = "0.0.9" +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" lazy_static = "1.4.0" +uuid = { version = "1", features = ["v4", "serde"] } +http = "0.2" +bit-vec = "0.6" +cjson = "0.1" +nomad-util = { path = "../../../../../lib/nomad-util" } +strum = { version = "0.24", features = ["derive"] } +sha2 = "0.10" +hex = "0.4" +rivet-util = { path = "../../../../../lib/util/core" } +heck = "0.3" +s3-util = { path = "../../../../../lib/s3-util" } +util-build = { package = "rivet-util-build", path = "../../../build/util" } +regex = "1.10" +rand = "0.8" + +mm-lobby-list-for-user-id = { path = "../../../mm/ops/lobby-list-for-user-id" } +build-get = { path = "../../../build/ops/get" } +user-identity-get = { path = "../../../user-identity/ops/get" } +upload-get = { path = "../../../upload/ops/get" } +region-get = { path = "../../../region/ops/get" } +ip-info = { path = "../../../ip/ops/info" } +tier-list = { path = "../../../tier/ops/list" } +cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" } +cluster-list = { path = "../../../cluster/ops/list" } +faker-build = { path = "../../../faker/ops/build" } +faker-game = { path = "../../../faker/ops/game" } -[dependencies.nomad_client_new] +[dependencies.nomad_client] package = "nomad_client" git = "https://github.com/rivet-gg/nomad-client" rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret diff --git
a/svc/pkg/ds/ops/server-create/src/lib.rs b/svc/pkg/ds/ops/server-create/src/lib.rs index 323c3ddc5..f8ae079b4 100644 --- a/svc/pkg/ds/ops/server-create/src/lib.rs +++ b/svc/pkg/ds/ops/server-create/src/lib.rs @@ -1,6 +1,34 @@ +// use chirp_worker::prelude::*; use futures_util::FutureExt; -use proto::backend::{self, pkg::*}; +use nomad_client::models::*; +use nomad_job::{ + escape_go_template, gen_oci_bundle_config, inject_consul_env_template, nomad_host_port_env_var, + template_env_var, template_env_var_int, DecodedPort, ProxyProtocol, TransportProtocol, +}; +use proto::backend::{ + self, dynamic_servers::lobby_runtime::NetworkMode as LobbyRuntimeNetworkMode, +}; +use proto::{backend::pkg::*, chirp::response::Ok}; +use regex::Regex; use rivet_operation::prelude::*; +use serde_json::json; +use sha2::{Digest, Sha256}; +use std::hash::Hasher; +use std::{collections::HashMap, hash::DefaultHasher, net::IpAddr, time::Duration}; +use team::member_get::request; +use util_mm::key::lobby_config; +use crate::sqlx; + +mod nomad_job; +mod oci_config; +mod seccomp; +mod util_job; +mod util_mm; + +lazy_static::lazy_static! { + pub static ref NEW_NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = + nomad_util::new_config_from_env().unwrap(); +} #[operation(name = "ds-server-create")] pub async fn handle( @@ -16,6 +44,8 @@ pub async fn handle( let create_ts = ctx.ts(); + // MARK: db insert + rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { let ctx = ctx.clone(); let runtime = runtime.clone(); @@ -79,23 +109,13 @@ pub async fn handle( resources_memory_mib, kill_timeout_ms, create_ts - ) - VALUES - ($1, $2, $3, $4, $5, $6, $7, $8, $9) - RETURNING - 1 - ), - docker_runtimes_cte AS ( - INSERT INTO - db_dynamic_servers.docker_runtimes ( - server_id, image_id, args, network_mode, environment ) VALUES - ($1, $10, $11, $12, $13) + ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13) RETURNING 1 ), @@ -162,6 +182,1103 @@ pub async fn handle( }) .await?; + // let ( + // (mm_game_config, namespace), + // mm_ns_config, + // (lobby_group, lobby_group_meta, version_id), + // region, + // tiers, + // ) = tokio::try_join!( + // fetch_namespace(ctx, namespace_id), + // fetch_mm_namespace_config(ctx, namespace_id), + // fetch_lobby_group_config(ctx, lobby_group_id), + // fetch_region(ctx, region_id), + // fetch_tiers(ctx, region_id), + // )?; + // let (mm_game_config, namespace) = fetch_namespace(ctx, namespace_id).await?; + // let mm_ns_config = fetch_mm_namespace_config(ctx, namespace_id).await?; + // let (lobby_group, lobby_group_meta, version_id) = fetch_lobby_group_config(ctx, lobby_group_id) + // .await?; + // let region = fetch_region(ctx, region_id).await?; + // let tiers = fetch_tiers(ctx, region_id).await?; + // let version = fetch_version(ctx, version_id).await?; + + // // Do all nomad stuff + // let namespace_id = unwrap_ref!(namespace.namespace_id).as_uuid(); + // let version_id = unwrap_ref!(version.version_id).as_uuid(); + // let lobby_group_id = unwrap_ref!(lobby_group_meta.lobby_group_id).as_uuid(); + // let region_id = unwrap_ref!(region.region_id).as_uuid(); + + // let job_runner_binary_url = resolve_job_runner_binary_url(ctx).await?; + + // let resolve_perf = ctx.perf().start("resolve-image-artifact-url").await; + // let build_id = unwrap_ref!(runtime.build_id).as_uuid(); + // let image_artifact_url = resolve_image_artifact_url(ctx, build_id, region).await?; + // resolve_perf.end(); + + // // Validate build exists and belongs to this game + // let build_id = 
unwrap_ref!(runtime.build_id).as_uuid(); + // let build_get = op!([ctx] build_get { + // build_ids: vec![build_id.into()], + // }) + // .await?; + // let build = unwrap!(build_get.builds.first()); + // let build_kind = unwrap!(backend::build::BuildKind::from_i32(build.kind)); + // let build_compression = unwrap!(backend::build::BuildCompression::from_i32( + // build.compression + // )); + + let ctx: OperationContext = ctx; + + let request_runtime = match unwrap!(ctx.runtime.clone()) { + dynamic_servers::server_create::request::Runtime::DockerRuntime(docker_runtime) => { + docker_runtime + } + }; + let request_runtime_network = unwrap!(request_runtime.network.clone()); + + // Generate the Docker job + + // let runtime = backend::dynamic_servers::lobby_runtime::Docker { + // build_id: todo!(), + // args: docker_runtime.args, + // env_vars: todo!(), + // network_mode: todo!(), + // ports: todo!(), + // }; + // let _image_tag = &build.image_tag; + // let tier = backend::region::Tier { + // tier_name_id: todo!(), + // rivet_cores_numerator: todo!(), + // rivet_cores_denominator: todo!(), + // cpu: todo!(), + // memory: todo!(), + // memory_max: todo!(), + // disk: todo!(), + // bandwidth: todo!(), + // }; + + // let lobby_config = ctx.lobby_config_json.is_some(); + // let lobby_tags = !ctx.tags.is_empty(); + // let build_kind = backend::build::BuildKind::DockerImage; + // let build_compression = backend::build::BuildCompression::None; + + // IMPORTANT: This job spec must be deterministic. Do not pass in parameters + // that change with every run, such as the lobby ID. Ensure the + // `reuse_job_id` test passes when changing this function. + use nomad_client::models::*; + + let resources = unwrap!(ctx.resources.clone()); + + let tier_res = op!([ctx] tier_list { + region_ids: vec![datacenter_id.into()], + }) + .await?; + let tier_region = unwrap!(tier_res.regions.first()); + + // // runc-compatible resourcesd + // let cpu = resources.cpu_millicores; // Millicore (1/1000 of a core) + // let memory = resources.memory_mib * (1024 * 1024); // bytes + // // let memory_max = tier.memory_max * (1024 * 1024); // bytes + + // Find the first tier that has more CPU and memory than the requested + // resources + let mut tiers = tier_region.tiers.clone(); + + // Sort the tiers by cpu + tiers.sort_by(|a, b| a.cpu.cmp(&b.cpu)); + let tier = unwrap!(tiers.iter().find(|t| { + t.cpu as i32 >= resources.cpu_millicores && t.memory as i32 >= resources.memory_mib + })); + + // runc-compatible resources + let cpu = tier.rivet_cores_numerator as u64 * 1_000 / tier.rivet_cores_denominator as u64; // Millicore (1/1000 of a core) + let memory = tier.memory * (1024 * 1024); // bytes + let memory_max = tier.memory_max * (1024 * 1024); // bytes + + // dbg!(tier, cpu, memory, memory_max); + // panic!(); + + // Validate build exists and belongs to this game + let build_id = unwrap_ref!(request_runtime.image_id).as_uuid(); + let build_get = op!([ctx] build_get { + build_ids: vec![build_id.into()], + }) + .await?; + let build = unwrap!(build_get.builds.first()); + let build_kind = unwrap!(backend::build::BuildKind::from_i32(build.kind)); + let build_compression = unwrap!(backend::build::BuildCompression::from_i32( + build.compression + )); + + // // Nomad-compatible resources + // let resources = Resources { + // // TODO: Configure this per-provider + // // Nomad configures CPU based on MHz, not millicores. We havel to calculate the CPU share + // // by knowing how many MHz are on the client. 
+ // CPU: if cpu < 1000 { + // Some((cpu - util_job::TASK_CLEANUP_CPU).try_into()?) + // } else { + // None + // }, + // cores: if cpu >= 1000 { + // Some((cpu / 1000) as i32) + // } else { + // None + // }, + // memory_mb: Some( + // (TryInto::<i64>::try_into(memory)? / (1024 * 1024) + // - util_job::TASK_CLEANUP_MEMORY as i64) + // .try_into()?, + // ), + // // Allow oversubscribing memory by 50% of the reserved + // // memory if using less than the node's total memory + // memory_max_mb: Some( + // (TryInto::<i64>::try_into((memory as f64 * 1.5) as i64)? / (1024 * 1024) + // - util_job::TASK_CLEANUP_MEMORY as i64) + // .try_into()?, + // ), + // ..Resources::new() + // }; + + // Nomad-compatible resources + let nomad_resources = Resources { + // TODO: Configure this per-provider + // Nomad configures CPU based on MHz, not millicores. We have to calculate the CPU share + // by knowing how many MHz are on the client. + CPU: if tier.rivet_cores_numerator < tier.rivet_cores_denominator { + Some((tier.cpu - util_job::TASK_CLEANUP_CPU as u64).try_into()?) + } else { + None + }, + cores: if tier.rivet_cores_numerator >= tier.rivet_cores_denominator { + Some((tier.rivet_cores_numerator / tier.rivet_cores_denominator) as i32) + } else { + None + }, + memory_mb: Some( + (TryInto::<i64>::try_into(memory)? / (1024 * 1024) + - util_job::TASK_CLEANUP_MEMORY as i64) + .try_into()?, + ), + // Allow oversubscribing memory by 50% of the reserved + // memory if using less than the node's total memory + memory_max_mb: None, + // Some( + // (TryInto::<i64>::try_into(memory_max)? / (1024 * 1024) + // - util_job::TASK_CLEANUP_MEMORY as i64) + // .try_into()?, + // ), + disk_mb: Some(tier.disk as i32), // TODO: Is this deprecated? + ..Resources::new() + }; + + // // let network_mode = unwrap!(LobbyRuntimeNetworkMode::from_i32(runtime.network_mode)); + + // Read ports + let decoded_ports = request_runtime_network + .ports + .clone() + .into_iter() + .map(|(port, docker_port)| match docker_port.routing { + Some(backend::dynamic_servers::docker_port::Routing::GameGuard(game_guard_routing)) => { + let target = unwrap!(docker_port.port) as u16; + + GlobalResult::Ok(DecodedPort { + label: port.clone(), + nomad_port_label: util_mm::format_nomad_port_label(&port), + target, + proxy_protocol: unwrap!(backend::dynamic_servers::GameGuardProtocol::from_i32( + game_guard_routing.protocol + )) + .into(), + }) + } + Some(backend::dynamic_servers::docker_port::Routing::Host(_)) => { + todo!() + } + None => { + todo!() + } + }) + .collect::<GlobalResult<Vec<_>>>()?; + + // The container will set up port forwarding manually from the Nomad-defined ports on the host + // to the CNI container + let dynamic_ports = decoded_ports + .iter() + .map(|port| Port { + label: Some(port.nomad_port_label.clone()), + ..Port::new() + }) + .collect::<Vec<_>>(); + + // Port mappings to pass to the container. Only used in bridge networking.
+ let cni_port_mappings = decoded_ports + .clone() + .into_iter() + .map(|port| { + json!({ + "HostPort": template_env_var_int(&nomad_host_port_env_var(&port.nomad_port_label)), + "ContainerPort": port.target, + "Protocol": TransportProtocol::from(port.proxy_protocol).as_cni_protocol(), + }) + }) + .collect::<Vec<_>>(); + + let prepared_ports = request_runtime_network + .ports + .iter() + .map(|(label, docker_port)| { + let mode = unwrap!(backend::dynamic_servers::DockerNetworkMode::from_i32( + request_runtime_network.mode + )); + let port_value = match mode { + // CNI will handle mapping the host port to the container port + backend::dynamic_servers::DockerNetworkMode::Bridge => { + unwrap!(docker_port.port).to_string() + } + // The container needs to listen on the correct port + backend::dynamic_servers::DockerNetworkMode::Host => { + template_env_var(&nomad_host_port_env_var(&label)) + } + }; + + GlobalResult::Ok(Some(String::new())) + // TODO + // Port with the kebab case port key. Included for backward compatibility & for + // less confusion. + // Ok((format!("PORT_{}", port.label.replace('-', "_")), port_value)) + }); + + // Also see util_mm::consts::DEFAULT_ENV_KEYS + let mut env = Vec::<(String, String)>::new() + .into_iter() + //runtime.env_vars + // .iter() + // .map(|v| (v.key.clone(), escape_go_template(&v.value))) + // TODO + // .chain(if lobby_config { + // Some(( + // "RIVET_LOBBY_CONFIG".to_string(), + // template_env_var("NOMAD_META_LOBBY_CONFIG"), + // )) + // } else { + // None + // }) + // .chain(if lobby_tags { + // Some(( + // "RIVET_LOBBY_TAGS".to_string(), + // template_env_var("NOMAD_META_LOBBY_TAGS"), + // )) + // } else { + // None + // }) + .chain([( + "RIVET_API_ENDPOINT".to_string(), + util::env::origin_api().to_string(), + )]) + // Ports + // TODO + // .chain(prepared_ports) + // // Port ranges + // .chain( + // decoded_ports + // .iter() + // .filter_map(|port| { + // if let PortTarget::Range { min, max } = &port.target { + // let snake_port_label = port.label.replace('-', "_"); + // Some([ + // ( + // format!("PORT_RANGE_MIN_{}", snake_port_label), + // min.to_string(), + // ), + // ( + // format!("PORT_RANGE_MAX_{}", snake_port_label), + // max.to_string(), + // ), + // ]) + // } else { + // None + // } + // }) + // .flatten(), + // ) + .map(|(k, v)| format!("{k}={v}")) + .collect::<Vec<String>>(); + env.sort(); + + let services = decoded_ports + .iter() + .map(|port| { + let service_name = format!("${{NOMAD_META_LOBBY_ID}}-{}", port.label); + GlobalResult::Ok(Some(Service { + provider: Some("nomad".into()), + name: Some(service_name), + tags: Some(vec!["game".into()]), + port_label: Some(port.nomad_port_label.clone()), + // checks: if TransportProtocol::from(port.proxy_protocol) + // == TransportProtocol::Tcp + // { + // Some(vec![ServiceCheck { + // name: Some(format!("{}-probe", port.label)), + // port_label: Some(port.nomad_port_label.clone()), + // _type: Some("tcp".into()), + // interval: Some(30_000_000_000), + // timeout: Some(2_000_000_000), + // ..ServiceCheck::new() + // }]) + // } else { + // None + // }, + ..Service::new() + })) + }) + .filter_map(|x| x.transpose()) + .collect::<GlobalResult<Vec<_>>>()?; + + // Generate the command to download and decompress the file + let mut download_cmd = r#"curl -Lf "$NOMAD_META_IMAGE_ARTIFACT_URL""#.to_string(); + match build_compression { + backend::build::BuildCompression::None => {} + backend::build::BuildCompression::Lz4 => { + download_cmd.push_str(" | lz4 -d -"); + } + } + + // MARK: Job spec + + let job_spec = Job { + _type:
Some("batch".into()), + // constraints: Some(vec![Constraint { + // l_target: Some("${node.class}".into()), + // r_target: Some("job".into()), + // operand: Some("=".into()), + // }]), + parameterized_job: Some(Box::new(ParameterizedJobConfig { + payload: Some("forbidden".into()), + meta_required: Some(vec![ + "job_runner_binary_url".into(), + "vector_socket_addr".into(), + "image_artifact_url".into(), + "root_user_enabled".into(), + ]), + meta_optional: Some(vec!["rivet_test_id".into()]), + })), + task_groups: Some(vec![TaskGroup { + name: Some(util_job::RUN_MAIN_TASK_NAME.into()), + constraints: None, // TODO: Use parameter meta to specify the hardware + affinities: None, // TODO: + // Allows for jobs to keep running and receiving players in the + // event of a disconnection from the Nomad server. + max_client_disconnect: Some(5 * 60 * 1_000_000_000), + restart_policy: Some(Box::new(RestartPolicy { + attempts: Some(0), + mode: Some("fail".into()), + ..RestartPolicy::new() + })), + reschedule_policy: Some(Box::new(ReschedulePolicy { + attempts: Some(0), + unlimited: Some(false), + ..ReschedulePolicy::new() + })), + networks: Some(vec![NetworkResource { + // The setup.sh script will set up a CNI network if using bridge networking + mode: Some("host".into()), + dynamic_ports: Some(dynamic_ports.clone()), + ..NetworkResource::new() + }]), + services: Some(services), + // Configure ephemeral disk for logs + ephemeral_disk: Some(Box::new(EphemeralDisk { + size_mb: Some(tier.disk as i32), + ..EphemeralDisk::new() + })), + tasks: Some(vec![ + // TODO + Task { + name: Some("runc-setup".into()), + lifecycle: Some(Box::new(TaskLifecycle { + hook: Some("prestart".into()), + sidecar: Some(false), + })), + driver: Some("raw_exec".into()), + config: Some({ + let mut x = HashMap::new(); + x.insert("command".into(), json!("${NOMAD_TASK_DIR}/setup.sh")); + x + }), + templates: Some(vec![ + Template { + embedded_tmpl: Some(include_str!("./scripts/setup.sh").replace( + "__HOST_NETWORK__", + match unwrap!( + backend::dynamic_servers::DockerNetworkMode::from_i32( + request_runtime_network.mode + ) + ) { + backend::dynamic_servers::DockerNetworkMode::Bridge => "false", + backend::dynamic_servers::DockerNetworkMode::Host => "true", + }, + )), + dest_path: Some("${NOMAD_TASK_DIR}/setup.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some( + include_str!("./scripts/setup_job_runner.sh").into(), + ), + dest_path: Some("${NOMAD_TASK_DIR}/setup_job_runner.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some( + include_str!("./scripts/setup_oci_bundle.sh") + .replace("__DOWNLOAD_CMD__", &download_cmd) + .replace( + "__BUILD_KIND__", + match build_kind { + backend::build::BuildKind::DockerImage => { + "docker-image" + } + backend::build::BuildKind::OciBundle => "oci-bundle", + }, + ), + ), + dest_path: Some("${NOMAD_TASK_DIR}/setup_oci_bundle.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some( + include_str!("./scripts/setup_cni_network.sh").into(), + ), + dest_path: Some("${NOMAD_TASK_DIR}/setup_cni_network.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some(gen_oci_bundle_config( + cpu, memory, memory_max, env, + )?), + dest_path: Some( + "${NOMAD_ALLOC_DIR}/oci-bundle-config.base.json".into(), + ), + ..Template::new() + }, + Template { + embedded_tmpl: Some(inject_consul_env_template( + 
&serde_json::to_string(&cni_port_mappings)?, + )?), + dest_path: Some("${NOMAD_ALLOC_DIR}/cni-port-mappings.json".into()), + ..Template::new() + }, + ]), + resources: Some(Box::new(Resources { + CPU: Some(util_mm::RUNC_SETUP_CPU), + memory_mb: Some(util_mm::RUNC_SETUP_MEMORY), + ..Resources::new() + })), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(2), + disabled: None, + })), + ..Task::new() + }, + // TODO + Task { + name: Some(util_job::RUN_MAIN_TASK_NAME.into()), + driver: Some("raw_exec".into()), + config: Some({ + let mut x = HashMap::new(); + // This is downloaded in setup_job_runner.sh + x.insert("command".into(), json!("${NOMAD_ALLOC_DIR}/job-runner")); + x + }), + resources: Some(Box::new(nomad_resources.clone())), + // Intentionally high timeout. Killing jobs is handled manually with signals. + kill_timeout: Some(86400 * 1_000_000_000), + kill_signal: Some("SIGTERM".into()), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(4), + disabled: None, + })), + ..Task::new() + }, + // TODO: Remove + // Task { + // name: Some("runc-cleanup".into()), + // lifecycle: Some(Box::new(TaskLifecycle { + // hook: Some("poststop".into()), + // sidecar: Some(false), + // })), + // driver: Some("raw_exec".into()), + // config: Some({ + // let mut x = HashMap::new(); + // x.insert("command".into(), json!("${NOMAD_TASK_DIR}/cleanup.sh")); + // x + // }), + // templates: Some(vec![Template { + // embedded_tmpl: Some(include_str!("./scripts/cleanup.sh").into()), + // dest_path: Some("${NOMAD_TASK_DIR}/cleanup.sh".into()), + // perms: Some("744".into()), + // ..Template::new() + // }]), + // resources: Some(Box::new(Resources { + // CPU: Some(util_mm::RUNC_CLEANUP_CPU), + // memory_mb: Some(util_mm::RUNC_CLEANUP_MEMORY), + // ..Resources::new() + // })), + // log_config: Some(Box::new(LogConfig { + // max_files: Some(4), + // max_file_size_mb: Some(2), + // })), + // ..Task::new() + // }, + ]), + ..TaskGroup::new() + }]), + ..Job::new() + }; + + let job_spec_json = serde_json::to_string(&job_spec)?; + + // // Build proxied ports for each exposed port + // let proxied_ports = runtime + // .ports + // .iter() + // .filter(|port| { + // port.proxy_kind == backend::dynamic_servers::lobby_runtime::ProxyKind::GameGuard as i32 + // && port.port_range.is_none() + // }) + // .flat_map(|port| { + // let mut ports = vec![direct_proxied_port(lobby_id, region_id, port)]; + // match backend::dynamic_servers::lobby_runtime::ProxyProtocol::from_i32( + // port.proxy_protocol, + // ) { + // Some( + // backend::dynamic_servers::lobby_runtime::ProxyProtocol::Http + // | backend::dynamic_servers::lobby_runtime::ProxyProtocol::Https, + // ) => { + // ports.push(path_proxied_port(lobby_id, region_id, port)); + // } + // Some( + // backend::dynamic_servers::lobby_runtime::ProxyProtocol::Udp + // | backend::dynamic_servers::lobby_runtime::ProxyProtocol::Tcp + // | backend::dynamic_servers::lobby_runtime::ProxyProtocol::TcpTls, + // ) + // | None => {} + // } + // ports + // }) + // .collect::>>()?; + + // submit_job(&job_spec_json, Some(region_id.into())); + + // Get the region to dispatch in + let region_res = op!([ctx] region_get { + region_ids: vec![datacenter_id.into()], + }) + .await?; + let region = unwrap!(region_res.regions.first()); + + // let region = region; + let base_job: Job = serde_json::from_str::(&job_spec_json)?; + + // Modify the job spec + let mut job = base_job; + // let region = region; + // Replace all job IDs with a 
placeholder value in order to create a + // deterministic job spec. + { + let job_id: &str = "__PLACEHOLDER__"; + let job: &mut nomad_client::models::Job = &mut job; + job.ID = Some(job_id.into()); + job.name = Some(job_id.into()); + }; + + ensure_eq!( + "batch", + unwrap_ref!(job._type).as_str(), + "only the batch job type is supported" + ); + + // Update the job's region + job.region = Some(region.nomad_region.clone()); + job.datacenters = Some(vec![region.nomad_datacenter.clone()]); + + // Validate that the job is parameterized + // TODO: clean up how stuff is put in here + let parameters = unwrap!(job.parameterized_job.as_mut(), "job not parameterized"); + + // Add run parameters + parameters.meta_required = Some({ + let mut meta_required = parameters.meta_required.clone().unwrap_or_default(); + meta_required.push("job_run_id".into()); + meta_required + }); + + // Get task group + let task_groups = unwrap!(job.task_groups.as_mut()); + ensure_eq!(1, task_groups.len(), "must have exactly 1 task group"); + let task_group = unwrap!(task_groups.first_mut()); + ensure_eq!( + task_group.name.as_deref(), + Some(RUN_MAIN_TASK_NAME), + "must have main task group" + ); + + // Ensure has main task + let main_task = unwrap!( + task_group + .tasks + .iter_mut() + .flatten() + .find(|x| x.name.as_deref() == Some(RUN_MAIN_TASK_NAME)), + "must have main task" + ); + ensure!( + main_task + .lifecycle + .as_ref() + .map_or(true, |x| x.hook.is_none()), + "main task must not have a lifecycle hook" + ); + + // Configure networks + let networks = unwrap!(task_group.networks.as_mut()); + ensure_eq!(1, networks.len(), "must have exactly 1 network"); + let network = unwrap!(networks.first_mut()); + // Disable IPv6 DNS since Docker doesn't support IPv6 yet + network.DNS = Some(Box::new(nomad_client::models::DnsConfig { + servers: Some(vec![ + // Google + "8.8.8.8".into(), + "8.8.4.4".into(), + "2001:4860:4860::8888".into(), + "2001:4860:4860::8844".into(), + ]), + // Disable default search from the host + searches: Some(Vec::new()), + options: Some(vec!["rotate".into(), "edns0".into(), "attempts:2".into()]), + ..nomad_client::models::DnsConfig::new() + })); + + // Disable rescheduling, since job-run doesn't support this at the moment + task_group.reschedule_policy = Some(Box::new(nomad_client::models::ReschedulePolicy { + attempts: Some(0), + unlimited: Some(false), + ..nomad_client::models::ReschedulePolicy::new() + })); + + // Disable restarts. Our Nomad monitoring workflow doesn't support restarts + // at the moment. 
+ task_group.restart_policy = Some(Box::new(nomad_client::models::RestartPolicy { + attempts: Some(0), + // unlimited: Some(false), + ..nomad_client::models::RestartPolicy::new() + })); + + // MARK: Cleanup task + + // Add cleanup task + let tasks: &mut Vec = unwrap!(task_group.tasks.as_mut()); + tasks.push({ + Task { + name: Some(RUN_CLEANUP_TASK_NAME.into()), + lifecycle: Some(Box::new(TaskLifecycle { + hook: Some("poststop".into()), + sidecar: Some(false), + })), + driver: Some("docker".into()), + config: Some({ + let mut config = HashMap::new(); + + config.insert("image".into(), json!("python:3.10.7-alpine3.16")); + config.insert( + "args".into(), + json!([ + "/bin/sh", + "-c", + "apk add --no-cache ca-certificates && python3 /local/cleanup.py" + ]), + ); + + config + }), + templates: Some(vec![Template { + dest_path: Some("local/cleanup.py".into()), + embedded_tmpl: Some(formatdoc!( + r#" + import ssl + import urllib.request, json, os, mimetypes, sys + + BEARER = '{{{{env "NOMAD_META_JOB_RUN_TOKEN"}}}}' + + ctx = ssl.create_default_context() + + def eprint(*args, **kwargs): + print(*args, file=sys.stderr, **kwargs) + + def req(method, url, data = None, headers = {{}}): + request = urllib.request.Request( + url=url, + data=data, + method=method, + headers=headers + ) + + try: + res = urllib.request.urlopen(request, context=ctx) + assert res.status == 200, f"Received non-200 status: {{res.status}}" + return res + except urllib.error.HTTPError as err: + eprint(f"HTTP Error ({{err.code}} {{err.reason}}):\n\nBODY:\n{{err.read().decode()}}\n\nHEADERS:\n{{err.headers}}") + + raise err + + print(f'\n> Cleaning up job') + + res_json = None + with req('POST', f'{origin_api}/job/runs/cleanup', + data = json.dumps({{}}).encode(), + headers = {{ + 'Authorization': f'Bearer {{BEARER}}', + 'Content-Type': 'application/json' + }} + ) as res: + res_json = json.load(res) + + + print('\n> Finished') + "#, + origin_api = util::env::origin_api(), + )), + ..Template::new() + }]), + resources: Some(Box::new(Resources { + CPU: Some(TASK_CLEANUP_CPU), + memory_mb: Some(TASK_CLEANUP_MEMORY), + ..Resources::new() + })), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(2), + disabled: Some(false), + })), + ..Task::new() + } + }); + + // Derive jobspec hash + // + // We serialize the JSON to a canonical string then take a SHA hash of the output. 
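+	//
+	// `cjson` emits a canonical encoding with a stable key order, so two
+	// structurally identical specs serialize (and therefore hash) to the same
+	// value. Together with the placeholder job ID substituted above, this keeps
+	// the derived `job-{hash}:{region}` ID deterministic across submissions.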
+ let job_cjson_str = match cjson::to_string(&job) { + Ok(x) => x, + Err(err) => { + tracing::error!(?err, "cjson serialization failed"); + bail!("cjson serialization failed") + } + }; + let job_hash = Sha256::digest(job_cjson_str.as_bytes()); + let job_hash_str = hex::encode(job_hash); + + // Generate new job ID + let job_id = format!( + "job-{hash}:{region}", + hash = &job_hash_str[0..12], + region = region.name_id + ); + { + let job_id: &str = &job_id; + let job: &mut nomad_client::models::Job = &mut job; + job.ID = Some(job_id.into()); + job.name = Some(job_id.into()); + }; + + // Submit the job + tracing::info!("submitting job"); + + // dbg!( + // // &NEW_NOMAD_CONFIG, + // &job_id, + // nomad_client::models::JobRegisterRequest { + // job: Some(Box::new(job.clone())), + // ..nomad_client::models::JobRegisterRequest::new() + // }, + // Some(®ion.nomad_region), + // ); + // panic!(); + + // pub struct Configuration { + // pub base_path: String, + // pub user_agent: Option, + // pub client: reqwest::Client, + // pub basic_auth: Option, + // pub oauth_access_token: Option, + // pub bearer_access_token: Option, + // pub api_key: Option, + // // TODO: take an oauth2 token source, similar to the go one + // } + + // dbg!( + // &NEW_NOMAD_CONFIG.base_path, + // &NEW_NOMAD_CONFIG.user_agent, + // &NEW_NOMAD_CONFIG.client, + // &NEW_NOMAD_CONFIG.basic_auth, + // &NEW_NOMAD_CONFIG.oauth_access_token, + // &NEW_NOMAD_CONFIG.bearer_access_token, + // &NEW_NOMAD_CONFIG.api_key, + // ); + // panic!(); + + let a = nomad_client::apis::jobs_api::post_job( + &NEW_NOMAD_CONFIG, + &job_id, + nomad_client::models::JobRegisterRequest { + job: Some(Box::new(job)), + ..nomad_client::models::JobRegisterRequest::new() + }, + Some(®ion.nomad_region), + None, + None, + None, + ) + .await?; + dbg!(a); + + // let build_res = op!([ctx] build_get { + // build_ids: vec![build_id.into()], + // }) + // .await?; + // let build = build_res.builds.first(); + // let build = unwrap_ref!(build); + // let build_kind = unwrap!(backend::build::BuildKind::from_i32(build.kind)); + // let build_compression = unwrap!(backend::build::BuildCompression::from_i32( + // build.compression + // )); + let upload_id_proto = unwrap!(build.upload_id); + + let upload_res = op!([ctx] upload_get { + upload_ids: vec![upload_id_proto], + }) + .await?; + let upload = unwrap!(upload_res.uploads.first()); + + // Get provider + let proto_provider = unwrap!( + backend::upload::Provider::from_i32(upload.provider), + "invalid upload provider" + ); + let provider = match proto_provider { + backend::upload::Provider::Minio => s3_util::Provider::Minio, + backend::upload::Provider::Backblaze => s3_util::Provider::Backblaze, + backend::upload::Provider::Aws => s3_util::Provider::Aws, + }; + + let file_name = util_build::file_name(build_kind, build_compression); + + let mm_lobby_delivery_method = unwrap!( + backend::cluster::BuildDeliveryMethod::from_i32(region.build_delivery_method), + "invalid datacenter build delivery method" + ); + let image_artifact_url = match mm_lobby_delivery_method { + backend::cluster::BuildDeliveryMethod::S3Direct => { + tracing::info!("using s3 direct delivery"); + + let bucket = "bucket-build"; + + // Build client + let s3_client = + s3_util::Client::from_env_opt(bucket, provider, s3_util::EndpointKind::External) + .await?; + + let upload_id = unwrap_ref!(upload.upload_id).as_uuid(); + let presigned_req = s3_client + .get_object() + .bucket(s3_client.bucket()) + .key(format!("{upload_id}/{file_name}")) + .presigned( + 
s3_util::aws_sdk_s3::presigning::config::PresigningConfig::builder() + .expires_in(std::time::Duration::from_secs(15 * 60)) + .build()?, + ) + .await?; + + let addr = presigned_req.uri().clone(); + + let addr_str = addr.to_string(); + tracing::info!(addr = %addr_str, "resolved artifact s3 presigned request"); + + addr_str + } + backend::cluster::BuildDeliveryMethod::TrafficServer => { + tracing::info!("using traffic server delivery"); + + let region_id = unwrap_ref!(region.region_id).as_uuid(); + + // Hash build so that the ATS server that we download the build from is always the same one. This + // improves cache hit rates and reduces download times. + let build_id = unwrap_ref!(build.build_id).as_uuid(); + let mut hasher = DefaultHasher::new(); + hasher.write(build_id.as_bytes()); + let hash = hasher.finish() as i64; + + // NOTE: The algorithm for choosing the vlan_ip from the hash should match the one in + // prewarm_ats.rs @ prewarm_ats_cache + // Get vlan ip from build id hash for consistent routing + let (ats_vlan_ip,) = sql_fetch_one!( + [ctx, (IpAddr,)] + " + WITH sel AS ( + -- Select candidate vlan ips + SELECT + vlan_ip + FROM db_cluster.servers + WHERE + datacenter_id = $1 AND + pool_type = $2 AND + vlan_ip IS NOT NULL AND + install_complete_ts IS NOT NULL AND + drain_ts IS NULL AND + cloud_destroy_ts IS NULL + ) + SELECT vlan_ip + FROM sel + -- Use mod to make sure the hash stays within bounds + OFFSET abs($3 % (SELECT COUNT(*) from sel)) + LIMIT 1 + ", + // NOTE: region_id is just the old name for datacenter_id + ®ion_id, + backend::cluster::PoolType::Ats as i64, + hash, + ) + .await?; + + let upload_id = unwrap_ref!(upload.upload_id).as_uuid(); + let addr = format!( + "http://{vlan_ip}:8080/s3-cache/{provider}/{namespace}-bucket-build/{upload_id}/{file_name}", + vlan_ip = ats_vlan_ip, + provider = heck::KebabCase::to_kebab_case(provider.as_str()), + namespace = util::env::namespace(), + upload_id = upload_id, + ); + + tracing::info!(%addr, "resolved artifact s3 url"); + + addr + } + }; + + // Build client + let s3_client = s3_util::Client::from_env_opt( + "bucket-infra-artifacts", + s3_util::Provider::default()?, + s3_util::EndpointKind::External, + ) + .await?; + let presigned_req = s3_client + .get_object() + .bucket(s3_client.bucket()) + .key("job-runner/job-runner") + .presigned( + s3_util::aws_sdk_s3::presigning::config::PresigningConfig::builder() + .expires_in(std::time::Duration::from_secs(15 * 60)) + .build()?, + ) + .await?; + + let addr = presigned_req.uri().clone(); + + let addr_str = addr.to_string(); + tracing::info!(addr = %addr_str, "resolved job runner presigned request"); + + let job_runner_binary_url = addr_str; + + // MARK: Parameters + + let parameters: Vec = vec![ + backend::job::Parameter { + key: "job_runner_binary_url".into(), + value: job_runner_binary_url, + }, + backend::job::Parameter { + key: "vector_socket_addr".into(), + value: "127.0.0.1:5021".to_string(), + }, + backend::job::Parameter { + key: "image_artifact_url".into(), + value: image_artifact_url.to_string(), + }, + backend::job::Parameter { + key: "root_user_enabled".into(), + // TODO make table dynamic host, make reference so that we can find + // other locations + value: "0".into(), + }, + ] + .into_iter() + // .chain(ctx.parameters.clone()) + // .chain(port_parameters) + .collect(); + + let run_id = Uuid::new_v4(); + let job_params: Vec<(String, String)> = vec![ + ("job_run_id".into(), run_id.to_string()), + // ("job_run_token".into(), job_run_token), + ]; + + let dispatch_res = 
nomad_client::apis::jobs_api::post_job_dispatch( + &NEW_NOMAD_CONFIG, + &job_id, + nomad_client::models::JobDispatchRequest { + job_id: Some(job_id.to_string()), + payload: None, + meta: Some( + parameters + .iter() + .map(|p| (p.key.clone(), p.value.clone())) + .chain(job_params.into_iter()) + .collect::>(), + ), + }, + Some(®ion.nomad_region), + None, + None, + None, + ) + .await; + let a: GlobalResult> = match dispatch_res { + Ok(dispatch_res) => { + // We will use the dispatched job ID to identify this allocation for the future. We can't use + // eval ID, since that changes if we mutate the allocation (i.e. try to stop it). + let nomad_dispatched_job_id = unwrap_ref!(dispatch_res.dispatched_job_id); + Ok(Some(nomad_dispatched_job_id.clone())) + } + Err(err) => { + tracing::error!(?err, "failed to dispatch job"); + panic!(); + Ok(None) + } + }; + + // Ok(job_id); + + // msg!([ctx] job_run::msg::create(run_id) { + // run_id: Some(run_id.into()), + // region_id: Some(region_id.into()), + + // job_spec_json: job_spec_json, + // proxied_ports: proxied_ports, + // ..Default::default() + // }) + // .await?; + Ok(dynamic_servers::server_create::Response { server: Some(backend::dynamic_servers::Server { server_id: Some(server_id.into()), @@ -191,3 +1308,31 @@ pub async fn handle( }), }) } + +/// Determines if a Nomad job is dispatched from our run. +/// +/// We use this when monitoring Nomad in order to determine which events to +/// pay attention to. +pub fn is_nomad_job_run(job_id: &str) -> bool { + job_id.starts_with("job-") && job_id.contains("/dispatch-") +} + +// Timeout from when `stop_job` is called and the kill signal is sent +pub const JOB_STOP_TIMEOUT: Duration = Duration::from_secs(30); + +pub const TASK_CLEANUP_CPU: i32 = 50; + +// Query Prometheus with: +// +// ``` +// max(nomad_client_allocs_memory_max_usage{ns="prod",exported_job=~"job-.*",task="run-cleanup"}) / 1000 / 1000 +// ``` +// +// 13.5 MB baseline, 29 MB highest peak +pub const TASK_CLEANUP_MEMORY: i32 = 32; + +pub const RUN_MAIN_TASK_NAME: &str = "main"; +pub const RUN_CLEANUP_TASK_NAME: &str = "run-cleanup"; + +// dispatch, need alloc, nomad monitor stuff, lots of stuff here, means that +// jobs can't be destroyed, maybe by job id? 
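For reference, a minimal self-contained sketch of how `is_nomad_job_run` separates dispatched children from the parent parameterized job registered above. The job IDs and region name below are illustrative only; the `<parent>/dispatch-<suffix>` shape is how Nomad names dispatched instances of a parameterized job.

```rust
/// Same predicate as above: a "run" is a dispatched child of a parameterized
/// `job-{hash}:{region}` job, and Nomad names those children
/// `<parent id>/dispatch-<suffix>`.
fn is_nomad_job_run(job_id: &str) -> bool {
	job_id.starts_with("job-") && job_id.contains("/dispatch-")
}

fn main() {
	// Parent parameterized job: registered once per unique spec, never runs itself.
	assert!(!is_nomad_job_run("job-1b2c3d4e5f6a:lnd-atl"));
	// Dispatched child: this is what the Nomad monitor should react to.
	assert!(is_nomad_job_run("job-1b2c3d4e5f6a:lnd-atl/dispatch-1717171717-a1b2c3"));
	// Unrelated job on the same cluster: ignored.
	assert!(!is_nomad_job_run("vector-sidecar"));
	println!("ok");
}
```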
diff --git a/svc/pkg/ds/ops/server-create/src/nomad_job.rs b/svc/pkg/ds/ops/server-create/src/nomad_job.rs new file mode 100644 index 000000000..c4f1fa40c --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/nomad_job.rs @@ -0,0 +1,612 @@ +use std::{collections::HashMap, convert::TryInto}; + +use chirp_worker::prelude::*; +use proto::backend::{self, matchmaker::lobby_runtime::NetworkMode as LobbyRuntimeNetworkMode}; +use regex::Regex; +use serde_json::json; + +use crate::{oci_config, util_job}; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum TransportProtocol { + Tcp, + Udp, +} + +impl From for TransportProtocol { + fn from(proxy_protocol: ProxyProtocol) -> Self { + match proxy_protocol { + ProxyProtocol::Http + | ProxyProtocol::Https + | ProxyProtocol::Tcp + | ProxyProtocol::TcpTls => Self::Tcp, + ProxyProtocol::Udp => Self::Udp, + } + } +} + +impl TransportProtocol { + pub fn as_cni_protocol(&self) -> &'static str { + match self { + Self::Tcp => "tcp", + Self::Udp => "udp", + } + } +} + +#[derive(Clone)] +pub enum ProxyProtocol { + Http, + Https, + Tcp, + TcpTls, + Udp, +} + +impl From for ProxyProtocol { + fn from(protocol: backend::dynamic_servers::GameGuardProtocol) -> Self { + match protocol { + backend::dynamic_servers::GameGuardProtocol::Http => Self::Http, + backend::dynamic_servers::GameGuardProtocol::Https => Self::Https, + backend::dynamic_servers::GameGuardProtocol::Tcp => Self::Tcp, + backend::dynamic_servers::GameGuardProtocol::TcpTls => Self::TcpTls, + backend::dynamic_servers::GameGuardProtocol::Udp => Self::Udp, + } + } +} + +/// Helper structure for parsing all of the runtime's ports before building the +/// config. +#[derive(Clone)] +pub struct DecodedPort { + pub label: String, + pub nomad_port_label: String, + pub target: u16, + pub proxy_protocol: ProxyProtocol, +} + +pub fn gen_lobby_docker_job( + runtime: &backend::matchmaker::lobby_runtime::Docker, + _image_tag: &str, + tier: &backend::region::Tier, + lobby_config: bool, + lobby_tags: bool, + build_kind: backend::build::BuildKind, + build_compression: backend::build::BuildCompression, +) -> GlobalResult { + // IMPORTANT: This job spec must be deterministic. Do not pass in parameters + // that change with every run, such as the lobby ID. Ensure the + // `reuse_job_id` test passes when changing this function. + use nomad_client::models::*; + + // runc-compatible resources + let cpu = tier.rivet_cores_numerator as u64 * 1_000 / tier.rivet_cores_denominator as u64; // Millicore (1/1000 of a core) + let memory = tier.memory * (1024 * 1024); // bytes + let memory_max = tier.memory_max * (1024 * 1024); // bytes + + // Nomad-compatible resources + let resources = Resources { + // TODO: Configure this per-provider + // Nomad configures CPU based on MHz, not millicores. We havel to calculate the CPU share + // by knowing how many MHz are on the client. + CPU: if tier.rivet_cores_numerator < tier.rivet_cores_denominator { + Some((tier.cpu - util_job::TASK_CLEANUP_CPU as u64).try_into()?) + } else { + None + }, + cores: if tier.rivet_cores_numerator >= tier.rivet_cores_denominator { + Some((tier.rivet_cores_numerator / tier.rivet_cores_denominator) as i32) + } else { + None + }, + memory_mb: Some( + (TryInto::::try_into(memory)? / (1024 * 1024) + - util_job::TASK_CLEANUP_MEMORY as i64) + .try_into()?, + ), + // Allow oversubscribing memory by 50% of the reserved + // memory if using less than the node's total memory + memory_max_mb: Some( + (TryInto::::try_into(memory_max)? 
/ (1024 * 1024) + - util_job::TASK_CLEANUP_MEMORY as i64) + .try_into()?, + ), + disk_mb: Some(tier.disk as i32), // TODO: Is this deprecated? + ..Resources::new() + }; + + let network_mode = unwrap!(LobbyRuntimeNetworkMode::from_i32(runtime.network_mode)); + + // Read ports + let decoded_ports = runtime + .ports + .iter() + .map(|port| { + let target = unwrap!(port.target_port) as u16; + + // TODO + // GlobalResult::Ok(DecodedPort { + // label: port.label.clone(), + // nomad_port_label: util_mm::format_nomad_port_label(&port.label), + // target, + // proxy_protocol: unwrap!(ProxyProtocol::from_i32(port.proxy_protocol)), + // }) + GlobalResult::Ok(DecodedPort { + label: port.label.clone(), + nomad_port_label: String::new(), + target, + proxy_protocol: ProxyProtocol::Http, + }) + }) + .collect::>>()?; + + // The container will set up port forwarding manually from the Nomad-defined ports on the host + // to the CNI container + let dynamic_ports = decoded_ports + .iter() + .map(|port| Port { + label: Some(port.nomad_port_label.clone()), + ..Port::new() + }) + .collect::>(); + + // Port mappings to pass to the container. Only used in bridge networking. + let cni_port_mappings = decoded_ports + .iter() + .map(|port| { + json!({ + "HostPort": template_env_var_int(&nomad_host_port_env_var(&port.nomad_port_label)), + "ContainerPort": port.target, + // TODO + // "Protocol": TransportProtocol::from(port.proxy_protocol).as_cni_protocol(), + "Protocol": TransportProtocol::Udp.as_cni_protocol(), + }) + }) + .collect::>(); + + // Also see util_mm:consts::DEFAULT_ENV_KEYS + let mut env = runtime + .env_vars + .iter() + .map(|v| (v.key.clone(), escape_go_template(&v.value))) + .chain(if lobby_config { + Some(( + "RIVET_LOBBY_CONFIG".to_string(), + template_env_var("NOMAD_META_LOBBY_CONFIG"), + )) + } else { + None + }) + .chain(if lobby_tags { + Some(( + "RIVET_LOBBY_TAGS".to_string(), + template_env_var("NOMAD_META_LOBBY_TAGS"), + )) + } else { + None + }) + .chain([( + "RIVET_API_ENDPOINT".to_string(), + util::env::origin_api().to_string(), + )]) + .chain( + // DEPRECATED: + [ + ("RIVET_CHAT_API_URL", "chat"), + ("RIVET_GROUP_API_URL", "group"), + ("RIVET_IDENTITY_API_URL", "identity"), + ("RIVET_KV_API_URL", "kv"), + ("RIVET_MATCHMAKER_API_URL", "matchmaker"), + ] + .iter() + .filter(|_| util::env::support_deprecated_subdomains()) + .map(|(env, service)| { + ( + env.to_string(), + util::env::origin_api().replace("://", &format!("://{}.", service)), + ) + }), + ) + .chain( + [ + ( + "RIVET_NAMESPACE_NAME", + template_env_var("NOMAD_META_NAMESPACE_NAME"), + ), + ( + "RIVET_NAMESPACE_ID", + template_env_var("NOMAD_META_NAMESPACE_ID"), + ), + ( + "RIVET_VERSION_NAME", + template_env_var("NOMAD_META_VERSION_NAME"), + ), + ( + "RIVET_VERSION_ID", + template_env_var("NOMAD_META_VERSION_ID"), + ), + ( + "RIVET_GAME_MODE_ID", + template_env_var("NOMAD_META_LOBBY_GROUP_ID"), + ), + ( + "RIVET_GAME_MODE_NAME", + template_env_var("NOMAD_META_LOBBY_GROUP_NAME"), + ), + ("RIVET_LOBBY_ID", template_env_var("NOMAD_META_LOBBY_ID")), + ("RIVET_TOKEN", template_env_var("NOMAD_META_LOBBY_TOKEN")), + ("RIVET_REGION_ID", template_env_var("NOMAD_META_REGION_ID")), + ( + "RIVET_REGION_NAME", + template_env_var("NOMAD_META_REGION_NAME"), + ), + ( + "RIVET_MAX_PLAYERS_NORMAL", + template_env_var("NOMAD_META_MAX_PLAYERS_NORMAL"), + ), + ( + "RIVET_MAX_PLAYERS_DIRECT", + template_env_var("NOMAD_META_MAX_PLAYERS_DIRECT"), + ), + ( + "RIVET_MAX_PLAYERS_PARTY", + template_env_var("NOMAD_META_MAX_PLAYERS_PARTY"), + ), + // CPU in 
millicores + // + // < 1000 is for fractional CPU + // > 1000 is for whole CPU, will always be 1000 increments + ("RIVET_CPU", cpu.to_string()), + // Memory in bytes + ("RIVET_MEMORY", memory.to_string()), + // Memory in bytes for oversubscription + ("RIVET_MEMORY_OVERSUBSCRIBE", memory_max.to_string()), + // DEPRECATED: + ( + "RIVET_LOBBY_TOKEN", + template_env_var("NOMAD_META_LOBBY_TOKEN"), + ), + ( + "RIVET_LOBBY_GROUP_ID", + template_env_var("NOMAD_META_LOBBY_GROUP_ID"), + ), + ( + "RIVET_LOBBY_GROUP_NAME", + template_env_var("NOMAD_META_LOBBY_GROUP_NAME"), + ), + ] + .iter() + .map(|(k, v)| (k.to_string(), v.to_string())), + ) + // Ports + .chain(decoded_ports.iter().map(|port| { + let port_value = match network_mode { + // CNI will handle mapping the host port to the container port + LobbyRuntimeNetworkMode::Bridge => port.target.to_string(), + // The container needs to listen on the correct port + LobbyRuntimeNetworkMode::Host => { + template_env_var(&nomad_host_port_env_var(&port.nomad_port_label)) + } + }; + + // Port with the kebab case port key. Included for backward compatabiilty & for + // less confusion. + (format!("PORT_{}", port.label.replace('-', "_")), port_value) + })) + .map(|(k, v)| format!("{k}={v}")) + .collect::>(); + env.sort(); + + let services = decoded_ports + .iter() + .map(|port| { + let service_name = format!("${{NOMAD_META_LOBBY_ID}}-{}", port.label); + GlobalResult::Ok(Some(Service { + provider: Some("nomad".into()), + name: Some(service_name), + tags: Some(vec!["game".into()]), + port_label: Some(port.nomad_port_label.clone()), + // checks: if TransportProtocol::from(port.proxy_protocol) + // == TransportProtocol::Tcp + // { + // Some(vec![ServiceCheck { + // name: Some(format!("{}-probe", port.label)), + // port_label: Some(port.nomad_port_label.clone()), + // _type: Some("tcp".into()), + // interval: Some(30_000_000_000), + // timeout: Some(2_000_000_000), + // ..ServiceCheck::new() + // }]) + // } else { + // None + // }, + ..Service::new() + })) + }) + .filter_map(|x| x.transpose()) + .collect::>>()?; + + // Generate the command to download and decompress the file + let mut download_cmd = r#"curl -Lf "$NOMAD_META_IMAGE_ARTIFACT_URL""#.to_string(); + match build_compression { + backend::build::BuildCompression::None => {} + backend::build::BuildCompression::Lz4 => { + download_cmd.push_str(" | lz4 -d -"); + } + } + + Ok(Job { + _type: Some("batch".into()), + constraints: Some(vec![Constraint { + l_target: Some("${node.class}".into()), + r_target: Some("job".into()), + operand: Some("=".into()), + }]), + parameterized_job: Some(Box::new(ParameterizedJobConfig { + payload: Some("forbidden".into()), + meta_required: Some(vec![ + "job_runner_binary_url".into(), + "vector_socket_addr".into(), + "image_artifact_url".into(), + "namespace_id".into(), + "namespace_name".into(), + "version_id".into(), + "version_name".into(), + "lobby_group_id".into(), + "lobby_group_name".into(), + "lobby_id".into(), + "lobby_token".into(), + "lobby_config".into(), + "lobby_tags".into(), + "region_id".into(), + "region_name".into(), + "max_players_normal".into(), + "max_players_direct".into(), + "max_players_party".into(), + "root_user_enabled".into(), + ]), + meta_optional: Some(vec!["rivet_test_id".into()]), + })), + task_groups: Some(vec![TaskGroup { + name: Some(util_job::RUN_MAIN_TASK_NAME.into()), + constraints: None, // TODO: Use parameter meta to specify the hardware + affinities: None, // TODO: + // Allows for jobs to keep running and receiving players in the + // event 
of a disconnection from the Nomad server. + max_client_disconnect: Some(5 * 60 * 1_000_000_000), + restart_policy: Some(Box::new(RestartPolicy { + attempts: Some(0), + mode: Some("fail".into()), + ..RestartPolicy::new() + })), + reschedule_policy: Some(Box::new(ReschedulePolicy { + attempts: Some(0), + unlimited: Some(false), + ..ReschedulePolicy::new() + })), + networks: Some(vec![NetworkResource { + // The setup.sh script will set up a CNI network if using bridge networking + mode: Some("host".into()), + dynamic_ports: Some(dynamic_ports), + ..NetworkResource::new() + }]), + services: Some(services), + // Configure ephemeral disk for logs + ephemeral_disk: Some(Box::new(EphemeralDisk { + size_mb: Some(tier.disk as i32), + ..EphemeralDisk::new() + })), + tasks: Some(vec![ + Task { + name: Some("runc-setup".into()), + lifecycle: Some(Box::new(TaskLifecycle { + hook: Some("prestart".into()), + sidecar: Some(false), + })), + driver: Some("raw_exec".into()), + config: Some({ + let mut x = HashMap::new(); + x.insert("command".into(), json!("${NOMAD_TASK_DIR}/setup.sh")); + x + }), + templates: Some(vec![ + Template { + embedded_tmpl: Some(include_str!("./scripts/setup.sh").replace( + "__HOST_NETWORK__", + match network_mode { + LobbyRuntimeNetworkMode::Bridge => "false", + LobbyRuntimeNetworkMode::Host => "true", + }, + )), + dest_path: Some("${NOMAD_TASK_DIR}/setup.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some( + include_str!("./scripts/setup_job_runner.sh").into(), + ), + dest_path: Some("${NOMAD_TASK_DIR}/setup_job_runner.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some( + include_str!("./scripts/setup_oci_bundle.sh") + .replace("__DOWNLOAD_CMD__", &download_cmd) + .replace( + "__BUILD_KIND__", + match build_kind { + backend::build::BuildKind::DockerImage => { + "docker-image" + } + backend::build::BuildKind::OciBundle => "oci-bundle", + }, + ), + ), + dest_path: Some("${NOMAD_TASK_DIR}/setup_oci_bundle.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some( + include_str!("./scripts/setup_cni_network.sh").into(), + ), + dest_path: Some("${NOMAD_TASK_DIR}/setup_cni_network.sh".into()), + perms: Some("744".into()), + ..Template::new() + }, + Template { + embedded_tmpl: Some(gen_oci_bundle_config( + cpu, memory, memory_max, env, + )?), + dest_path: Some( + "${NOMAD_ALLOC_DIR}/oci-bundle-config.base.json".into(), + ), + ..Template::new() + }, + Template { + embedded_tmpl: Some(inject_consul_env_template( + &serde_json::to_string(&cni_port_mappings)?, + )?), + dest_path: Some("${NOMAD_ALLOC_DIR}/cni-port-mappings.json".into()), + ..Template::new() + }, + ]), + resources: Some(Box::new(Resources { + // TODO + // CPU: Some(util_mm::RUNC_SETUP_CPU), + // memory_mb: Some(util_mm::RUNC_SETUP_MEMORY), + CPU: None, + memory_mb: None, + ..Resources::new() + })), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(2), + disabled: None, + })), + ..Task::new() + }, + Task { + name: Some(util_job::RUN_MAIN_TASK_NAME.into()), + driver: Some("raw_exec".into()), + config: Some({ + let mut x = HashMap::new(); + // This is downloaded in setup_job_runner.sh + x.insert("command".into(), json!("${NOMAD_ALLOC_DIR}/job-runner")); + x + }), + resources: Some(Box::new(resources.clone())), + // Intentionally high timeout. Killing jobs is handled manually with signals. 
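+					// 86_400 s * 1_000_000_000 ns/s = 24 h; Nomad expresses these durations in nanoseconds.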
+ kill_timeout: Some(86400 * 1_000_000_000), + kill_signal: Some("SIGTERM".into()), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(4), + disabled: None, + })), + ..Task::new() + }, + Task { + name: Some("runc-cleanup".into()), + lifecycle: Some(Box::new(TaskLifecycle { + hook: Some("poststop".into()), + sidecar: Some(false), + })), + driver: Some("raw_exec".into()), + config: Some({ + let mut x = HashMap::new(); + x.insert("command".into(), json!("${NOMAD_TASK_DIR}/cleanup.sh")); + x + }), + templates: Some(vec![Template { + embedded_tmpl: Some(include_str!("./scripts/cleanup.sh").into()), + dest_path: Some("${NOMAD_TASK_DIR}/cleanup.sh".into()), + perms: Some("744".into()), + ..Template::new() + }]), + resources: Some(Box::new(Resources { + // TODO + // CPU: Some(util_mm::RUNC_CLEANUP_CPU), + // memory_mb: Some(util_mm::RUNC_CLEANUP_MEMORY), + CPU: None, + memory_mb: None, + ..Resources::new() + })), + log_config: Some(Box::new(LogConfig { + max_files: Some(4), + max_file_size_mb: Some(2), + disabled: None, + })), + ..Task::new() + }, + ]), + ..TaskGroup::new() + }]), + ..Job::new() + }) +} + +/// Build base config used to generate the OCI bundle's config.json. +pub fn gen_oci_bundle_config( + cpu: u64, + memory: u64, + memory_max: u64, + env: Vec, +) -> GlobalResult { + let config_str = serde_json::to_string(&oci_config::config(cpu, memory, memory_max, env))?; + + // Escape Go template syntax + let config_str = inject_consul_env_template(&config_str)?; + + Ok(config_str) +} + +/// Makes user-generated string safe to inject in to a Go template. +pub fn escape_go_template(input: &str) -> String { + let re = Regex::new(r"(\{\{|\}\})").unwrap(); + re.replace_all(input, r#"{{"$1"}}"#) + .to_string() + // TODO: This removes exploits to inject env vars (see below) + // SVC-3307 + .replace("###", "") +} + +/// Generates a template string that we can substitute with the real environment variable +/// +/// This must be safe to inject in to a JSON string so it can be substituted after rendering the +/// JSON object. Intended to be used from within JSON. +/// +/// See inject_consul_env_template. +pub fn template_env_var(name: &str) -> String { + format!("###ENV:{name}###") +} + +/// Like template_env_var, but removes surrounding quotes. +pub fn template_env_var_int(name: &str) -> String { + format!("###ENV_INT:{name}###") +} + +/// Substitutes env vars generated from template_env_var with Consul template syntax. +/// +/// Intended to be used from within JSON. +pub fn inject_consul_env_template(input: &str) -> GlobalResult { + // Regular strings + let re = Regex::new(r"###ENV:(\w+)###")?; + let output = re + .replace_all(input, r#"{{ env "$1" | regexReplaceAll "\"" "\\\"" }}"#) + .to_string(); + + // Integers + let re = Regex::new(r####""###ENV_INT:(\w+)###""####)?; + let output = re + .replace_all(&output, r#"{{ env "$1" | regexReplaceAll "\"" "\\\"" }}"#) + .to_string(); + + Ok(output) +} + +pub fn nomad_host_port_env_var(port_label: &str) -> String { + format!("NOMAD_HOST_PORT_{}", port_label.replace('-', "_")) +} diff --git a/svc/pkg/ds/ops/server-create/src/oci_config.rs b/svc/pkg/ds/ops/server-create/src/oci_config.rs new file mode 100644 index 000000000..f03105fd1 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/oci_config.rs @@ -0,0 +1,316 @@ +use chirp_worker::prelude::*; +use serde_json::json; + +// CPU period in microseconds. 
+// +// https://www.kernel.org/doc/Documentation/scheduler/sched-bwc.txt +const CPU_PERIOD: u64 = 100000; + +/// Generates base config.json for an OCI bundle. +pub fn config(cpu: u64, memory: u64, memory_max: u64, env: Vec<String>) -> serde_json::Value { + // CPU shares is a relative weight. It doesn't matter what unit we pass here as + // long as the ratios between the containers are correct. + // + // Corresponds to cpu.weight in cgroups. Must be [1, 10_000] + // + // We divide by 10 in order to make sure the CPU shares are within bounds. `cpu` is measured in + // millicores, so 1_000 = 1 core. For a range of 32d1 (32_000) to 1d16 (62), we divide by 10 + // to make the range 3_200 to 6. + let mut cpu_shares = cpu / 10; + if cpu_shares > 10_000 { + cpu_shares = 10_000; + tracing::warn!(?cpu_shares, "cpu_shares > 10_000"); + } else if cpu_shares < 1 { + cpu_shares = 1; + tracing::warn!(?cpu_shares, "cpu_shares < 1"); + } + + // This is a modified version of the default config.json generated by containerd. + // + // Some values will be overridden at runtime by the values in the OCI bundle's config.json. + // + // Default Docker spec: https://github.com/moby/moby/blob/777e9f271095685543f30df0ff7a12397676f938/oci/defaults.go#L49 + // + // Generate config.json with containerd: + // ctr run --rm -t --seccomp docker.io/library/debian:latest debian-container-id /bin/bash + // cat /run/containerd/io.containerd.runtime.v2.task/default/debian-container-id/config.json | jq + json!({ + "ociVersion": "1.0.2-dev", + "process": { + // user, args, and cwd will be injected at runtime + + // Will be merged with the OCI bundle's env + // + // These will take priority over the OCI bundle's env + "env": env, + + "terminal": false, + "capabilities": { + "bounding": capabilities(), + "effective": capabilities(), + "permitted": capabilities() + }, + "rlimits": [ + { + "type": "RLIMIT_NOFILE", + "hard": 1024, + "soft": 1024 + } + ], + "noNewPrivileges": true + + // TODO: oomScoreAdj + // TODO: scheduler + // TODO: iopriority + // TODO: rlimit? + }, + "root": { + "path": "rootfs", + // This means we can't reuse the oci-bundle since the rootfs is writable. + "readonly": false + }, + "mounts": mounts(), + "linux": { + "resources": { + "devices": linux_resources_devices(), + "cpu": { + "shares": cpu_shares, + // If `quota` is greater than `period`, it is allowed to use multiple cores.
+ // + // Read more: https://access.redhat.com/documentation/en-us/red_hat_enterprise_linux/6/html/resource_management_guide/sec-cpu + // "quota": CPU_PERIOD * cpu / 1_000, + // "period": CPU_PERIOD, + // Use the env var for the CPU since Nomad handles assigning CPUs to each task + // "cpus": if cpu >= 1_000 { + // Some(template_env_var("NOMAD_CPU_CORES")) + // } else { + // None + // } + }, + // Docker: https://github.com/moby/moby/blob/777e9f271095685543f30df0ff7a12397676f938/daemon/daemon_unix.go#L75 + "memory": { + "reservation": memory, + "limit": memory_max, + }, + + // TODO: network + // TODO: pids + // TODO: hugepageLimits + // TODO: blockIO + }, + // "cgroupsPath": "/default/debian-container-id", + "namespaces": [ + { + "type": "pid" + }, + { + "type": "ipc" + }, + { + "type": "uts" + }, + { + "type": "mount" + } + ], + "maskedPaths": [ + "/proc/acpi", + "/proc/asound", + "/proc/kcore", + "/proc/keys", + "/proc/latency_stats", + "/proc/timer_list", + "/proc/timer_stats", + "/proc/sched_debug", + "/sys/firmware", + "/proc/scsi" + ], + "readonlyPaths": [ + "/proc/bus", + "/proc/fs", + "/proc/irq", + "/proc/sys", + "/proc/sysrq-trigger" + ], + "seccomp": super::seccomp::seccomp() + } + }) +} + +// Default Docker capabilities: https://github.com/moby/moby/blob/777e9f271095685543f30df0ff7a12397676f938/oci/caps/defaults.go#L4 +fn capabilities() -> Vec<&'static str> { + vec![ + "CAP_CHOWN", + "CAP_DAC_OVERRIDE", + "CAP_FSETID", + "CAP_FOWNER", + "CAP_MKNOD", + "CAP_NET_RAW", + "CAP_SETGID", + "CAP_SETUID", + "CAP_SETFCAP", + "CAP_SETPCAP", + "CAP_NET_BIND_SERVICE", + "CAP_SYS_CHROOT", + "CAP_KILL", + "CAP_AUDIT_WRITE", + ] +} + +fn mounts() -> serde_json::Value { + json!([ + { + "destination": "/proc", + "type": "proc", + "source": "proc", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/dev", + "type": "tmpfs", + "source": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + }, + { + "destination": "/dev/pts", + "type": "devpts", + "source": "devpts", + "options": [ + "nosuid", + "noexec", + "newinstance", + "ptmxmode=0666", + "mode=0620", + "gid=5" + ] + }, + { + "destination": "/dev/shm", + "type": "tmpfs", + "source": "shm", + "options": [ + "nosuid", + "noexec", + "nodev", + "mode=1777", + "size=65536k" + ] + }, + { + "destination": "/dev/mqueue", + "type": "mqueue", + "source": "mqueue", + "options": [ + "nosuid", + "noexec", + "nodev" + ] + }, + { + "destination": "/sys", + "type": "sysfs", + "source": "sysfs", + "options": [ + "nosuid", + "noexec", + "nodev", + "ro" + ] + }, + { + "destination": "/run", + "type": "tmpfs", + "source": "tmpfs", + "options": [ + "nosuid", + "strictatime", + "mode=755", + "size=65536k" + ] + } + ]) +} + +fn linux_resources_devices() -> serde_json::Value { + // Devices implicitly contains the following devices: + // null, zero, full, random, urandom, tty, console, and ptmx. + // ptmx is a bind mount or symlink of the container's ptmx. 
+ // See also: https://github.com/opencontainers/runtime-spec/blob/master/config-linux.md#default-devices + json!([ + { + "allow": false, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 3, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 8, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 7, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 5, + "minor": 0, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 5, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 1, + "minor": 9, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 5, + "minor": 1, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 136, + "access": "rwm" + }, + { + "allow": true, + "type": "c", + "major": 5, + "minor": 2, + "access": "rwm" + } + ]) +} diff --git a/svc/pkg/ds/ops/server-create/src/scripts/cleanup.sh b/svc/pkg/ds/ops/server-create/src/scripts/cleanup.sh new file mode 100644 index 000000000..2815a2617 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/scripts/cleanup.sh @@ -0,0 +1,33 @@ +#!/usr/bin/env bash +set -euf -o pipefail + +log() { + local timestamp=$(date +"%Y-%m-%d %H:%M:%S.%3N") + echo "[$timestamp] [cleanup] $@" +} + +# MARK: Generate CNI parameters +export CNI_PATH="/opt/cni/bin" +export NETCONFPATH="/opt/cni/config" +export CNI_IFNAME="eth0" +export CAP_ARGS=$(cat "$NOMAD_ALLOC_DIR/cni-cap-args.json") + +# Every step in this script gracefully fails so everything gets cleaned up no matter what. + +if [ -f "$NOMAD_ALLOC_DIR/container-id" ]; then + CONTAINER_ID=$(cat "$NOMAD_ALLOC_DIR/container-id") + NETWORK_NAME="rivet-job" + NETNS_PATH="/var/run/netns/$CONTAINER_ID" + + log "Deleting container $CONTAINER_ID" + runc delete --force "$CONTAINER_ID" || log 'Failed to delete container' >&2 + + log "Deleting network $NETWORK_NAME from namespace $NETNS_PATH" + cnitool del $NETWORK_NAME $NETNS_PATH || log 'Failed to delete network' >&2 + + log "Deleting network $CONTAINER_ID" + ip netns del "$CONTAINER_ID" || log 'Failed to delete network' >&2 +else + log "No container ID found. Network may have leaked." >&2 +fi + diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup.sh b/svc/pkg/ds/ops/server-create/src/scripts/setup.sh new file mode 100644 index 000000000..02b3f424b --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/scripts/setup.sh @@ -0,0 +1,71 @@ +#!/usr/bin/env bash +set -euf -o pipefail + +log() { + local timestamp=$(date +"%Y-%m-%d %H:%M:%S.%3N") + echo "[$timestamp] [setup] $@" +} + +log "Starting setup" + +log 'Env:' +env +echo + +# Need to prefix with "rivet-" in order to not interfere with any +# auto-generated resources that Nomad creates for the given alloc ID +export CONTAINER_ID="rivet-$NOMAD_ALLOC_ID" +log "CONTAINER_ID: $CONTAINER_ID" +echo -n "$CONTAINER_ID" > "$NOMAD_ALLOC_DIR/container-id" + +# Path to the created namespace +if __HOST_NETWORK__; then + # Host network + export NETNS_PATH="/proc/1/ns/net" +else + # CNI network that will be created + export NETNS_PATH="/var/run/netns/$CONTAINER_ID" +fi + +# Run job runner setup script +"$NOMAD_TASK_DIR/setup_job_runner.sh" & +pid_job_runner=$! + +# Run OCI setup script +"$NOMAD_TASK_DIR/setup_oci_bundle.sh" & +pid_oci=$! + +# Run CNI setup script +if ! __HOST_NETWORK__; then + "$NOMAD_TASK_DIR/setup_cni_network.sh" & + pid_cni=$! 
+fi + +# Wait for job runner setup scripts to finish +wait $pid_job_runner +exit_status_job_runner=$? +if [ $exit_status_job_runner -ne 0 ]; then + log "job-runner setup failed with exit code $exit_status_job_runner" + exit $exit_status_job_runner +fi + +# Wait for OCI setup scripts to finish +wait $pid_oci +exit_status_oci=$? +if [ $exit_status_oci -ne 0 ]; then + log "OCI setup failed with exit code $exit_status_oci" + exit $exit_status_oci +fi + +# Wait for CNI setup script to finish +if ! __HOST_NETWORK__; then + wait $pid_cni + exit_status_cni=$? + if [ $exit_status_cni -ne 0 ]; then + log "CNI setup failed with exit code $exit_status_cni" + exit $exit_status_cni + fi +fi + +log "Setup finished" + diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup_cni_network.sh b/svc/pkg/ds/ops/server-create/src/scripts/setup_cni_network.sh new file mode 100644 index 000000000..8a97a2e34 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/scripts/setup_cni_network.sh @@ -0,0 +1,45 @@ +#!/usr/bin/env bash +set -euf -o pipefail + +log() { + local timestamp=$(date +"%Y-%m-%d %H:%M:%S.%3N") + echo "[$timestamp] [setup_cni_network] $@" +} + +# MARK: Generate CNI parameters +# +# See https://github.com/containernetworking/cni/blob/b62753aa2bfa365c1ceaff6f25774a8047c896b5/cnitool/cnitool.go#L31 + +# See Nomad capabilities equivalent: +# https://github.com/hashicorp/nomad/blob/a8f0f2612ef9d283ed903721f8453a0c0c3f51c5/client/allocrunner/networking_cni.go#L105C46-L105C46 +# +# See supported args: +# https://github.com/containerd/go-cni/blob/6603d5bd8941d7f2026bb5627f6aa4ff434f859a/namespace_opts.go#L22 +jq -c < "$NOMAD_ALLOC_DIR/cni-cap-args.json" +{ + "portMappings": $(cat "$NOMAD_ALLOC_DIR/cni-port-mappings.json") +} +EOF + +export CNI_PATH="/opt/cni/bin" +export NETCONFPATH="/opt/cni/config" +export CNI_IFNAME="eth0" +export CAP_ARGS=$(cat "$NOMAD_ALLOC_DIR/cni-cap-args.json") +log "CAP_ARGS: $CAP_ARGS" + +# MARK: Create network +# +# See Nomad network creation: +# https://github.com/hashicorp/nomad/blob/a8f0f2612ef9d283ed903721f8453a0c0c3f51c5/client/allocrunner/network_manager_linux.go#L119 + +# Name of the network in /opt/cni/config/$NETWORK_NAME.conflist +NETWORK_NAME="rivet-job" + +log "Creating network $CONTAINER_ID" +ip netns add "$CONTAINER_ID" + +log "Adding network $NETWORK_NAME to namespace $NETNS_PATH" +cnitool add "$NETWORK_NAME" "$NETNS_PATH" > $NOMAD_ALLOC_DIR/cni.json + +log "Finished setting up CNI network" + diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup_job_runner.sh b/svc/pkg/ds/ops/server-create/src/scripts/setup_job_runner.sh new file mode 100644 index 000000000..2c2d11671 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/scripts/setup_job_runner.sh @@ -0,0 +1,13 @@ +#!/usr/bin/env bash +set -euf -o pipefail + +log() { + local timestamp=$(date +"%Y-%m-%d %H:%M:%S.%3N") + echo "[$timestamp] [setup_job_runner] $@" +} + +# Download job runner binary +curl -Lf "$NOMAD_META_job_runner_binary_url" -o "${NOMAD_ALLOC_DIR}/job-runner" +chmod +x "${NOMAD_ALLOC_DIR}/job-runner" +log "Finished downloading job-runner" + diff --git a/svc/pkg/ds/ops/server-create/src/scripts/setup_oci_bundle.sh b/svc/pkg/ds/ops/server-create/src/scripts/setup_oci_bundle.sh new file mode 100644 index 000000000..a5e4bec32 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/scripts/setup_oci_bundle.sh @@ -0,0 +1,84 @@ +#!/usr/bin/env bash +set -euf -o pipefail + +log() { + local timestamp=$(date +"%Y-%m-%d %H:%M:%S.%3N") + echo "[$timestamp] [setup_oci_bundle] $@" +} + 
+DOCKER_IMAGE_PATH="$NOMAD_ALLOC_DIR/docker-image.tar" +OCI_IMAGE_PATH="$NOMAD_ALLOC_DIR/oci-image" +OCI_BUNDLE_PATH="$NOMAD_ALLOC_DIR/oci-bundle" + +# MARK: Generate OCI bundle +case "__BUILD_KIND__" in + "docker-image") + # We need to convert the Docker image to an OCI bundle in order to run it. + + log "Downloading Docker image" + __DOWNLOAD_CMD__ > "$DOCKER_IMAGE_PATH" + + # Allows us to work with the build with umoci + log "Converting Docker image -> OCI image" + skopeo copy "docker-archive:$DOCKER_IMAGE_PATH" "oci:$OCI_IMAGE_PATH:default" + + # Allows us to run the bundle natively with runc + log "Converting OCI image -> OCI bundle" + + umoci unpack --image "$OCI_IMAGE_PATH:default" "$OCI_BUNDLE_PATH" + ;; + "oci-bundle") + log "Downloading OCI bundle" + mkdir "$OCI_BUNDLE_PATH" + __DOWNLOAD_CMD__ | tar -x -C "$OCI_BUNDLE_PATH" + + ;; + *) + log "Unknown build kind" + exit 1 + ;; +esac + +# resolv.conf +# +# See also rivet-job.conflist in lib/bolt/core/src/dep/terraform/install_scripts/files/nomad.sh +cat < $NOMAD_ALLOC_DIR/resolv.conf +nameserver 8.8.8.8 +nameserver 8.8.4.4 +nameserver 2001:4860:4860::8888 +nameserver 2001:4860:4860::8844 +options rotate +options edns0 +options attempts:2 +EOF + +# MARK: Config +# +# Sanitize the config.json by copying safe properties from the provided bundle in to our base config. +# +# This way, we enforce our own capabilities on the container instead of trusting the +# provided config.json +log "Templating config.json" +OVERRIDE_CONFIG="$NOMAD_ALLOC_DIR/oci-bundle-config.overrides.json" +mv "$OCI_BUNDLE_PATH/config.json" "$OVERRIDE_CONFIG" + + +# Template new config +jq " +.process.args = $(jq '.process.args' $OVERRIDE_CONFIG) | +.process.env = $(jq '.process.env' $OVERRIDE_CONFIG) + .process.env | +.process.user = $(jq '.process.user' $OVERRIDE_CONFIG) | +.process.cwd = $(jq '.process.cwd' $OVERRIDE_CONFIG) | +.linux.namespaces += [{\"type\": \"network\", \"path\": \"$NETNS_PATH\"}] | +.mounts += [{ + \"destination\": \"/etc/resolv.conf\", + \"type\": \"bind\", + \"source\": \"$NOMAD_ALLOC_DIR/resolv.conf\", + \"options\": [\"rbind\", \"rprivate\"] +}] +" "$NOMAD_ALLOC_DIR/oci-bundle-config.base.json" > "$OCI_BUNDLE_PATH/config.json" + +# Config will be validated in `job-runner` + +log "Finished setting up OCI bundle" + diff --git a/svc/pkg/ds/ops/server-create/src/seccomp.rs b/svc/pkg/ds/ops/server-create/src/seccomp.rs new file mode 100644 index 000000000..f3f7fcfec --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/seccomp.rs @@ -0,0 +1,481 @@ +use chirp_worker::prelude::*; +use serde_json::json; + +pub fn seccomp() -> serde_json::Value { + // Copied from auto-generated containerd + // + // See comment in super::oci_conifg::config on how to generate this + json!({ + "defaultAction": "SCMP_ACT_ERRNO", + "architectures": [ + "SCMP_ARCH_X86_64", + "SCMP_ARCH_X86", + "SCMP_ARCH_X32" + ], + "syscalls": [ + { + "names": syscall_names(), + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 0, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 8, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131072, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 131080, + "op": "SCMP_CMP_EQ" + 
} + ] + }, + { + "names": [ + "personality" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 4294967295u32, + "op": "SCMP_CMP_EQ" + } + ] + }, + { + "names": [ + "ptrace" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "arch_prctl", + "modify_ldt" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "chroot" + ], + "action": "SCMP_ACT_ALLOW" + }, + { + "names": [ + "clone" + ], + "action": "SCMP_ACT_ALLOW", + "args": [ + { + "index": 0, + "value": 2114060288, + "op": "SCMP_CMP_MASKED_EQ" + } + ] + }, + { + "names": [ + "clone3" + ], + "action": "SCMP_ACT_ERRNO", + "errnoRet": 38 + } + ] + }) +} + +fn syscall_names() -> Vec<&'static str> { + vec![ + "accept", + "accept4", + "access", + "adjtimex", + "alarm", + "bind", + "brk", + "capget", + "capset", + "chdir", + "chmod", + "chown", + "chown32", + "clock_adjtime", + "clock_adjtime64", + "clock_getres", + "clock_getres_time64", + "clock_gettime", + "clock_gettime64", + "clock_nanosleep", + "clock_nanosleep_time64", + "close", + "close_range", + "connect", + "copy_file_range", + "creat", + "dup", + "dup2", + "dup3", + "epoll_create", + "epoll_create1", + "epoll_ctl", + "epoll_ctl_old", + "epoll_pwait", + "epoll_pwait2", + "epoll_wait", + "epoll_wait_old", + "eventfd", + "eventfd2", + "execve", + "execveat", + "exit", + "exit_group", + "faccessat", + "faccessat2", + "fadvise64", + "fadvise64_64", + "fallocate", + "fanotify_mark", + "fchdir", + "fchmod", + "fchmodat", + "fchown", + "fchown32", + "fchownat", + "fcntl", + "fcntl64", + "fdatasync", + "fgetxattr", + "flistxattr", + "flock", + "fork", + "fremovexattr", + "fsetxattr", + "fstat", + "fstat64", + "fstatat64", + "fstatfs", + "fstatfs64", + "fsync", + "ftruncate", + "ftruncate64", + "futex", + "futex_time64", + "futex_waitv", + "futimesat", + "getcpu", + "getcwd", + "getdents", + "getdents64", + "getegid", + "getegid32", + "geteuid", + "geteuid32", + "getgid", + "getgid32", + "getgroups", + "getgroups32", + "getitimer", + "getpeername", + "getpgid", + "getpgrp", + "getpid", + "getppid", + "getpriority", + "getrandom", + "getresgid", + "getresgid32", + "getresuid", + "getresuid32", + "getrlimit", + "get_robust_list", + "getrusage", + "getsid", + "getsockname", + "getsockopt", + "get_thread_area", + "gettid", + "gettimeofday", + "getuid", + "getuid32", + "getxattr", + "inotify_add_watch", + "inotify_init", + "inotify_init1", + "inotify_rm_watch", + "io_cancel", + "ioctl", + "io_destroy", + "io_getevents", + "io_pgetevents", + "io_pgetevents_time64", + "ioprio_get", + "ioprio_set", + "io_setup", + "io_submit", + "io_uring_enter", + "io_uring_register", + "io_uring_setup", + "ipc", + "kill", + "landlock_add_rule", + "landlock_create_ruleset", + "landlock_restrict_self", + "lchown", + "lchown32", + "lgetxattr", + "link", + "linkat", + "listen", + "listxattr", + "llistxattr", + "_llseek", + "lremovexattr", + "lseek", + "lsetxattr", + "lstat", + "lstat64", + "madvise", + "membarrier", + "memfd_create", + "memfd_secret", + "mincore", + "mkdir", + "mkdirat", + "mknod", + "mknodat", + "mlock", + "mlock2", + "mlockall", + "mmap", + "mmap2", + "mprotect", + "mq_getsetattr", + "mq_notify", + "mq_open", + "mq_timedreceive", + "mq_timedreceive_time64", + "mq_timedsend", + "mq_timedsend_time64", + "mq_unlink", + "mremap", + "msgctl", + "msgget", + "msgrcv", + "msgsnd", + "msync", + "munlock", + "munlockall", + "munmap", + "name_to_handle_at", + "nanosleep", + "newfstatat", + "_newselect", + "open", + "openat", + "openat2", + "pause", + "pidfd_open", + 
"pidfd_send_signal", + "pipe", + "pipe2", + "pkey_alloc", + "pkey_free", + "pkey_mprotect", + "poll", + "ppoll", + "ppoll_time64", + "prctl", + "pread64", + "preadv", + "preadv2", + "prlimit64", + "process_mrelease", + "pselect6", + "pselect6_time64", + "pwrite64", + "pwritev", + "pwritev2", + "read", + "readahead", + "readlink", + "readlinkat", + "readv", + "recv", + "recvfrom", + "recvmmsg", + "recvmmsg_time64", + "recvmsg", + "remap_file_pages", + "removexattr", + "rename", + "renameat", + "renameat2", + "restart_syscall", + "rmdir", + "rseq", + "rt_sigaction", + "rt_sigpending", + "rt_sigprocmask", + "rt_sigqueueinfo", + "rt_sigreturn", + "rt_sigsuspend", + "rt_sigtimedwait", + "rt_sigtimedwait_time64", + "rt_tgsigqueueinfo", + "sched_getaffinity", + "sched_getattr", + "sched_getparam", + "sched_get_priority_max", + "sched_get_priority_min", + "sched_getscheduler", + "sched_rr_get_interval", + "sched_rr_get_interval_time64", + "sched_setaffinity", + "sched_setattr", + "sched_setparam", + "sched_setscheduler", + "sched_yield", + "seccomp", + "select", + "semctl", + "semget", + "semop", + "semtimedop", + "semtimedop_time64", + "send", + "sendfile", + "sendfile64", + "sendmmsg", + "sendmsg", + "sendto", + "setfsgid", + "setfsgid32", + "setfsuid", + "setfsuid32", + "setgid", + "setgid32", + "setgroups", + "setgroups32", + "setitimer", + "setpgid", + "setpriority", + "setregid", + "setregid32", + "setresgid", + "setresgid32", + "setresuid", + "setresuid32", + "setreuid", + "setreuid32", + "setrlimit", + "set_robust_list", + "setsid", + "setsockopt", + "set_thread_area", + "set_tid_address", + "setuid", + "setuid32", + "setxattr", + "shmat", + "shmctl", + "shmdt", + "shmget", + "shutdown", + "sigaltstack", + "signalfd", + "signalfd4", + "sigprocmask", + "sigreturn", + "socket", + "socketcall", + "socketpair", + "splice", + "stat", + "stat64", + "statfs", + "statfs64", + "statx", + "symlink", + "symlinkat", + "sync", + "sync_file_range", + "syncfs", + "sysinfo", + "tee", + "tgkill", + "time", + "timer_create", + "timer_delete", + "timer_getoverrun", + "timer_gettime", + "timer_gettime64", + "timer_settime", + "timer_settime64", + "timerfd_create", + "timerfd_gettime", + "timerfd_gettime64", + "timerfd_settime", + "timerfd_settime64", + "times", + "tkill", + "truncate", + "truncate64", + "ugetrlimit", + "umask", + "uname", + "unlink", + "unlinkat", + "utime", + "utimensat", + "utimensat_time64", + "utimes", + "vfork", + "vmsplice", + "wait4", + "waitid", + "waitpid", + "write", + "writev", + ] +} diff --git a/svc/pkg/ds/ops/server-create/src/util_job.rs b/svc/pkg/ds/ops/server-create/src/util_job.rs new file mode 100644 index 000000000..b2a9b44bf --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_job.rs @@ -0,0 +1,26 @@ +use std::time::Duration; + +/// Determines if a Nomad job is dispatched from our run. +/// +/// We use this when monitoring Nomad in order to determine which events to +/// pay attention to. 
+pub fn is_nomad_job_run(job_id: &str) -> bool { + job_id.starts_with("job-") && job_id.contains("/dispatch-") +} + +// Timeout from when `stop_job` is called and the kill signal is sent +pub const JOB_STOP_TIMEOUT: Duration = Duration::from_secs(30); + +pub const TASK_CLEANUP_CPU: i32 = 50; + +// Query Prometheus with: +// +// ``` +// max(nomad_client_allocs_memory_max_usage{ns="prod",exported_job=~"job-.*",task="run-cleanup"}) / 1000 / 1000 +// ``` +// +// 13.5 MB baseline, 29 MB highest peak +pub const TASK_CLEANUP_MEMORY: i32 = 32; + +pub const RUN_MAIN_TASK_NAME: &str = "main"; +pub const RUN_CLEANUP_TASK_NAME: &str = "run-cleanup"; diff --git a/svc/pkg/ds/ops/server-create/src/util_mm/consts.rs b/svc/pkg/ds/ops/server-create/src/util_mm/consts.rs new file mode 100644 index 000000000..10d67ca4a --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_mm/consts.rs @@ -0,0 +1,39 @@ +use rivet_util as util; + +pub const LOBBY_READY_TIMEOUT: i64 = util::duration::minutes(5); +pub const PLAYER_READY_TIMEOUT: i64 = util::duration::minutes(2); +pub const PLAYER_AUTO_REMOVE_TIMEOUT: i64 = util::duration::hours(8); + +pub const MIN_HOST_PORT: u16 = 26000; +pub const MAX_HOST_PORT: u16 = 31999; + +/// Constants used for mocking responses when using dev tokens. +pub const DEV_REGION_ID: &str = "dev-lcl"; +pub const DEV_PROVIDER_NAME: &str = "Development"; +pub const DEV_REGION_NAME: &str = "Local"; + +// Also see svc/mm-lobby-create/src/nomad_job.rs +pub const DEFAULT_ENV_KEYS: &[&str] = &[ + "RIVET_API_ENDPOINT", + "RIVET_CHAT_API_URL", + "RIVET_GROUP_API_URL", + "RIVET_IDENTITY_API_URL", + "RIVET_KV_API_URL", + "RIVET_MATCHMAKER_API_URL", + "RIVET_NAMESPACE_NAME", + "RIVET_NAMESPACE_ID", + "RIVET_VERSION_NAME", + "RIVET_VERSION_ID", + "RIVET_GAME_MODE_ID", + "RIVET_GAME_MODE_NAME", + "RIVET_LOBBY_ID", + "RIVET_TOKEN", + "RIVET_REGION_ID", + "RIVET_REGION_NAME", + "RIVET_MAX_PLAYERS_NORMAL", + "RIVET_MAX_PLAYERS_DIRECT", + "RIVET_MAX_PLAYERS_PARTY", + "RIVET_LOBBY_TOKEN", + "RIVET_LOBBY_GROUP_ID", + "RIVET_LOBBY_GROUP_NAME", +]; diff --git a/svc/pkg/ds/ops/server-create/src/util_mm/defaults.rs b/svc/pkg/ds/ops/server-create/src/util_mm/defaults.rs new file mode 100644 index 000000000..1c85d6df0 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_mm/defaults.rs @@ -0,0 +1,4 @@ +pub const TIER_NAME_ID: &str = "basic-1d1"; +pub const MAX_PLAYERS_NORMAL: u32 = 32; +pub const MAX_PLAYERS_DIRECT: u32 = 40; +pub const MAX_PLAYERS_PARTY: u32 = 40; diff --git a/svc/pkg/ds/ops/server-create/src/util_mm/key.rs b/svc/pkg/ds/ops/server-create/src/util_mm/key.rs new file mode 100644 index 000000000..d62c244ea --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_mm/key.rs @@ -0,0 +1,207 @@ +use uuid::Uuid; + +/// HASH +pub fn player_config(player_id: Uuid) -> String { + format!("{{global}}:mm:player:{}:config", player_id) +} + +pub mod player_config { + use uuid::Uuid; + + #[derive(Debug, serde::Serialize)] + pub struct Config { + #[serde(rename = "l")] + pub lobby_id: Uuid, + #[serde(rename = "qi")] + pub query_id: Option, + #[serde(rename = "ra")] + pub remote_address: String, + } + + pub const LOBBY_ID: &str = "l"; + pub const QUERY_ID: &str = "qi"; + pub const REMOTE_ADDRESS: &str = "ra"; +} + +/// HASH +pub fn lobby_config(lobby_id: Uuid) -> String { + format!("{{global}}:mm:lobby:{}:config", lobby_id) +} + +/// HASH +pub fn lobby_tags(lobby_id: Uuid) -> String { + format!("{{global}}:mm:lobby:{}:tags", lobby_id) +} + +pub mod lobby_config { + use uuid::Uuid; + + #[derive(Debug, 
serde::Serialize)] + pub struct Config { + #[serde(rename = "ns")] + pub namespace_id: Uuid, + #[serde(rename = "r")] + pub region_id: Uuid, + #[serde(rename = "lg")] + pub lobby_group_id: Uuid, + #[serde(rename = "mpn")] + pub max_players_normal: u32, + #[serde(rename = "mpp")] + pub max_players_party: u32, + #[serde(rename = "mpd")] + pub max_players_direct: u32, + #[serde(rename = "p")] + pub preemptive: bool, + #[serde(rename = "rt", skip_serializing_if = "Option::is_none")] + pub ready_ts: Option, + #[serde(rename = "c")] + pub is_closed: bool, + #[serde(rename = "cu")] + pub is_custom: bool, + #[serde(rename = "st", skip_serializing_if = "Option::is_none")] + pub state_json: Option, + } + + pub const NAMESPACE_ID: &str = "ns"; + pub const REGION_ID: &str = "r"; + pub const LOBBY_GROUP_ID: &str = "lg"; + pub const MAX_PLAYERS_NORMAL: &str = "mpn"; + pub const MAX_PLAYERS_PARTY: &str = "mpp"; + pub const MAX_PLAYERS_DIRECT: &str = "mpd"; + pub const PREEMPTIVE: &str = "p"; + pub const READY_TS: &str = "rt"; + pub const IS_CLOSED: &str = "c"; + pub const IS_CUSTOM: &str = "cu"; + pub const STATE_JSON: &str = "st"; +} + +/// HASH +/// +/// Includes the state of all active find queries. +pub fn find_query_state(query_id: Uuid) -> String { + format!("{{global}}:mm:find_query:{}:state", query_id) +} + +pub mod find_query_state { + use uuid::Uuid; + + #[derive(Debug, serde::Serialize)] + pub struct State { + #[serde(rename = "n")] + pub namespace_id: Uuid, + #[serde(rename = "l", skip_serializing_if = "Option::is_none")] + pub lobby_id: Option, + #[serde(rename = "lac", skip_serializing_if = "Option::is_none")] + pub lobby_auto_created: Option, + #[serde(rename = "s")] + pub status: u8, + } + + pub const NAMESPACE_ID: &str = "n"; + pub const PLAYER_IDS: &str = "pl"; + pub const LOBBY_ID: &str = "l"; + pub const LOBBY_AUTO_CREATED: &str = "lac"; + pub const STATUS: &str = "s"; +} + +/// SET +pub fn find_query_player_ids(query_id: Uuid) -> String { + format!("{{global}}:mm:find_query:{}:player_ids", query_id) +} + +/// ZSET +/// +/// Includes all active find queries for a lobby. +pub fn lobby_find_queries(lobby_id: Uuid) -> String { + format!("{{global}}:mm:lobby:{}:find_queries", lobby_id) +} + +/// ZSET +pub fn ns_player_ids(namespace_id: Uuid) -> String { + format!("{{global}}:mm:ns:{}:player_ids", namespace_id) +} + +/// ZSET +pub fn ns_lobby_ids(namespace_id: Uuid) -> String { + format!("{{global}}:mm:ns:{}:lobby_ids", namespace_id) +} + +/// SET +pub fn ns_remote_address_player_ids(namespace_id: Uuid, remote_address: &str) -> String { + format!( + "{{global}}:mm:ns:{}:remote_address:{}:player_ids", + namespace_id, remote_address + ) +} + +/// ZSET +pub fn lobby_player_ids(lobby_id: Uuid) -> String { + format!("{{global}}:mm:lobby:{}:player_ids", lobby_id) +} + +/// ZSET +pub fn lobby_registered_player_ids(lobby_id: Uuid) -> String { + format!("{{global}}:mm:lobby:{}:registered_player_ids", lobby_id) +} + +/// ZSET +pub fn idle_lobby_ids(namespace_id: Uuid, region_id: Uuid, lobby_group_id: Uuid) -> String { + format!( + "{{global}}:mm:ns:{}:region:{}:lg:{}:idle_lobby_ids", + namespace_id, region_id, lobby_group_id + ) +} + +/// Map containing all idle lobbies and their associated lobby group +/// IDs. +/// +/// We limit this to just idle lobbies since we need to iterate over all +/// the values in this hash in mm-lobby-idle-update, so we want to limit +/// the values in here as much as possible. 
+/// +/// We keep this all in one hash so we only have to lock one key instead +/// of using `SCAN`. +/// +/// HASH +pub fn idle_lobby_lobby_group_ids(namespace_id: Uuid, region_id: Uuid) -> String { + format!( + "{{global}}:mm:ns:{}:region:{}:lobby:idle:lobby_group_ids", + namespace_id, region_id, + ) +} + +/// ZSET +pub fn lobby_available_spots( + namespace_id: Uuid, + region_id: Uuid, + lobby_group_id: Uuid, + join_kind: super::JoinKind, +) -> String { + format!( + "{{global}}:mm:ns:{}:region:{}:lg:{}:lobby:available_spots:{}", + namespace_id, + region_id, + lobby_group_id, + join_kind.short() + ) +} + +/// ZSET +pub fn lobby_unready() -> String { + "{global}:mm:lobby:unready".to_string() +} + +/// ZSET +pub fn player_unregistered() -> String { + "{global}:mm:player:unregistered".to_string() +} + +/// ZSET +pub fn player_auto_remove() -> String { + "{global}:mm:player:auto_remove".to_string() +} + +// Placeholder key +pub fn empty() -> String { + "{global}".to_string() +} diff --git a/svc/pkg/ds/ops/server-create/src/util_mm/mod.rs b/svc/pkg/ds/ops/server-create/src/util_mm/mod.rs new file mode 100644 index 000000000..ac75d4f9f --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_mm/mod.rs @@ -0,0 +1,48 @@ +pub mod consts; +pub mod defaults; +pub mod key; +pub mod test; +pub mod verification; +pub mod version_migrations; + +pub enum JoinKind { + Normal, + Party, + Direct, +} + +impl JoinKind { + pub fn short(self) -> &'static str { + match self { + JoinKind::Normal => "normal", + JoinKind::Party => "party", + JoinKind::Direct => "direct", + } + } +} + +#[derive(Debug, PartialEq, strum::FromRepr)] +#[repr(u8)] +pub enum FindQueryStatus { + /// Lobby is creating or in between mm-lobby-find and + /// mm-lobby-find-try-complete. + Pending = 0, + /// Find finished and lobby is ready. + Complete = 1, + /// There was an error. + Fail = 2, +} + +/// Formats the port label to be used in Nomad. +/// +/// Prefixing this port label ensures that user-defined port names don't interfere +/// with other ports.
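+///
+/// e.g. a user-defined port named "Game Port" (illustrative value) becomes the
+/// Nomad label "game_game_port".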
+pub fn format_nomad_port_label(port_label: &str) -> String { + let snake_port_label = heck::SnakeCase::to_snake_case(port_label); + format!("game_{snake_port_label}") +} + +pub const RUNC_SETUP_CPU: i32 = 50; +pub const RUNC_SETUP_MEMORY: i32 = 32; +pub const RUNC_CLEANUP_CPU: i32 = 50; +pub const RUNC_CLEANUP_MEMORY: i32 = 32; diff --git a/svc/pkg/ds/ops/server-create/src/util_mm/test.rs b/svc/pkg/ds/ops/server-create/src/util_mm/test.rs new file mode 100644 index 000000000..8de746645 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_mm/test.rs @@ -0,0 +1 @@ +pub const TIER_NAME_ID: &str = "basic-1d16"; diff --git a/svc/pkg/ds/ops/server-create/src/util_mm/verification.rs b/svc/pkg/ds/ops/server-create/src/util_mm/verification.rs new file mode 100644 index 000000000..ae51d87f5 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_mm/verification.rs @@ -0,0 +1,391 @@ +use std::collections::HashMap; + +use futures_util::{StreamExt, TryStreamExt}; +use http::StatusCode; +use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; +use serde::Serialize; +use uuid::Uuid; + +#[derive(Serialize)] +pub struct ExternalVerificationRequest { + pub verification_data: Option, + pub game: Game, + pub clients: HashMap, + pub join_kind: JoinKind, + pub kind: ConnectionKind, +} + +#[derive(Serialize)] +pub struct Game { + pub namespace_id: Uuid, + pub game_mode_id: Uuid, + pub game_mode_name_id: String, + + pub lobby: Option, + pub state: Option, + pub config: Option, + pub tags: HashMap, + pub dynamic_max_players: Option, +} + +#[derive(Serialize)] +pub struct Lobby { + pub lobby_id: Uuid, + pub region_id: Uuid, + pub region_name_id: String, + pub create_ts: String, + pub is_closed: bool, +} + +#[derive(Serialize)] +pub struct Client { + pub user_agent: Option, + pub latitude: Option, + pub longitude: Option, +} + +#[derive(Serialize)] +pub enum JoinKind { + Normal, + Party, +} + +#[derive(Copy, Clone, Serialize)] +pub enum ConnectionKind { + Find, + Join, + Create, +} + +pub struct VerifyConfigOpts<'a> { + pub kind: ConnectionKind, + pub namespace_id: Uuid, + pub user_id: Option, + pub client_info: Vec, + pub tags: &'a HashMap, + pub dynamic_max_players: Option, + + pub lobby_groups: &'a [backend::matchmaker::LobbyGroup], + pub lobby_group_meta: &'a [backend::matchmaker::LobbyGroupMeta], + pub lobby_info: Option<&'a backend::matchmaker::Lobby>, + pub lobby_state_json: Option<&'a str>, + + pub verification_data_json: Option<&'a str>, + pub lobby_config_json: Option<&'a str>, + pub custom_lobby_publicity: Option, +} + +struct ExternalRequestConfigAndLobby<'a> { + pub lobby_group: &'a backend::matchmaker::LobbyGroup, + pub lobby_group_meta: &'a backend::matchmaker::LobbyGroupMeta, + external_request_config: backend::net::ExternalRequestConfig, +} + +/// Verifies everything required to make a find request or create a custom lobby. 
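+///
+/// Depending on the connection kind, this checks the game mode's identity
+/// requirement, custom lobby publicity and per-identity lobby limits, the size
+/// of the custom lobby config, and finally calls any configured external
+/// verification endpoints with the lobby and client info.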
+pub async fn verify_config( + ctx: &OperationContext<()>, + opts: &VerifyConfigOpts<'_>, +) -> GlobalResult<()> { + let mut highest_identity_requirement = backend::matchmaker::IdentityRequirement::None; + let mut external_request_configs = Vec::new(); + + // Collect all external request configs and identity requirement + for (lobby_group, lobby_group_meta) in opts.lobby_groups.iter().zip(opts.lobby_group_meta) { + let (identity_requirement, external_request_config) = match ( + opts.kind, + lobby_group.actions.as_ref().and_then(|a| a.find.as_ref()), + lobby_group.actions.as_ref().and_then(|a| a.join.as_ref()), + lobby_group.actions.as_ref().and_then(|a| a.create.as_ref()), + ) { + (ConnectionKind::Find, Some(find_config), _, _) => { + if !find_config.enabled { + bail_with!(MATCHMAKER_FIND_DISABLED); + } + + ( + unwrap!( + backend::matchmaker::IdentityRequirement::from_i32( + find_config.identity_requirement + ), + "invalid identity requirement variant" + ), + find_config.verification.as_ref().map(|config| { + backend::net::ExternalRequestConfig { + url: config.url.clone(), + method: backend::net::HttpMethod::Post as i32, + headers: config.headers.clone(), + } + }), + ) + } + (ConnectionKind::Join, _, Some(join_config), _) => { + if !join_config.enabled { + bail_with!(MATCHMAKER_JOIN_DISABLED); + } + + ( + unwrap!( + backend::matchmaker::IdentityRequirement::from_i32( + join_config.identity_requirement + ), + "invalid identity requirement variant" + ), + join_config.verification.as_ref().map(|config| { + backend::net::ExternalRequestConfig { + url: config.url.clone(), + method: backend::net::HttpMethod::Post as i32, + headers: config.headers.clone(), + } + }), + ) + } + (ConnectionKind::Create, _, _, Some(create_config)) => { + let publicity = unwrap!(opts.custom_lobby_publicity); + + // Verify publicity + match ( + publicity, + create_config.enable_public, + create_config.enable_private, + ) { + (backend::matchmaker::lobby::Publicity::Public, allowed, _) => { + ensure_with!( + allowed, + MATCHMAKER_CUSTOM_LOBBY_CONFIG_INVALID, + reason = r#""public" publicity not allowed with this custom game mode"# + ); + } + (backend::matchmaker::lobby::Publicity::Private, _, allowed) => { + ensure_with!( + allowed, + MATCHMAKER_CUSTOM_LOBBY_CONFIG_INVALID, + reason = + r#""private" publicity not allowed with this custom game mode"# + ); + } + } + + // Verify lobby count + if let (Some(max_lobbies_per_identity), Some(user_id)) = + (create_config.max_lobbies_per_identity, opts.user_id) + { + let lobbies_res = op!([ctx] mm_lobby_list_for_user_id { + user_ids: vec![user_id.into()], + }) + .await?; + let user = unwrap!(lobbies_res.users.first()); + ensure_with!( + (user.lobby_ids.len() as u64) < max_lobbies_per_identity, + MATCHMAKER_CUSTOM_LOBBY_LIMIT_REACHED + ); + } + + ( + unwrap!( + backend::matchmaker::IdentityRequirement::from_i32( + create_config.identity_requirement + ), + "invalid identity requirement variant" + ), + create_config.verification.as_ref().map(|config| { + backend::net::ExternalRequestConfig { + url: config.url.clone(), + method: backend::net::HttpMethod::Post as i32, + headers: config.headers.clone(), + } + }), + ) + } + (ConnectionKind::Create, _, _, None) => { + bail_with!(MATCHMAKER_CUSTOM_LOBBIES_DISABLED); + } + _ => (backend::matchmaker::IdentityRequirement::None, None), + }; + + // Updated highest requirement + match highest_identity_requirement { + backend::matchmaker::IdentityRequirement::None => { + highest_identity_requirement = identity_requirement; + } + 
backend::matchmaker::IdentityRequirement::Guest => { + if matches!( + identity_requirement, + backend::matchmaker::IdentityRequirement::Registered + ) { + highest_identity_requirement = identity_requirement; + } + } + backend::matchmaker::IdentityRequirement::Registered => {} + } + + if let Some(external_request_config) = external_request_config { + external_request_configs.push(ExternalRequestConfigAndLobby { + lobby_group, + lobby_group_meta, + external_request_config, + }); + } + } + + // Verify identity requirement + match (highest_identity_requirement, opts.user_id) { + (backend::matchmaker::IdentityRequirement::Registered, Some(user_id)) => { + let user_identities_res = op!([ctx] user_identity_get { + user_ids: vec![user_id.into()], + }) + .await?; + let user = unwrap!( + user_identities_res.users.first(), + "could not find user identities" + ); + let is_registered = !user.identities.is_empty(); + + if !is_registered { + bail_with!(MATCHMAKER_REGISTRATION_REQUIRED); + } + } + ( + backend::matchmaker::IdentityRequirement::Guest + | backend::matchmaker::IdentityRequirement::Registered, + None, + ) => { + bail_with!(MATCHMAKER_IDENTITY_REQUIRED); + } + _ => {} + } + + // Verify lobby config + if let Some(lobby_config_json) = opts.lobby_config_json { + ensure_with!( + lobby_config_json.len() as u64 <= util::file_size::kibibytes(16), + MATCHMAKER_CUSTOM_LOBBY_CONFIG_INVALID, + reason = "too large (> 16KiB)" + ); + } + + // Verify user data externally + for external_request_config_and_lobby in external_request_configs { + let ExternalRequestConfigAndLobby { + lobby_group, + lobby_group_meta, + external_request_config, + } = external_request_config_and_lobby; + + // Build lobby info + let lobby = if let Some(l) = &opts.lobby_info { + // Fetch region data for readable name + let region_id = unwrap!(l.region_id); + let regions_res = op!([ctx] region_get { + region_ids: vec![region_id], + }) + .await?; + let region = unwrap!(regions_res.regions.first()); + + Some(Lobby { + lobby_id: unwrap_ref!(l.lobby_id).as_uuid(), + region_id: region_id.as_uuid(), + region_name_id: region.name_id.clone(), + create_ts: util::timestamp::to_string(l.create_ts)?, + is_closed: l.is_closed, + }) + } else { + None + }; + + // Fetch IP info + let clients = futures_util::stream::iter( + opts.client_info + .iter() + .filter_map(|client_info| { + client_info + .remote_address + .as_ref() + .map(|ip| (ip.clone(), client_info.user_agent.clone())) + }) + .collect::>(), + ) + .map(|(ip, user_agent)| async move { + let ip_res = op!([ctx] ip_info { + ip: ip.clone(), + }) + .await?; + let (latitude, longitude) = ip_res + .ip_info + .and_then(|ip_info| ip_info.coords) + .map(|coords| (coords.latitude, coords.longitude)) + .unzip(); + + GlobalResult::Ok(( + ip.clone(), + Client { + user_agent: user_agent.clone(), + longitude, + latitude, + }, + )) + }) + .buffer_unordered(16) + .try_collect::>() + .await?; + + // Build body + let body = ExternalVerificationRequest { + verification_data: opts + .verification_data_json + .as_ref() + .map(|json| serde_json::from_str::(json)) + .transpose()?, + game: Game { + game_mode_id: unwrap_ref!(lobby_group_meta.lobby_group_id).as_uuid(), + game_mode_name_id: lobby_group.name_id.clone(), + namespace_id: opts.namespace_id, + + lobby, + state: opts + .lobby_state_json + .as_ref() + .map(|json| serde_json::from_str::(json)) + .transpose()?, + config: opts + .lobby_config_json + .as_ref() + .map(|json| serde_json::from_str::(json)) + .transpose()?, + tags: opts.tags.clone(), + 
dynamic_max_players: opts.dynamic_max_players, + }, + clients, + join_kind: JoinKind::Normal, + kind: opts.kind, + }; + + // Send request + let request_id = Uuid::new_v4(); + let external_res = msg!([ctx] external::msg::request_call(request_id) + -> Result + { + request_id: Some(request_id.into()), + config: Some(external_request_config), + timeout: util::duration::seconds(10) as u64, + body: Some(serde_json::to_vec(&body)?), + ..Default::default() + }) + .await?; + + // Handle status code + if let Ok(res) = external_res { + let status = StatusCode::from_u16(res.status_code as u16)?; + + tracing::info!(?status, "user verification response"); + + if !status.is_success() { + bail_with!(MATCHMAKER_VERIFICATION_FAILED); + } + } else { + bail_with!(MATCHMAKER_VERIFICATION_REQUEST_FAILED); + } + } + + Ok(()) +} diff --git a/svc/pkg/ds/ops/server-create/src/util_mm/version_migrations.rs b/svc/pkg/ds/ops/server-create/src/util_mm/version_migrations.rs new file mode 100644 index 000000000..ed2064c68 --- /dev/null +++ b/svc/pkg/ds/ops/server-create/src/util_mm/version_migrations.rs @@ -0,0 +1,8 @@ +use bit_vec::BitVec; + +pub const PORT_RANGE_PROXY_IDX: usize = 0; + +/// The bit flags expected for a game version with all migrations applied. +pub fn all() -> BitVec { + BitVec::from_elem(1, true) +} diff --git a/svc/pkg/ds/ops/server-create/tests/integration.rs b/svc/pkg/ds/ops/server-create/tests/integration.rs index 0c25ec363..e070012aa 100644 --- a/svc/pkg/ds/ops/server-create/tests/integration.rs +++ b/svc/pkg/ds/ops/server-create/tests/integration.rs @@ -1,41 +1,89 @@ use std::collections::HashMap; use chirp_worker::prelude::*; +use proto::backend; #[worker_test] async fn create(ctx: TestCtx) { - let datacenter_id = Uuid::new_v4(); - let cluster_id = Uuid::new_v4(); - let game_id = Uuid::new_v4(); - let image_id = Uuid::new_v4(); + let game_res = op!([ctx] faker_game { + ..Default::default() + }) + .await + .unwrap(); + let game_id = game_res.game_id.unwrap(); + + // Pick an existing cluster + let cluster_id = op!([ctx] cluster_list {}) + .await + .unwrap() + .cluster_ids + .first() + .unwrap() + .to_owned(); + + // Pick an existing datacenter + let datacenter_id = op!([ctx] cluster_datacenter_list { + cluster_ids: vec![cluster_id], + }) + .await + .unwrap() + .clusters + .first() + .unwrap() + .datacenter_ids + .first() + .unwrap() + .to_owned(); + + let build_res: backend::pkg::faker::build::Response = op!([ctx] faker_build { + game_id: Some(game_id), + image: backend::faker::Image::HangIndefinitely as i32, + }) + .await + .unwrap(); + + let runtime = Some( + proto::backend::pkg::dynamic_servers::server_create::request::Runtime::DockerRuntime( + proto::backend::dynamic_servers::DockerRuntime { + args: Vec::new(), + environment: HashMap::new(), + image_id: Some(build_res.build_id.unwrap()), + network: Some(proto::backend::dynamic_servers::DockerNetwork { + mode: 0, + ports: vec![( + "testing2".to_string(), + backend::dynamic_servers::DockerPort { + port: Some(1234), + routing: Some( + backend::dynamic_servers::docker_port::Routing::GameGuard( + backend::dynamic_servers::DockerGameGuardRouting { + protocol: 1, + }, + ), + ), + }, + )] + // Collect into hashmap + .into_iter() + .collect(), + }), + }, + ), + ); let server = op!([ctx] ds_server_create { - game_id: Some(game_id.into()), - cluster_id: Some(cluster_id.into()), - datacenter_id: Some(datacenter_id.into()), - resources: Some(proto::backend::dynamic_servers::ServerResources { cpu_millicores: 1000, memory_mib: 1000 }), + game_id: 
Some(game_id), + cluster_id: Some(cluster_id), + datacenter_id: Some(datacenter_id), + resources: Some(proto::backend::dynamic_servers::ServerResources { cpu_millicores: 100, memory_mib: 200 }), kill_timeout_ms: 0, metadata: HashMap::new(), - runtime: Some( - proto::backend::pkg::dynamic_servers::server_create::request::Runtime::DockerRuntime( - proto::backend::dynamic_servers::DockerRuntime { - args: Vec::new(), - environment: HashMap::new(), - image_id: Some(image_id.into()), - network: Some( - proto::backend::dynamic_servers::DockerNetwork { - mode: 1, - ports: HashMap::new() - } - ) - } - ) - ), + runtime: runtime, }) .await .unwrap() .server .unwrap(); - assert_eq!(game_id, server.game_id.unwrap().as_uuid()); + // assert_eq!(game_res.game_id.unwrap(), server.game_id.unwrap().as_uuid()); } diff --git a/svc/pkg/ds/types/msg/ds-nomad-monitor-alloc-plan.proto b/svc/pkg/ds/types/msg/ds-nomad-monitor-alloc-plan.proto new file mode 100644 index 000000000..5823ca713 --- /dev/null +++ b/svc/pkg/ds/types/msg/ds-nomad-monitor-alloc-plan.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.ds.msg.ds_nomad_monitor_alloc_plan; + +import "proto/common.proto"; + +/// name = "msg-ds-ds-nomad-monitor-alloc-plan" +/// parameters = [ +/// { name = "user_id" }, +/// ] +message Message { + +} diff --git a/svc/pkg/ds/worker/Cargo.toml b/svc/pkg/ds/worker/Cargo.toml new file mode 100644 index 000000000..e69de29bb diff --git a/svc/pkg/ds/worker/Service.toml b/svc/pkg/ds/worker/Service.toml new file mode 100644 index 000000000..1ac4bc945 --- /dev/null +++ b/svc/pkg/ds/worker/Service.toml @@ -0,0 +1,8 @@ +[service] +name = "ds-worker" + +[runtime] +kind = "rust" + +[consumer] + diff --git a/svc/pkg/ds/worker/src/lib.rs b/svc/pkg/ds/worker/src/lib.rs new file mode 100644 index 000000000..3719b10aa --- /dev/null +++ b/svc/pkg/ds/worker/src/lib.rs @@ -0,0 +1 @@ +pub mod workers; diff --git a/svc/pkg/ds/worker/src/workers/mod.rs b/svc/pkg/ds/worker/src/workers/mod.rs new file mode 100644 index 000000000..151a05303 --- /dev/null +++ b/svc/pkg/ds/worker/src/workers/mod.rs @@ -0,0 +1,14 @@ +pub mod nomad_monitor_alloc_plan; +pub mod nomad_monitor_alloc_update; +pub mod nomad_monitor_eval_update; + +chirp_worker::workers![ + nomad_monitor_alloc_plan, + nomad_monitor_alloc_update, + nomad_monitor_eval_update +]; + +lazy_static::lazy_static! 
{ + pub static ref NEW_NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = + nomad_util::new_config_from_env().unwrap(); +} diff --git a/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_plan.rs b/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_plan.rs new file mode 100644 index 000000000..42cc95913 --- /dev/null +++ b/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_plan.rs @@ -0,0 +1,381 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; +use redis::AsyncCommands; +use serde::Deserialize; + +use crate::workers::NEW_NOMAD_CONFIG; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct PlanResult { + allocation: nomad_client::models::Allocation, +} + +#[derive(Debug, sqlx::FromRow)] +struct RunRow { + server_id: Uuid, + datacenter_id: Uuid, + stop_ts: Option, + alloc_plan_ts: Option, +} + +#[derive(Debug, sqlx::FromRow)] +struct ProxiedPort { + target_nomad_port_label: Option, + ingress_port: i64, + ingress_hostnames: Vec, + proxy_protocol: i64, + ssl_domain_mode: i64, +} + +#[derive(Clone)] +struct RunData { + job_id: String, + alloc_id: String, + nomad_node_id: String, + nomad_node_name: String, + nomad_node_public_ipv4: String, + nomad_node_vlan_ipv4: String, + run_networks: Vec, + ports: Vec, +} + +#[worker(name = "ds-nomad-monitor-alloc-plan")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let mut redis_job = ctx.redis_job().await?; + + let PlanResult { allocation: alloc } = serde_json::from_str::(&ctx.payload_json)?; + + let job_id = unwrap_ref!(alloc.job_id, "alloc has no job id"); + let alloc_id = unwrap_ref!(alloc.ID); + let nomad_node_id = unwrap_ref!(alloc.node_id, "alloc has no node id"); + let _nomad_node_name = unwrap_ref!(alloc.node_id, "alloc has no node name"); + + tracing::info!(?alloc, "ayy event"); + + // Fetch node metadata + let node = nomad_client::apis::nodes_api::get_node( + &NEW_NOMAD_CONFIG, + nomad_node_id, + None, + None, + None, + None, + None, + None, + None, + None, + None, + ) + .await?; + let mut meta = unwrap!(node.meta); + + // Read ports + let mut run_networks = Vec::new(); + let mut ports = Vec::new(); + let alloc_resources = unwrap_ref!(alloc.resources); + if let Some(networks) = &alloc_resources.networks { + for network in networks { + let network_mode = unwrap_ref!(network.mode); + let network_ip = unwrap_ref!(network.IP); + + run_networks.push(backend::job::Network { + mode: network_mode.clone(), + ip: network_ip.clone(), + }); + + if let Some(dynamic_ports) = &network.dynamic_ports { + for port in dynamic_ports { + // Don't share connect proxy ports + let label = unwrap_ref!(port.label); + ports.push(backend::job::Port { + label: label.clone(), + source: *unwrap_ref!(port.value) as u32, + target: *unwrap_ref!(port.to) as u32, + ip: network_ip.clone(), + }); + } + } + } + } else { + tracing::info!("no network returned"); + } + + // Fetch the run + // + // Backoff mitigates race condition with job-run-create not having inserted + // the dispatched_job_id yet. 
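+ // If the row isn't there yet, `update_db` returns `None` and the message is
+ // retried below until it is considered stale.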
+ let run_data = RunData { + job_id: job_id.clone(), + alloc_id: alloc_id.clone(), + nomad_node_id: nomad_node_id.clone(), + nomad_node_name: unwrap!(node.name), + nomad_node_public_ipv4: unwrap!(meta.remove("network-public-ipv4")), + nomad_node_vlan_ipv4: unwrap!(meta.remove("network-vlan-ipv4")), + run_networks: run_networks.clone(), + ports: ports.clone(), + }; + let db_output = rivet_pools::utils::crdb::tx(&ctx.crdb().await?, |tx| { + let ctx = ctx.clone(); + let now = ctx.ts(); + let run_data = run_data.clone(); + Box::pin(update_db(ctx, tx, now, run_data)) + }) + .await?; + + // Check if run found + let Some(DbOutput { + server_id, + datacenter_id, + proxied_ports, + stop_ts, + }) = db_output + else { + if ctx.req_dt() > util::duration::minutes(5) { + tracing::error!("discarding stale message"); + return Ok(()); + } else { + retry_bail!("run not found, may be race condition with insertion"); + } + }; + + // // Write the port to the cache + // if stop_ts.is_none() { + // let msg = job::redis_job::RunProxiedPorts { + // run_id: Some(run_id.into()), + // proxied_ports: proxied_ports + // .iter() + // .filter_map(|pp| { + // ports + // .iter() + // .find(|p| Some(&p.label) == pp.target_nomad_port_label.as_ref()) + // .map(|p| (p, pp)) + // }) + // .map( + // |(port, proxied_port)| job::redis_job::run_proxied_ports::ProxiedPort { + // ip: port.ip.clone(), + // source: port.source, + // target_nomad_port_label: proxied_port.target_nomad_port_label.clone(), + // ingress_port: proxied_port.ingress_port as u32, + // ingress_hostnames: proxied_port.ingress_hostnames.clone(), + // proxy_protocol: proxied_port.proxy_protocol as i32, + // ssl_domain_mode: proxied_port.ssl_domain_mode as i32, + // }, + // ) + // .collect(), + // }; + // let mut buf = Vec::with_capacity(msg.encoded_len()); + // msg.encode(&mut buf)?; + + // let write_perf = ctx.perf().start("write-proxied-ports-redis").await; + // tracing::info!(proxied_ports = ?msg, "writing job run proxied ports to cache"); + // redis_job + // .hset( + // util_job::key::proxied_ports(region_id), + // run_id.to_string(), + // buf, + // ) + // .await?; + // write_perf.end(); + // } + + tracing::info!(%job_id, %server_id, "updated run"); + // msg!([ctx] job_run::msg::alloc_planned(server_id) { + // server_id: Some(server_id.into()), + // run_meta: Some(job_run::msg::alloc_planned::message::RunMeta::Nomad(job_run::msg::alloc_planned::message::Nomad { + // alloc_id: alloc_id.clone(), + // node_id: nomad_node_id.clone(), + // })), + // }) + // .await?; + // msg!([ctx] job_run::msg::ports_resolved(server_id) { + // server_id: Some(server_id.into()), + // }) + // .await?; + + Ok(()) +} + +#[derive(Debug)] +struct DbOutput { + server_id: Uuid, + datacenter_id: Uuid, + proxied_ports: Vec, + stop_ts: Option, +} + +/// Returns `None` if the run could not be found. 
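+///
+/// On the first alloc plan for a server this records the alloc and node
+/// metadata and inserts the run's networks and ports, then loads the server's
+/// proxied ports and checks that each one matches a reported Nomad port.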
+#[tracing::instrument(skip_all)] +async fn update_db( + ctx: OperationContext, + tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, + now: i64, + RunData { + job_id, + alloc_id, + nomad_node_id, + nomad_node_name, + nomad_node_public_ipv4, + nomad_node_vlan_ipv4, + run_networks, + ports, + }: RunData, +) -> GlobalResult> { + let run_row = sql_fetch_optional!( + [ctx, RunRow, @tx tx] + // " + // SELECT runs.run_id, runs.region_id, runs.stop_ts, run_meta_nomad.alloc_plan_ts + // FROM db_dynamic_servers.run_meta_nomad + // INNER JOIN db_dynamic_servers.runs ON runs.run_id = run_meta_nomad.run_id + // WHERE dispatched_job_id = $1 + // FOR UPDATE OF run_meta_nomad + // ", + " + SELECT + servers.server_id, + servers.datacenter_id, + servers.stop_ts, + server_meta_nomad.alloc_plan_ts + FROM + db_dynamic_servers.server_meta_nomad + INNER JOIN + db_dynamic_servers.servers ON servers.run_id = server_meta_nomad.run_id + WHERE + dispatched_job_id = $1 + FOR UPDATE OF + server_meta_nomad + ", + &job_id, + ) + .await?; + + // Check if run found + let run_row = if let Some(run_row) = run_row { + run_row + } else { + tracing::info!("caught race condition with job-run-create"); + return Ok(None); + }; + let server_id = run_row.server_id; + + // Write run meta on first plan + if run_row.alloc_plan_ts.is_none() { + // Write alloc information + sql_execute!( + [ctx, @tx tx] + " + UPDATE + db_dynamic_servers.run_meta_nomad + SET + alloc_id = $2, + alloc_plan_ts = $3, + node_id = $4, + node_name = $5, + node_public_ipv4 = $6, + node_vlan_ipv4 = $7 + WHERE + run_id = $1 + ", + server_id, + &alloc_id, + now, + &nomad_node_id, + &nomad_node_name, + &nomad_node_public_ipv4, + &nomad_node_vlan_ipv4, + ) + .await?; + + // Save the ports to the db + for network in &run_networks { + tracing::info!(%server_id, mode = %network.mode, ip = %network.ip, "inserting network"); + sql_execute!( + [ctx, @tx tx] + " + INSERT INTO + db_dynamic_servers.run_networks ( + server_id, + mode, + ip + ) + VALUES + ($1, $2, $3) + ", + server_id, + &network.mode, + &network.ip, + ) + .await?; + } + + // Save the ports to the db + for port in &ports { + tracing::info!(%server_id, label = %port.label, source = port.source, target = port.target, ip = %port.ip, "inserting port"); + sql_execute!( + [ctx, @tx tx] + " + INSERT INTO + db_dynamic_servers.run_ports ( + server_id, + label, + source, + target, + ip + ) + VALUES + ($1, $2, $3, $4, $5) + ", + server_id, + &port.label, + port.source as i64, + port.target as i64, + &port.ip, + ) + .await?; + } + } + + // Update the run ports + let proxied_ports = sql_fetch_all!( + [ctx, ProxiedPort, @tx tx] + // " + // SELECT target_nomad_port_label, ingress_port, ingress_hostnames, proxy_protocol, ssl_domain_mode + // FROM db_dynamic_servers.run_proxied_ports + // WHERE server_id = $1 + // ", + " + SELECT + target_nomad_port_label, + ingress_port, + ingress_hostnames, + proxy_protocol, + ssl_domain_mode + FROM + db_dynamic_servers.server_proxied_ports + WHERE + server_id = $1 + ", + server_id, + ) + .await?; + tracing::info!(?proxied_ports, "fetched proxied ports"); + + // Validate ports match proxied ports + for proxied_port in &proxied_ports { + ensure!( + ports + .iter() + .any(|port| Some(&port.label) == proxied_port.target_nomad_port_label.as_ref()), + "no matching port with proxied target" + ); + } + + Ok(Some(DbOutput { + server_id, + datacenter_id: run_row.datacenter_id, + proxied_ports, + stop_ts: run_row.stop_ts, + })) +} diff --git a/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_update.rs 
b/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_update.rs new file mode 100644 index 000000000..87c2fb632 --- /dev/null +++ b/svc/pkg/ds/worker/src/workers/nomad_monitor_alloc_update.rs @@ -0,0 +1,228 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; +use serde::Deserialize; +use sqlx; + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct AllocationUpdated { + allocation: nomad_client::models::Allocation, +} + +#[derive(Debug, Copy, Clone)] +enum TaskState { + Pending, + Running, + Dead, +} + +#[worker(name = "ds-run-nomad-monitor-alloc-update")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let _crdb = ctx.crdb().await?; + + let AllocationUpdated { allocation: alloc } = serde_json::from_str(&ctx.payload_json)?; + let alloc_state_json = serde_json::to_value(&alloc)?; + + let alloc_id = unwrap_ref!(alloc.ID); + let eval_id = unwrap_ref!(alloc.eval_id, "alloc has no eval"); + let job_id = unwrap_ref!(alloc.job_id); + let client_status = unwrap_ref!(alloc.client_status); + let task_states = unwrap_ref!(alloc.task_states); + + if !util_job::is_nomad_job_run(job_id) { + tracing::info!(%job_id, "disregarding event"); + return Ok(()); + } + + // Get the main task by finding the task that is not the run cleanup task + let main_task = task_states + .iter() + .filter(|(k, _)| k.as_str() == util_job::RUN_MAIN_TASK_NAME) + .map(|(_, v)| v) + .next(); + let main_task = unwrap!(main_task, "could not find main task"); + let main_task_state_raw = unwrap_ref!(main_task.state); + + tracing::info!( + ?client_status, + ?alloc_id, + ?eval_id, + ?job_id, + ?main_task_state_raw, + main_task_events = ?main_task.events, + "alloc updated" + ); + + let main_task_state = match (main_task_state_raw.as_str(), client_status.as_str()) { + ("pending", _) => TaskState::Pending, + ("running", _) => TaskState::Running, + ("dead", _) | (_, "failed" | "lost") => TaskState::Dead, + _ => { + tracing::error!(?main_task_state_raw, ?client_status, "unknown task state"); + return Ok(()); + } + }; + + match main_task_state { + TaskState::Pending => { + tracing::info!("run pending"); + + let run_row = sql_fetch_optional!( + [ctx, (Uuid,)] + " + UPDATE db_job_state.run_meta_nomad + SET alloc_state = $2 + WHERE dispatched_job_id = $1 + RETURNING run_id + ", + job_id, + &alloc_state_json, + ) + .await?; + + if run_row.is_none() { + if ctx.req_dt() > util::duration::minutes(5) { + tracing::error!("discarding stale message"); + return Ok(()); + } else { + retry_bail!("run not found, may be race condition with insertion"); + } + }; + + Ok(()) + } + TaskState::Running => { + let run_row = sql_fetch_optional!( + [ctx, (Uuid, Option)] + " + WITH + select_run AS ( + SELECT runs.run_id, runs.start_ts + FROM db_job_state.run_meta_nomad + INNER JOIN db_job_state.runs ON runs.run_id = run_meta_nomad.run_id + WHERE dispatched_job_id = $1 + ), + _update_runs AS ( + UPDATE db_job_state.runs + SET start_ts = $2 + FROM select_run + WHERE + runs.run_id = select_run.run_id AND + runs.start_ts IS NULL + RETURNING 1 + ), + _update_run_meta_nomad AS ( + UPDATE db_job_state.run_meta_nomad + SET alloc_state = $3 + FROM select_run + WHERE run_meta_nomad.run_id = select_run.run_id + RETURNING 1 + ) + SELECT * FROM select_run + ", + job_id, + ctx.ts(), + &alloc_state_json, + ) + .await?; + + let Some((run_id, start_ts)) = run_row else { + if ctx.req_dt() > util::duration::minutes(5) { + tracing::error!("discarding stale message"); + return Ok(()); + } else { + retry_bail!("run not found, may 
be race condition with insertion"); + } + }; + + if start_ts.is_none() { + tracing::info!("run started"); + + msg!([ctx] job_run::msg::started(run_id) { + run_id: Some(run_id.into()), + }) + .await?; + + Ok(()) + } else { + tracing::info!("run already started"); + + Ok(()) + } + } + TaskState::Dead => { + let run_row = sql_fetch_optional!( + [ctx, (Uuid, Option)] + r#" + WITH + select_run AS ( + SELECT runs.run_id, runs.finish_ts + FROM db_job_state.run_meta_nomad + INNER JOIN db_job_state.runs ON runs.run_id = run_meta_nomad.run_id + WHERE dispatched_job_id = $1 + ), + _update_runs AS ( + UPDATE db_job_state.runs + SET + -- If the job stops immediately, the task state will never be "running" so we need to + -- make sure start_ts is set here as well + start_ts = COALESCE(start_ts, $2), + finish_ts = $2 + FROM select_run + WHERE + runs.run_id = select_run.run_id AND + runs.finish_ts IS NULL + RETURNING 1 + ), + _update_run_meta_nomad AS ( + UPDATE db_job_state.run_meta_nomad + SET alloc_state = $3 + FROM select_run + WHERE run_meta_nomad.run_id = select_run.run_id + RETURNING 1 + ) + SELECT * FROM select_run + "#, + job_id, + ctx.ts(), + &alloc_state_json, + ) + .await?; + + let Some((run_id, finish_ts)) = run_row else { + if ctx.req_dt() > util::duration::minutes(5) { + tracing::error!("discarding stale message"); + return Ok(()); + } else { + retry_bail!("run not found, may be race condition with insertion"); + } + }; + + if finish_ts.is_none() { + tracing::info!("run finished"); + + // Publish message + // + // It's fine if this is called multiple times. The operation is + // idempotent and it's better to ensure the job gets cleaned up + // rather than forgotten. + msg!([ctx] job_run::msg::cleanup(run_id) { + run_id: Some(run_id.into()), + ..Default::default() + }) + .await?; + msg!([ctx] job_run::msg::finished(run_id) { + run_id: Some(run_id.into()), + }) + .await?; + + Ok(()) + } else { + tracing::info!("run already finished"); + Ok(()) + } + } + } +} diff --git a/svc/pkg/ds/worker/src/workers/nomad_monitor_eval_update.rs b/svc/pkg/ds/worker/src/workers/nomad_monitor_eval_update.rs new file mode 100644 index 000000000..d64c4ae89 --- /dev/null +++ b/svc/pkg/ds/worker/src/workers/nomad_monitor_eval_update.rs @@ -0,0 +1,174 @@ +use self::sqlx; +use chirp_worker::prelude::*; +use proto::backend::pkg::*; +use serde::Deserialize; + +use crate::workers::NEW_NOMAD_CONFIG; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +struct PlanResult { + evaluation: nomad_client::models::Evaluation, +} + +#[derive(Debug, Copy, Clone)] +enum EvalStatus { + Failed, + Complete, +} + +#[derive(Debug, sqlx::FromRow)] +struct RunRow { + run_id: Uuid, + region_id: Uuid, + eval_plan_ts: Option, +} + +#[worker(name = "ds-run-nomad-monitor-eval-update")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let _crdb = ctx.crdb().await?; + + let payload_value = serde_json::from_str::(&ctx.payload_json)?; + let PlanResult { evaluation: eval } = serde_json::from_str::(&ctx.payload_json)?; + + let job_id = unwrap_ref!(eval.job_id, "eval has no job id"); + let eval_status_raw = unwrap_ref!(eval.status).as_str(); + + // We can't decode this with serde, so manually deserialize the response + let eval_value = unwrap!(payload_value.get("Evaluation")); + + if !util_job::is_nomad_job_run(job_id) { + tracing::info!(%job_id, "disregarding event"); + return Ok(()); + } + + // HACK: Serde isn't deserializing this correctly for some reason so + // we use raw JSON + // Filter out data we need 
from the event. Ignore events we don't care about + // before we touch the database. + let failed_tg_allocs = eval_value.get("FailedTGAllocs").and_then(|x| x.as_object()); + let eval_status = match (eval_status_raw, &failed_tg_allocs) { + ("complete", Some(failed_tg_allocs)) if !failed_tg_allocs.is_empty() => { + let failed_tg_allocs_str = + serde_json::to_string(&failed_tg_allocs).unwrap_or("?".to_string()); + tracing::warn!(%job_id, failed_tg_allocs = %failed_tg_allocs_str, "eval failed"); + + EvalStatus::Failed + } + ("complete", _) => EvalStatus::Complete, + _ => { + tracing::info!( + %job_id, + ?eval_status_raw, + ?failed_tg_allocs, + "ignoring status" + ); + return Ok(()); + } + }; + + // Fetch and update the run + let run_row = sql_fetch_optional!( + [ctx, RunRow] + " + WITH + select_run AS ( + SELECT runs.run_id, runs.region_id, run_meta_nomad.eval_plan_ts + FROM db_job_state.run_meta_nomad + INNER JOIN db_job_state.runs ON runs.run_id = run_meta_nomad.run_id + WHERE dispatched_job_id = $1 + ), + _update AS ( + UPDATE db_job_state.run_meta_nomad + SET eval_plan_ts = $2 + FROM select_run + WHERE + run_meta_nomad.run_id = select_run.run_id AND + run_meta_nomad.eval_plan_ts IS NULL + RETURNING 1 + ) + SELECT * FROM select_run + ", + job_id, + ctx.ts(), + ) + .await?; + + // Check if run found + let Some(run_row) = run_row else { + if ctx.req_dt() > util::duration::minutes(5) { + tracing::error!("discarding stale message"); + return Ok(()); + } else { + retry_bail!("run not found, may be race condition with insertion"); + } + }; + let run_id = run_row.run_id; + + if let Some(eval_plan_ts) = run_row.eval_plan_ts { + tracing::info!(?eval_plan_ts, "eval already planned"); + return Ok(()); + } + + tracing::info!(%job_id, %run_id, ?eval_status, "updated run"); + + match eval_status { + EvalStatus::Failed => { + tracing::info!(%run_id, "eval failed"); + + let error_code = job_run::msg::fail::ErrorCode::NomadEvalPlanFailed; + tracing::warn!(%run_id, ?error_code, "job run fail"); + msg!([ctx] job_run::msg::fail(run_id) { + run_id: Some(run_id.into()), + error_code: error_code as i32, + }) + .await?; + + // Get the region + let region_res = op!([ctx] region_get { + region_ids: vec![run_row.region_id.into()], + }) + .await?; + let region = unwrap!(region_res.regions.first()); + + // Stop the job from attempting to run on another node. This will + // be called in job-run-stop too, but we want to catch this earlier. 
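+ // An error here usually just means the job is already gone (e.g. a 404), so
+ // it is logged and treated as if the job stopped normally.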
+ match nomad_client::apis::jobs_api::delete_job( + &NEW_NOMAD_CONFIG, + job_id, + Some(®ion.nomad_region), + None, + None, + None, + Some(false), + None, + ) + .await + { + Ok(_) => tracing::info!("job stopped"), + Err(err) => { + tracing::warn!(?err, "error thrown while stopping job, probably a 404, will continue as if stopped normally"); + } + } + + // Cleanup the job + msg!([ctx] job_run::msg::stop(run_id) { + run_id: Some(run_id.into()), + ..Default::default() + }) + .await?; + } + EvalStatus::Complete => { + tracing::info!(%run_id, "eval complete"); + + msg!([ctx] job_run::msg::eval_complete(run_id) { + run_id: Some(run_id.into()), + }) + .await?; + } + } + + Ok(()) +} diff --git a/svc/pkg/ds/worker/tests/ds_nomad_monitor_alloc_plan.rs b/svc/pkg/ds/worker/tests/ds_nomad_monitor_alloc_plan.rs new file mode 100644 index 000000000..1b03565b6 --- /dev/null +++ b/svc/pkg/ds/worker/tests/ds_nomad_monitor_alloc_plan.rs @@ -0,0 +1,13 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn ds_nomad_monitor_alloc_plan(ctx: TestCtx) { + // msg!([ctx] ds::msg::ds_nomad_monitor_alloc_plan() { + + // }) + // .await + // .unwrap(); + + todo!(); +} diff --git a/svc/pkg/monolith/standalone/worker/Cargo.toml b/svc/pkg/monolith/standalone/worker/Cargo.toml index 047dd4ead..f8cd7f71f 100644 --- a/svc/pkg/monolith/standalone/worker/Cargo.toml +++ b/svc/pkg/monolith/standalone/worker/Cargo.toml @@ -24,6 +24,7 @@ cdn-worker = { path = "../../../cdn/worker" } cf-custom-hostname-worker = { path = "../../../cf-custom-hostname/worker" } cloud-worker = { path = "../../../cloud/worker" } cluster-worker = { path = "../../../cluster/worker" } +ds-worker = { path = "../../../ds/worker" } external-worker = { path = "../../../external/worker" } game-user-worker = { path = "../../../game-user/worker" } job-log-worker = { path = "../../../job-log/worker" } diff --git a/svc/pkg/monolith/standalone/worker/src/lib.rs b/svc/pkg/monolith/standalone/worker/src/lib.rs index 4cb67dfb0..884ffea5d 100644 --- a/svc/pkg/monolith/standalone/worker/src/lib.rs +++ b/svc/pkg/monolith/standalone/worker/src/lib.rs @@ -26,6 +26,7 @@ pub async fn run_from_env(pools: rivet_pools::Pools) -> GlobalResult<()> { cf_custom_hostname_worker, cloud_worker, cluster_worker, + ds_worker, external_worker, game_user_worker, job_log_worker,