diff --git a/.github/actions/pre-init/action.yaml b/.github/actions/pre-init/action.yaml index f6c862b08b..928944ca11 100644 --- a/.github/actions/pre-init/action.yaml +++ b/.github/actions/pre-init/action.yaml @@ -39,14 +39,6 @@ runs: echo 'AWS_SECRET_ACCESS_KEY=${{ inputs.SCCACHE_AWS_SECRET_ACCESS_KEY }}' >> $GITHUB_ENV echo 'AWS_ACCESS_KEY_ID=${{ inputs.SCCACHE_AWS_ACCESS_KEY_ID }}' >> $GITHUB_ENV - # Cache generated Bolt files in order to prevent needless rebuilding - - name: Bolt Cache - uses: actions/cache@v3 - with: - key: ${{ runner.os }}-bolt-gen - path: | - svc/pkg/region/ops/config-get/gen - # MARK: Nix - uses: cachix/install-nix-action@v22 with: diff --git a/.gitignore b/.gitignore index d9a7c93de1..15a5ca6a88 100644 --- a/.gitignore +++ b/.gitignore @@ -29,11 +29,13 @@ Bolt.local.toml !/secrets/README.md # Generated code +gen/hash.txt gen/build_script.sh gen/svc/ gen/tf/ gen/docker/ gen/k8s/ +svc/pkg/cluster/util/gen/hash.txt # Rust lib/**/Cargo.lock diff --git a/.rivet/config.yaml b/.rivet/config.yaml new file mode 100644 index 0000000000..848c691202 --- /dev/null +++ b/.rivet/config.yaml @@ -0,0 +1,6 @@ +cluster: + api_endpoint: https://api.eg.rivet.gg +telemetry: + disabled: false +tokens: + cloud: null diff --git a/CHANGELOG.md b/CHANGELOG.md index c45eb0e66a..9eb8fead8d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -91,6 +91,7 @@ and this project adheres to [Calendar Versioning](https://calver.org/). - **Bolt** `bolt.confirm_commands` to namespace to confirm before running commands on a namespace - `watch-requests` load test - `mm-sustain` load test +- **Infra** Automatic server provisioning system ([Read more](/docs/packages/cluster/SERVER_PROVISIONING.md)). ### Changed diff --git a/docs/packages/cluster/AUTOSCALING.md b/docs/packages/cluster/AUTOSCALING.md new file mode 100644 index 0000000000..f16787df8c --- /dev/null +++ b/docs/packages/cluster/AUTOSCALING.md @@ -0,0 +1,138 @@ +# Autoscaling + +The autoscaler service runs every 15 seconds. 
+ +## Why memory? + +The autoscaler uses CPU usage for GG nodes and memory usage for job nodes. This is because certain cloud providers like Linode do not provide an actual value for the speed of the CPU, but rather the number of cores. This is problematic because we use Nomad's API for determining the usage on any given node, and it returns its stats in MHz. + +## Hardware failover + +Before a job server is provisioned, we don't know for sure what its specs will be because of the hardware failover system in `cluster-server-provision`. In the autoscaling process, all servers that aren't provisioned yet are assumed to have the specs of the first hardware option in the list. + +### Failover has lower specs + +In the event that the hardware which ended up being provisioned has lower specs than the first hardware in the list, the autoscaler will calculate the error between how much was expected and how much was actually provisioned. This error number corresponds to how many more servers might be needed to reach the desired server count. + +Here is an example of the process in action: + +| time since start | desired count | expected total memory | actual total memory | +| ---------------- | ------------- | --------------------- | ------------------- | +| 0s | 2 | 2000MB | 0MB | + +We start with 0 servers provisioned, and 2 desired. Our config consists of two hardware options, the first having 1000MB of memory and the second having 500MB of memory. With our failover system, if the first one fails to provision, the second will be provisioned. + +| time since start | desired count | expected total memory | actual total memory | +| ---------------- | ------------- | --------------------- | ------------------- | +| 0s | 2 | 2000MB | 0MB | +| 15s | 3 | 2000MB | 1000MB | + +After the first iteration, the autoscaler provisioned 2 servers which both ended up failing over and only providing a total of 1000MB of memory. 
The autoscaler then proceeds to calculate the error like so: + +```rust +ceil((expected - actual) / expected_memory_per_server) + +ceil((2000 - 1000) / 1000) = 1 +``` + +So an extra server was added to the desired count. + +Now, if the next server to be provisioned ends up having 1000MB like it should, we will end up having the original amount of desired memory. + +| time since start | desired count | expected total memory | actual total memory | +| ---------------- | ------------- | --------------------- | ------------------- | +| 0s | 2 | 2000MB | 0MB | +| 15s | 3 | 2000MB | 1000MB | +| 30s | 3 | 2000MB | 2000MB | + +The error calculation would now be: + +```rust +ceil((3000 - 2000) / 1000) = 1 +``` + +So the error count stays the same and we stay at 3 desired servers. + +However, if the server provisioned was again a failover server, we would have this scenario: + +| time since start | desired count | expected total memory | actual total memory | +| ---------------- | ------------- | --------------------- | ------------------- | +| 0s | 2 | 2000MB | 0MB | +| 15s | 3 | 2000MB | 1000MB | +| 30s | 4 | 2000MB | 1500MB | + +We end up with two extra servers to provision atop our original 2. + +```rust +ceil((3000 - 1500) / 1000) = 2 +``` + +| time since start | desired count | expected total memory | actual total memory | +| ---------------- | ------------- | --------------------- | ------------------- | +| 0s | 2 | 2000MB | 0MB | +| 15s | 3 | 2000MB | 1000MB | +| 30s | 4 | 2000MB | 1500MB | +| 45s | 4 | 2000MB | 2000MB | + +And finally we reach the desired capacity. + +### Failover has higher specs + +In the event that the failover hardware has higher specs than the desired amount, there is no error system that reduces the desired count to account for this difference. This is because there is no direct correlation between desired count and the hardware being provisioned and destroyed. 
Thus, if hardware with higher than expected specs is provisioned, that extra space will not be taken into account. + +If it were taken into account in a similar error system as failover with lower specs, it would look like this: + +| time since start | desired count | expected total memory | actual total memory | +| ---------------- | ------------- | --------------------- | ------------------- | +| 0s | 1 | 1000MB | 2000MB | + +Error: + +```rust +ceil((expected - actual) / expected_memory_per_server) + +ceil((1000 - 2000) / 1000) = -1 +``` + +The original desired count + error would be 0, destroying the only server and causing the capacity to drop to 0. If the higher-spec'd failover kept getting provisioned, this would end up in a loop. + +## Job server autoscaling + +The Nomad topology for each job server in a datacenter is fetched and the memory is aggregated. This value is then divided by the expected memory capacity (the capacity of the first hardware in the config), which determines the minimum expected server count required to accommodate the current usage. Then, we add the error value (discussed above) and the margin value which is configured in the namespace config. + +### Autoscaling via machine learning + +Coming soon + +## GG server autoscaling + +Because we do not need to be preemptive with GG servers, the autoscaling is a bit simpler. + +- If the current CPU usage is more than 20% under the total, add a server. +- If the current CPU usage is less than 130% under the total, remove a server. 
+ +Examples: + +```rust +// 3 servers +total_cpu = 300 +cpu_usage = 285 + +// result: add a server +``` + +```rust +// 1 server +total_cpu = 100 +cpu_usage = 70 + +// result: do nothing +``` + +```rust +// 4 servers +total_cpu = 400 +cpu_usage = 250 + +// result: remove a server +``` diff --git a/docs/packages/cluster/SERVER_PROVISIONING.md b/docs/packages/cluster/SERVER_PROVISIONING.md new file mode 100644 index 0000000000..161d49b7e5 --- /dev/null +++ b/docs/packages/cluster/SERVER_PROVISIONING.md @@ -0,0 +1,43 @@ +# Automatic Server Provisioning + +Server provisioning handles everything responsible for getting servers running and installed for game lobbies to run on. Server provisioning occurs in the `cluster` package, and servers are automatically brought up and down to desired levels via `cluster-datacenter-scale`. + +## Motivation + +Server provisioning was created to allow for quick and stateful configuration of the game server topology on Rivet. This system was also written with the intention to allow clients to choose their own hardware options and server providers. + +In the future, an autoscaling system will be hooked up to the provisioning system to allow the system to scale up to meet spikes in demand, and scale down when load is decreased to save on costs. + +## Basic structure + +There are currently three types of servers that work together to host game lobbies: + +- ### ATS + + ATS servers host game images via Apache Traffic Server. The caching feature provided by ATS, along with the ATS node being in the same datacenter as the Job node, allows for very quick lobby start times. + +- ### Job + + Job servers run Nomad which handles the orchestration of the game lobbies themselves. + +- ### GG + + GameGuard nodes serve as a proxy for all incoming game connections and provide DoS protection. + +## Why are servers in the same availability zone (aka datacenter or region) + +Servers are placed in the same region for two reasons: + +1. 
### VLAN + Network Constraints + + Servers rely on VLAN to communicate between each other. + +2. ### Latency + + Having all of the required components to run a Job server on the edge (i.e. in the same datacenter) allows for very quick lobby start times. + +## Prior art + +- https://console.aiven.io/project/rivet-3143/new-service?serviceType=pg +- https://karpenter.sh/docs/concepts/nodepools/ +- Nomad autoscaler diff --git a/docs/packages/cluster/TLS_AND_DNS.md b/docs/packages/cluster/TLS_AND_DNS.md new file mode 100644 index 0000000000..2ec9c11c65 --- /dev/null +++ b/docs/packages/cluster/TLS_AND_DNS.md @@ -0,0 +1,66 @@ +# [rivet.run](http://rivet.run) DNS & TLS Configuration + +## Moving parts + +#### TLS Cert + +- Can only have 1 wildcard + - i.e. `*.lobby.{dc_id}.rivet.run` +- Takes a long time to issue +- Prone to Let's Encrypt downtime and [rate limits](https://letsencrypt.org/docs/rate-limits/) + - Nathan requested a rate limit increase for when this is needed + +#### DNS record + +- Must point to the IP of the datacenter we need + - i.e. `*.lobby.{dc_id}.rivet.run` goes to the GG Node for the given datacenter + - `*.rivet.run` will not work as a static DNS record because you can’t point it at a single datacenter + +#### GG host resolution + +- When a request hits the GG server for HTTP(S) or TCP+TLS requests, we need to be able to resolve the lobby to send it to +- This is why the lobby ID needs to be in the DNS name + +#### GG autoscaling + +- The IPs that the DNS records point to change frequently as GG nodes scale up and down + +## Design + +#### DNS records + +Dynamically create a DNS record for each GG node formatted like `*.lobby.{dc_id}.rivet.run`. 
Example: + +```bash +A *.lobby.51f3d45e-693f-4470-b86d-66980edd87ec.rivet.run 1.2.3.4 # DC foo, GG node 1 +A *.lobby.51f3d45e-693f-4470-b86d-66980edd87ec.rivet.run 5.6.7.8 # DC foo, GG node 2 +A *.lobby.66de8a2e-b7f0-4d8e-9f2e-8bd22b735a58.rivet.run 9.10.11.12 # DC bar, GG node 1 +``` + +The IPs of these records change as the GG nodes scale up and down, but the origin stays the same. + +#### TLS certs + +Each datacenter needs a TLS cert. For the example above, we need a TLS cert for `*.lobby.51f3d45e-693f-4470-b86d-66980edd87ec.rivet.run` and `*.lobby.66de8a2e-b7f0-4d8e-9f2e-8bd22b735a58.rivet.run`. + +## TLS + +#### TLS cert provider + +Currently we use Let's Encrypt as our TLS certificate provider. + +Alternatives: + +- ZeroSSL + +#### TLS cert refreshing + +Right now, the TLS certs are issued in the Terraform plan. Eventually, TLS certs should renew on their own automatically. + +## TLS Alternatives + +#### Use `*.rivet.run` TLS cert with custom DNS server + +Create an `NS` record for `*.rivet.run` pointed at our custom DNS server. + +We can use a single static TLS cert. diff --git a/fern/definition/admin/cluster/__package__.yml b/fern/definition/admin/cluster/__package__.yml new file mode 100644 index 0000000000..28f2c92ba1 --- /dev/null +++ b/fern/definition/admin/cluster/__package__.yml @@ -0,0 +1,23 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +imports: + localCommons: ../common.yml + +service: + auth: true + base-path: /cluster + endpoints: + getServerIps: + path: /server_ips + method: GET + request: + name: GetServerIpsRequest + query-parameters: + server_id: optional + pool: optional + response: GetServerIpsResponse + +types: + GetServerIpsResponse: + properties: + ips: list diff --git a/fern/definition/admin/common.yml b/fern/definition/admin/common.yml new file mode 100644 index 0000000000..435e5173b6 --- /dev/null +++ b/fern/definition/admin/common.yml @@ -0,0 +1,7 @@ +# 
yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json +types: + PoolType: + enum: + - job + - gg + - ats diff --git a/fern/definition/cloud/common.yml b/fern/definition/cloud/common.yml index 2d029927d6..b1c0303677 100644 --- a/fern/definition/cloud/common.yml +++ b/fern/definition/cloud/common.yml @@ -328,9 +328,6 @@ types: provider: docs: The server provider of this region. type: string - universal_region: - docs: A universal region label given to this region. - type: UniversalRegion provider_display_name: docs: Represent a resource's readable display name. type: string @@ -434,37 +431,6 @@ types: USD, 1,000,000,000,000 = $1.00). type: integer - UniversalRegion: - enum: - - unknown - - local - - amsterdam - - atlanta - - bangalore - - dallas - - frankfurt - - london - - mumbai - - newark - - new_york_city - - san_francisco - - singapore - - sydney - - tokyo - - toronto - - washington_dc - - chicago - - paris - - seattle - - sao_paulo - - stockholm - - chennai - - osaka - - milan - - miami - - jakarta - - los_angeles - NamespaceFull: docs: A full namespace. properties: diff --git a/fern/definition/cloud/games/versions.yml b/fern/definition/cloud/games/versions.yml index 943e6a92ef..fc4dcd1c86 100644 --- a/fern/definition/cloud/games/versions.yml +++ b/fern/definition/cloud/games/versions.yml @@ -23,7 +23,9 @@ service: reserveVersionName: path: /reserve-name method: POST - docs: Reserves a display name for the next version. Used to generate a monotomically increasing build number without causing a race condition with multiple versions getting created at the same time. + docs: >- + Reserves a display name for the next version. Used to generate a monotomically increasing build + number without causing a race condition with multiple versions getting created at the same time. 
response: ReserveVersionNameResponse validateGameVersion: diff --git a/fern/definition/cloud/version/matchmaker/common.yml b/fern/definition/cloud/version/matchmaker/common.yml index 14cc194e70..c4439ee74c 100644 --- a/fern/definition/cloud/version/matchmaker/common.yml +++ b/fern/definition/cloud/version/matchmaker/common.yml @@ -16,6 +16,10 @@ types: Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. + + ### Related + - cloud.version.matchmaker.PortProtocol + - cloud.version.matchmaker.ProxyKind properties: min: docs: Unsigned 32 bit integer. @@ -26,11 +30,23 @@ types: PortProtocol: docs: >- - Type of network traffic to allow access to this port. - - Configuring `https` or `tcp_tls` will provide TLS termination for you via Game Guard. - - `https` and `tcp_tls` must have `proxy_kind` set to `game_guard`. + Signifies the protocol of the port. + + Note that when proxying through GameGuard (via `ProxyKind`), the port number returned by `/find`, + `/join`, and `/create` will not be the same as the port number configured in the config: + + - With HTTP, the port will always be 80. The hostname of the port correctly routes the incoming + connection to the correct port being used by the game server. + - With HTTPS, the port will always be 443. The hostname of the port correctly routes the incoming + connection to the correct port being used by the game server. + - Using TCP/UDP, the port will be a random number between 26000 and 31999. This gets automatically + routed to the correct port being used by the game server. 
+ + ### Related + - cloud.version.matchmaker.GameModeRuntimeDockerPort + - cloud.version.matchmaker.ProxyKind + - /docs/dynamic-servers/concepts/game-guard + - matchmaker.lobbies.find enum: - http - https @@ -45,6 +61,10 @@ types: `game_guard` (default) proxies all traffic through [Game Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) to mitigate DDoS attacks and provide TLS termination. `none` sends traffic directly to the game server. If configured, `network_mode` must equal `host`. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. + + ### Related + - /docs/dynamic-servers/concepts/game-guard + - cloud.version.matchmaker.PortProtocol enum: - none - game_guard diff --git a/fern/definition/cloud/version/matchmaker/game_mode.yml b/fern/definition/cloud/version/matchmaker/game_mode.yml index c00fcd602d..506b98d9e9 100644 --- a/fern/definition/cloud/version/matchmaker/game_mode.yml +++ b/fern/definition/cloud/version/matchmaker/game_mode.yml @@ -61,10 +61,17 @@ types: ports: optional> GameModeRuntimeDockerPort: - docs: A docker port. + docs: | + Port config for a docker build. properties: port: - docs: The port number to connect to. + docs: >- + The port number to connect to. 
+ + ### Related + - cloud.version.matchmaker.PortProtocol + - cloud.version.matchmaker.ProxyKind + type: optional port_range: optional protocol: optional diff --git a/fern/definition/provision/servers/__package__.yml b/fern/definition/provision/servers/__package__.yml new file mode 100644 index 0000000000..b7e0b3a5b5 --- /dev/null +++ b/fern/definition/provision/servers/__package__.yml @@ -0,0 +1,22 @@ +# yaml-language-server: $schema=https://raw.githubusercontent.com/fern-api/fern/main/fern.schema.json + +service: + auth: true + base-path: /servers + endpoints: + getServerInfo: + path: /{ip}/info + method: GET + path-parameters: + ip: + type: string + response: GetServerInfoResponse + +types: + GetServerInfoResponse: + properties: + name: string + server_id: uuid + datacenter_id: uuid + cluster_id: uuid + vlan_ip: string diff --git a/infra/default-builds/dockerfiles/test-mm-lobby-ready/Dockerfile b/infra/default-builds/dockerfiles/test-mm-lobby-ready/Dockerfile index dfb536f6be..8cffc67b8c 100644 --- a/infra/default-builds/dockerfiles/test-mm-lobby-ready/Dockerfile +++ b/infra/default-builds/dockerfiles/test-mm-lobby-ready/Dockerfile @@ -7,4 +7,3 @@ RUN chmod +x ./run.sh RUN adduser -D app USER app CMD ["sh", "-ecx", "/app/run.sh"] - diff --git a/infra/default-builds/outputs/game-multiplayer-tag.txt b/infra/default-builds/outputs/game-multiplayer-tag.txt index 2a93f755b2..411dbdb3e6 100644 --- a/infra/default-builds/outputs/game-multiplayer-tag.txt +++ b/infra/default-builds/outputs/game-multiplayer-tag.txt @@ -1 +1 @@ -game-multiplayer:1705695278 \ No newline at end of file +game-multiplayer:1705540701 diff --git a/infra/default-builds/outputs/game-multiplayer.tar b/infra/default-builds/outputs/game-multiplayer.tar index 5cd33b25d1..1845429fb8 100644 --- a/infra/default-builds/outputs/game-multiplayer.tar +++ b/infra/default-builds/outputs/game-multiplayer.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid 
sha256:ace37500dd4f03ec2576a9abc27be297a6bc28724ff17355851222a467b3b060 -size 8397824 +oid sha256:2a8fdbf35e440dbed49526582f7cc6a9dd0281aac349bcf649763d98cddfe341 +size 8382464 diff --git a/infra/default-builds/outputs/test-fail-immediately-tag.txt b/infra/default-builds/outputs/test-fail-immediately-tag.txt index 7b4ca26934..66c8caee82 100644 --- a/infra/default-builds/outputs/test-fail-immediately-tag.txt +++ b/infra/default-builds/outputs/test-fail-immediately-tag.txt @@ -1 +1 @@ -test-fail-immediately:1705695278 \ No newline at end of file +test-fail-immediately:1705540701 diff --git a/infra/default-builds/outputs/test-fail-immediately.tar b/infra/default-builds/outputs/test-fail-immediately.tar index 2b92971bab..08421f520a 100644 --- a/infra/default-builds/outputs/test-fail-immediately.tar +++ b/infra/default-builds/outputs/test-fail-immediately.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:cb926f34e1391b22011ed2b7abd37ebf8237d5282248be3608c11340c98480fe -size 5918720 +oid sha256:99156a8a46d8cfbef05574d5e654862bb1e0f36b55ce8a3f66a3945ab767cd63 +size 5902848 diff --git a/infra/default-builds/outputs/test-hang-indefinitely-tag.txt b/infra/default-builds/outputs/test-hang-indefinitely-tag.txt index dd7c407285..33beaa94f0 100644 --- a/infra/default-builds/outputs/test-hang-indefinitely-tag.txt +++ b/infra/default-builds/outputs/test-hang-indefinitely-tag.txt @@ -1 +1 @@ -test-hang-indefinitely:1705695278 \ No newline at end of file +test-hang-indefinitely:1705540701 diff --git a/infra/default-builds/outputs/test-hang-indefinitely.tar b/infra/default-builds/outputs/test-hang-indefinitely.tar index af3cf3311f..c9ce4c940b 100644 --- a/infra/default-builds/outputs/test-hang-indefinitely.tar +++ b/infra/default-builds/outputs/test-hang-indefinitely.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:0ac031db6075defa89fc9ae8168e2324ad9061156ea39a48b35ac29789d5ebd0 -size 5918720 +oid 
sha256:aed9ddfa887f58311fc3da0e018628de5116e4b9fc495f6adeb158be6e18e74a +size 5902848 diff --git a/infra/default-builds/outputs/test-mm-lobby-echo-tag.txt b/infra/default-builds/outputs/test-mm-lobby-echo-tag.txt index 44f7e248a7..000fccf7a5 100644 --- a/infra/default-builds/outputs/test-mm-lobby-echo-tag.txt +++ b/infra/default-builds/outputs/test-mm-lobby-echo-tag.txt @@ -1 +1 @@ -test-mm-lobby-echo:1705695278 \ No newline at end of file +test-mm-lobby-echo:1705540701 diff --git a/infra/default-builds/outputs/test-mm-lobby-echo.tar b/infra/default-builds/outputs/test-mm-lobby-echo.tar index 7f7a9fcac8..2f8bd60c48 100644 --- a/infra/default-builds/outputs/test-mm-lobby-echo.tar +++ b/infra/default-builds/outputs/test-mm-lobby-echo.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:6021e0382d85a2e51cf6b6ce7fb275a00e9a0434594fbd4d7c83a1c67b24c601 +oid sha256:07d934f006305d1a5748703eb542ba776b843accae95092afed97f22b3bc48b5 size 17920000 diff --git a/infra/default-builds/outputs/test-mm-lobby-ready-tag.txt b/infra/default-builds/outputs/test-mm-lobby-ready-tag.txt index 3161839a69..93fb281691 100644 --- a/infra/default-builds/outputs/test-mm-lobby-ready-tag.txt +++ b/infra/default-builds/outputs/test-mm-lobby-ready-tag.txt @@ -1 +1 @@ -test-mm-lobby-ready:1705695278 \ No newline at end of file +test-mm-lobby-ready:1705600876 diff --git a/infra/default-builds/outputs/test-mm-lobby-ready.tar b/infra/default-builds/outputs/test-mm-lobby-ready.tar index 4171ef5e21..32efb3bb5a 100644 --- a/infra/default-builds/outputs/test-mm-lobby-ready.tar +++ b/infra/default-builds/outputs/test-mm-lobby-ready.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:e8f9ff0dcd26c58052b73fa0aa821b8cb6f6469c40515ffe15cb6348a5e343ff +oid sha256:3cf604c60fe52c4eb829a721fe3b3b1e46c0df46ffa853235867d0e3569843a0 size 15896576 diff --git a/infra/default-builds/outputs/test-mm-player-connect-tag.txt 
b/infra/default-builds/outputs/test-mm-player-connect-tag.txt index 7b62e48fb4..7bcc60f2e0 100644 --- a/infra/default-builds/outputs/test-mm-player-connect-tag.txt +++ b/infra/default-builds/outputs/test-mm-player-connect-tag.txt @@ -1 +1 @@ -test-mm-player-connect:1705695278 \ No newline at end of file +test-mm-player-connect:1705540701 diff --git a/infra/default-builds/outputs/test-mm-player-connect.tar b/infra/default-builds/outputs/test-mm-player-connect.tar index 7e455796df..dfda3060ed 100644 --- a/infra/default-builds/outputs/test-mm-player-connect.tar +++ b/infra/default-builds/outputs/test-mm-player-connect.tar @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:f7c685583cbca4ace1a64741cf58079bfc4e32efd2c4bae9a4d031399c53c2a6 -size 272826368 +oid sha256:7a2b83b4d2228e6640ffa30a0c3894df2e415502ae18bd0e11f6b014b42084e7 +size 272799232 diff --git a/infra/tf/dns/dns.tf b/infra/tf/dns/dns.tf index bd41d3126e..649cc3dba5 100644 --- a/infra/tf/dns/dns.tf +++ b/infra/tf/dns/dns.tf @@ -4,15 +4,6 @@ locals { cdn = local.cloudflare_zone_id_cdn job = local.cloudflare_zone_id_job } - - # Add fake local server if developing locally. - servers = var.deploy_method_local ? merge(var.servers, { - "${var.namespace}-local" = { - region_id = "local" - pool_id = "local" - name = "${var.namespace}-local" - } - }) : var.servers } locals { @@ -46,29 +37,6 @@ locals { }, ], - # Job. Matchmaker lobbies will point CNAME record at this. - [ - for server_id, server in local.servers: - { - zone_id = local.cloudflare_zone_id_job - name = "*.lobby.${server.region_id}.${var.domain_job}" - server = server - proxied = false - } - if server.pool_id == "gg" - ], - # Lobby ID in path instead of domain - [ - for server_id, server in local.servers: - { - zone_id = local.cloudflare_zone_id_job - name = "lobby.${server.region_id}.${var.domain_job}" - server = server - proxied = false - } - if server.pool_id == "gg" - ], - # Deprecated var.dns_deprecated_subdomains ? 
[{ zone_id = local.cloudflare_zone_id_main @@ -81,14 +49,14 @@ locals { resource "cloudflare_record" "main" { for_each = { for record in local.records: - "${record.zone_id}:${record.name}:${try(record.server.name, "core")}" => record + "${record.zone_id}:${record.name}:core" => record } zone_id = each.value.zone_id name = each.value.name - # Use local node's public IP if in local region. Otherwise, look up server's IP. - value = try(data.terraform_remote_state.pools.outputs.servers[each.value.server.name].public_ipv4, data.terraform_remote_state.k8s_infra.outputs.traefik_external_ip) - type = (can(each.value.server) || var.deploy_method_local) ? "A" : "CNAME" + # Use local node's public IP if in local region + value = data.terraform_remote_state.k8s_infra.outputs.traefik_external_ip + type = var.deploy_method_local ? "A" : "CNAME" # TODO: Increase the unproxied TTL once we have proper floating IP support on all providers ttl = each.value.proxied ? 1 : 60 # 1 = automatic proxied = each.value.proxied diff --git a/infra/tf/dns/vars.tf b/infra/tf/dns/vars.tf index 4abab2d4b8..126f421d2a 100644 --- a/infra/tf/dns/vars.tf +++ b/infra/tf/dns/vars.tf @@ -45,12 +45,3 @@ variable "extra_dns" { variable "cloudflare_account_id" { type = string } - -# MARK: Servers -variable "servers" { - type = map(object({ - region_id = string - pool_id = string - name = string - })) -} diff --git a/infra/tf/k8s_infra/vars.tf b/infra/tf/k8s_infra/vars.tf index 17a8821c84..ae50b96c55 100644 --- a/infra/tf/k8s_infra/vars.tf +++ b/infra/tf/k8s_infra/vars.tf @@ -93,11 +93,6 @@ variable "redis_dbs" { })) } -# MARK: Regions -variable "regions" { - type = map(any) -} - # MARK: K8s variable "kubeconfig_path" { type = string diff --git a/infra/tf/pools/main.tf b/infra/tf/pools/main.tf index 70bce0603d..0196480620 100644 --- a/infra/tf/pools/main.tf +++ b/infra/tf/pools/main.tf @@ -11,7 +11,7 @@ module "secrets" { source = "../modules/secrets" keys = [ - "linode/terraform/token", + "linode/token", 
"ssh/server/private_key_openssh", ] } diff --git a/infra/tf/pools/providers.tf b/infra/tf/pools/providers.tf index 39c28ce1fa..0c6d7aa3fc 100644 --- a/infra/tf/pools/providers.tf +++ b/infra/tf/pools/providers.tf @@ -1,3 +1,3 @@ provider "linode" { - token = module.secrets.values["linode/terraform/token"] + token = module.secrets.values["linode/token"] } diff --git a/infra/tf/pools/vars.tf b/infra/tf/pools/vars.tf index e3d0e665ac..783beb0617 100644 --- a/infra/tf/pools/vars.tf +++ b/infra/tf/pools/vars.tf @@ -10,55 +10,7 @@ variable "deploy_method_cluster" { type = bool } -# MARK: Regions -variable "primary_region" { - type = string -} - -variable "regions" { - type = map(object({ - id = string - provider = string - provider_region = string - vlan = object({ - address = string - prefix_len = number - }) - })) -} - -# MARK: Pools -variable "pools" { - type = map(object({ - firewall_inbound = list(object({ - label = string - ports = string - protocol = string - inbound_ipv4_cidr = list(string) - inbound_ipv6_cidr = list(string) - })) - })) -} - -# MARK: Servers -variable "servers" { - type = map(object({ - region_id = string - pool_id = string - version_id = string - index = number - name = string - size = string - vlan_ip = string - volumes = map(object({ - size = number - })) - tags = list(string) - })) -} - variable "server_install_scripts" { type = map(string) sensitive = true } - diff --git a/infra/tf/s3_minio/buckets.tf b/infra/tf/s3_minio/buckets.tf index 6f6bd27a8b..eca44b2b2b 100644 --- a/infra/tf/s3_minio/buckets.tf +++ b/infra/tf/s3_minio/buckets.tf @@ -4,7 +4,7 @@ resource "null_resource" "check_minio" { command = < { + tracing::debug!("matched"); + self.path_segments.pop(); true } diff --git a/lib/api-helper/macros/src/lib.rs b/lib/api-helper/macros/src/lib.rs index 618f18aa85..e0a96e5475 100644 --- a/lib/api-helper/macros/src/lib.rs +++ b/lib/api-helper/macros/src/lib.rs @@ -210,7 +210,6 @@ impl EndpointRouter { use std::str::FromStr; use 
api_helper::macro_util::{self, __AsyncOption}; - // Create path segments list if !router_config.try_prefix() { return Ok(None); } diff --git a/lib/bolt/cli/src/commands/config.rs b/lib/bolt/cli/src/commands/config.rs index 60c33661f3..c4e35f6b53 100644 --- a/lib/bolt/cli/src/commands/config.rs +++ b/lib/bolt/cli/src/commands/config.rs @@ -24,9 +24,6 @@ pub enum SubCommand { #[clap(index = 1)] namespace: String, }, - /// Adds missing regions from supported cloud providers to default_regions.toml. - #[clap(hide(true))] - GenerateDefaultRegions, ServiceDependencies { #[clap(index = 1)] svc_name: String, @@ -75,7 +72,6 @@ impl SubCommand { Self::SetNamespace { namespace } => { tasks::config::set_namespace(&namespace).await?; } - Self::GenerateDefaultRegions => tasks::config::generate_default_regions().await?, Self::ServiceDependencies { svc_name, recursive, diff --git a/lib/bolt/cli/src/commands/create.rs b/lib/bolt/cli/src/commands/create.rs index fce0f72945..97511da391 100644 --- a/lib/bolt/cli/src/commands/create.rs +++ b/lib/bolt/cli/src/commands/create.rs @@ -121,6 +121,16 @@ impl SubCommand { ), }; + assert!( + !pkg_name.contains("_"), + "package name should not contain underscores, use dashes" + ); + + assert!( + !service_name.contains("_"), + "service name should not contain underscores, use dashes" + ); + tasks::template::generate( &mut ctx, tasks::template::TemplateOpts { diff --git a/lib/bolt/cli/src/commands/ssh.rs b/lib/bolt/cli/src/commands/ssh.rs index 0ec1344600..690ae8b283 100644 --- a/lib/bolt/cli/src/commands/ssh.rs +++ b/lib/bolt/cli/src/commands/ssh.rs @@ -18,17 +18,15 @@ pub enum SubCommand { #[clap(long)] ssh_key: Option, }, - Name { + Id { #[clap(index = 1)] - name: String, + server_id: String, #[clap(index = 2)] command: Option, }, Pool { #[clap(index = 1)] pool: String, - #[clap(long, short = 'r')] - region: Option, #[clap(index = 2)] command: Option, #[clap(short = 'a', long)] @@ -55,33 +53,17 @@ impl SubCommand { ) .await?; } - Self::Name { 
name, command } => { - bolt_core::tasks::ssh::name(&ctx, &name, command.as_ref().map(String::as_str)) + Self::Id { server_id, command } => { + bolt_core::tasks::ssh::id(&ctx, &server_id, command.as_ref().map(String::as_str)) .await?; } - Self::Pool { - pool, - region, - command, - all, - } => { + Self::Pool { pool, command, all } => { if all { let command = command.context("must provide command with --all")?; - bolt_core::tasks::ssh::pool_all( - &ctx, - &pool, - region.as_ref().map(String::as_str), - &command, - ) - .await?; + bolt_core::tasks::ssh::pool_all(&ctx, &pool, &command).await?; } else { - bolt_core::tasks::ssh::pool( - &ctx, - &pool, - region.as_ref().map(String::as_str), - command.as_ref().map(String::as_str), - ) - .await?; + bolt_core::tasks::ssh::pool(&ctx, &pool, command.as_ref().map(String::as_str)) + .await?; } } } diff --git a/lib/bolt/config/src/ns.rs b/lib/bolt/config/src/ns.rs index 8c060377f1..678531ace2 100644 --- a/lib/bolt/config/src/ns.rs +++ b/lib/bolt/config/src/ns.rs @@ -12,10 +12,6 @@ pub struct Namespace { pub cluster: Cluster, #[serde(default)] pub secrets: Secrets, - #[serde(default = "default_regions")] - pub regions: HashMap, - #[serde(default)] - pub pools: Vec, #[serde(default)] pub terraform: Terraform, pub dns: Option, @@ -106,42 +102,6 @@ pub struct _1Password { pub secrets_path: String, } -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(deny_unknown_fields)] -pub struct Region { - #[serde(default)] - pub primary: bool, - pub id: String, - pub provider: String, - pub provider_region: String, - pub netnum: usize, -} - -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(deny_unknown_fields)] -pub struct Pool { - pub pool: String, - pub version: String, - pub region: String, - pub count: usize, - pub size: String, - #[serde(default)] - pub volumes: HashMap, -} - -#[derive(Serialize, Deserialize, Clone, Debug)] -#[serde(deny_unknown_fields)] -pub struct Volume { - pub size: usize, -} - -#[derive(Serialize, 
Deserialize, Clone, Debug)] -#[serde(deny_unknown_fields)] -pub enum ProviderKind { - #[serde(rename = "linode")] - Linode {}, -} - #[derive(Serialize, Deserialize, Clone, Debug, Default)] #[serde(deny_unknown_fields)] pub struct Terraform { @@ -184,7 +144,7 @@ pub struct DnsDomains { /// - api.{domain.main} pub main: String, /// Will create DNS records for: - /// - *.lobby.{region}.{domain.job} + /// - *.lobby.{region_id}.{domain.job} /// /// Can be the identical to `domain.main`. pub job: String, @@ -549,7 +509,7 @@ pub struct Rivet { #[serde(default)] pub upload: Upload, #[serde(default)] - pub dynamic_servers: DynamicServers, + pub dynamic_servers: Option, #[serde(default)] pub cdn: Cdn, #[serde(default)] @@ -632,11 +592,16 @@ pub struct Upload { pub nsfw_error_verbose: bool, } -#[derive(Serialize, Deserialize, Clone, Debug, Default)] +#[derive(Serialize, Deserialize, Clone, Debug)] #[serde(deny_unknown_fields)] pub struct DynamicServers { + pub cluster: DynamicServersCluster, + /// Whether or not to send a taint message in the next cluster update. #[serde(default)] - pub build_delivery_method: DynamicServersBuildDeliveryMethod, + pub taint: bool, + /// How many empty job servers to have at all times. Used in the simple provisioning algorithm. 
+ #[serde(default = "default_job_server_provision_margin")] + pub job_server_provision_margin: u32, } #[derive(Serialize, Deserialize, Clone, Debug, Default, strum_macros::Display)] @@ -650,6 +615,60 @@ pub enum DynamicServersBuildDeliveryMethod { S3Direct, } +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(deny_unknown_fields)] +pub struct DynamicServersCluster { + name_id: String, + #[serde(default)] + pub datacenters: HashMap, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(deny_unknown_fields)] +pub struct DynamicServersDatacenter { + pub datacenter_id: Uuid, + pub display_name: String, + pub provider: DynamicServersProvider, + pub provider_datacenter_name: String, + #[serde(default)] + pub build_delivery_method: DynamicServersBuildDeliveryMethod, + /// Nomad drain time in seconds. + pub drain_timeout: u32, + + #[serde(default)] + pub pools: HashMap, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +pub enum DynamicServersProvider { + #[serde(rename = "linode")] + Linode, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(deny_unknown_fields)] +pub struct DynamicServersDatacenterPool { + pub hardware: Vec, + pub desired_count: u32, + pub max_count: u32, +} + +#[derive(Serialize, Deserialize, Clone, Debug)] +#[serde(deny_unknown_fields)] +pub struct DynamicServersDatacenterHardware { + pub name: String, +} + +#[derive(Serialize, Deserialize, Clone, Debug, PartialEq, Eq, Hash)] +pub enum DynamicServersDatacenterPoolType { + #[serde(rename = "job")] + Job, + #[serde(rename = "gg")] + Gg, + #[serde(rename = "ats")] + Ats, +} + #[derive(Serialize, Deserialize, Clone, Debug)] #[serde(deny_unknown_fields)] pub struct Cdn { @@ -675,9 +694,22 @@ pub struct Bolt { pub confirm_commands: bool, } -fn default_regions() -> HashMap { - toml::from_str(include_str!("../default_regions.toml")) - .expect("failed to parse default_regions.toml") +#[derive(Serialize, Deserialize, Clone, Debug, Default)] +#[serde(deny_unknown_fields)] +pub 
struct BetterUptime { + /// The name of your company. This will be displayed on your status page + /// in the top left. This is required by Better Uptime. + pub company_name: String, + /// The URL of your company. This will be used on the status page to link + /// to your company's website. This is required by Better Uptime. + pub company_url: String, + /// The subdomain is the part of the public URL of your status page uses. + /// + /// Eg. .betteruptime.com. + /// + /// It needs to be unique across all of Better Uptime. This is required + /// by Better Uptime. + pub company_subdomain: String, } fn default_docker_repo() -> String { @@ -700,20 +732,6 @@ fn default_tunnel_port() -> u16 { 5000 } -#[derive(Serialize, Deserialize, Clone, Debug, Default)] -#[serde(deny_unknown_fields)] -pub struct BetterUptime { - /// The name of your company. This will be displayed on your status page - /// in the top left. This is required by Better Uptime. - pub company_name: String, - /// The URL of your company. This will be used on the status page to link - /// to your company's website. This is required by Better Uptime. - pub company_url: String, - /// The subdomain is the part of the public URL of your status page uses. - /// - /// Eg. .betteruptime.com. - /// - /// It needs to be unique across all of Better Uptime. This is required - /// by Better Uptime. 
- pub company_subdomain: String, +fn default_job_server_provision_margin() -> u32 { + 2 } diff --git a/lib/bolt/core/src/context/project.rs b/lib/bolt/core/src/context/project.rs index 16ca3417b8..3ab61014c0 100644 --- a/lib/bolt/core/src/context/project.rs +++ b/lib/bolt/core/src/context/project.rs @@ -1,5 +1,5 @@ use std::{ - collections::HashMap, + collections::{HashMap, HashSet}, path::{Path, PathBuf}, process::Command, sync::{Arc, Weak}, @@ -184,6 +184,11 @@ impl ProjectContextData { 9000, *minio_port, "minio_port must not be changed if dns is configured" ); + } else { + assert!( + self.ns().rivet.dynamic_servers.is_none(), + "must have dns configured to provision servers" + ); } } config::ns::ClusterKind::Distributed { .. } => { @@ -203,64 +208,77 @@ impl ProjectContextData { } // MARK: Dynamic Servers - if !self.ns().pools.is_empty() { - assert!( - self.ns().dns.is_some(), - "must have dns configured to provision servers" - ); - } + if let Some(dynamic_servers) = &self.ns().rivet.dynamic_servers { + let mut unique_datacenter_ids = HashSet::new(); - // Validate the build delivery method - if !self.ns().pools.is_empty() { - let ats_count = self.ns().pools.iter().filter(|p| p.pool == "ats").count(); - match self.ns().rivet.dynamic_servers.build_delivery_method { - config::ns::DynamicServersBuildDeliveryMethod::TrafficServer => { - assert_ne!(ats_count, 0, "TrafficServer delivery method will not work without ats servers in each region. 
either set rivet.dynamic_servers.build_delivery_method = \"S3Direct\" to download builds directly from S3 or add an ATS pool to each region."); - } - config::ns::DynamicServersBuildDeliveryMethod::S3Direct => { - assert_eq!( - ats_count, 0, - "S3Direct delivery method should not be used if ats servers are available" - ); - } - } - } + for (name_id, datacenter) in &dynamic_servers.cluster.datacenters { + assert!( + !unique_datacenter_ids.contains(&datacenter.datacenter_id), + "invalid datacenter ({}): datacenter_id not unique", + name_id, + ); + unique_datacenter_ids.insert(datacenter.datacenter_id); - // MARK: Pools - for region_id in self.ns().regions.keys() { - // Skip empty regions - if !self.ns().pools.iter().any(|p| p.region == *region_id) { - continue; - } + let Some(ats_pool) = datacenter + .pools + .get(&config::ns::DynamicServersDatacenterPoolType::Ats) + else { + panic!("invalid datacenter ({}): Missing ATS pool", name_id); + }; - // Validate all required pools exist - assert!( - self.ns() + // Validate the build delivery method + assert!( + ats_pool.desired_count <= ats_pool.max_count, + "invalid datacenter ({}): ATS desired > max", + name_id + ); + match datacenter.build_delivery_method { + config::ns::DynamicServersBuildDeliveryMethod::TrafficServer => { + assert_ne!( + 0, ats_pool.desired_count, + "invalid datacenter ({}): TrafficServer delivery method will not work without ats servers. 
Either set datacenter.build_delivery_method = \"s3_direct\" to download builds directly from S3 or increase the ATS pool count.", + name_id, + ); + } + config::ns::DynamicServersBuildDeliveryMethod::S3Direct => { + assert_eq!( + 0, ats_pool.desired_count, + "invalid datacenter ({}): S3Direct delivery method should not be used if ats servers are available", + name_id, + ); + } + } + + // Validate all required pools exist + let gg_pool = datacenter .pools - .iter() - .any(|p| p.pool == "gg" && p.region == *region_id), - "missing gg pool for region {region_id}", - region_id = region_id - ); - assert!( - self.ns() + .get(&config::ns::DynamicServersDatacenterPoolType::Gg); + let gg_count = gg_pool.map(|pool| pool.desired_count).unwrap_or_default(); + assert_ne!( + gg_count, 0, + "invalid datacenter ({}): Missing GG servers", + name_id, + ); + assert!( + gg_count <= gg_pool.unwrap().max_count, + "invalid datacenter ({}): GG desired > max", + name_id + ); + + let job_pool = datacenter .pools - .iter() - .any(|p| p.pool == "job" && p.region == *region_id), - "missing job pool for region {region_id}", - region_id = region_id - ); - if matches!( - self.ns().rivet.dynamic_servers.build_delivery_method, - config::ns::DynamicServersBuildDeliveryMethod::TrafficServer - ) { + .get(&config::ns::DynamicServersDatacenterPoolType::Job); + let job_count = job_pool.map(|pool| pool.desired_count).unwrap_or_default(); + + assert_ne!( + job_count, 0, + "invalid datacenter ({}): Missing job servers", + name_id, + ); assert!( - self.ns() - .pools - .iter() - .any(|p| p.pool == "ats" && p.region == *region_id), - "missing ats pool for region {region_id}", - region_id = region_id + job_count <= job_pool.unwrap().max_count, + "invalid datacenter ({}): Job desired > max", + name_id ); } } @@ -387,7 +405,22 @@ impl ProjectContextData { async fn read_config(project_path: &Path) -> config::project::Project { let config_path = project_path.join("Bolt.toml"); let config_str = 
fs::read_to_string(config_path).await.unwrap(); - toml::from_str::(&config_str).unwrap() + + match toml::from_str::(&config_str) { + Result::Ok(x) => x, + Result::Err(err) => { + if let Some(span) = err.span().filter(|span| span.start != span.end) { + panic!( + "failed to parse project config ({:?}): {}\n\n{}\n", + &span, + err.message(), + &config_str[span.clone()] + ); + } else { + panic!("failed to parse project config: {}", err.message()); + } + } + } } pub async fn read_config_local(project_path: &Path) -> config::local::Local { @@ -406,8 +439,21 @@ impl ProjectContextData { "failed to read namespace config: {}", path.display() )); - let config = toml::from_str::(&config_str) - .expect("failed to parse namespace config"); + let config = match toml::from_str::(&config_str) { + Result::Ok(x) => x, + Result::Err(err) => { + if let Some(span) = err.span().filter(|span| span.start != span.end) { + panic!( + "failed to parse namespace config ({:?}): {}\n\n{}\n", + &span, + err.message(), + &config_str[span.clone()] + ); + } else { + panic!("failed to parse namespace config: {}", err.message()); + } + } + }; // Verify s3 config if config.s3.providers.minio.is_none() @@ -870,30 +916,6 @@ impl ProjectContextData { config::ns::ClusterKind::Distributed { .. } => 3, } } - - /// Returns the region which contains the core cluster. - /// - /// Seldom used in services. Only used to specify the CDN region at the - /// moment, but that will be deprecated later. - pub fn primary_region(&self) -> String { - self.ns() - .regions - .iter() - .find(|(_, x)| x.primary) - .map(|(x, _)| x.clone()) - .expect("missing primary region") - } - - /// Species the region or returns "local" for local development. - /// - /// This is useful for deploying Nomad services from Bolt to know which - /// region to connect to. - pub fn primary_region_or_local(&self) -> String { - match &self.ns().cluster.kind { - config::ns::ClusterKind::SingleNode { .. 
} => "local".to_string(), - config::ns::ClusterKind::Distributed { .. } => self.primary_region(), - } - } } impl ProjectContextData { diff --git a/lib/bolt/core/src/context/service.rs b/lib/bolt/core/src/context/service.rs index 7648f94416..9405de4e44 100644 --- a/lib/bolt/core/src/context/service.rs +++ b/lib/bolt/core/src/context/service.rs @@ -91,8 +91,25 @@ impl ServiceContextData { Ok(v) => v, Err(_) => return None, }; - let config = toml::from_str::(&config_str) - .expect(&format!("failed to read config: {}", path.display())); + let config = match toml::from_str::(&config_str) { + Result::Ok(x) => x, + Result::Err(err) => { + if let Some(span) = err.span().filter(|span| span.start != span.end) { + panic!( + "failed to parse service config ({}): {}\n\n{}\n", + path.display(), + err.message(), + &config_str[span.clone()], + ); + } else { + panic!( + "failed to parse service config ({}): {}", + path.display(), + err.message(), + ); + } + } + }; let cargo_path = path.join("Cargo.toml"); let cargo = match fs::read_to_string(&cargo_path).await { @@ -217,10 +234,6 @@ impl ServiceContextData { .join(self.name()) } - pub async fn gen_proto_path(&self) -> PathBuf { - self.gen_path().await.join("proto") - } - pub fn migrations_path(&self) -> PathBuf { self.path().join("migrations") } @@ -333,6 +346,18 @@ impl ServiceContextData { // TODO: true } + + pub fn depends_on_infra(&self) -> bool { + self.name() == "cluster-worker" || self.name() == "monolith-worker" + } + + pub fn depends_on_cluster_config(&self) -> bool { + self.name() == "cluster-default-update" + } + + pub fn depends_on_provision_margin(&self) -> bool { + self.name() == "cluster-autoscale" + } } impl ServiceContextData { @@ -713,7 +738,6 @@ impl ServiceContextData { pub async fn env(&self, run_context: &RunContext) -> Result> { let project_ctx = self.project().await; - let region_id = project_ctx.primary_region_or_local(); let mut env = Vec::new(); // HACK: Link to dynamically linked libraries in /nix/store 
@@ -748,7 +772,6 @@ impl ServiceContextData { // Provide default Nomad variables if in test if matches!(run_context, RunContext::Test { .. }) { env.push(("KUBERNETES_REGION".into(), "global".into())); - env.push(("KUBERNETES_DC".into(), region_id.clone())); env.push(( "KUBERNETES_TASK_DIR".into(), project_ctx.gen_path().display().to_string(), @@ -820,15 +843,6 @@ impl ServiceContextData { } } - // Pools - if !project_ctx.ns().pools.is_empty() { - env.push(("RIVET_HAS_POOLS".into(), "1".into())); - } - - // Regions - env.push(("RIVET_REGION".into(), region_id.clone())); - env.push(("RIVET_PRIMARY_REGION".into(), project_ctx.primary_region())); - // Networking if matches!(run_context, RunContext::Service { .. }) { env.push(("HEALTH_PORT".into(), k8s::gen::HEALTH_PORT.to_string())); @@ -941,10 +955,12 @@ impl ServiceContextData { // Expose all S3 endpoints to services that need them let s3_deps = if self.depends_on_s3() { + // self.s3_dependencies(&run_context).await project_ctx.all_services().await.to_vec() } else { - self.s3_dependencies(&run_context).await + Vec::new() }; + for s3_dep in s3_deps { if !matches!(s3_dep.config().runtime, RuntimeKind::S3 { .. }) { continue; @@ -1001,15 +1017,31 @@ impl ServiceContextData { if project_ctx.ns().rivet.upload.nsfw_error_verbose { env.push(("RIVET_UPLOAD_NSFW_ERROR_VERBOSE".into(), "1".into())); } - env.push(( - "RIVET_DS_BUILD_DELIVERY_METHOD".into(), - project_ctx - .ns() - .rivet - .dynamic_servers - .build_delivery_method - .to_string(), - )); + + // Dynamic servers + if let Some(dynamic_servers) = &project_ctx.ns().rivet.dynamic_servers { + if self.depends_on_cluster_config() || matches!(run_context, RunContext::Test { .. 
}) { + env.push(( + "RIVET_DEFAULT_CLUSTER_CONFIG".into(), + serde_json::to_string(&dynamic_servers.cluster)?, + )); + env.push(( + "RIVET_TAINT_DEFAULT_CLUSTER".into(), + if dynamic_servers.taint { + "1".to_string() + } else { + "0".to_string() + }, + )); + } + + if self.depends_on_provision_margin() { + env.push(( + format!("JOB_SERVER_PROVISION_MARGIN"), + dynamic_servers.job_server_provision_margin.to_string(), + )); + } + } // Sort env by keys so it's always in the same order env.sort_by_cached_key(|x| x.0.clone()); @@ -1167,12 +1199,14 @@ impl ServiceContextData { env.push(("CLICKHOUSE_URL".into(), uri)); } - // Expose S3 endpoints to services that need them + // Expose all S3 endpoints to services that need them let s3_deps = if self.depends_on_s3() { + // self.s3_dependencies(&run_context).await project_ctx.all_services().await.to_vec() } else { - self.s3_dependencies(run_context).await + Vec::new() }; + for s3_dep in s3_deps { if !matches!(s3_dep.config().runtime, RuntimeKind::S3 { .. 
}) { continue; @@ -1207,6 +1241,36 @@ impl ServiceContextData { )); } + if self.depends_on_infra() { + let tls = terraform::output::read_tls(&project_ctx).await; + let k8s_infra = terraform::output::read_k8s_infra(&project_ctx).await; + + env.push(( + "TLS_CERT_LOCALLY_SIGNED_JOB_CERT_PEM".into(), + tls.tls_cert_locally_signed_job.cert_pem.clone(), + )); + env.push(( + "TLS_CERT_LOCALLY_SIGNED_JOB_KEY_PEM".into(), + tls.tls_cert_locally_signed_job.key_pem.clone(), + )); + env.push(( + "TLS_CERT_LETSENCRYPT_RIVET_JOB_CERT_PEM".into(), + tls.tls_cert_letsencrypt_rivet_job.cert_pem.clone(), + )); + env.push(( + "TLS_CERT_LETSENCRYPT_RIVET_JOB_KEY_PEM".into(), + tls.tls_cert_letsencrypt_rivet_job.key_pem.clone(), + )); + env.push(( + "TLS_ROOT_CA_CERT_PEM".into(), + (*tls.root_ca_cert_pem).clone(), + )); + env.push(( + "K8S_TRAEFIK_TUNNEL_EXTERNAL_IP".into(), + (*k8s_infra.traefik_tunnel_external_ip).clone(), + )); + } + Ok(env) } } diff --git a/lib/bolt/core/src/dep/cargo/cli.rs b/lib/bolt/core/src/dep/cargo/cli.rs index 9aac1f3d7c..47f1759696 100644 --- a/lib/bolt/core/src/dep/cargo/cli.rs +++ b/lib/bolt/core/src/dep/cargo/cli.rs @@ -292,9 +292,6 @@ pub async fn build_tests<'a, T: AsRef>( ]) .stdout(std::process::Stdio::piped()) .current_dir(abs_path) - // TODO: Not sure why the .cargo/config.toml isn't working with nested projects, have to hardcode - // the target dir - // .env("CARGO_TARGET_DIR", $(readlink -f ./target)) // Used for Tokio Console. 
See https://github.com/tokio-rs/console#using-it .env("RUSTFLAGS", "--cfg tokio_unstable"); if let Some(jobs) = opts.jobs { diff --git a/lib/bolt/core/src/dep/k8s/gen.rs b/lib/bolt/core/src/dep/k8s/gen.rs index 08123cb289..7acd8ab9e0 100644 --- a/lib/bolt/core/src/dep/k8s/gen.rs +++ b/lib/bolt/core/src/dep/k8s/gen.rs @@ -201,22 +201,6 @@ pub async fn gen_svc(exec_ctx: &ExecServiceContext) -> Vec { "data": secret_data })); - // Create config for /etc/rivet - // - // Use cjson for consistent outputs - let regions_json = cjson::to_string(&project_ctx.ns().regions).unwrap(); - specs.push(json!({ - "apiVersion": "v1", - "kind": "ConfigMap", - "metadata": { - "name": format!("rivet-etc-{}", svc_ctx.name()), - "namespace": "rivet-service" - }, - "data": { - "region_config.json": regions_json - } - })); - // Render ports let (pod_ports, service_ports) = { let mut pod_ports = Vec::new(); @@ -654,19 +638,6 @@ async fn build_volumes( let mut volumes = Vec::::new(); let mut volume_mounts = Vec::::new(); - // TODO: Move this to reading vanilla config inside service instead of populating this - // Add static config - volumes.push(json!({ - "name": "rivet-etc", - "configMap": { - "name": format!("rivet-etc-{}", svc_ctx.name()), - } - })); - volume_mounts.push(json!({ - "name": "rivet-etc", - "mountPath": "/etc/rivet" - })); - // Add volumes based on exec service match driver { // Mount the service binaries to execute directly in the container. 
diff --git a/lib/bolt/core/src/dep/terraform/gen.rs b/lib/bolt/core/src/dep/terraform/gen.rs index b13835a558..1b4fea67af 100644 --- a/lib/bolt/core/src/dep/terraform/gen.rs +++ b/lib/bolt/core/src/dep/terraform/gen.rs @@ -1,7 +1,7 @@ use anyhow::{Context, Result}; use indoc::{formatdoc, indoc}; use serde_json::json; -use std::collections::{HashMap, HashSet}; +use std::collections::HashMap; use tokio::fs; use crate::{ @@ -222,19 +222,6 @@ async fn vars(ctx: &ProjectContext) { } } - // Regions - vars.insert( - "primary_region".into(), - json!(ctx.primary_region_or_local()), - ); - - let regions = super::regions::build_regions(&ctx).unwrap(); - vars.insert("regions".into(), json!(®ions)); - - // Pools - let pools = super::pools::build_pools(&ctx).await.unwrap(); - vars.insert("pools".into(), json!(&pools)); - // Tunnels if let Some(ns::Dns { provider: Some(ns::DnsProvider::Cloudflare { access, .. }), @@ -268,31 +255,6 @@ async fn vars(ctx: &ProjectContext) { vars.insert("tunnels".into(), json!(&tunnels)); } - // Servers - let servers = super::servers::build_servers(&ctx, ®ions, &pools).unwrap(); - vars.insert("servers".into(), json!(servers)); - - if dep::terraform::cli::has_applied(ctx, "k8s_infra").await - && dep::terraform::cli::has_applied(ctx, "tls").await - { - let k8s_infra = dep::terraform::output::read_k8s_infra(ctx).await; - let tls = dep::terraform::output::read_tls(ctx).await; - - let mut server_install_scripts = HashMap::new(); - for (k, v) in &servers { - server_install_scripts.insert( - k.clone(), - super::install_scripts::gen(ctx, v, &servers, &k8s_infra, &tls) - .await - .unwrap(), - ); - } - vars.insert( - "server_install_scripts".into(), - json!(server_install_scripts), - ); - } - // Services { let mut services = HashMap::new(); @@ -311,6 +273,27 @@ async fn vars(ctx: &ProjectContext) { vars.insert("services".into(), json!(services)); } + // Datacenters + if let Some(dynamic_servers) = &config.rivet.dynamic_servers { + let datacenters = 
dynamic_servers + .cluster + .datacenters + .iter() + .map(|(name_id, dc)| { + ( + name_id, + json!({ + "datacenter_id": dc.datacenter_id, + }), + ) + }) + .collect::>(); + + vars.insert("datacenters".into(), json!(datacenters)); + } else { + vars.insert("datacenters".into(), json!({})); + }; + // Docker vars.insert( "authenticate_all_docker_hub_pulls".into(), @@ -513,67 +496,68 @@ async fn vars(ctx: &ProjectContext) { vars.insert("s3_providers".into(), s3_providers(ctx).await.unwrap()); } + // TODO: Reimplement with new server provisioning // Better Uptime - if let Some(better_uptime) = &config.better_uptime { - // Make sure DNS is enabled - if config.dns.is_none() { - panic!("Better Uptime requires DNS to be enabled, since it uses subdomains to monitor services"); - } - - // Make sure there is at least one pool - if config.pools.is_empty() { - panic!("Better Uptime requires at least one pool, otherwise it will not be able to monitor the service"); - } - - // Load all the regions of pools - let mut regions = config - .pools - .iter() - .filter_map(|pool| match config.regions.get(&pool.region) { - Some(region) => Some((pool.region.clone(), region.provider_region.clone())), - None => None, - }) - .collect::>() - .into_iter() - .collect::>(); - regions.sort(); - - // Create monitors - let mm_monitors = regions - .iter() - .map(|(region, _)| { - json!({ - "id": region, - "url": format!("{}/status/matchmaker?region={}", ctx.origin_api(), region), - "public_name": region, - }) - }) - .collect::>(); - - vars.insert( - "better_uptime_groups".into(), - json!([ - { - "id": "mm", - "name": "Matchmaker", - "monitors": mm_monitors, - }, - { - "id": "cdn", - "name": "CDN", - "monitors": [ - { - "id": "sandbox", - "url": format!("https://sandbox.{}", ctx.domain_cdn().unwrap()), - "public_name": "CDN" - } - ] - }, - ]), - ); - - vars.insert("better_uptime".into(), json!(better_uptime.to_owned())); - } + // if let Some(better_uptime) = &config.better_uptime { + // // Make sure DNS 
is enabled + // if config.dns.is_none() { + // panic!("Better Uptime requires DNS to be enabled, since it uses subdomains to monitor services"); + // } + + // // Make sure there is at least one pool + // if config.pools.is_empty() { + // panic!("Better Uptime requires at least one pool, otherwise it will not be able to monitor the service"); + // } + + // // Load all the regions of pools + // let mut regions = config + // .pools + // .iter() + // .filter_map(|pool| match config.regions.get(&pool.region) { + // Some(region) => Some((pool.region.clone(), region.provider_region.clone())), + // None => None, + // }) + // .collect::>() + // .into_iter() + // .collect::>(); + // regions.sort(); + + // // Create monitors + // let mm_monitors = regions + // .iter() + // .map(|(region, _)| { + // json!({ + // "id": region, + // "url": format!("{}/status/matchmaker?region={}", ctx.origin_api(), region), + // "public_name": region, + // }) + // }) + // .collect::>(); + + // vars.insert( + // "better_uptime_groups".into(), + // json!([ + // { + // "id": "mm", + // "name": "Matchmaker", + // "monitors": mm_monitors, + // }, + // { + // "id": "cdn", + // "name": "CDN", + // "monitors": [ + // { + // "id": "sandbox", + // "url": format!("https://sandbox.{}", ctx.domain_cdn().unwrap()), + // "public_name": "CDN" + // } + // ] + // }, + // ]), + // ); + + // vars.insert("better_uptime".into(), json!(better_uptime.to_owned())); + // } // Media presets vars.insert( diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/envoy.sh b/lib/bolt/core/src/dep/terraform/install_scripts/files/envoy.sh deleted file mode 100644 index e301643cef..0000000000 --- a/lib/bolt/core/src/dep/terraform/install_scripts/files/envoy.sh +++ /dev/null @@ -1,13 +0,0 @@ -version="1.27.1" - -# Install Envoy -mkdir -p "/opt/envoy-${version}" -curl -L "https://github.com/envoyproxy/envoy/releases/download/v${version}/envoy-${version}-linux-x86_64" -o "/tmp/envoy_${version}" -install 
"/tmp/envoy_${version}" /usr/bin/envoy - -# Install hot-restarter.py -# -# See https://www.envoyproxy.io/docs/envoy/latest/operations/hot_restarter.html -curl -L "https://raw.githubusercontent.com/envoyproxy/envoy/v${version}/restarter/hot-restarter.py" -o "/tmp/envoy_hot_restarter_${version}.py" -install "/tmp/envoy_hot_restarter_${version}.py" /usr/bin/envoy_hot_restarter.py - diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/outbound_proxy.sh b/lib/bolt/core/src/dep/terraform/install_scripts/files/outbound_proxy.sh deleted file mode 100644 index f8dbc05825..0000000000 --- a/lib/bolt/core/src/dep/terraform/install_scripts/files/outbound_proxy.sh +++ /dev/null @@ -1,67 +0,0 @@ -# Create user -if ! id -u "outbound_proxy" &>/dev/null; then - useradd -r -s /bin/false outbound_proxy -fi - -# Config -mkdir -p /etc/outbound_proxy - -cat << 'EOF' > /etc/outbound_proxy/envoy.yaml -__ENVOY_CONFIG__ -EOF - -chown -R outbound_proxy:outbound_proxy /etc/outbound_proxy - -# Startup scripts -# -# See https://www.envoyproxy.io/docs/envoy/latest/operations/hot_restarter.html -cat << 'EOF' > /etc/outbound_proxy/start_envoy.sh -#!/bin/bash -set -e -exec /usr/bin/envoy -c /etc/outbound_proxy/envoy.yaml --restart-epoch $RESTART_EPOCH -EOF - -cat << 'EOF' > /etc/outbound_proxy/check_envoy.sh -#!/bin/sh -set -e -/usr/bin/envoy -c /etc/outbound_proxy/envoy.yaml --mode validate -EOF - -cat << 'EOF' > /etc/outbound_proxy/reload_envoy.sh -#!/bin/sh -set -e -/usr/bin/envoy -c /etc/outbound_proxy/envoy.yaml --mode validate -kill -1 $1 -EOF - -chmod +x /etc/outbound_proxy/start_envoy.sh /etc/outbound_proxy/check_envoy.sh /etc/outbound_proxy/reload_envoy.sh - -# Systemd service -cat << 'EOF' > /etc/systemd/system/outbound_proxy.service -[Unit] -Description=Outbound Proxy -After=network.target - -[Service] -User=outbound_proxy -Group=outbound_proxy -Restart=always -ExecStart=/usr/bin/envoy_hot_restarter.py /etc/outbound_proxy/start_envoy.sh 
-ExecStartPre=/etc/outbound_proxy/check_envoy.sh -ExecReload=/etc/outbound_proxy/reload_envoy.sh $MAINPID -LimitNOFILE=102400 -TimeoutStopSec=10 -KillMode=process - -[Install] -WantedBy=multi-user.target -EOF - -# Start and enable the service -systemctl daemon-reload -systemctl enable outbound_proxy -systemctl start outbound_proxy - -# Reload config if already running -systemctl reload outbound_proxy - diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/mod.rs b/lib/bolt/core/src/dep/terraform/install_scripts/mod.rs deleted file mode 100644 index 9530cf9687..0000000000 --- a/lib/bolt/core/src/dep/terraform/install_scripts/mod.rs +++ /dev/null @@ -1,168 +0,0 @@ -use anyhow::Result; -use indexmap::{indexmap, IndexMap}; -use indoc::formatdoc; -use std::collections::HashMap; - -use crate::{context::ProjectContext, dep::terraform, dep::terraform::servers::Server}; - -pub mod components; - -pub async fn gen( - ctx: &ProjectContext, - server: &Server, - all_servers: &HashMap, - k8s_infra: &terraform::output::K8sInfra, - tls: &terraform::output::Tls, -) -> Result { - let mut script = Vec::new(); - - let mut prometheus_targets = IndexMap::new(); - - // MARK: Common (pre) - script.push(components::common()); - script.push(components::node_exporter()); - script.push(components::sysctl()); - script.push(components::traefik()); - script.push(components::traefik_tunnel(ctx, &k8s_infra, &tls)); - - prometheus_targets.insert( - "node_exporter".into(), - components::VectorPrometheusTarget { - endpoint: "http://127.0.0.1:9100/metrics".into(), - scrape_interval: 15, - }, - ); - - // MARK: Game Guard - if server.pool_id == "gg" { - script.push(components::traefik()); - script.push(components::traefik_instance(components::TraefikInstance { - name: "game_guard".into(), - static_config: gg_traefik_static_config( - server, - &ctx.read_secret(&["rivet", "api_route", "token"]).await?, - ), - dynamic_config: String::new(), - tls_certs: indexmap! 
{ - "letsencrypt_rivet_job".into() => (*tls.tls_cert_letsencrypt_rivet_job).clone(), - }, - tcp_server_transports: Default::default(), - })); - - prometheus_targets.insert( - "game_guard".into(), - components::VectorPrometheusTarget { - endpoint: "http://127.0.0.1:9980/metrics".into(), - scrape_interval: 15, - }, - ); - } - - // MARK: Job - if server.pool_id == "job" { - script.push(components::docker()); - script.push(components::lz4()); - script.push(components::skopeo()); - script.push(components::umoci()); - script.push(components::cnitool()); - script.push(components::cni_plugins()); - script.push(components::nomad(server)); - script.push(components::envoy()); - script.push(components::outbound_proxy(server, all_servers)?); - - prometheus_targets.insert( - "nomad".into(), - components::VectorPrometheusTarget { - endpoint: "http://127.0.0.1:4646/v1/metrics?format=prometheus".into(), - scrape_interval: 15, - }, - ); - } - - // MARK: ATS - if server.pool_id == "ats" { - script.push(components::docker()); - script.push(components::traffic_server(ctx, server).await?); - } - - // MARK: Common (post) - if !prometheus_targets.is_empty() { - script.push(components::vector(&components::VectorConfig { - prometheus_targets, - })); - } - - let joined = script.join("\n\necho \"======\"\n\n"); - Ok(format!("#!/usr/bin/env bash\nset -eu\n\n{joined}")) -} - -fn gg_traefik_static_config(server: &Server, api_route_token: &str) -> String { - let http_provider_endpoint = format!( - "http://127.0.0.1:{port}/traefik/config/game-guard?token={api_route_token}®ion={region}", - port = components::TUNNEL_API_ROUTE_PORT, - region = server.region_id - ); - - let mut config = formatdoc!( - r#" - [entryPoints] - [entryPoints.traefik] - address = "127.0.0.1:9980" - - [entryPoints.lb-80] - address = ":80" - - [entryPoints.lb-443] - address = ":443" - - [api] - insecure = true - - [metrics.prometheus] - # See lib/chirp/metrics/src/buckets.rs - buckets = [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 
0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0] - addEntryPointsLabels = true - addRoutersLabels = true - addServicesLabels = true - - [providers] - [providers.file] - directory = "/etc/game_guard/dynamic" - - [providers.http] - endpoint = "{http_provider_endpoint}" - pollInterval = "0.5s" - "# - ); - - // TCP ports - for port in 20000..=31999 { - config.push_str(&formatdoc!( - r#" - [entryPoints.lb-{port}-tcp] - address = ":{port}/tcp" - - [entryPoints.lb-{port}-tcp.transport.respondingTimeouts] - readTimeout = "12h" - writeTimeout = "12h" - idleTimeout = "30s" - - "# - )); - } - - // UDP ports - for port in 20000..=31999 { - config.push_str(&formatdoc!( - r#" - [entryPoints.lb-{port}-udp] - address = ":{port}/udp" - - [entryPoints.lb-{port}-udp.udp] - timeout = "15s" - "# - )); - } - - config -} diff --git a/lib/bolt/core/src/dep/terraform/mod.rs b/lib/bolt/core/src/dep/terraform/mod.rs index e48e65c830..df9145e9ed 100644 --- a/lib/bolt/core/src/dep/terraform/mod.rs +++ b/lib/bolt/core/src/dep/terraform/mod.rs @@ -1,9 +1,5 @@ pub mod cli; pub mod gen; -pub mod install_scripts; pub mod net; pub mod output; -pub mod pools; -pub mod regions; pub mod remote_states; -pub mod servers; diff --git a/lib/bolt/core/src/dep/terraform/output.rs b/lib/bolt/core/src/dep/terraform/output.rs index c98db29f3b..6994f13da7 100644 --- a/lib/bolt/core/src/dep/terraform/output.rs +++ b/lib/bolt/core/src/dep/terraform/output.rs @@ -16,23 +16,6 @@ impl Deref for TerraformOutputValue { } } -#[derive(Debug, Clone, Deserialize)] -pub struct Pools { - pub servers: TerraformOutputValue>, -} - -#[derive(Debug, Clone, Deserialize)] -pub struct PoolServer { - // Server - pub region_id: String, - pub pool_id: String, - pub version_id: String, - pub index: usize, - - // New data - pub public_ipv4: String, -} - #[derive(Debug, Clone, Deserialize)] pub struct Cert { pub cert_pem: String, @@ -94,10 +77,6 @@ pub async fn read_k8s_infra(ctx: &ProjectContext) -> K8sInfra { read_plan::(ctx, 
"k8s_infra").await } -pub async fn read_pools(ctx: &ProjectContext) -> Pools { - read_plan::(ctx, "pools").await -} - pub async fn read_tls(ctx: &ProjectContext) -> Tls { read_plan::(ctx, "tls").await } diff --git a/lib/bolt/core/src/dep/terraform/pools.rs b/lib/bolt/core/src/dep/terraform/pools.rs deleted file mode 100644 index dfd8240e46..0000000000 --- a/lib/bolt/core/src/dep/terraform/pools.rs +++ /dev/null @@ -1,132 +0,0 @@ -// TODO: Move this file to a common place, since this isn't specific to Terraform - -use anyhow::Result; -use derive_builder::Builder; - -use ipnet::Ipv4AddrRange; -use serde::Serialize; -use std::collections::HashMap; - -use super::net; - -use crate::context::ProjectContext; - -#[derive(Serialize, Clone, Builder)] -#[builder(setter(into))] -pub struct Pool { - #[serde(skip)] - pub vlan_addr_range: Ipv4AddrRange, - - /// Volumes attached to this node. - #[builder(default)] - volumes: HashMap, - - /// Cloud-based firewall rules to apply to this node. - /// - /// Additional firewall rules are applied by Terraform depending on the use case. 
- #[builder(default)] - firewall_inbound: Vec, -} - -#[derive(Serialize, Clone)] -pub struct PoolVolume {} - -#[derive(Serialize, Clone, PartialEq, Eq, PartialOrd, Ord)] -pub struct FirewallRule { - label: String, - ports: String, - protocol: String, - inbound_ipv4_cidr: Vec, - inbound_ipv6_cidr: Vec, -} - -pub async fn build_pools(_ctx: &ProjectContext) -> Result> { - let mut pools = HashMap::::new(); - - pools.insert( - "gg".into(), - PoolBuilder::default() - .vlan_addr_range(net::gg::vlan_addr_range()) - .firewall_inbound(vec![ - // HTTP(S) - FirewallRule { - label: "http-tcp".into(), - ports: "80".into(), - protocol: "tcp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - FirewallRule { - label: "http-udp".into(), - ports: "80".into(), - protocol: "udp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - FirewallRule { - label: "https-tcp".into(), - ports: "443".into(), - protocol: "tcp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - FirewallRule { - label: "https-udp".into(), - ports: "443".into(), - protocol: "udp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - // Dynamic TCP - FirewallRule { - label: "dynamic-tcp".into(), - ports: "20000-31999".into(), - protocol: "tcp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - // Dynamic UDP - FirewallRule { - label: "dynamic-udp".into(), - ports: "20000-31999".into(), - protocol: "udp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - ]) - .build()?, - ); - - pools.insert( - "job".into(), - PoolBuilder::default() - .vlan_addr_range(net::job::vlan_addr_range()) - .firewall_inbound(vec![ - // Ports available to Nomad jobs using the host network - FirewallRule { - label: "nomad-host-tcp".into(), 
- ports: "26000-31999".into(), - protocol: "tcp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - FirewallRule { - label: "nomad-host-udp".into(), - ports: "26000-31999".into(), - protocol: "udp".into(), - inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], - inbound_ipv6_cidr: vec!["::/0".into()], - }, - ]) - .build()?, - ); - - pools.insert( - "ats".into(), - PoolBuilder::default() - .vlan_addr_range(net::ats::vlan_addr_range()) - .build()?, - ); - - Ok(pools) -} diff --git a/lib/bolt/core/src/dep/terraform/regions.rs b/lib/bolt/core/src/dep/terraform/regions.rs deleted file mode 100644 index 2cd1c2db95..0000000000 --- a/lib/bolt/core/src/dep/terraform/regions.rs +++ /dev/null @@ -1,56 +0,0 @@ -use anyhow::*; - -use serde::Serialize; -use std::{collections::HashMap, net::Ipv4Addr}; - -use crate::context::ProjectContext; - -use super::net; - -#[derive(Serialize, Clone)] -pub struct Region { - // Unique UUID that represents this region. - id: String, - - // Name of the server provider to use. - // - // Current options: - // * digitalocean - // * linode - provider: String, - - // This is the name of the provider's region. 
- // * DigitalOcean: https://docs.digitalocean.com/products/platform/availability-matrix/ - // * Linode: linode-cli regions list - provider_region: String, - - pub netnum: usize, - - vlan: RegionVlan, -} - -#[derive(Serialize, Clone)] -pub struct RegionVlan { - address: Ipv4Addr, - prefix_len: u8, -} - -pub fn build_regions(ctx: &ProjectContext) -> Result> { - let mut regions = HashMap::new(); - for (region_id, region) in &ctx.ns().regions { - regions.insert( - region_id.clone(), - Region { - id: region.id.clone(), - provider: region.provider.clone(), - provider_region: region.provider_region.clone(), - netnum: region.netnum, - vlan: RegionVlan { - address: net::region::vlan_ip_net().addr(), - prefix_len: net::region::vlan_ip_net().prefix_len(), - }, - }, - ); - } - Ok(regions) -} diff --git a/lib/bolt/core/src/dep/terraform/remote_states.rs b/lib/bolt/core/src/dep/terraform/remote_states.rs index bc53c70362..be58853750 100644 --- a/lib/bolt/core/src/dep/terraform/remote_states.rs +++ b/lib/bolt/core/src/dep/terraform/remote_states.rs @@ -10,7 +10,9 @@ use crate::context::ProjectContext; /// for each Terraform plan with the correct state backend. pub fn dependency_graph(_ctx: &ProjectContext) -> HashMap<&'static str, Vec> { hashmap! 
{ - "dns" => vec![RemoteStateBuilder::default().plan_id("pools").build().unwrap(), RemoteStateBuilder::default().plan_id("k8s_infra").build().unwrap()], + "dns" => vec![ + RemoteStateBuilder::default().plan_id("k8s_infra").build().unwrap() + ], "redis_aiven" => vec![ RemoteStateBuilder::default().plan_id("k8s_cluster_aws").build().unwrap() ], diff --git a/lib/bolt/core/src/dep/terraform/servers.rs b/lib/bolt/core/src/dep/terraform/servers.rs deleted file mode 100644 index d8aa0d214c..0000000000 --- a/lib/bolt/core/src/dep/terraform/servers.rs +++ /dev/null @@ -1,88 +0,0 @@ -use anyhow::*; -use serde::Serialize; -use std::{collections::HashMap, net::Ipv4Addr}; - -use crate::context::ProjectContext; - -use super::{pools::Pool, regions::Region}; - -#[derive(Serialize, Clone)] -pub struct Server { - pub region_id: String, - pub pool_id: String, - pub version_id: String, - pub index: usize, - pub name: String, - pub size: String, - pub vlan_ip: Ipv4Addr, - pub volumes: HashMap, - pub tags: Vec, -} - -#[derive(Serialize, Clone)] -pub struct ServerVolume { - size: usize, -} - -pub fn build_servers( - ctx: &ProjectContext, - regions: &HashMap, - pools: &HashMap, -) -> Result> { - let ns = ctx.ns_id(); - - // HACK: Linode requires tags to be > 3 characters. We extend the namespace to make sure it - // meets the minimum length requirement. 
- let ns_long = format!("rivet-{ns}"); - - let mut servers = HashMap::::new(); - for pool in &ctx.ns().pools { - let region_id = &pool.region; - let pool_id = &pool.pool; - let version_id = &pool.version; - - let _region = regions - .get(region_id) - .expect(&format!("missing region: {region_id}")); - let pool_config = pools - .get(pool_id.as_str()) - .expect(&format!("missing pool: {pool_id}")); - - for i in 0..pool.count { - let name = format!("{ns}-{region_id}-{pool_id}-{version_id}-{i}"); - - let volumes = pool - .volumes - .iter() - .map(|(id, volume)| (id.clone(), ServerVolume { size: volume.size })) - .collect::>(); - - let vlan_ip = pool_config.vlan_addr_range.clone().nth(i).unwrap(); - - let server = Server { - region_id: region_id.clone(), - pool_id: pool_id.clone(), - version_id: version_id.clone(), - index: i, - name: name.clone(), - size: pool.size.clone(), - vlan_ip, - volumes, - - // Tags that will be assigned to the servers. - tags: vec![ - ns_long.clone(), - format!("{ns}-{region_id}"), - format!("{ns}-{pool_id}"), - format!("{ns}-{pool_id}-{version_id}"), - format!("{ns}-{region_id}-{pool_id}"), - format!("{ns}-{region_id}-{pool_id}-{version_id}"), - ], - }; - - servers.insert(name.to_string(), server); - } - } - - Ok(servers) -} diff --git a/lib/bolt/core/src/tasks/api.rs b/lib/bolt/core/src/tasks/api.rs index 0ecef8bfe2..329ac72a83 100644 --- a/lib/bolt/core/src/tasks/api.rs +++ b/lib/bolt/core/src/tasks/api.rs @@ -1,86 +1,84 @@ use anyhow::*; +use serde::Deserialize; use serde_json::json; -use uuid::Uuid; use crate::context::ProjectContext; -/// Converts a team into a developer team via the Rivet API. -pub async fn convert_team(project_ctx: &ProjectContext, team_id: String) -> Result<()> { - if let Err(err) = Uuid::parse_str(&team_id) { - bail!("failed to parse uuid: {}", err); - } - - eprintln!(); - rivet_term::status::progress("Converting team", &team_id); +/// Creates a login link for the hub. 
+pub async fn access_token_login(project_ctx: &ProjectContext, name: String) -> Result<()> { + rivet_term::status::progress("Logging in as", &name); let api_admin_token = project_ctx .read_secret(&["rivet", "api_admin", "token"]) .await?; let response = reqwest::Client::new() - .post(format!( - "{}/admin/groups/{}/developer", - project_ctx.origin_api(), - team_id, - )) + .post(format!("{}/admin/login", project_ctx.origin_api(),)) .header( reqwest::header::AUTHORIZATION, reqwest::header::HeaderValue::from_str(&format!("Bearer {api_admin_token}"))?, ) - .json(&json!({})) + .json(&json!({ + "name": name, + })) .send() .await?; if !response.status().is_success() { bail!( - "failed to convert team ({}):\n{:#?}", + "failed to login ({}):\n{:#?}", response.status().as_u16(), response.json::().await? ); } + let body = response.json::().await?; + let url = body + .get("url") + .expect("url in login body") + .as_str() + .unwrap(); + eprintln!(); - rivet_term::status::success("Converted", ""); + rivet_term::status::success("Login with this url", ""); + eprintln!("{url}"); Ok(()) } -/// Creates a login link for the hub. 
-pub async fn access_token_login(project_ctx: &ProjectContext, name: String) -> Result<()> { - rivet_term::status::progress("Logging in as", &name); +#[derive(Deserialize)] +struct GetServerIpsResponse { + ips: Vec, +} +pub async fn get_cluster_server_ips( + project_ctx: &ProjectContext, + query: (&str, &str), +) -> Result> { let api_admin_token = project_ctx .read_secret(&["rivet", "api_admin", "token"]) .await?; let response = reqwest::Client::new() - .post(format!("{}/admin/login", project_ctx.origin_api(),)) + .get(format!( + "{}/admin/cluster/server_ips", + project_ctx.origin_api(), + )) + .query(&[query]) .header( reqwest::header::AUTHORIZATION, reqwest::header::HeaderValue::from_str(&format!("Bearer {api_admin_token}"))?, ) - .json(&json!({ - "name": name, - })) .send() .await?; if !response.status().is_success() { bail!( - "failed to login ({}):\n{:#?}", + "failed to get server ips ({}):\n{:#?}", response.status().as_u16(), response.json::().await? ); } - let body = response.json::().await?; - let url = body - .get("url") - .expect("url in login body") - .as_str() - .unwrap(); + let res = response.json::().await?; - eprintln!(); - rivet_term::status::success("Login with this url", ""); - eprintln!("{url}"); - - Ok(()) + Ok(res.ips) } diff --git a/lib/bolt/core/src/tasks/config/generate_default_regions.rs b/lib/bolt/core/src/tasks/config/generate_default_regions.rs deleted file mode 100644 index b42c562103..0000000000 --- a/lib/bolt/core/src/tasks/config/generate_default_regions.rs +++ /dev/null @@ -1,116 +0,0 @@ -use anyhow::*; -use serde::Deserialize; -use tokio::fs; -use toml_edit::value; -use uuid::Uuid; - -use crate::context; - -struct UniversalRegion { - provider: String, - provider_region: String, - debug: String, -} - -/// Fetches all regions from the supported cloud providers and adds missing -/// regions to the `lib/bolt/config/default_regions.toml` file. 
-pub async fn generate_default_regions() -> Result<()> { - let term = rivet_term::terminal(); - - let project_root = context::ProjectContextData::seek_project_root().await; - let default_regions_path = project_root.join("lib/bolt/config/default_regions.toml"); - - let toml = fs::read_to_string(&default_regions_path).await?; - let mut doc = toml.parse::()?; - - // Insert missing regions - let universal_regions = fetch_linode_universal_regions().await?; - 'outer: for region in universal_regions { - // Check if the region already exists - for (_, v) in doc.as_table() { - if v["provider"].as_str() == Some(region.provider.as_str()) - && v["provider_region"].as_str() == Some(region.provider_region.as_str()) - { - continue 'outer; - } - } - - // Prompt for new region - let name_id = rivet_term::prompt::PromptBuilder::default() - .message(format!("{}:{}", region.provider, region.provider_region)) - .docs(region.debug) - .build()? - .string(&term) - .await?; - - let netnum = find_max_netnum(&doc) + 1; - - let mut table = toml_edit::table(); - table["id"] = value(Uuid::new_v4().to_string()); - table["provider"] = value(®ion.provider); - table["provider_region"] = value(®ion.provider_region); - table["netnum"] = value(netnum); - doc.as_table_mut()[&name_id] = table; - - rivet_term::status::info("Added region", ""); - - // Save to file after each step - fs::write(&default_regions_path, doc.to_string()).await?; - } - - Ok(()) -} - -async fn fetch_linode_universal_regions() -> Result> { - #[derive(Deserialize, Debug)] - #[allow(unused)] - struct Region { - id: String, - label: String, - } - - #[derive(Deserialize, Debug)] - struct LinodeRegionsResponse { - data: Vec, - } - - // Build client - let linode_token = std::env::var("RIVET_LINODE_TOKEN").context("missing RIVET_LINODE_TOKEN")?; - let mut headers = reqwest::header::HeaderMap::new(); - headers.insert( - reqwest::header::AUTHORIZATION, - reqwest::header::HeaderValue::from_str(&format!("Bearer {linode_token}"))?, - ); - let 
client = reqwest::Client::builder() - .default_headers(headers) - .build()?; - - // Fetch regions - let resp = client - .get("https://api.linode.com/v4/regions") - .send() - .await?; - let regions: LinodeRegionsResponse = resp.json().await?; - - // Convert to universal regions - let universal_regions = regions - .data - .into_iter() - .map(|x| UniversalRegion { - debug: format!("{x:?}"), - provider: "linode".into(), - provider_region: x.id, - }) - .collect::>(); - - Ok(universal_regions) -} - -/// Finds the highest netnum in the regions configs. -fn find_max_netnum(doc: &toml_edit::Document) -> i64 { - doc.as_table() - .iter() - .map(|(_, v)| v["netnum"].as_integer().unwrap_or(0)) - .max() - .unwrap_or(0) -} diff --git a/lib/bolt/core/src/tasks/config/mod.rs b/lib/bolt/core/src/tasks/config/mod.rs index 0c2aca58b7..3a59a3b804 100644 --- a/lib/bolt/core/src/tasks/config/mod.rs +++ b/lib/bolt/core/src/tasks/config/mod.rs @@ -5,10 +5,7 @@ use toml_edit::value; use crate::context; mod generate; -mod generate_default_regions; - pub use generate::{generate, ConfigGenerator}; -pub use generate_default_regions::generate_default_regions; /// Updates the namespace in `Bolt.local.toml`. 
pub async fn set_namespace(namespace: &str) -> Result<()> { diff --git a/lib/bolt/core/src/tasks/gen.rs b/lib/bolt/core/src/tasks/gen.rs index 3f40bd8179..055e08b62f 100644 --- a/lib/bolt/core/src/tasks/gen.rs +++ b/lib/bolt/core/src/tasks/gen.rs @@ -189,7 +189,7 @@ fn update_libs<'a>(lib_path: &'a Path) -> BoxFuture<'a, ()> { async move { let mut lib_dir = fs::read_dir(lib_path).await.unwrap(); while let Some(entry) = lib_dir.next_entry().await.unwrap() { - if !entry.metadata().await.unwrap().is_dir() { + if !entry.metadata().await.unwrap().is_dir() || entry.file_name() == "nomad-client" { continue; } @@ -210,7 +210,9 @@ fn update_libs<'a>(lib_path: &'a Path) -> BoxFuture<'a, ()> { } async fn set_license(path: &Path) { - let toml = fs::read_to_string(path).await.unwrap(); + let toml = fs::read_to_string(path) + .await + .expect(&format!("could not read path: {}", path.display())); let mut doc = toml.parse::().unwrap(); let mut array = toml_edit::Array::new(); diff --git a/lib/bolt/core/src/tasks/infra/mod.rs b/lib/bolt/core/src/tasks/infra/mod.rs index c22f19eb1e..8925e12433 100644 --- a/lib/bolt/core/src/tasks/infra/mod.rs +++ b/lib/bolt/core/src/tasks/infra/mod.rs @@ -230,17 +230,6 @@ pub fn build_plan( }, }); - // Pools - if ctx.ns().dns.is_some() { - plan.push(PlanStep { - name_id: "pools", - kind: PlanStepKind::Terraform { - plan_id: "pools".into(), - needs_destroy: true, - }, - }); - } - if let Some(dns) = &ctx.ns().dns { // TODO: Allow manual DNS config diff --git a/lib/bolt/core/src/tasks/ssh.rs b/lib/bolt/core/src/tasks/ssh.rs index 08e3519abc..148446962b 100644 --- a/lib/bolt/core/src/tasks/ssh.rs +++ b/lib/bolt/core/src/tasks/ssh.rs @@ -1,11 +1,12 @@ +use std::{io::Write, os::unix::fs::PermissionsExt}; +use std::{path::Path, sync::Arc}; + use anyhow::*; use duct::cmd; use futures_util::StreamExt; -use std::{io::Write, os::unix::fs::PermissionsExt}; -use std::{path::Path, sync::Arc}; use tokio::task::block_in_place; -use crate::{context::ProjectContext, 
dep::terraform}; +use crate::{context::ProjectContext, tasks}; pub struct TempSshKey { tempfile: tempfile::NamedTempFile, @@ -54,70 +55,47 @@ pub async fn ip( Ok(()) } -pub async fn name(ctx: &ProjectContext, name: &str, command: Option<&str>) -> Result<()> { - let tf_pools = terraform::output::read_pools(ctx).await; - let server = tf_pools - .servers - .get(name) - .context("failed to find server with name")?; +pub async fn id(ctx: &ProjectContext, server_id: &str, command: Option<&str>) -> Result<()> { + let server_ips = tasks::api::get_cluster_server_ips(&ctx, ("server_id", server_id)).await?; + let server_ip = server_ips + .first() + .context(format!("failed to find server with server id {server_id}"))?; // TODO: Choose correct SSH key let ssh_key = TempSshKey::new(&ctx, "server").await?; - ip(ctx, &server.public_ipv4, &ssh_key, command).await?; + ip(ctx, &server_ip, &ssh_key, command).await?; Ok(()) } -pub async fn pool( - ctx: &ProjectContext, - pool: &str, - region: Option<&str>, - command: Option<&str>, -) -> Result<()> { - // Choose IP - let tf_pools = terraform::output::read_pools(&ctx).await; - let server = tf_pools - .servers - .value - .into_iter() - .map(|x| x.1) - .find(|x| x.pool_id == pool && region.map(|r| &x.region_id == r).unwrap_or(true)) - .expect("failed to find server pool"); +pub async fn pool(ctx: &ProjectContext, pool: &str, command: Option<&str>) -> Result<()> { + let server_ips = tasks::api::get_cluster_server_ips(&ctx, ("pool", pool)).await?; + let server_ip = server_ips + .first() + .context(format!("failed to find server with pool {pool}"))?; let ssh_key = TempSshKey::new(&ctx, "server").await?; - ip(ctx, &server.public_ipv4, &ssh_key, command).await?; + ip(ctx, &server_ip, &ssh_key, command).await?; Ok(()) } -pub async fn pool_all( - ctx: &ProjectContext, - pool: &str, - region: Option<&str>, - command: &str, -) -> Result<()> { +pub async fn pool_all(ctx: &ProjectContext, pool: &str, command: &str) -> Result<()> { + let server_ips = 
tasks::api::get_cluster_server_ips(&ctx, ("pool", pool)).await?; let ssh_key = Arc::new(TempSshKey::new(&ctx, "server").await?); - let tf_pools = terraform::output::read_pools(&ctx).await; - - futures_util::stream::iter( - tf_pools - .servers - .value - .into_iter() - .filter(|x| x.1.pool_id == pool && region.map(|r| &x.1.region_id == r).unwrap_or(true)), - ) - .map(|(name, server)| { - let ctx = ctx.clone(); - let ssh_key = ssh_key.clone(); - async move { - let res = ip(&ctx, &server.public_ipv4, &ssh_key, Some(command)).await; - println!("{name}: {res:?}"); - } - }) - .buffer_unordered(32) - .collect::>() - .await; + futures_util::stream::iter(server_ips) + .map(|server_ip| { + let ctx = ctx.clone(); + let ssh_key = ssh_key.clone(); + async move { + let res = ip(&ctx, &server_ip, &ssh_key, Some(command)).await; + println!("{res:?}"); + } + }) + .buffer_unordered(32) + .collect::>() + .await; Ok(()) } diff --git a/lib/bolt/core/src/tasks/template.rs b/lib/bolt/core/src/tasks/template.rs index d69f425256..b8d500a870 100644 --- a/lib/bolt/core/src/tasks/template.rs +++ b/lib/bolt/core/src/tasks/template.rs @@ -5,7 +5,7 @@ use std::{ use anyhow::*; use async_recursion::async_recursion; -use tokio::{fs, io::AsyncWriteExt}; +use tokio::fs; use crate::context::ProjectContext; @@ -68,19 +68,50 @@ pub async fn generate(ctx: &mut ProjectContext, opts: TemplateOpts) -> Result<() .await .is_err() { - eprintln!("{}", base_path.display()); - let relative_path = base_path - .strip_prefix(ctx.path()) - .expect("strip path") - .display(); - bail!( - "Package `{}` does not exist at `{}`. Use `--create-pkg` to suppress this message.", + "Package `{}` does not exist under the `{}` root. 
Try a different root, create the package folder yourself, or use `--create-pkg` to automatically create it.", pkg_name, - relative_path + base_path.display(), ); } + // Touch types lib to force it to rebuild generated proto when making a new package + if create_pkg { + let lib_file = base_path + .join("lib") + .join("types") + .join("build") + .join("src") + .join("lib.rs"); + let alt_lib_file = base_path + .join("lib") + .join("types") + .join("src") + .join("lib.rs"); + + if let Err(err) = fs::OpenOptions::new() + .create(true) + .write(true) + .open(lib_file) + .await + { + if !matches!(err.kind(), std::io::ErrorKind::NotFound) { + return Err(err.into()); + } + } + + if let Err(err) = fs::OpenOptions::new() + .create(true) + .write(true) + .open(alt_lib_file) + .await + { + if !matches!(err.kind(), std::io::ErrorKind::NotFound) { + return Err(err.into()); + } + } + } + // Build templating manager let mut hb = handlebars::Handlebars::new(); hb.register_helper("snake", Box::new(handlebars_helpers::snake)); @@ -278,37 +309,20 @@ async fn generate_worker_partial( { let worker_mod_path = output_path.join("src").join("workers").join("mod.rs"); rivet_term::status::progress("Editing", worker_mod_path.display()); - let mut worker_mod = fs::OpenOptions::new() - .write(true) - .append(true) - .open(worker_mod_path) - .await?; + let worker_mod_str = fs::read_to_string(&worker_mod_path).await?; - worker_mod - .write(format!("pub mod {};\n", snake_name).as_str().as_bytes()) - .await?; - } - - // Update main.rs - { - let main_path = output_path.join("src").join("main.rs"); - rivet_term::status::progress("Editing", main_path.display()); - let main_str = fs::read_to_string(&main_path).await?; - - // Find place to insert text - let insert_idx = main_str - .find("worker_group!") - .and_then(|idx| (&main_str[idx..]).find(']').map(|idx2| idx + idx2)) - .ok_or_else(|| anyhow!("Invalid main.rs file in worker: {}", main_path.display()))? 
- - 3; + let Some(bracket_idx) = worker_mod_str.find("[") else { + bail!("malformed mod.rs file"); + }; fs::write( - main_path, + worker_mod_path, &format!( - "{}\n\t\t\t{},{}", - &main_str[..insert_idx], + "pub mod {};\n{}\n\t{},{}", + snake_name, + &worker_mod_str[..bracket_idx + 1], snake_name, - &main_str[insert_idx..] + &worker_mod_str[bracket_idx + 1..] ), ) .await?; diff --git a/lib/bolt/core/src/tasks/test.rs b/lib/bolt/core/src/tasks/test.rs index 0bd525aa90..d1d1b93d44 100644 --- a/lib/bolt/core/src/tasks/test.rs +++ b/lib/bolt/core/src/tasks/test.rs @@ -1,7 +1,11 @@ +use std::fmt; + use anyhow::*; use futures_util::{StreamExt, TryStreamExt}; use rand::{seq::SliceRandom, thread_rng}; +use reqwest::header; use rivet_term::console::style; +use serde::Deserialize; use indoc::formatdoc; use std::{ @@ -179,39 +183,8 @@ pub async fn test_services>( // Print results print_results(&test_results); - // Cleanup jobs - { - eprintln!(); - rivet_term::status::progress("Cleaning up jobs", ""); - - let purge = if test_ctx.no_purge { "" } else { "-purge" }; - let cleanup_cmd = formatdoc!( - r#" - nomad job status | - grep -v -e "ID" -e "No running jobs" | - cut -f1 -d ' ' | - xargs -I {{}} nomad job stop {purge} -detach {{}} - "# - ); - - let mut cmd = Command::new("kubectl"); - cmd.args(&[ - "exec", - "service/nomad-server", - "-n", - "nomad", - "--container", - "nomad-instance", - "--", - "sh", - "-c", - &cleanup_cmd, - ]); - cmd.env("KUBECONFIG", ctx.gen_kubeconfig_path()); - - let status = cmd.status().await?; - ensure!(status.success()); - } + cleanup_jobs(ctx, test_ctx.no_purge).await?; + cleanup_servers(ctx).await?; // Error on failure let all_succeeded = test_results @@ -519,6 +492,171 @@ fn print_results(test_results: &[TestResult]) { } } +async fn cleanup_jobs(ctx: &ProjectContext, no_purge: bool) -> Result<()> { + eprintln!(); + rivet_term::status::progress("Cleaning up jobs", ""); + + let purge = if no_purge { "" } else { "-purge" }; + let cleanup_cmd = 
formatdoc!( + r#" + nomad job status | + grep -v -e "ID" -e "No running jobs" | + cut -f1 -d ' ' | + xargs -I {{}} nomad job stop {purge} -detach {{}} + "# + ); + + let mut cmd = Command::new("kubectl"); + cmd.args(&[ + "exec", + "service/nomad-server", + "-n", + "nomad", + "--container", + "nomad-instance", + "--", + "sh", + "-c", + &cleanup_cmd, + ]); + cmd.env("KUBECONFIG", ctx.gen_kubeconfig_path()); + + let status = cmd.status().await?; + ensure!(status.success()); + + Ok(()) +} + +#[derive(Deserialize)] +struct ApiErrorResponse { + errors: Vec, +} + +impl fmt::Display for ApiErrorResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for error in &self.errors { + if let Some(field) = &error.field { + write!(f, "{:?}: ", field)?; + } + + writeln!(f, "{}", error.reason)?; + } + + std::result::Result::Ok(()) + } +} + +#[derive(Deserialize)] +struct ApiError { + field: Option, + reason: String, +} + +#[derive(Debug, Deserialize)] +struct TaggedObjectsListResponse { + data: Vec, +} + +#[derive(Debug, Deserialize)] +struct TaggedObject { + #[serde(rename = "type")] + _type: String, + data: Data, +} + +#[derive(Debug, Deserialize)] +struct Data { + id: u64, +} + +// TODO: This only deletes linodes and firewalls, the ssh key still remains +async fn cleanup_servers(ctx: &ProjectContext) -> Result<()> { + eprintln!(); + rivet_term::status::progress("Cleaning up servers", ""); + + // Create client + let api_token = ctx.read_secret(&["linode", "token"]).await?; + let auth = format!("Bearer {}", api_token,); + let mut headers = header::HeaderMap::new(); + headers.insert(header::AUTHORIZATION, header::HeaderValue::from_str(&auth)?); + let client = reqwest::Client::builder() + .default_headers(headers) + .build()?; + + // Fetch all objects with the tag "test" + let test_tag = "test"; + let res = client + .get(format!("https://api.linode.com/v4/tags/{test_tag}")) + .send() + .await?; + + if !res.status().is_success() { + bail!( + "api request failed 
({}):\n{}", + res.status(), + res.json::().await? + ); + } + + // Deserialize + let res = res.json::().await?; + + futures_util::stream::iter(res.data) + .map(|object| { + let client = client.clone(); + let obj_type = object._type; + let id = object.data.id; + + async move { + eprintln!("destroying {} {}", obj_type, id); + + // Destroy resource + let res = match obj_type.as_str() { + "linode" => { + client + .delete(format!("https://api.linode.com/v4/linode/instances/{}", id)) + .send() + .await? + } + "firewall" => { + client + .delete(format!( + "https://api.linode.com/v4/networking/firewalls/{}", + id + )) + .send() + .await? + } + _ => { + eprintln!("unknown type tagged with \"test\": {}", obj_type); + return Ok(()); + } + }; + + if !res.status().is_success() { + // Resource does not exist to be deleted, not an error + if res.status() == reqwest::StatusCode::NOT_FOUND { + eprintln!("{} {} doesn't exist, skipping", obj_type, id); + return Ok(()); + } + + bail!( + "api request failed ({}):\n{}", + res.status(), + res.json::().await? + ); + } + + Ok(()) + } + }) + .buffer_unordered(8) + .try_collect::>() + .await?; + + Ok(()) +} + fn gen_test_id() -> String { let mut rng = thread_rng(); (0..8) diff --git a/lib/cache/build/src/req_config.rs b/lib/cache/build/src/req_config.rs index adde31e7c0..629db53c1f 100644 --- a/lib/cache/build/src/req_config.rs +++ b/lib/cache/build/src/req_config.rs @@ -43,13 +43,13 @@ impl RequestConfig { /// Sets the TTL for the keys in ms. /// - /// Defaults to 2 days. + /// Defaults to 2 hours. pub fn ttl(mut self, ttl: i64) -> Self { self.ttl = ttl; self } - /// Deterines if the value for this key can change. If the value is immutable, we apply more + /// Determines if the value for this key can change. If the value is immutable, we apply more /// aggressive caching rules to it. 
pub fn immutable(mut self) -> Self { self.immutable = true; @@ -622,7 +622,7 @@ impl RequestConfig { { self.fetch_all_proto_with_keys::(base_key, keys, getter) .await - // TODO: Find a way to not allowcate another vec here + // TODO: Find a way to not allocate another vec here .map(|x| x.into_iter().map(|(_, v)| v).collect::>()) } diff --git a/lib/chirp/worker/src/test.rs b/lib/chirp/worker/src/test.rs index 010afad424..044a0c5b0b 100644 --- a/lib/chirp/worker/src/test.rs +++ b/lib/chirp/worker/src/test.rs @@ -8,6 +8,7 @@ use crate::error::ManagerError; #[derive(Clone)] pub struct TestCtx { + name: String, op_ctx: OperationContext<()>, } @@ -27,7 +28,7 @@ impl TestCtx { .wrap_new(&service_name); let conn = rivet_connection::Connection::new(client, pools, cache); let op_ctx = OperationContext::new( - service_name, + service_name.clone(), Duration::from_secs(60), conn, Uuid::new_v4(), @@ -38,13 +39,13 @@ impl TestCtx { Vec::new(), ); - Ok(TestCtx { op_ctx }) + Ok(TestCtx { name: service_name, op_ctx }) } } impl TestCtx { pub fn name(&self) -> &str { - self.op_ctx.name() + &self.name } pub fn chirp(&self) -> &chirp_client::Client { diff --git a/lib/claims/src/lib.rs b/lib/claims/src/lib.rs index 5e2bc2feb1..124b53246b 100644 --- a/lib/claims/src/lib.rs +++ b/lib/claims/src/lib.rs @@ -333,6 +333,17 @@ pub mod ent { }) } } + + #[derive(Clone, Debug)] + pub struct Server {} + + impl TryFrom<&schema::entitlement::Server> for Server { + type Error = GlobalError; + + fn try_from(_value: &schema::entitlement::Server) -> GlobalResult { + Ok(Server {}) + } + } } pub trait ClaimsDecode { @@ -356,6 +367,7 @@ pub trait ClaimsDecode { fn as_cloud_device_link(&self) -> GlobalResult; fn as_bypass(&self) -> GlobalResult; fn as_access_token(&self) -> GlobalResult; + fn as_server(&self) -> GlobalResult; } impl ClaimsDecode for schema::Claims { @@ -606,6 +618,20 @@ impl ClaimsDecode for schema::Claims { .and_then(std::convert::identity) } + fn as_server(&self) -> GlobalResult { + 
self.entitlements + .iter() + .find_map(|ent| match &ent.kind { + Some(schema::entitlement::Kind::Server(ent)) => Some(ent::Server::try_from(ent)), + _ => None, + }) + .ok_or(err_code!( + CLAIMS_MISSING_ENTITLEMENT, + entitlement = "Server" + )) + .and_then(std::convert::identity) + } + fn as_access_token(&self) -> GlobalResult { self.entitlements .iter() @@ -647,6 +673,7 @@ impl EntitlementTag for schema::Entitlement { schema::entitlement::Kind::CloudDeviceLink(_) => 14, schema::entitlement::Kind::Bypass(_) => 15, schema::entitlement::Kind::AccessToken(_) => 16, + schema::entitlement::Kind::Server(_) => 17, }) } } diff --git a/lib/convert/src/convert/game.rs b/lib/convert/src/convert/game.rs index 33dbcf8af4..690b2b6e54 100644 --- a/lib/convert/src/convert/game.rs +++ b/lib/convert/src/convert/game.rs @@ -2,7 +2,7 @@ use rivet_api::models; use rivet_operation::prelude::*; use types::rivet::backend; -use crate::{convert, fetch, ApiInto, ApiTryInto}; +use crate::{convert, fetch, ApiTryInto}; pub fn handle(game: &backend::game::Game) -> GlobalResult { Ok(models::GameHandle { @@ -45,10 +45,6 @@ pub fn region_summary( region_id: unwrap_ref!(region.region_id).as_uuid(), region_name_id: region.name_id.clone(), provider: region.provider.clone(), - universal_region: unwrap!(backend::region::UniversalRegion::from_i32( - region.universal_region - )) - .api_into(), provider_display_name: region.provider_display_name.clone(), region_display_name: region.region_display_name.clone(), }) diff --git a/lib/convert/src/fetch/identity.rs b/lib/convert/src/fetch/identity.rs index cbb55029e5..a9d2cbb917 100644 --- a/lib/convert/src/fetch/identity.rs +++ b/lib/convert/src/fetch/identity.rs @@ -1,4 +1,4 @@ -use std::collections::{HashMap, HashSet}; +use std::collections::HashSet; use proto::{ backend::{self, pkg::*}, @@ -7,7 +7,7 @@ use proto::{ use rivet_api::models; use rivet_operation::prelude::*; -use crate::{convert, fetch}; +use crate::convert; #[derive(Debug)] pub struct TeamsCtx 
{ @@ -39,7 +39,7 @@ pub async fn handles( let (users, presences_ctx, mutual_follows) = tokio::try_join!( users(ctx, user_ids.clone()), - presence_data(ctx, current_user_id, user_ids.clone(), false), + presence_data(ctx, user_ids.clone(), false), mutual_follows(ctx, current_user_id, raw_user_ids), )?; @@ -78,7 +78,7 @@ pub async fn summaries( let (users, presences_ctx, mutual_follows) = tokio::try_join!( users(ctx, user_ids.clone()), - presence_data(ctx, current_user_id, user_ids.clone(), false), + presence_data(ctx, user_ids.clone(), false), mutual_follows(ctx, current_user_id, raw_user_ids), )?; @@ -127,7 +127,7 @@ pub async fn profiles( self_is_game_linked, ) = tokio::try_join!( users(ctx, user_ids.clone()), - presence_data(ctx, current_user_id, user_ids.clone(), true), + presence_data(ctx, user_ids.clone(), true), teams(ctx, user_ids.clone()), mutual_follows(ctx, current_user_id, raw_user_ids), follows(ctx, user_ids.clone()), @@ -170,7 +170,6 @@ pub async fn users( pub async fn presence_data( ctx: &OperationContext<()>, - current_user_id: Uuid, user_ids: Vec, summary_info: bool, ) -> GlobalResult { diff --git a/lib/convert/src/impls/admin.rs b/lib/convert/src/impls/admin.rs new file mode 100644 index 0000000000..797ea6ddba --- /dev/null +++ b/lib/convert/src/impls/admin.rs @@ -0,0 +1,15 @@ +use proto::backend; +use rivet_operation::prelude::*; +use rivet_api::models; + +use crate::ApiFrom; + +impl ApiFrom for backend::cluster::PoolType { + fn api_from(value: models::AdminPoolType) -> backend::cluster::PoolType { + match value { + models::AdminPoolType::Job => backend::cluster::PoolType::Job, + models::AdminPoolType::Gg => backend::cluster::PoolType::Gg, + models::AdminPoolType::Ats => backend::cluster::PoolType::Ats, + } + } +} diff --git a/lib/convert/src/impls/cloud/mod.rs b/lib/convert/src/impls/cloud/mod.rs index fde4bf5dfe..4494f329b4 100644 --- a/lib/convert/src/impls/cloud/mod.rs +++ b/lib/convert/src/impls/cloud/mod.rs @@ -228,40 +228,3 @@ impl 
ApiTryFrom for models::CloudRegionTier { }) } } - -impl ApiFrom for models::CloudUniversalRegion { - fn api_from(value: backend::region::UniversalRegion) -> Self { - use backend::region::UniversalRegion::*; - - match value { - Unknown => models::CloudUniversalRegion::Unknown, - Local => models::CloudUniversalRegion::Local, - Amsterdam => models::CloudUniversalRegion::Amsterdam, - Atlanta => models::CloudUniversalRegion::Atlanta, - Bangalore => models::CloudUniversalRegion::Bangalore, - Dallas => models::CloudUniversalRegion::Dallas, - Frankfurt => models::CloudUniversalRegion::Frankfurt, - London => models::CloudUniversalRegion::London, - Mumbai => models::CloudUniversalRegion::Mumbai, - Newark => models::CloudUniversalRegion::Newark, - NewYorkCity => models::CloudUniversalRegion::NewYorkCity, - SanFrancisco => models::CloudUniversalRegion::SanFrancisco, - Singapore => models::CloudUniversalRegion::Singapore, - Sydney => models::CloudUniversalRegion::Sydney, - Tokyo => models::CloudUniversalRegion::Tokyo, - Toronto => models::CloudUniversalRegion::Toronto, - WashingtonDc => models::CloudUniversalRegion::WashingtonDc, - Chicago => models::CloudUniversalRegion::Chicago, - Paris => models::CloudUniversalRegion::Paris, - Seattle => models::CloudUniversalRegion::Seattle, - SaoPaulo => models::CloudUniversalRegion::SaoPaulo, - Stockholm => models::CloudUniversalRegion::Stockholm, - Chennai => models::CloudUniversalRegion::Chennai, - Osaka => models::CloudUniversalRegion::Osaka, - Milan => models::CloudUniversalRegion::Milan, - Miami => models::CloudUniversalRegion::Miami, - Jakarta => models::CloudUniversalRegion::Jakarta, - LosAngeles => models::CloudUniversalRegion::LosAngeles, - } - } -} diff --git a/lib/convert/src/impls/mod.rs b/lib/convert/src/impls/mod.rs index 5faad8a20c..e9c9adf8a1 100644 --- a/lib/convert/src/impls/mod.rs +++ b/lib/convert/src/impls/mod.rs @@ -1,3 +1,4 @@ +pub mod admin; pub mod api; pub mod cloud; pub mod group; diff --git 
a/lib/formatted-error/src/utils.rs b/lib/formatted-error/src/utils.rs index 6435443a6b..ff7790ff85 100644 --- a/lib/formatted-error/src/utils.rs +++ b/lib/formatted-error/src/utils.rs @@ -11,7 +11,7 @@ pub(crate) fn render_template(template: &'static str, context: &HashMap Result { - let nomad_url = - std::env::var("NOMAD_URL").map_err(|_| NomadError::MissingEnvVar("NOMAD_URL".into()))?; - let config = nomad_client::apis::configuration::Configuration { + let nomad_url = std::env::var("NOMAD_URL") + .map_err(|_| NomadError::MissingEnvVar("NOMAD_URL".into()))?; + let config = Configuration { + base_path: format!("{}/v1", nomad_url), + ..Default::default() + }; + + Ok(config) +} + +pub fn new_config_from_env() -> Result { + let nomad_url = std::env::var("NOMAD_URL") + .map_err(|_| NomadError::MissingEnvVar("NOMAD_URL".into()))?; + let config = nomad_client_new::apis::configuration::Configuration { base_path: format!("{}/v1", nomad_url), ..Default::default() }; diff --git a/lib/nomad-util/src/monitor.rs b/lib/nomad-util/src/monitor.rs index 247f18ff10..0a931c1ce0 100644 --- a/lib/nomad-util/src/monitor.rs +++ b/lib/nomad-util/src/monitor.rs @@ -1,9 +1,10 @@ +use std::{future::Future, pin::Pin}; + use futures_util::{Stream, StreamExt}; -use nomad_client::apis::configuration::Configuration; +use nomad_client_new::apis::configuration::Configuration; use redis::AsyncCommands; use rivet_pools::prelude::*; use serde::{de::DeserializeOwned, Deserialize}; -use std::{future::Future, pin::Pin}; use crate::error::NomadError; @@ -33,7 +34,7 @@ impl NomadEvent { if self.topic != topic || self.r#type != ty { return Ok(None); } - + let payload = serde_json::from_str(self.payload.get())?; Ok(payload) diff --git a/lib/s3-util/src/lib.rs b/lib/s3-util/src/lib.rs index ad40a2d0af..895a0cba36 100644 --- a/lib/s3-util/src/lib.rs +++ b/lib/s3-util/src/lib.rs @@ -190,3 +190,43 @@ impl Client { &self.bucket } } + +pub fn s3_provider_active(svc_name: &str, provider: Provider) -> bool { + let 
svc_screaming = svc_name.to_uppercase().replace("-", "_"); + let provider_upper = provider.as_str().to_uppercase(); + + std::env::var(format!("S3_{}_BUCKET_{}", provider_upper, svc_screaming)).is_ok() +} + +pub fn s3_region(svc_name: &str, provider: Provider) -> Result { + let svc_screaming = svc_name.to_uppercase().replace("-", "_"); + let provider_upper = provider.as_str().to_uppercase(); + + std::env::var(format!("S3_{}_REGION_{}", provider_upper, svc_screaming)).map_err(Into::into) +} + +pub fn s3_credentials(svc_name: &str, provider: Provider) -> Result<(String, String), Error> { + let svc_screaming = svc_name.to_uppercase().replace("-", "_"); + let provider_upper = provider.as_str().to_uppercase(); + + let access_key_id = std::env::var(format!( + "S3_{}_ACCESS_KEY_ID_{}", + provider_upper, svc_screaming + ))?; + let secret_access_key = std::env::var(format!( + "S3_{}_SECRET_ACCESS_KEY_{}", + provider_upper, svc_screaming + ))?; + + Ok((access_key_id, secret_access_key)) +} + +pub fn s3_endpoint_external(svc_name: &str, provider: Provider) -> Result { + let svc_screaming = svc_name.to_uppercase().replace("-", "_"); + let provider_upper = provider.as_str().to_uppercase(); + + std::env::var(format!( + "S3_{}_ENDPOINT_EXTERNAL_{}", + provider_upper, svc_screaming + )).map_err(Into::into) +} diff --git a/lib/util/core/Cargo.toml b/lib/util/core/Cargo.toml index b4a7b018bb..b1e6837fe6 100644 --- a/lib/util/core/Cargo.toml +++ b/lib/util/core/Cargo.toml @@ -16,6 +16,7 @@ chrono = "0.4" formatted-error = { path = "../../formatted-error", optional = true } futures-util = "0.3" global-error = { path = "../../global-error" } +ipnet = { version = "2.7", features = ["serde"] } lazy_static = "1.4" rand = "0.8" regex = "1.4" diff --git a/lib/util/core/src/feature.rs b/lib/util/core/src/feature.rs index e104bd4686..076c12088b 100644 --- a/lib/util/core/src/feature.rs +++ b/lib/util/core/src/feature.rs @@ -24,5 +24,9 @@ pub fn billing() -> bool { } pub fn job_run() -> bool { 
- std::env::var("RIVET_HAS_POOLS").ok().is_some() + server_provision() +} + +pub fn server_provision() -> bool { + std::env::var("RIVET_DEFAULT_CLUSTER_CONFIG").ok().is_some() } diff --git a/lib/util/core/src/lib.rs b/lib/util/core/src/lib.rs index aa409c762d..0884772c27 100644 --- a/lib/util/core/src/lib.rs +++ b/lib/util/core/src/lib.rs @@ -15,6 +15,7 @@ pub mod future; pub mod geo; pub mod glob; pub mod math; +pub mod net; pub mod route; pub mod sort; pub mod timestamp; diff --git a/lib/util/core/src/math.rs b/lib/util/core/src/math.rs index ec4319de88..1a8df67e04 100644 --- a/lib/util/core/src/math.rs +++ b/lib/util/core/src/math.rs @@ -1,3 +1,44 @@ +use std::cmp::Ordering; + +fn _cmp_floats(a: f32, b: f32) -> Ordering { + a.partial_cmp(&b).unwrap_or_else(|| { + if a.is_nan() { + if b.is_nan() { + Ordering::Equal + } else { + Ordering::Less + } + } else { + Ordering::Greater + } + }) +} + +fn _cmp_floats_opt(a: Option, b: Option) -> Ordering { + match a.partial_cmp(&b) { + Some(ord) => ord, + None => { + if let (Some(a), Some(b)) = (a, b) { + if a.is_nan() { + if b.is_nan() { + Ordering::Equal + } else { + Ordering::Less + } + } else if b.is_nan() { + Ordering::Greater + } else { + // unreachable + Ordering::Less + } + } else { + // unreachable + Ordering::Less + } + } + } +} + /// Divide integers of any type, rounding up. Panics on dividing by 0. #[macro_export] macro_rules! 
div_up { diff --git a/lib/util/core/src/net.rs b/lib/util/core/src/net.rs new file mode 100644 index 0000000000..9223ee021e --- /dev/null +++ b/lib/util/core/src/net.rs @@ -0,0 +1,159 @@ +pub struct FirewallRule { + pub label: String, + pub ports: String, + pub protocol: String, + pub inbound_ipv4_cidr: Vec, + pub inbound_ipv6_cidr: Vec, +} + +pub fn default_firewall() -> FirewallRule { + FirewallRule { + label: "ssh".into(), + ports: "22".into(), + protocol: "tcp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + } +} + +pub mod region { + use std::net::Ipv4Addr; + + use ipnet::Ipv4Net; + + pub fn vlan_ip_net() -> Ipv4Net { + Ipv4Net::new(Ipv4Addr::new(10, 0, 0, 0), 16).unwrap() + } +} + +pub mod gg { + use std::net::Ipv4Addr; + + use ipnet::{Ipv4AddrRange, Ipv4Net}; + + use super::{default_firewall, FirewallRule, job}; + + pub fn vlan_ip_net() -> Ipv4Net { + Ipv4Net::new(Ipv4Addr::new(10, 0, 0, 0), 26).unwrap() + } + + pub fn vlan_addr_range() -> Ipv4AddrRange { + vlan_ip_net().hosts() + } + + pub fn firewall() -> Vec { + vec![ + default_firewall(), + // HTTP(S) + FirewallRule { + label: "http-tcp".into(), + ports: "80".into(), + protocol: "tcp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + FirewallRule { + label: "http-udp".into(), + ports: "80".into(), + protocol: "udp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + FirewallRule { + label: "https-tcp".into(), + ports: "443".into(), + protocol: "tcp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + FirewallRule { + label: "https-udp".into(), + ports: "443".into(), + protocol: "udp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + // Dynamic TCP + FirewallRule { + label: "dynamic-tcp".into(), + ports: format!("{}-{}", job::MIN_INGRESS_PORT_TCP, 
job::MAX_INGRESS_PORT_TCP), + protocol: "tcp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + // Dynamic UDP + FirewallRule { + label: "dynamic-udp".into(), + ports: format!("{}-{}", job::MIN_INGRESS_PORT_UDP, job::MAX_INGRESS_PORT_UDP), + protocol: "udp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + ] + } +} + +pub mod ats { + use std::net::Ipv4Addr; + + use ipnet::{Ipv4AddrRange, Ipv4Net}; + + use super::{default_firewall, FirewallRule}; + + pub fn vlan_ip_net() -> Ipv4Net { + Ipv4Net::new(Ipv4Addr::new(10, 0, 0, 64), 26).unwrap() + } + + pub fn vlan_addr_range() -> Ipv4AddrRange { + vlan_ip_net().hosts() + } + + pub fn firewall() -> Vec { + vec![default_firewall()] + } +} + +// 10.0.64-10.0.4.0 reserved for more services + +pub mod job { + use std::net::Ipv4Addr; + + use ipnet::Ipv4AddrRange; + + use super::{default_firewall, FirewallRule}; + + // Port ranges for the load balancer hosts + // 20000-26000 are for traffic from gg on LAN + // 26000-31999 is for host networking only + pub const MIN_INGRESS_PORT_TCP: u16 = 20000; + pub const MIN_HOST_PORT_TCP: u16 = 26000; + pub const MAX_INGRESS_PORT_TCP: u16 = 31999; + pub const MIN_INGRESS_PORT_UDP: u16 = 20000; + pub const MIN_HOST_PORT_UDP: u16 = 26000; + pub const MAX_INGRESS_PORT_UDP: u16 = 31999; + + pub fn vlan_addr_range() -> Ipv4AddrRange { + Ipv4AddrRange::new(Ipv4Addr::new(10, 0, 4, 1), Ipv4Addr::new(10, 0, 255, 254)) + } + + pub fn firewall() -> Vec { + vec![ + default_firewall(), + // Ports available to Nomad jobs using the host network + FirewallRule { + label: "nomad-host-tcp".into(), + ports: format!("{}-{}", MIN_HOST_PORT_TCP, MAX_INGRESS_PORT_TCP), + protocol: "tcp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + FirewallRule { + label: "nomad-host-udp".into(), + ports: format!("{}-{}", MIN_HOST_PORT_UDP, MAX_INGRESS_PORT_UDP), + 
protocol: "udp".into(), + inbound_ipv4_cidr: vec!["0.0.0.0/0".into()], + inbound_ipv6_cidr: vec!["::/0".into()], + }, + ] + } +} diff --git a/lib/util/env/src/lib.rs b/lib/util/env/src/lib.rs index 7200ff1021..70c38712b3 100644 --- a/lib/util/env/src/lib.rs +++ b/lib/util/env/src/lib.rs @@ -1,4 +1,5 @@ use serde::Deserialize; +use uuid::Uuid; #[derive(Debug, thiserror::Error)] pub enum EnvVarError { @@ -13,7 +14,6 @@ lazy_static::lazy_static! { static ref RUN_CONTEXT: Option = std::env::var("RIVET_RUN_CONTEXT") .ok() .and_then(|ctx| RunContext::from_str(&ctx)); - static ref PRIMARY_REGION: Option = std::env::var("RIVET_PRIMARY_REGION").ok(); static ref NAMESPACE: Option = std::env::var("RIVET_NAMESPACE").ok(); static ref CLUSTER_ID: Option = std::env::var("RIVET_CLUSTER_ID").ok(); static ref SOURCE_HASH: Option = std::env::var("RIVET_SOURCE_HASH").ok(); @@ -33,7 +33,6 @@ lazy_static::lazy_static! { static ref BILLING: Option = std::env::var("RIVET_BILLING") .ok() .map(|x| serde_json::from_str(&x).expect("failed to parse billing")); - static ref CLUSTER_TYPE: Option = std::env::var("RIVET_CLUSTER_ID").ok(); } /// Where this code is being written from. This is derived from the `RIVET_RUN_CONTEXT` environment @@ -60,6 +59,11 @@ pub fn run_context() -> RunContext { RUN_CONTEXT.clone().expect("RIVET_RUN_CONTEXT") } +// Cluster id for provisioning servers +pub fn default_cluster_id() -> Uuid { + Uuid::nil() +} + /// The namespace this service is running in. This is derived from the `NAMESPACE` environment /// variable. 
pub fn namespace() -> &'static str { @@ -137,13 +141,6 @@ pub fn dns_provider() -> Option<&'static str> { DNS_PROVIDER.as_ref().map(|x| x.as_str()) } -pub fn primary_region() -> &'static str { - match &*PRIMARY_REGION { - Some(x) => x.as_str(), - None => panic!("RIVET_PRIMARY_REGION"), - } -} - pub fn chirp_service_name() -> &'static str { match &*CHIRP_SERVICE_NAME { Some(x) => x.as_str(), @@ -187,10 +184,7 @@ pub mod cloudflare { pub fn auth_token() -> &'static str { match &*CLOUDFLARE_AUTH_TOKEN { Some(x) => x.as_str(), - None => panic!( - "{}", - EnvVarError::Missing("CLOUDFLARE_AUTH_TOKEN".to_string()) - ), + None => panic!("{}", EnvVarError::Missing("CLOUDFLARE_AUTH_TOKEN".to_string())), } } @@ -234,7 +228,9 @@ pub async fn read_secret(key: &[impl AsRef]) -> Result var(secret_env_var_key(key)) } -pub async fn read_secret_opt(key: &[impl AsRef]) -> Result, EnvVarError> { +pub async fn read_secret_opt( + key: &[impl AsRef], +) -> Result, EnvVarError> { let env_var = read_secret(key).await; match env_var { @@ -256,7 +252,6 @@ pub fn secret_env_var_key(key: &[impl AsRef]) -> String { pub fn var(name: impl AsRef) -> Result { let env_var = std::env::var(name.as_ref()); - match env_var { Ok(v) => Ok(v), Err(var_error) => match var_error { diff --git a/proto/backend/cluster.proto b/proto/backend/cluster.proto new file mode 100644 index 0000000000..f5459d02c1 --- /dev/null +++ b/proto/backend/cluster.proto @@ -0,0 +1,67 @@ +syntax = "proto3"; + +package rivet.backend.cluster; + +import "proto/common.proto"; +import "proto/backend/net.proto"; + +message Cluster { + rivet.common.Uuid cluster_id = 1; + string name_id = 2; + int64 create_ts = 3; + optional rivet.common.Uuid owner_team_id = 4; +} + +enum Provider { + LINODE = 0; +} + +message Datacenter { + rivet.common.Uuid datacenter_id = 1; + rivet.common.Uuid cluster_id = 2; + string name_id = 3; + string display_name = 4; + + Provider provider = 5; + string provider_datacenter_id = 6; + + repeated Pool pools = 7; + 
BuildDeliveryMethod build_delivery_method = 8; + // Nomad drain time in seconds. + uint64 drain_timeout = 9; +} + +message Pool { + PoolType pool_type = 1; + repeated Hardware hardware = 2; + uint32 desired_count = 3; + uint32 max_count = 4; +} + +enum PoolType { + JOB = 0; + GG = 1; + ATS = 2; +} + +message Hardware { + string provider_hardware = 1; +} + +enum BuildDeliveryMethod { + TRAFFIC_SERVER = 0; + S3_DIRECT = 1; +} + +message Server { + rivet.common.Uuid server_id = 1; + rivet.common.Uuid datacenter_id = 2; + rivet.common.Uuid cluster_id = 3; + PoolType pool_type = 4; + optional string vlan_ip = 5; + optional string public_ip = 6; + + optional int64 cloud_destroy_ts = 7; + + // TODO: Add the rest of the sql columns +} diff --git a/proto/backend/net.proto b/proto/backend/net.proto index 060ce92c4b..77beee0e57 100644 --- a/proto/backend/net.proto +++ b/proto/backend/net.proto @@ -8,9 +8,14 @@ message ClientInfo { } message IpInfo { + reserved 2, 3; string ip = 1; - double latitude = 2; - double longitude = 3; + Coordinates coords = 4; +} + +message Coordinates { + double latitude = 1; + double longitude = 2; } enum HttpMethod { diff --git a/proto/backend/region.proto b/proto/backend/region.proto index 15eb0f97c9..2100885e91 100644 --- a/proto/backend/region.proto +++ b/proto/backend/region.proto @@ -3,9 +3,11 @@ syntax = "proto3"; package rivet.backend.region; import "proto/common.proto"; +import "proto/backend/net.proto"; +import "proto/backend/cluster.proto"; message Region { - reserved 13; + reserved 6, 10, 11, 12, 13; rivet.common.Uuid region_id = 1; bool enabled = 14; @@ -13,46 +15,14 @@ message Region { string nomad_datacenter = 3; string provider = 4; string provider_region = 5; - rivet.common.Uuid cdn_region_id = 12; - UniversalRegion universal_region = 6; string provider_display_name = 7; string region_display_name = 8; string name_id = 9; - double latitude = 10; - double longitude = 11; -} - -enum UniversalRegion { - UNKNOWN = 0; - LOCAL = 1; + 
rivet.backend.net.Coordinates coords = 15; - AMSTERDAM = 2; - ATLANTA = 13; - BANGALORE = 3; - DALLAS = 12; - FRANKFURT = 4; - LONDON = 5; - MUMBAI = 10; - NEWARK = 16; - NEW_YORK_CITY = 6; - SAN_FRANCISCO = 7; - SINGAPORE = 8; - SYDNEY = 11; - TOKYO = 15; - TORONTO = 9; - WASHINGTON_DC = 17; - CHICAGO = 18; - PARIS = 19; - SEATTLE = 20; - SAO_PAULO = 21; - STOCKHOLM = 23; - CHENNAI = 24; - OSAKA = 25; - MILAN = 26; - MIAMI = 27; - JAKARTA = 28; - LOS_ANGELES = 29; + // INHERITED FROM Datacenter + rivet.backend.cluster.BuildDeliveryMethod build_delivery_method = 100; } message Tier { diff --git a/proto/claims.proto b/proto/claims.proto index dbc6564ae8..76188eb408 100644 --- a/proto/claims.proto +++ b/proto/claims.proto @@ -107,6 +107,11 @@ message Entitlement { string name = 1; } + // Issued to provisioned servers for communication with our API. + message Server { + + } + oneof kind { Refresh refresh = 1; User user = 2; @@ -123,6 +128,7 @@ message Entitlement { CloudDeviceLink cloud_device_link = 14; Bypass bypass = 15; AccessToken access_token = 16; + Server server = 17; } reserved 13; diff --git a/scripts/openapi/gen_rust.sh b/scripts/openapi/gen_rust.sh index a0c0f5887a..b531acc717 100755 --- a/scripts/openapi/gen_rust.sh +++ b/scripts/openapi/gen_rust.sh @@ -21,6 +21,7 @@ docker run --rm \ # Fix OpenAPI bug (https://github.com/OpenAPITools/openapi-generator/issues/14171) sed -i 's/CloudGamesLogStream/crate::models::CloudGamesLogStream/' "$GEN_PATH_RUST/src/apis/cloud_games_matchmaker_api.rs" sed -i 's/PortalNotificationUnregisterService/crate::models::PortalNotificationUnregisterService/' "$GEN_PATH_RUST/src/apis/portal_notifications_api.rs" +sed -i 's/AdminPoolType/crate::models::AdminPoolType/' "$GEN_PATH_RUST/src/apis/admin_cluster_api.rs" if [ "$FERN_GROUP" == "full" ]; then # Create variant specifically for the CLI @@ -29,4 +30,3 @@ if [ "$FERN_GROUP" == "full" ]; then # HACK: Modify libraries to disallow unknown fields in config find $GEN_PATH_RUST_CLI 
-name "cloud_version_*.rs" -exec sed -i 's/\(#\[derive.*Deserialize.*\]\)/\1\n#[serde(deny_unknown_fields)]/g' {} \; fi - diff --git a/sdks/full/go/admin/client/client.go b/sdks/full/go/admin/client/client.go index b8075dd7fd..6a3b90a4ae 100644 --- a/sdks/full/go/admin/client/client.go +++ b/sdks/full/go/admin/client/client.go @@ -11,6 +11,7 @@ import ( http "net/http" sdk "sdk" admin "sdk/admin" + clusterclient "sdk/admin/cluster/client" core "sdk/core" ) @@ -18,6 +19,8 @@ type Client struct { baseURL string caller *core.Caller header http.Header + + Cluster *clusterclient.Client } func NewClient(opts ...core.ClientOption) *Client { @@ -29,6 +32,7 @@ func NewClient(opts ...core.ClientOption) *Client { baseURL: options.BaseURL, caller: core.NewCaller(options.HTTPClient), header: options.ToHeader(), + Cluster: clusterclient.NewClient(opts...), } } diff --git a/sdks/full/go/admin/cluster/client/client.go b/sdks/full/go/admin/cluster/client/client.go new file mode 100644 index 0000000000..2c5b5c3690 --- /dev/null +++ b/sdks/full/go/admin/cluster/client/client.go @@ -0,0 +1,123 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package client + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + url "net/url" + sdk "sdk" + cluster "sdk/admin/cluster" + core "sdk/core" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +func (c *Client) GetServerIps(ctx context.Context, request *cluster.GetServerIpsRequest) (*cluster.GetServerIpsResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := baseURL + "/" + "cluster/server_ips" + + queryParams := make(url.Values) + if request.ServerId != nil { + queryParams.Add("server_id", fmt.Sprintf("%v", *request.ServerId)) + } + if request.Pool != nil { + queryParams.Add("pool", fmt.Sprintf("%v", *request.Pool)) + } + if len(queryParams) > 0 { + endpointURL += "?" 
+ queryParams.Encode() + } + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *cluster.GetServerIpsResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/full/go/admin/cluster/cluster.go b/sdks/full/go/admin/cluster/cluster.go new file mode 100644 index 0000000000..1d7c9cb0b0 --- /dev/null +++ b/sdks/full/go/admin/cluster/cluster.go @@ -0,0 +1,38 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package cluster + +import ( + json "encoding/json" + fmt "fmt" + core "sdk/core" +) + +type GetServerIpsResponse struct { + Ips []string `json:"ips,omitempty"` + + _rawJSON json.RawMessage +} + +func (g *GetServerIpsResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetServerIpsResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetServerIpsResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetServerIpsResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} diff --git a/sdks/full/go/admin/cluster/types.go b/sdks/full/go/admin/cluster/types.go new file mode 100644 index 0000000000..85105c711b --- /dev/null +++ b/sdks/full/go/admin/cluster/types.go @@ -0,0 +1,13 @@ +// This file was auto-generated by Fern from our API Definition. + +package cluster + +import ( + uuid "github.com/google/uuid" + admin "sdk/admin" +) + +type GetServerIpsRequest struct { + ServerId *uuid.UUID `json:"-"` + Pool *admin.PoolType `json:"-"` +} diff --git a/sdks/full/go/admin/types.go b/sdks/full/go/admin/types.go new file mode 100644 index 0000000000..bbe43b6c8e --- /dev/null +++ b/sdks/full/go/admin/types.go @@ -0,0 +1,32 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package admin + +import ( + fmt "fmt" +) + +type PoolType string + +const ( + PoolTypeJob PoolType = "job" + PoolTypeGg PoolType = "gg" + PoolTypeAts PoolType = "ats" +) + +func NewPoolTypeFromString(s string) (PoolType, error) { + switch s { + case "job": + return PoolTypeJob, nil + case "gg": + return PoolTypeGg, nil + case "ats": + return PoolTypeAts, nil + } + var t PoolType + return "", fmt.Errorf("%s is not a valid %T", s, t) +} + +func (p PoolType) Ptr() *PoolType { + return &p +} diff --git a/sdks/full/go/client/client.go b/sdks/full/go/client/client.go index 93143d6fe0..eea007717b 100644 --- a/sdks/full/go/client/client.go +++ b/sdks/full/go/client/client.go @@ -15,6 +15,7 @@ import ( matchmakerclient "sdk/matchmaker/client" moduleclient "sdk/module/client" portalclient "sdk/portal/client" + provisionclient "sdk/provision/client" ) type Client struct { @@ -28,6 +29,7 @@ type Client struct { Identity *identityclient.Client Kv *kvclient.Client Module *moduleclient.Client + Provision *provisionclient.Client Auth *authclient.Client Job *jobclient.Client Matchmaker *matchmakerclient.Client @@ -49,6 +51,7 @@ func NewClient(opts ...core.ClientOption) *Client { Identity: identityclient.NewClient(opts...), Kv: kvclient.NewClient(opts...), Module: moduleclient.NewClient(opts...), + Provision: provisionclient.NewClient(opts...), Auth: authclient.NewClient(opts...), Job: jobclient.NewClient(opts...), Matchmaker: matchmakerclient.NewClient(opts...), diff --git a/sdks/full/go/cloud/types.go b/sdks/full/go/cloud/types.go index 754d1f4bdf..ec70378cd8 100644 --- a/sdks/full/go/cloud/types.go +++ b/sdks/full/go/cloud/types.go @@ -1060,8 +1060,6 @@ type RegionSummary struct { RegionNameId string `json:"region_name_id"` // The server provider of this region. Provider string `json:"provider"` - // A universal region label given to this region. - UniversalRegion UniversalRegion `json:"universal_region,omitempty"` // Represent a resource's readable display name. 
ProviderDisplayName string `json:"provider_display_name"` // Represent a resource's readable display name. @@ -1255,106 +1253,6 @@ func (s *SvcPerf) String() string { return fmt.Sprintf("%#v", s) } -type UniversalRegion string - -const ( - UniversalRegionUnknown UniversalRegion = "unknown" - UniversalRegionLocal UniversalRegion = "local" - UniversalRegionAmsterdam UniversalRegion = "amsterdam" - UniversalRegionAtlanta UniversalRegion = "atlanta" - UniversalRegionBangalore UniversalRegion = "bangalore" - UniversalRegionDallas UniversalRegion = "dallas" - UniversalRegionFrankfurt UniversalRegion = "frankfurt" - UniversalRegionLondon UniversalRegion = "london" - UniversalRegionMumbai UniversalRegion = "mumbai" - UniversalRegionNewark UniversalRegion = "newark" - UniversalRegionNewYorkCity UniversalRegion = "new_york_city" - UniversalRegionSanFrancisco UniversalRegion = "san_francisco" - UniversalRegionSingapore UniversalRegion = "singapore" - UniversalRegionSydney UniversalRegion = "sydney" - UniversalRegionTokyo UniversalRegion = "tokyo" - UniversalRegionToronto UniversalRegion = "toronto" - UniversalRegionWashingtonDc UniversalRegion = "washington_dc" - UniversalRegionChicago UniversalRegion = "chicago" - UniversalRegionParis UniversalRegion = "paris" - UniversalRegionSeattle UniversalRegion = "seattle" - UniversalRegionSaoPaulo UniversalRegion = "sao_paulo" - UniversalRegionStockholm UniversalRegion = "stockholm" - UniversalRegionChennai UniversalRegion = "chennai" - UniversalRegionOsaka UniversalRegion = "osaka" - UniversalRegionMilan UniversalRegion = "milan" - UniversalRegionMiami UniversalRegion = "miami" - UniversalRegionJakarta UniversalRegion = "jakarta" - UniversalRegionLosAngeles UniversalRegion = "los_angeles" -) - -func NewUniversalRegionFromString(s string) (UniversalRegion, error) { - switch s { - case "unknown": - return UniversalRegionUnknown, nil - case "local": - return UniversalRegionLocal, nil - case "amsterdam": - return 
UniversalRegionAmsterdam, nil - case "atlanta": - return UniversalRegionAtlanta, nil - case "bangalore": - return UniversalRegionBangalore, nil - case "dallas": - return UniversalRegionDallas, nil - case "frankfurt": - return UniversalRegionFrankfurt, nil - case "london": - return UniversalRegionLondon, nil - case "mumbai": - return UniversalRegionMumbai, nil - case "newark": - return UniversalRegionNewark, nil - case "new_york_city": - return UniversalRegionNewYorkCity, nil - case "san_francisco": - return UniversalRegionSanFrancisco, nil - case "singapore": - return UniversalRegionSingapore, nil - case "sydney": - return UniversalRegionSydney, nil - case "tokyo": - return UniversalRegionTokyo, nil - case "toronto": - return UniversalRegionToronto, nil - case "washington_dc": - return UniversalRegionWashingtonDc, nil - case "chicago": - return UniversalRegionChicago, nil - case "paris": - return UniversalRegionParis, nil - case "seattle": - return UniversalRegionSeattle, nil - case "sao_paulo": - return UniversalRegionSaoPaulo, nil - case "stockholm": - return UniversalRegionStockholm, nil - case "chennai": - return UniversalRegionChennai, nil - case "osaka": - return UniversalRegionOsaka, nil - case "milan": - return UniversalRegionMilan, nil - case "miami": - return UniversalRegionMiami, nil - case "jakarta": - return UniversalRegionJakarta, nil - case "los_angeles": - return UniversalRegionLosAngeles, nil - } - var t UniversalRegion - return "", fmt.Errorf("%s is not a valid %T", s, t) -} - -func (u UniversalRegion) Ptr() *UniversalRegion { - return &u -} - type BootstrapAccess string const ( diff --git a/sdks/full/go/cloud/version/matchmaker/types.go b/sdks/full/go/cloud/version/matchmaker/types.go index 976de1c19a..bd35c28e98 100644 --- a/sdks/full/go/cloud/version/matchmaker/types.go +++ b/sdks/full/go/cloud/version/matchmaker/types.go @@ -166,9 +166,17 @@ func (n NetworkMode) Ptr() *NetworkMode { return &n } -// Type of network traffic to allow access to 
this port. -// Configuring `https` or `tcp_tls` will provide TLS termination for you via Game Guard. -// `https` and `tcp_tls` must have `proxy_kind` set to `game_guard`. +// Signifies the protocol of the port. +// Note that when proxying through GameGuard (via `ProxyKind`), the port number returned by `/find`, `/join`, and `/create` will not be the same as the port number configured in the config: +// +// - With HTTP, the port will always be 80. The hostname of the port correctly routes the incoming +// connection to the correct port being used by the game server. +// - With HTTPS, the port will always be 443. The hostname of the port correctly routes the incoming +// connection to the correct port being used by the game server. +// - Using TCP/UDP, the port will be a random number between 26000 and 31999. This gets automatically +// routed to the correct port being used by the game server. +// +// ### Related - cloud.version.matchmaker.GameModeRuntimeDockerPort - cloud.version.matchmaker.ProxyKind - /docs/dynamic-servers/concepts/game-guard - matchmaker.lobbies.find type PortProtocol string const ( @@ -200,11 +208,12 @@ func (p PortProtocol) Ptr() *PortProtocol { return &p } -// Range of ports that can be connected to. -// If configured, `network_mode` must equal `host`. -// Port ranges may overlap between containers, it is the responsibility of the developer to ensure ports are available before using. -// Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). -// Only available on Rivet Open Source & Enterprise. +// Range of ports that can be connected to. Note that the port range values returned by /find +// +// ### Related +// +// - cloud.version.matchmaker.PortProtocol +// - cloud.version.matchmaker.ProxyKind type PortRange struct { // Unsigned 32 bit integer. 
Min int `json:"min"` @@ -237,9 +246,9 @@ func (p *PortRange) String() string { return fmt.Sprintf("%#v", p) } -// Range of ports that can be connected to. -// `game_guard` (default) proxies all traffic through [Game Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) to mitigate DDoS attacks and provide TLS termination. -// `none` sends traffic directly to the game server. If configured, `network_mode` must equal `host`. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. +// Denotes what type of proxying to use for ports. Rivet GameGuard adds DoS and DDoS mitigation to incoming connections. +// +// ### Related - /docs/dynamic-servers/concepts/game-guard - cloud.version.matchmaker.PortProtocol type ProxyKind string const ( @@ -568,9 +577,11 @@ func (g *GameModeRuntimeDocker) String() string { return fmt.Sprintf("%#v", g) } -// A docker port. +// Port config for a docker build. type GameModeRuntimeDockerPort struct { // The port number to connect to. + // + // ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind Port *int `json:"port,omitempty"` PortRange *PortRange `json:"port_range,omitempty"` Protocol *PortProtocol `json:"protocol,omitempty"` diff --git a/sdks/full/go/provision/client/client.go b/sdks/full/go/provision/client/client.go new file mode 100644 index 0000000000..b4e269dbc2 --- /dev/null +++ b/sdks/full/go/provision/client/client.go @@ -0,0 +1,30 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package client + +import ( + http "net/http" + core "sdk/core" + serversclient "sdk/provision/servers/client" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header + + Servers *serversclient.Client +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + Servers: serversclient.NewClient(opts...), + } +} diff --git a/sdks/full/go/provision/servers/client/client.go b/sdks/full/go/provision/servers/client/client.go new file mode 100644 index 0000000000..67a6276ed0 --- /dev/null +++ b/sdks/full/go/provision/servers/client/client.go @@ -0,0 +1,111 @@ +// This file was auto-generated by Fern from our API Definition. + +package client + +import ( + bytes "bytes" + context "context" + json "encoding/json" + errors "errors" + fmt "fmt" + io "io" + http "net/http" + sdk "sdk" + core "sdk/core" + servers "sdk/provision/servers" +) + +type Client struct { + baseURL string + caller *core.Caller + header http.Header +} + +func NewClient(opts ...core.ClientOption) *Client { + options := core.NewClientOptions() + for _, opt := range opts { + opt(options) + } + return &Client{ + baseURL: options.BaseURL, + caller: core.NewCaller(options.HTTPClient), + header: options.ToHeader(), + } +} + +func (c *Client) GetServerInfo(ctx context.Context, ip string) (*servers.GetServerInfoResponse, error) { + baseURL := "https://api.rivet.gg" + if c.baseURL != "" { + baseURL = c.baseURL + } + endpointURL := fmt.Sprintf(baseURL+"/"+"servers/%v/info", ip) + + errorDecoder := func(statusCode int, body io.Reader) error { + raw, err := io.ReadAll(body) + if err != nil { + return err + } + apiError := core.NewAPIError(statusCode, errors.New(string(raw))) + decoder := json.NewDecoder(bytes.NewReader(raw)) + switch statusCode { + case 500: + value := 
new(sdk.InternalError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 429: + value := new(sdk.RateLimitError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 403: + value := new(sdk.ForbiddenError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 408: + value := new(sdk.UnauthorizedError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 404: + value := new(sdk.NotFoundError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + case 400: + value := new(sdk.BadRequestError) + value.APIError = apiError + if err := decoder.Decode(value); err != nil { + return apiError + } + return value + } + return apiError + } + + var response *servers.GetServerInfoResponse + if err := c.caller.Call( + ctx, + &core.CallParams{ + URL: endpointURL, + Method: http.MethodGet, + Headers: c.header, + Response: &response, + ErrorDecoder: errorDecoder, + }, + ); err != nil { + return nil, err + } + return response, nil +} diff --git a/sdks/full/go/provision/servers/servers.go b/sdks/full/go/provision/servers/servers.go new file mode 100644 index 0000000000..1be2365b44 --- /dev/null +++ b/sdks/full/go/provision/servers/servers.go @@ -0,0 +1,43 @@ +// This file was auto-generated by Fern from our API Definition. 
+ +package servers + +import ( + json "encoding/json" + fmt "fmt" + uuid "github.com/google/uuid" + core "sdk/core" +) + +type GetServerInfoResponse struct { + Name string `json:"name"` + ServerId uuid.UUID `json:"server_id"` + DatacenterId uuid.UUID `json:"datacenter_id"` + ClusterId uuid.UUID `json:"cluster_id"` + VlanIp string `json:"vlan_ip"` + + _rawJSON json.RawMessage +} + +func (g *GetServerInfoResponse) UnmarshalJSON(data []byte) error { + type unmarshaler GetServerInfoResponse + var value unmarshaler + if err := json.Unmarshal(data, &value); err != nil { + return err + } + *g = GetServerInfoResponse(value) + g._rawJSON = json.RawMessage(data) + return nil +} + +func (g *GetServerInfoResponse) String() string { + if len(g._rawJSON) > 0 { + if value, err := core.StringifyJSON(g._rawJSON); err == nil { + return value + } + } + if value, err := core.StringifyJSON(g); err == nil { + return value + } + return fmt.Sprintf("%#v", g) +} diff --git a/sdks/full/openapi/openapi.yml b/sdks/full/openapi/openapi.yml index 64c4a4c0ec..004b97df0f 100644 --- a/sdks/full/openapi/openapi.yml +++ b/sdks/full/openapi/openapi.yml @@ -60,6 +60,67 @@ paths: application/json: schema: $ref: '#/components/schemas/AdminLoginRequest' + /cluster/server_ips: + get: + operationId: admin_cluster_getServerIps + tags: + - AdminCluster + parameters: + - name: server_id + in: query + required: false + schema: + type: string + format: uuid + - name: pool + in: query + required: false + schema: + $ref: '#/components/schemas/AdminPoolType' + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/AdminClusterGetServerIpsResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: 
'#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 /cloud/bootstrap: get: description: Returns the basic information required to use the cloud APIs. @@ -4252,6 +4313,61 @@ paths: data: {} required: - data + /servers/{ip}/info: + get: + operationId: provision_servers_getServerInfo + tags: + - ProvisionServers + parameters: + - name: ip + in: path + required: true + schema: + type: string + responses: + '200': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ProvisionServersGetServerInfoResponse' + '400': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '403': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '404': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '408': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '429': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + '500': + description: '' + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + security: *ref_0 /auth/identity/access-token/complete-verification: post: description: Completes the access token verification process. 
@@ -8429,6 +8545,15 @@ components: type: string required: - url + AdminClusterGetServerIpsResponse: + type: object + properties: + ips: + type: array + items: + type: string + required: + - ips CloudBootstrapResponse: type: object properties: @@ -9376,6 +9501,34 @@ components: data: {} required: - data + ProvisionServersGetServerInfoResponse: + type: object + properties: + name: + type: string + server_id: + type: string + format: uuid + datacenter_id: + type: string + format: uuid + cluster_id: + type: string + format: uuid + vlan_ip: + type: string + required: + - name + - server_id + - datacenter_id + - cluster_id + - vlan_ip + AdminPoolType: + type: string + enum: + - job + - gg + - ats AuthCompleteStatus: type: string enum: @@ -9954,9 +10107,6 @@ components: provider: type: string description: The server provider of this region. - universal_region: - $ref: '#/components/schemas/CloudUniversalRegion' - description: A universal region label given to this region. provider_display_name: type: string description: Represent a resource's readable display name. @@ -9967,7 +10117,6 @@ components: - region_id - region_name_id - provider - - universal_region - provider_display_name - region_display_name CloudGameLobbyExpenses: @@ -10092,37 +10241,6 @@ components: - disk - bandwidth - price_per_second - CloudUniversalRegion: - type: string - enum: - - unknown - - local - - amsterdam - - atlanta - - bangalore - - dallas - - frankfurt - - london - - mumbai - - newark - - new_york_city - - san_francisco - - singapore - - sydney - - tokyo - - toronto - - washington_dc - - chicago - - paris - - seattle - - sao_paulo - - stockholm - - chennai - - osaka - - milan - - miami - - jakarta - - los_angeles CloudNamespaceFull: type: object description: A full namespace. @@ -10897,17 +11015,16 @@ components: CloudVersionMatchmakerPortRange: type: object description: >- - Range of ports that can be connected to. + Range of ports that can be connected to. 
Note that the port range values + returned by /find - If configured, `network_mode` must equal `host`. - Port ranges may overlap between containers, it is the responsibility of - the developer to ensure ports are available before using. + ### Related - Read more about host networking - [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). - Only available on Rivet Open Source & Enterprise. + - cloud.version.matchmaker.PortProtocol + + - cloud.version.matchmaker.ProxyKind properties: min: type: integer @@ -10927,28 +11044,38 @@ components: - tcp_tls - udp description: >- - Type of network traffic to allow access to this port. - - Configuring `https` or `tcp_tls` will provide TLS termination for you - via Game Guard. - - `https` and `tcp_tls` must have `proxy_kind` set to `game_guard`. + Signifies the protocol of the port. + + Note that when proxying through GameGuard (via `ProxyKind`), the port + number returned by `/find`, `/join`, and `/create` will not be the same + as the port number configured in the config: + + + - With HTTP, the port will always be 80. The hostname of the port + correctly routes the incoming + connection to the correct port being used by the game server. + - With HTTPS, the port will always be 443. The hostname of the port + correctly routes the incoming + connection to the correct port being used by the game server. + - Using TCP/UDP, the port will be a random number between 26000 and + 31999. This gets automatically + routed to the correct port being used by the game server. + + ### Related - cloud.version.matchmaker.GameModeRuntimeDockerPort - + cloud.version.matchmaker.ProxyKind - + /docs/dynamic-servers/concepts/game-guard - matchmaker.lobbies.find CloudVersionMatchmakerProxyKind: type: string enum: - none - game_guard description: >- - Range of ports that can be connected to. + Denotes what type of proxying to use for ports. Rivet GameGuard adds DoS + and DDoS mitigation to incoming connections. 
- `game_guard` (default) proxies all traffic through [Game - Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) to - mitigate DDoS attacks and provide TLS termination. - `none` sends traffic directly to the game server. If configured, - `network_mode` must equal `host`. Read more about host networking - [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). - Only available on Rivet Open Source & Enterprise. + ### Related - /docs/dynamic-servers/concepts/game-guard - + cloud.version.matchmaker.PortProtocol CloudVersionMatchmakerCaptcha: type: object description: Matchmaker captcha configuration. @@ -11092,11 +11219,16 @@ components: #/components/schemas/CloudVersionMatchmakerGameModeRuntimeDockerPort CloudVersionMatchmakerGameModeRuntimeDockerPort: type: object - description: A docker port. + description: Port config for a docker build. properties: port: type: integer - description: The port number to connect to. + description: >- + The port number to connect to. + + + ### Related - cloud.version.matchmaker.PortProtocol - + cloud.version.matchmaker.ProxyKind port_range: $ref: '#/components/schemas/CloudVersionMatchmakerPortRange' protocol: diff --git a/sdks/full/openapi_compat/openapi.yml b/sdks/full/openapi_compat/openapi.yml index 502ae594f0..237569ca00 100644 --- a/sdks/full/openapi_compat/openapi.yml +++ b/sdks/full/openapi_compat/openapi.yml @@ -2,6 +2,15 @@ components: schemas: AccountNumber: type: integer + AdminClusterGetServerIpsResponse: + properties: + ips: + items: + type: string + type: array + required: + - ips + type: object AdminLoginRequest: properties: name: @@ -16,6 +25,12 @@ components: required: - url type: object + AdminPoolType: + enum: + - job + - gg + - ats + type: string AuthCompleteStatus: description: Represents the state of an external account linking process. enum: @@ -1581,14 +1596,10 @@ components: readable. 
Different than `rivet.common#DisplayName` because this should not include special characters and be short. type: string - universal_region: - $ref: '#/components/schemas/CloudUniversalRegion' - description: A universal region label given to this region. required: - region_id - region_name_id - provider - - universal_region - provider_display_name - region_display_name type: object @@ -1736,37 +1747,6 @@ components: - spans - marks type: object - CloudUniversalRegion: - enum: - - unknown - - local - - amsterdam - - atlanta - - bangalore - - dallas - - frankfurt - - london - - mumbai - - newark - - new_york_city - - san_francisco - - singapore - - sydney - - tokyo - - toronto - - washington_dc - - chicago - - paris - - seattle - - sao_paulo - - stockholm - - chennai - - osaka - - milan - - miami - - jakarta - - los_angeles - type: string CloudValidateGroupRequest: properties: display_name: @@ -2221,7 +2201,7 @@ components: type: object type: object CloudVersionMatchmakerGameModeRuntimeDockerPort: - description: A docker port. + description: Port config for a docker build. properties: dev_port: description: _Configures Rivet CLI behavior. Has no effect on server behavior._ @@ -2233,7 +2213,10 @@ components: $ref: '#/components/schemas/CloudVersionMatchmakerPortProtocol' description: _Configures Rivet CLI behavior. Has no effect on server behavior._ port: - description: The port number to connect to. + description: 'The port number to connect to. + + + ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind' type: integer port_range: $ref: '#/components/schemas/CloudVersionMatchmakerPortRange' @@ -2413,12 +2396,18 @@ components: - host type: string CloudVersionMatchmakerPortProtocol: - description: 'Type of network traffic to allow access to this port. - - Configuring `https` or `tcp_tls` will provide TLS termination for you via - Game Guard. - - `https` and `tcp_tls` must have `proxy_kind` set to `game_guard`.' 
+ description: "Signifies the protocol of the port.\nNote that when proxying through\ + \ GameGuard (via `ProxyKind`), the port number returned by `/find`, `/join`,\ + \ and `/create` will not be the same as the port number configured in the\ + \ config:\n\n- With HTTP, the port will always be 80. The hostname of the\ + \ port correctly routes the incoming\n connection to the correct port being\ + \ used by the game server.\n- With HTTPS, the port will always be 443. The\ + \ hostname of the port correctly routes the incoming\n connection to the\ + \ correct port being used by the game server.\n- Using TCP/UDP, the port will\ + \ be a random number between 26000 and 31999. This gets automatically\n routed\ + \ to the correct port being used by the game server.\n\n### Related - cloud.version.matchmaker.GameModeRuntimeDockerPort\ + \ - cloud.version.matchmaker.ProxyKind - /docs/dynamic-servers/concepts/game-guard\ + \ - matchmaker.lobbies.find" enum: - http - https @@ -2427,16 +2416,16 @@ components: - udp type: string CloudVersionMatchmakerPortRange: - description: 'Range of ports that can be connected to. + description: 'Range of ports that can be connected to. Note that the port range + values returned by /find + - If configured, `network_mode` must equal `host`. + ### Related - Port ranges may overlap between containers, it is the responsibility of the - developer to ensure ports are available before using. - Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). + - cloud.version.matchmaker.PortProtocol - Only available on Rivet Open Source & Enterprise.' + - cloud.version.matchmaker.ProxyKind' properties: max: description: Unsigned 32 bit integer. @@ -2449,14 +2438,11 @@ components: - max type: object CloudVersionMatchmakerProxyKind: - description: 'Range of ports that can be connected to. + description: 'Denotes what type of proxying to use for ports. 
Rivet GameGuard + adds DoS and DDoS mitigation to incoming connections. - `game_guard` (default) proxies all traffic through [Game Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) - to mitigate DDoS attacks and provide TLS termination. - `none` sends traffic directly to the game server. If configured, `network_mode` - must equal `host`. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). - Only available on Rivet Open Source & Enterprise.' + ### Related - /docs/dynamic-servers/concepts/game-guard - cloud.version.matchmaker.PortProtocol' enum: - none - game_guard @@ -4210,6 +4196,28 @@ components: required: - service type: object + ProvisionServersGetServerInfoResponse: + properties: + cluster_id: + format: uuid + type: string + datacenter_id: + format: uuid + type: string + name: + type: string + server_id: + format: uuid + type: string + vlan_ip: + type: string + required: + - name + - server_id + - datacenter_id + - cluster_id + - vlan_ip + type: object Timestamp: description: RFC3339 timestamp type: string @@ -7784,6 +7792,67 @@ paths: security: *id001 tags: - CloudUploads + /cluster/server_ips: + get: + operationId: admin_cluster_getServerIps + parameters: + - in: query + name: server_id + required: false + schema: + format: uuid + type: string + - in: query + name: pool + required: false + schema: + $ref: '#/components/schemas/AdminPoolType' + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/AdminClusterGetServerIpsResponse' + description: '' + '400': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '403': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '404': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '408': + content: + application/json: + schema: + $ref: 
'#/components/schemas/ErrorBody' + description: '' + '429': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '500': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + security: *id001 + tags: + - AdminCluster /group/groups: get: description: Returns a list of suggested groups. @@ -12599,6 +12668,61 @@ paths: security: *id001 tags: - PortalNotifications + /servers/{ip}/info: + get: + operationId: provision_servers_getServerInfo + parameters: + - in: path + name: ip + required: true + schema: + type: string + responses: + '200': + content: + application/json: + schema: + $ref: '#/components/schemas/ProvisionServersGetServerInfoResponse' + description: '' + '400': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '403': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '404': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '408': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '429': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + '500': + content: + application/json: + schema: + $ref: '#/components/schemas/ErrorBody' + description: '' + security: *id001 + tags: + - ProvisionServers servers: - description: Production url: https://api.rivet.gg diff --git a/sdks/full/rust-cli/.openapi-generator/FILES b/sdks/full/rust-cli/.openapi-generator/FILES index f95d096f8d..b20ce3bdf1 100644 --- a/sdks/full/rust-cli/.openapi-generator/FILES +++ b/sdks/full/rust-cli/.openapi-generator/FILES @@ -4,8 +4,11 @@ Cargo.toml README.md docs/AdminApi.md +docs/AdminClusterApi.md +docs/AdminClusterGetServerIpsResponse.md docs/AdminLoginRequest.md docs/AdminLoginResponse.md +docs/AdminPoolType.md docs/AuthCompleteStatus.md 
docs/AuthIdentityAccessTokenApi.md docs/AuthIdentityCompleteAccessTokenVerificationRequest.md @@ -141,7 +144,6 @@ docs/CloudRegionTierExpenses.md docs/CloudSvcMetrics.md docs/CloudSvcPerf.md docs/CloudTiersApi.md -docs/CloudUniversalRegion.md docs/CloudUploadsApi.md docs/CloudValidateGroupRequest.md docs/CloudValidateGroupResponse.md @@ -326,12 +328,15 @@ docs/PortalNotificationRegisterService.md docs/PortalNotificationUnregisterService.md docs/PortalNotificationsApi.md docs/PortalRegisterNotificationsRequest.md +docs/ProvisionServersApi.md +docs/ProvisionServersGetServerInfoResponse.md docs/UploadPrepareFile.md docs/UploadPresignedRequest.md docs/ValidationError.md docs/WatchResponse.md git_push.sh src/apis/admin_api.rs +src/apis/admin_cluster_api.rs src/apis/auth_identity_access_token_api.rs src/apis/auth_identity_email_api.rs src/apis/auth_tokens_api.rs @@ -369,9 +374,12 @@ src/apis/mod.rs src/apis/module_api.rs src/apis/portal_games_api.rs src/apis/portal_notifications_api.rs +src/apis/provision_servers_api.rs src/lib.rs +src/models/admin_cluster_get_server_ips_response.rs src/models/admin_login_request.rs src/models/admin_login_response.rs +src/models/admin_pool_type.rs src/models/auth_complete_status.rs src/models/auth_identity_complete_access_token_verification_request.rs src/models/auth_identity_complete_email_verification_request.rs @@ -488,7 +496,6 @@ src/models/cloud_region_tier.rs src/models/cloud_region_tier_expenses.rs src/models/cloud_svc_metrics.rs src/models/cloud_svc_perf.rs -src/models/cloud_universal_region.rs src/models/cloud_validate_group_request.rs src/models/cloud_validate_group_response.rs src/models/cloud_version_cdn_config.rs @@ -658,6 +665,7 @@ src/models/portal_notification_register_firebase_service.rs src/models/portal_notification_register_service.rs src/models/portal_notification_unregister_service.rs src/models/portal_register_notifications_request.rs +src/models/provision_servers_get_server_info_response.rs 
src/models/upload_prepare_file.rs src/models/upload_presigned_request.rs src/models/validation_error.rs diff --git a/sdks/full/rust-cli/README.md b/sdks/full/rust-cli/README.md index a00e327989..698e96a35c 100644 --- a/sdks/full/rust-cli/README.md +++ b/sdks/full/rust-cli/README.md @@ -26,6 +26,7 @@ All URIs are relative to *https://api.rivet.gg* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- *AdminApi* | [**admin_login**](docs/AdminApi.md#admin_login) | **POST** /admin/login | +*AdminClusterApi* | [**admin_cluster_get_server_ips**](docs/AdminClusterApi.md#admin_cluster_get_server_ips) | **GET** /cluster/server_ips | *AuthIdentityAccessTokenApi* | [**auth_identity_access_token_complete_access_token_verification**](docs/AuthIdentityAccessTokenApi.md#auth_identity_access_token_complete_access_token_verification) | **POST** /auth/identity/access-token/complete-verification | *AuthIdentityEmailApi* | [**auth_identity_email_complete_email_verification**](docs/AuthIdentityEmailApi.md#auth_identity_email_complete_email_verification) | **POST** /auth/identity/email/complete-verification | *AuthIdentityEmailApi* | [**auth_identity_email_start_email_verification**](docs/AuthIdentityEmailApi.md#auth_identity_email_start_email_verification) | **POST** /auth/identity/email/start-verification | @@ -159,12 +160,15 @@ Class | Method | HTTP request | Description *PortalGamesApi* | [**portal_games_get_game_profile**](docs/PortalGamesApi.md#portal_games_get_game_profile) | **GET** /portal/games/{game_name_id}/profile | *PortalNotificationsApi* | [**portal_notifications_register_notifications**](docs/PortalNotificationsApi.md#portal_notifications_register_notifications) | **POST** /portal/notifications/register | *PortalNotificationsApi* | [**portal_notifications_unregister_notifications**](docs/PortalNotificationsApi.md#portal_notifications_unregister_notifications) | **DELETE** /portal/notifications/register | 
+*ProvisionServersApi* | [**provision_servers_get_server_info**](docs/ProvisionServersApi.md#provision_servers_get_server_info) | **GET** /servers/{ip}/info | ## Documentation For Models + - [AdminClusterGetServerIpsResponse](docs/AdminClusterGetServerIpsResponse.md) - [AdminLoginRequest](docs/AdminLoginRequest.md) - [AdminLoginResponse](docs/AdminLoginResponse.md) + - [AdminPoolType](docs/AdminPoolType.md) - [AuthCompleteStatus](docs/AuthCompleteStatus.md) - [AuthIdentityCompleteAccessTokenVerificationRequest](docs/AuthIdentityCompleteAccessTokenVerificationRequest.md) - [AuthIdentityCompleteEmailVerificationRequest](docs/AuthIdentityCompleteEmailVerificationRequest.md) @@ -281,7 +285,6 @@ Class | Method | HTTP request | Description - [CloudRegionTierExpenses](docs/CloudRegionTierExpenses.md) - [CloudSvcMetrics](docs/CloudSvcMetrics.md) - [CloudSvcPerf](docs/CloudSvcPerf.md) - - [CloudUniversalRegion](docs/CloudUniversalRegion.md) - [CloudValidateGroupRequest](docs/CloudValidateGroupRequest.md) - [CloudValidateGroupResponse](docs/CloudValidateGroupResponse.md) - [CloudVersionCdnConfig](docs/CloudVersionCdnConfig.md) @@ -450,6 +453,7 @@ Class | Method | HTTP request | Description - [PortalNotificationRegisterService](docs/PortalNotificationRegisterService.md) - [PortalNotificationUnregisterService](docs/PortalNotificationUnregisterService.md) - [PortalRegisterNotificationsRequest](docs/PortalRegisterNotificationsRequest.md) + - [ProvisionServersGetServerInfoResponse](docs/ProvisionServersGetServerInfoResponse.md) - [UploadPrepareFile](docs/UploadPrepareFile.md) - [UploadPresignedRequest](docs/UploadPresignedRequest.md) - [ValidationError](docs/ValidationError.md) diff --git a/sdks/full/rust-cli/docs/AdminClusterApi.md b/sdks/full/rust-cli/docs/AdminClusterApi.md new file mode 100644 index 0000000000..193d53e1f9 --- /dev/null +++ b/sdks/full/rust-cli/docs/AdminClusterApi.md @@ -0,0 +1,38 @@ +# \AdminClusterApi + +All URIs are relative to *https://api.rivet.gg* + 
+Method | HTTP request | Description +------------- | ------------- | ------------- +[**admin_cluster_get_server_ips**](AdminClusterApi.md#admin_cluster_get_server_ips) | **GET** /cluster/server_ips | + + + +## admin_cluster_get_server_ips + +> crate::models::AdminClusterGetServerIpsResponse admin_cluster_get_server_ips(server_id, pool) + + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**server_id** | Option<**uuid::Uuid**> | | | +**pool** | Option<[**AdminPoolType**](.md)> | | | + +### Return type + +[**crate::models::AdminClusterGetServerIpsResponse**](AdminClusterGetServerIpsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/full/rust-cli/docs/AdminClusterGetServerIpsResponse.md b/sdks/full/rust-cli/docs/AdminClusterGetServerIpsResponse.md new file mode 100644 index 0000000000..9f56f9efbe --- /dev/null +++ b/sdks/full/rust-cli/docs/AdminClusterGetServerIpsResponse.md @@ -0,0 +1,11 @@ +# AdminClusterGetServerIpsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**ips** | **Vec** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust-cli/docs/CloudUniversalRegion.md b/sdks/full/rust-cli/docs/AdminPoolType.md similarity index 92% rename from sdks/full/rust-cli/docs/CloudUniversalRegion.md rename to sdks/full/rust-cli/docs/AdminPoolType.md index f3a1ad9699..fcba04ce3d 100644 --- a/sdks/full/rust-cli/docs/CloudUniversalRegion.md +++ 
b/sdks/full/rust-cli/docs/AdminPoolType.md @@ -1,4 +1,4 @@ -# CloudUniversalRegion +# AdminPoolType ## Properties diff --git a/sdks/full/rust-cli/docs/CloudRegionSummary.md b/sdks/full/rust-cli/docs/CloudRegionSummary.md index 56a181b231..4f7555146c 100644 --- a/sdks/full/rust-cli/docs/CloudRegionSummary.md +++ b/sdks/full/rust-cli/docs/CloudRegionSummary.md @@ -9,7 +9,6 @@ Name | Type | Description | Notes **region_display_name** | **String** | Represent a resource's readable display name. | **region_id** | [**uuid::Uuid**](uuid::Uuid.md) | | **region_name_id** | **String** | A human readable short identifier used to references resources. Different than a `rivet.common#Uuid` because this is intended to be human readable. Different than `rivet.common#DisplayName` because this should not include special characters and be short. | -**universal_region** | [**crate::models::CloudUniversalRegion**](CloudUniversalRegion.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/full/rust-cli/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md b/sdks/full/rust-cli/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md index 62cc673f41..31cfd0f65a 100644 --- a/sdks/full/rust-cli/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md +++ b/sdks/full/rust-cli/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md @@ -7,7 +7,7 @@ Name | Type | Description | Notes **dev_port** | Option<**i32**> | _Configures Rivet CLI behavior. Has no effect on server behavior._ | [optional] **dev_port_range** | Option<[**crate::models::CloudVersionMatchmakerPortRange**](CloudVersionMatchmakerPortRange.md)> | | [optional] **dev_protocol** | Option<[**crate::models::CloudVersionMatchmakerPortProtocol**](CloudVersionMatchmakerPortProtocol.md)> | | [optional] -**port** | Option<**i32**> | The port number to connect to. 
| [optional] +**port** | Option<**i32**> | The port number to connect to. ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind | [optional] **port_range** | Option<[**crate::models::CloudVersionMatchmakerPortRange**](CloudVersionMatchmakerPortRange.md)> | | [optional] **protocol** | Option<[**crate::models::CloudVersionMatchmakerPortProtocol**](CloudVersionMatchmakerPortProtocol.md)> | | [optional] **proxy** | Option<[**crate::models::CloudVersionMatchmakerProxyKind**](CloudVersionMatchmakerProxyKind.md)> | | [optional] diff --git a/sdks/full/rust-cli/docs/ProvisionServersApi.md b/sdks/full/rust-cli/docs/ProvisionServersApi.md new file mode 100644 index 0000000000..e91b89bd46 --- /dev/null +++ b/sdks/full/rust-cli/docs/ProvisionServersApi.md @@ -0,0 +1,37 @@ +# \ProvisionServersApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**provision_servers_get_server_info**](ProvisionServersApi.md#provision_servers_get_server_info) | **GET** /servers/{ip}/info | + + + +## provision_servers_get_server_info + +> crate::models::ProvisionServersGetServerInfoResponse provision_servers_get_server_info(ip) + + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**ip** | **String** | | [required] | + +### Return type + +[**crate::models::ProvisionServersGetServerInfoResponse**](ProvisionServersGetServerInfoResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/full/rust-cli/docs/ProvisionServersGetServerInfoResponse.md 
b/sdks/full/rust-cli/docs/ProvisionServersGetServerInfoResponse.md new file mode 100644 index 0000000000..ea5affe1e7 --- /dev/null +++ b/sdks/full/rust-cli/docs/ProvisionServersGetServerInfoResponse.md @@ -0,0 +1,15 @@ +# ProvisionServersGetServerInfoResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**cluster_id** | [**uuid::Uuid**](uuid::Uuid.md) | | +**datacenter_id** | [**uuid::Uuid**](uuid::Uuid.md) | | +**name** | **String** | | +**server_id** | [**uuid::Uuid**](uuid::Uuid.md) | | +**vlan_ip** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust-cli/src/apis/admin_cluster_api.rs b/sdks/full/rust-cli/src/apis/admin_cluster_api.rs new file mode 100644 index 0000000000..5d1d217a2b --- /dev/null +++ b/sdks/full/rust-cli/src/apis/admin_cluster_api.rs @@ -0,0 +1,67 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`admin_cluster_get_server_ips`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum AdminClusterGetServerIpsError { +    Status400(crate::models::ErrorBody), +    Status403(crate::models::ErrorBody), +    Status404(crate::models::ErrorBody), +    Status408(crate::models::ErrorBody), +    Status429(crate::models::ErrorBody), +    Status500(crate::models::ErrorBody), +    UnknownValue(serde_json::Value), +} + + +pub async fn admin_cluster_get_server_ips(configuration: &configuration::Configuration, server_id: Option<&str>, pool: Option<crate::models::AdminPoolType>) -> Result<crate::models::AdminClusterGetServerIpsResponse, Error<AdminClusterGetServerIpsError>> { +    let local_var_configuration = configuration;
+ +    let local_var_client = &local_var_configuration.client; + +    let local_var_uri_str = format!("{}/cluster/server_ips", local_var_configuration.base_path); +    let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + +    if let Some(ref local_var_str) = server_id { +        local_var_req_builder = local_var_req_builder.query(&[("server_id", &local_var_str.to_string())]); +    } +    if let Some(ref local_var_str) = pool { +        local_var_req_builder = local_var_req_builder.query(&[("pool", &local_var_str.to_string())]); +    } +    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { +        local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); +    } +    if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { +        local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); +    }; + +    let local_var_req = local_var_req_builder.build()?; +    let local_var_resp = local_var_client.execute(local_var_req).await?; + +    let local_var_status = local_var_resp.status(); +    let local_var_content = local_var_resp.text().await?; + +    if !local_var_status.is_client_error() && !local_var_status.is_server_error() { +        serde_json::from_str(&local_var_content).map_err(Error::from) +    } else { +        let local_var_entity: Option<AdminClusterGetServerIpsError> = serde_json::from_str(&local_var_content).ok(); +        let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; +        Err(Error::ResponseError(local_var_error)) +    } +} + diff --git a/sdks/full/rust-cli/src/apis/mod.rs b/sdks/full/rust-cli/src/apis/mod.rs index b22b9678f0..c1427c12f5 100644 --- a/sdks/full/rust-cli/src/apis/mod.rs +++ b/sdks/full/rust-cli/src/apis/mod.rs @@ -91,6 +91,7 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String } pub mod admin_api; +pub mod admin_cluster_api; pub mod auth_identity_access_token_api; pub mod
auth_identity_email_api; pub mod auth_tokens_api; @@ -126,5 +127,6 @@ pub mod matchmaker_regions_api; pub mod module_api; pub mod portal_games_api; pub mod portal_notifications_api; +pub mod provision_servers_api; pub mod configuration; diff --git a/sdks/full/rust-cli/src/apis/provision_servers_api.rs b/sdks/full/rust-cli/src/apis/provision_servers_api.rs new file mode 100644 index 0000000000..69aab5a383 --- /dev/null +++ b/sdks/full/rust-cli/src/apis/provision_servers_api.rs @@ -0,0 +1,61 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`provision_servers_get_server_info`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ProvisionServersGetServerInfoError { +    Status400(crate::models::ErrorBody), +    Status403(crate::models::ErrorBody), +    Status404(crate::models::ErrorBody), +    Status408(crate::models::ErrorBody), +    Status429(crate::models::ErrorBody), +    Status500(crate::models::ErrorBody), +    UnknownValue(serde_json::Value), +} + + +pub async fn provision_servers_get_server_info(configuration: &configuration::Configuration, ip: &str) -> Result<crate::models::ProvisionServersGetServerInfoResponse, Error<ProvisionServersGetServerInfoError>> { +    let local_var_configuration = configuration; + +    let local_var_client = &local_var_configuration.client; + +    let local_var_uri_str = format!("{}/servers/{ip}/info", local_var_configuration.base_path, ip=crate::apis::urlencode(ip)); +    let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + +    if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { +        local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); +    } +    if let Some(ref local_var_token)
= local_var_configuration.bearer_access_token { +        local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); +    }; + +    let local_var_req = local_var_req_builder.build()?; +    let local_var_resp = local_var_client.execute(local_var_req).await?; + +    let local_var_status = local_var_resp.status(); +    let local_var_content = local_var_resp.text().await?; + +    if !local_var_status.is_client_error() && !local_var_status.is_server_error() { +        serde_json::from_str(&local_var_content).map_err(Error::from) +    } else { +        let local_var_entity: Option<ProvisionServersGetServerInfoError> = serde_json::from_str(&local_var_content).ok(); +        let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; +        Err(Error::ResponseError(local_var_error)) +    } +} + diff --git a/sdks/full/rust-cli/src/models/admin_cluster_get_server_ips_response.rs b/sdks/full/rust-cli/src/models/admin_cluster_get_server_ips_response.rs new file mode 100644 index 0000000000..539777c28f --- /dev/null +++ b/sdks/full/rust-cli/src/models/admin_cluster_get_server_ips_response.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct AdminClusterGetServerIpsResponse { +    #[serde(rename = "ips")] +    pub ips: Vec<String>, +} + +impl AdminClusterGetServerIpsResponse { +    pub fn new(ips: Vec<String>) -> AdminClusterGetServerIpsResponse { +        AdminClusterGetServerIpsResponse { +            ips, +        } +    } +} + + diff --git a/sdks/full/rust-cli/src/models/admin_pool_type.rs b/sdks/full/rust-cli/src/models/admin_pool_type.rs new file mode 100644 index 0000000000..0bb6980c1b --- /dev/null +++ b/sdks/full/rust-cli/src/models/admin_pool_type.rs @@ -0,0 +1,42 @@ +/* + * Rivet API + * + * No description provided (generated by
Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum AdminPoolType { + #[serde(rename = "job")] + Job, + #[serde(rename = "gg")] + Gg, + #[serde(rename = "ats")] + Ats, + +} + +impl ToString for AdminPoolType { + fn to_string(&self) -> String { + match self { + Self::Job => String::from("job"), + Self::Gg => String::from("gg"), + Self::Ats => String::from("ats"), + } + } +} + +impl Default for AdminPoolType { + fn default() -> AdminPoolType { + Self::Job + } +} + + + + diff --git a/sdks/full/rust-cli/src/models/cloud_region_summary.rs b/sdks/full/rust-cli/src/models/cloud_region_summary.rs index 397c9ceb8e..f75ea518aa 100644 --- a/sdks/full/rust-cli/src/models/cloud_region_summary.rs +++ b/sdks/full/rust-cli/src/models/cloud_region_summary.rs @@ -28,20 +28,17 @@ pub struct CloudRegionSummary { /// A human readable short identifier used to references resources. Different than a `rivet.common#Uuid` because this is intended to be human readable. Different than `rivet.common#DisplayName` because this should not include special characters and be short. #[serde(rename = "region_name_id")] pub region_name_id: String, - #[serde(rename = "universal_region")] - pub universal_region: crate::models::CloudUniversalRegion, } impl CloudRegionSummary { /// A region summary. 
- pub fn new(provider: String, provider_display_name: String, region_display_name: String, region_id: uuid::Uuid, region_name_id: String, universal_region: crate::models::CloudUniversalRegion) -> CloudRegionSummary { + pub fn new(provider: String, provider_display_name: String, region_display_name: String, region_id: uuid::Uuid, region_name_id: String) -> CloudRegionSummary { CloudRegionSummary { provider, provider_display_name, region_display_name, region_id, region_name_id, - universal_region, } } } diff --git a/sdks/full/rust-cli/src/models/cloud_universal_region.rs b/sdks/full/rust-cli/src/models/cloud_universal_region.rs deleted file mode 100644 index f709a6d0b6..0000000000 --- a/sdks/full/rust-cli/src/models/cloud_universal_region.rs +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Rivet API - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: 0.0.1 - * - * Generated by: https://openapi-generator.tech - */ - - -/// -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] -pub enum CloudUniversalRegion { - #[serde(rename = "unknown")] - Unknown, - #[serde(rename = "local")] - Local, - #[serde(rename = "amsterdam")] - Amsterdam, - #[serde(rename = "atlanta")] - Atlanta, - #[serde(rename = "bangalore")] - Bangalore, - #[serde(rename = "dallas")] - Dallas, - #[serde(rename = "frankfurt")] - Frankfurt, - #[serde(rename = "london")] - London, - #[serde(rename = "mumbai")] - Mumbai, - #[serde(rename = "newark")] - Newark, - #[serde(rename = "new_york_city")] - NewYorkCity, - #[serde(rename = "san_francisco")] - SanFrancisco, - #[serde(rename = "singapore")] - Singapore, - #[serde(rename = "sydney")] - Sydney, - #[serde(rename = "tokyo")] - Tokyo, - #[serde(rename = "toronto")] - Toronto, - #[serde(rename = "washington_dc")] - WashingtonDc, - #[serde(rename = "chicago")] - Chicago, - #[serde(rename = "paris")] - Paris, - #[serde(rename 
= "seattle")] - Seattle, - #[serde(rename = "sao_paulo")] - SaoPaulo, - #[serde(rename = "stockholm")] - Stockholm, - #[serde(rename = "chennai")] - Chennai, - #[serde(rename = "osaka")] - Osaka, - #[serde(rename = "milan")] - Milan, - #[serde(rename = "miami")] - Miami, - #[serde(rename = "jakarta")] - Jakarta, - #[serde(rename = "los_angeles")] - LosAngeles, - -} - -impl ToString for CloudUniversalRegion { - fn to_string(&self) -> String { - match self { - Self::Unknown => String::from("unknown"), - Self::Local => String::from("local"), - Self::Amsterdam => String::from("amsterdam"), - Self::Atlanta => String::from("atlanta"), - Self::Bangalore => String::from("bangalore"), - Self::Dallas => String::from("dallas"), - Self::Frankfurt => String::from("frankfurt"), - Self::London => String::from("london"), - Self::Mumbai => String::from("mumbai"), - Self::Newark => String::from("newark"), - Self::NewYorkCity => String::from("new_york_city"), - Self::SanFrancisco => String::from("san_francisco"), - Self::Singapore => String::from("singapore"), - Self::Sydney => String::from("sydney"), - Self::Tokyo => String::from("tokyo"), - Self::Toronto => String::from("toronto"), - Self::WashingtonDc => String::from("washington_dc"), - Self::Chicago => String::from("chicago"), - Self::Paris => String::from("paris"), - Self::Seattle => String::from("seattle"), - Self::SaoPaulo => String::from("sao_paulo"), - Self::Stockholm => String::from("stockholm"), - Self::Chennai => String::from("chennai"), - Self::Osaka => String::from("osaka"), - Self::Milan => String::from("milan"), - Self::Miami => String::from("miami"), - Self::Jakarta => String::from("jakarta"), - Self::LosAngeles => String::from("los_angeles"), - } - } -} - -impl Default for CloudUniversalRegion { - fn default() -> CloudUniversalRegion { - Self::Unknown - } -} - - - - diff --git a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs 
b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs index 13ca39822a..6c62491236 100644 --- a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs +++ b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs @@ -8,7 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerGameModeRuntimeDockerPort : A docker port. +/// CloudVersionMatchmakerGameModeRuntimeDockerPort : Port config for a docker build. @@ -22,7 +22,7 @@ pub struct CloudVersionMatchmakerGameModeRuntimeDockerPort { pub dev_port_range: Option>, #[serde(rename = "dev_protocol", skip_serializing_if = "Option::is_none")] pub dev_protocol: Option, - /// The port number to connect to. + /// The port number to connect to. ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind #[serde(rename = "port", skip_serializing_if = "Option::is_none")] pub port: Option, #[serde(rename = "port_range", skip_serializing_if = "Option::is_none")] @@ -34,7 +34,7 @@ pub struct CloudVersionMatchmakerGameModeRuntimeDockerPort { } impl CloudVersionMatchmakerGameModeRuntimeDockerPort { - /// A docker port. + /// Port config for a docker build. pub fn new() -> CloudVersionMatchmakerGameModeRuntimeDockerPort { CloudVersionMatchmakerGameModeRuntimeDockerPort { dev_port: None, diff --git a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_protocol.rs b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_protocol.rs index c711c3b87e..d5e7328356 100644 --- a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_protocol.rs +++ b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_protocol.rs @@ -8,9 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerPortProtocol : Type of network traffic to allow access to this port. Configuring `https` or `tcp_tls` will provide TLS termination for you via Game Guard. 
`https` and `tcp_tls` must have `proxy_kind` set to `game_guard`. +/// CloudVersionMatchmakerPortProtocol : Signifies the protocol of the port. Note that when proxying through GameGuard (via `ProxyKind`), the port number returned by `/find`, `/join`, and `/create` will not be the same as the port number configured in the config: - With HTTP, the port will always be 80. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - With HTTPS, the port will always be 443. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - Using TCP/UDP, the port will be a random number between 26000 and 31999. This gets automatically routed to the correct port being used by the game server. ### Related - cloud.version.matchmaker.GameModeRuntimeDockerPort - cloud.version.matchmaker.ProxyKind - /docs/dynamic-servers/concepts/game-guard - matchmaker.lobbies.find -/// Type of network traffic to allow access to this port. Configuring `https` or `tcp_tls` will provide TLS termination for you via Game Guard. `https` and `tcp_tls` must have `proxy_kind` set to `game_guard`. +/// Signifies the protocol of the port. Note that when proxying through GameGuard (via `ProxyKind`), the port number returned by `/find`, `/join`, and `/create` will not be the same as the port number configured in the config: - With HTTP, the port will always be 80. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - With HTTPS, the port will always be 443. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - Using TCP/UDP, the port will be a random number between 26000 and 31999. This gets automatically routed to the correct port being used by the game server. 
### Related - cloud.version.matchmaker.GameModeRuntimeDockerPort - cloud.version.matchmaker.ProxyKind - /docs/dynamic-servers/concepts/game-guard - matchmaker.lobbies.find #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum CloudVersionMatchmakerPortProtocol { diff --git a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_range.rs b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_range.rs index f2d6f86023..9d97a5c6f9 100644 --- a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_range.rs +++ b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_port_range.rs @@ -8,7 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerPortRange : Range of ports that can be connected to. If configured, `network_mode` must equal `host`. Port ranges may overlap between containers, it is the responsibility of the developer to ensure ports are available before using. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. +/// CloudVersionMatchmakerPortRange : Range of ports that can be connected to. Note that the port range values returned by /find ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind @@ -24,7 +24,7 @@ pub struct CloudVersionMatchmakerPortRange { } impl CloudVersionMatchmakerPortRange { - /// Range of ports that can be connected to. If configured, `network_mode` must equal `host`. Port ranges may overlap between containers, it is the responsibility of the developer to ensure ports are available before using. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. + /// Range of ports that can be connected to. 
Note that the port range values returned by /find ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind pub fn new(max: i32, min: i32) -> CloudVersionMatchmakerPortRange { CloudVersionMatchmakerPortRange { max, diff --git a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_proxy_kind.rs b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_proxy_kind.rs index 52fe3d467d..bd68087379 100644 --- a/sdks/full/rust-cli/src/models/cloud_version_matchmaker_proxy_kind.rs +++ b/sdks/full/rust-cli/src/models/cloud_version_matchmaker_proxy_kind.rs @@ -8,9 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerProxyKind : Range of ports that can be connected to. `game_guard` (default) proxies all traffic through [Game Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) to mitigate DDoS attacks and provide TLS termination. `none` sends traffic directly to the game server. If configured, `network_mode` must equal `host`. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. +/// CloudVersionMatchmakerProxyKind : Denotes what type of proxying to use for ports. Rivet GameGuard adds DoS and DDoS mitigation to incoming connections. ### Related - /docs/dynamic-servers/concepts/game-guard - cloud.version.matchmaker.PortProtocol -/// Range of ports that can be connected to. `game_guard` (default) proxies all traffic through [Game Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) to mitigate DDoS attacks and provide TLS termination. `none` sends traffic directly to the game server. If configured, `network_mode` must equal `host`. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. +/// Denotes what type of proxying to use for ports. 
Rivet GameGuard adds DoS and DDoS mitigation to incoming connections. ### Related - /docs/dynamic-servers/concepts/game-guard - cloud.version.matchmaker.PortProtocol #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] #[serde(deny_unknown_fields)] pub enum CloudVersionMatchmakerProxyKind { diff --git a/sdks/full/rust-cli/src/models/mod.rs b/sdks/full/rust-cli/src/models/mod.rs index a1fbbba440..968c806f3e 100644 --- a/sdks/full/rust-cli/src/models/mod.rs +++ b/sdks/full/rust-cli/src/models/mod.rs @@ -1,7 +1,11 @@ +pub mod admin_cluster_get_server_ips_response; +pub use self::admin_cluster_get_server_ips_response::AdminClusterGetServerIpsResponse; pub mod admin_login_request; pub use self::admin_login_request::AdminLoginRequest; pub mod admin_login_response; pub use self::admin_login_response::AdminLoginResponse; +pub mod admin_pool_type; +pub use self::admin_pool_type::AdminPoolType; pub mod auth_complete_status; pub use self::auth_complete_status::AuthCompleteStatus; pub mod auth_identity_complete_access_token_verification_request; @@ -234,8 +238,6 @@ pub mod cloud_svc_metrics; pub use self::cloud_svc_metrics::CloudSvcMetrics; pub mod cloud_svc_perf; pub use self::cloud_svc_perf::CloudSvcPerf; -pub mod cloud_universal_region; -pub use self::cloud_universal_region::CloudUniversalRegion; pub mod cloud_validate_group_request; pub use self::cloud_validate_group_request::CloudValidateGroupRequest; pub mod cloud_validate_group_response; @@ -572,6 +574,8 @@ pub mod portal_notification_unregister_service; pub use self::portal_notification_unregister_service::PortalNotificationUnregisterService; pub mod portal_register_notifications_request; pub use self::portal_register_notifications_request::PortalRegisterNotificationsRequest; +pub mod provision_servers_get_server_info_response; +pub use self::provision_servers_get_server_info_response::ProvisionServersGetServerInfoResponse; pub mod upload_prepare_file; pub use 
self::upload_prepare_file::UploadPrepareFile; pub mod upload_presigned_request; diff --git a/sdks/full/rust-cli/src/models/provision_servers_get_server_info_response.rs b/sdks/full/rust-cli/src/models/provision_servers_get_server_info_response.rs new file mode 100644 index 0000000000..46f05efd50 --- /dev/null +++ b/sdks/full/rust-cli/src/models/provision_servers_get_server_info_response.rs @@ -0,0 +1,40 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ProvisionServersGetServerInfoResponse { + #[serde(rename = "cluster_id")] + pub cluster_id: uuid::Uuid, + #[serde(rename = "datacenter_id")] + pub datacenter_id: uuid::Uuid, + #[serde(rename = "name")] + pub name: String, + #[serde(rename = "server_id")] + pub server_id: uuid::Uuid, + #[serde(rename = "vlan_ip")] + pub vlan_ip: String, +} + +impl ProvisionServersGetServerInfoResponse { + pub fn new(cluster_id: uuid::Uuid, datacenter_id: uuid::Uuid, name: String, server_id: uuid::Uuid, vlan_ip: String) -> ProvisionServersGetServerInfoResponse { + ProvisionServersGetServerInfoResponse { + cluster_id, + datacenter_id, + name, + server_id, + vlan_ip, + } + } +} + + diff --git a/sdks/full/rust/.openapi-generator/FILES b/sdks/full/rust/.openapi-generator/FILES index f95d096f8d..b20ce3bdf1 100644 --- a/sdks/full/rust/.openapi-generator/FILES +++ b/sdks/full/rust/.openapi-generator/FILES @@ -4,8 +4,11 @@ Cargo.toml README.md docs/AdminApi.md +docs/AdminClusterApi.md +docs/AdminClusterGetServerIpsResponse.md docs/AdminLoginRequest.md docs/AdminLoginResponse.md +docs/AdminPoolType.md docs/AuthCompleteStatus.md docs/AuthIdentityAccessTokenApi.md docs/AuthIdentityCompleteAccessTokenVerificationRequest.md @@ -141,7 +144,6 @@ 
docs/CloudRegionTierExpenses.md docs/CloudSvcMetrics.md docs/CloudSvcPerf.md docs/CloudTiersApi.md -docs/CloudUniversalRegion.md docs/CloudUploadsApi.md docs/CloudValidateGroupRequest.md docs/CloudValidateGroupResponse.md @@ -326,12 +328,15 @@ docs/PortalNotificationRegisterService.md docs/PortalNotificationUnregisterService.md docs/PortalNotificationsApi.md docs/PortalRegisterNotificationsRequest.md +docs/ProvisionServersApi.md +docs/ProvisionServersGetServerInfoResponse.md docs/UploadPrepareFile.md docs/UploadPresignedRequest.md docs/ValidationError.md docs/WatchResponse.md git_push.sh src/apis/admin_api.rs +src/apis/admin_cluster_api.rs src/apis/auth_identity_access_token_api.rs src/apis/auth_identity_email_api.rs src/apis/auth_tokens_api.rs @@ -369,9 +374,12 @@ src/apis/mod.rs src/apis/module_api.rs src/apis/portal_games_api.rs src/apis/portal_notifications_api.rs +src/apis/provision_servers_api.rs src/lib.rs +src/models/admin_cluster_get_server_ips_response.rs src/models/admin_login_request.rs src/models/admin_login_response.rs +src/models/admin_pool_type.rs src/models/auth_complete_status.rs src/models/auth_identity_complete_access_token_verification_request.rs src/models/auth_identity_complete_email_verification_request.rs @@ -488,7 +496,6 @@ src/models/cloud_region_tier.rs src/models/cloud_region_tier_expenses.rs src/models/cloud_svc_metrics.rs src/models/cloud_svc_perf.rs -src/models/cloud_universal_region.rs src/models/cloud_validate_group_request.rs src/models/cloud_validate_group_response.rs src/models/cloud_version_cdn_config.rs @@ -658,6 +665,7 @@ src/models/portal_notification_register_firebase_service.rs src/models/portal_notification_register_service.rs src/models/portal_notification_unregister_service.rs src/models/portal_register_notifications_request.rs +src/models/provision_servers_get_server_info_response.rs src/models/upload_prepare_file.rs src/models/upload_presigned_request.rs src/models/validation_error.rs diff --git 
a/sdks/full/rust/README.md b/sdks/full/rust/README.md index a00e327989..698e96a35c 100644 --- a/sdks/full/rust/README.md +++ b/sdks/full/rust/README.md @@ -26,6 +26,7 @@ All URIs are relative to *https://api.rivet.gg* Class | Method | HTTP request | Description ------------ | ------------- | ------------- | ------------- *AdminApi* | [**admin_login**](docs/AdminApi.md#admin_login) | **POST** /admin/login | +*AdminClusterApi* | [**admin_cluster_get_server_ips**](docs/AdminClusterApi.md#admin_cluster_get_server_ips) | **GET** /cluster/server_ips | *AuthIdentityAccessTokenApi* | [**auth_identity_access_token_complete_access_token_verification**](docs/AuthIdentityAccessTokenApi.md#auth_identity_access_token_complete_access_token_verification) | **POST** /auth/identity/access-token/complete-verification | *AuthIdentityEmailApi* | [**auth_identity_email_complete_email_verification**](docs/AuthIdentityEmailApi.md#auth_identity_email_complete_email_verification) | **POST** /auth/identity/email/complete-verification | *AuthIdentityEmailApi* | [**auth_identity_email_start_email_verification**](docs/AuthIdentityEmailApi.md#auth_identity_email_start_email_verification) | **POST** /auth/identity/email/start-verification | @@ -159,12 +160,15 @@ Class | Method | HTTP request | Description *PortalGamesApi* | [**portal_games_get_game_profile**](docs/PortalGamesApi.md#portal_games_get_game_profile) | **GET** /portal/games/{game_name_id}/profile | *PortalNotificationsApi* | [**portal_notifications_register_notifications**](docs/PortalNotificationsApi.md#portal_notifications_register_notifications) | **POST** /portal/notifications/register | *PortalNotificationsApi* | [**portal_notifications_unregister_notifications**](docs/PortalNotificationsApi.md#portal_notifications_unregister_notifications) | **DELETE** /portal/notifications/register | +*ProvisionServersApi* | [**provision_servers_get_server_info**](docs/ProvisionServersApi.md#provision_servers_get_server_info) | **GET** 
/servers/{ip}/info | ## Documentation For Models + - [AdminClusterGetServerIpsResponse](docs/AdminClusterGetServerIpsResponse.md) - [AdminLoginRequest](docs/AdminLoginRequest.md) - [AdminLoginResponse](docs/AdminLoginResponse.md) + - [AdminPoolType](docs/AdminPoolType.md) - [AuthCompleteStatus](docs/AuthCompleteStatus.md) - [AuthIdentityCompleteAccessTokenVerificationRequest](docs/AuthIdentityCompleteAccessTokenVerificationRequest.md) - [AuthIdentityCompleteEmailVerificationRequest](docs/AuthIdentityCompleteEmailVerificationRequest.md) @@ -281,7 +285,6 @@ Class | Method | HTTP request | Description - [CloudRegionTierExpenses](docs/CloudRegionTierExpenses.md) - [CloudSvcMetrics](docs/CloudSvcMetrics.md) - [CloudSvcPerf](docs/CloudSvcPerf.md) - - [CloudUniversalRegion](docs/CloudUniversalRegion.md) - [CloudValidateGroupRequest](docs/CloudValidateGroupRequest.md) - [CloudValidateGroupResponse](docs/CloudValidateGroupResponse.md) - [CloudVersionCdnConfig](docs/CloudVersionCdnConfig.md) @@ -450,6 +453,7 @@ Class | Method | HTTP request | Description - [PortalNotificationRegisterService](docs/PortalNotificationRegisterService.md) - [PortalNotificationUnregisterService](docs/PortalNotificationUnregisterService.md) - [PortalRegisterNotificationsRequest](docs/PortalRegisterNotificationsRequest.md) + - [ProvisionServersGetServerInfoResponse](docs/ProvisionServersGetServerInfoResponse.md) - [UploadPrepareFile](docs/UploadPrepareFile.md) - [UploadPresignedRequest](docs/UploadPresignedRequest.md) - [ValidationError](docs/ValidationError.md) diff --git a/sdks/full/rust/docs/AdminClusterApi.md b/sdks/full/rust/docs/AdminClusterApi.md new file mode 100644 index 0000000000..193d53e1f9 --- /dev/null +++ b/sdks/full/rust/docs/AdminClusterApi.md @@ -0,0 +1,38 @@ +# \AdminClusterApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- 
+[**admin_cluster_get_server_ips**](AdminClusterApi.md#admin_cluster_get_server_ips) | **GET** /cluster/server_ips | + + + +## admin_cluster_get_server_ips + +> crate::models::AdminClusterGetServerIpsResponse admin_cluster_get_server_ips(server_id, pool) + + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**server_id** | Option<**uuid::Uuid**> | | | +**pool** | Option<[**AdminPoolType**](.md)> | | | + +### Return type + +[**crate::models::AdminClusterGetServerIpsResponse**](AdminClusterGetServerIpsResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/full/rust/docs/AdminClusterGetServerIpsResponse.md b/sdks/full/rust/docs/AdminClusterGetServerIpsResponse.md new file mode 100644 index 0000000000..9f56f9efbe --- /dev/null +++ b/sdks/full/rust/docs/AdminClusterGetServerIpsResponse.md @@ -0,0 +1,11 @@ +# AdminClusterGetServerIpsResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**ips** | **Vec** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust/docs/CloudUniversalRegion.md b/sdks/full/rust/docs/AdminPoolType.md similarity index 92% rename from sdks/full/rust/docs/CloudUniversalRegion.md rename to sdks/full/rust/docs/AdminPoolType.md index f3a1ad9699..fcba04ce3d 100644 --- a/sdks/full/rust/docs/CloudUniversalRegion.md +++ b/sdks/full/rust/docs/AdminPoolType.md @@ -1,4 +1,4 @@ -# CloudUniversalRegion +# AdminPoolType ## Properties diff 
--git a/sdks/full/rust/docs/CloudRegionSummary.md b/sdks/full/rust/docs/CloudRegionSummary.md index 56a181b231..4f7555146c 100644 --- a/sdks/full/rust/docs/CloudRegionSummary.md +++ b/sdks/full/rust/docs/CloudRegionSummary.md @@ -9,7 +9,6 @@ Name | Type | Description | Notes **region_display_name** | **String** | Represent a resource's readable display name. | **region_id** | [**uuid::Uuid**](uuid::Uuid.md) | | **region_name_id** | **String** | A human readable short identifier used to references resources. Different than a `rivet.common#Uuid` because this is intended to be human readable. Different than `rivet.common#DisplayName` because this should not include special characters and be short. | -**universal_region** | [**crate::models::CloudUniversalRegion**](CloudUniversalRegion.md) | | [[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) diff --git a/sdks/full/rust/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md b/sdks/full/rust/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md index 62cc673f41..31cfd0f65a 100644 --- a/sdks/full/rust/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md +++ b/sdks/full/rust/docs/CloudVersionMatchmakerGameModeRuntimeDockerPort.md @@ -7,7 +7,7 @@ Name | Type | Description | Notes **dev_port** | Option<**i32**> | _Configures Rivet CLI behavior. Has no effect on server behavior._ | [optional] **dev_port_range** | Option<[**crate::models::CloudVersionMatchmakerPortRange**](CloudVersionMatchmakerPortRange.md)> | | [optional] **dev_protocol** | Option<[**crate::models::CloudVersionMatchmakerPortProtocol**](CloudVersionMatchmakerPortProtocol.md)> | | [optional] -**port** | Option<**i32**> | The port number to connect to. | [optional] +**port** | Option<**i32**> | The port number to connect to. 
### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind | [optional] **port_range** | Option<[**crate::models::CloudVersionMatchmakerPortRange**](CloudVersionMatchmakerPortRange.md)> | | [optional] **protocol** | Option<[**crate::models::CloudVersionMatchmakerPortProtocol**](CloudVersionMatchmakerPortProtocol.md)> | | [optional] **proxy** | Option<[**crate::models::CloudVersionMatchmakerProxyKind**](CloudVersionMatchmakerProxyKind.md)> | | [optional] diff --git a/sdks/full/rust/docs/ProvisionServersApi.md b/sdks/full/rust/docs/ProvisionServersApi.md new file mode 100644 index 0000000000..e91b89bd46 --- /dev/null +++ b/sdks/full/rust/docs/ProvisionServersApi.md @@ -0,0 +1,37 @@ +# \ProvisionServersApi + +All URIs are relative to *https://api.rivet.gg* + +Method | HTTP request | Description +------------- | ------------- | ------------- +[**provision_servers_get_server_info**](ProvisionServersApi.md#provision_servers_get_server_info) | **GET** /servers/{ip}/info | + + + +## provision_servers_get_server_info + +> crate::models::ProvisionServersGetServerInfoResponse provision_servers_get_server_info(ip) + + +### Parameters + + +Name | Type | Description | Required | Notes +------------- | ------------- | ------------- | ------------- | ------------- +**ip** | **String** | | [required] | + +### Return type + +[**crate::models::ProvisionServersGetServerInfoResponse**](ProvisionServersGetServerInfoResponse.md) + +### Authorization + +[BearerAuth](../README.md#BearerAuth) + +### HTTP request headers + +- **Content-Type**: Not defined +- **Accept**: application/json + +[[Back to top]](#) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to Model list]](../README.md#documentation-for-models) [[Back to README]](../README.md) + diff --git a/sdks/full/rust/docs/ProvisionServersGetServerInfoResponse.md b/sdks/full/rust/docs/ProvisionServersGetServerInfoResponse.md new file mode 100644 index 0000000000..ea5affe1e7 --- 
/dev/null +++ b/sdks/full/rust/docs/ProvisionServersGetServerInfoResponse.md @@ -0,0 +1,15 @@ +# ProvisionServersGetServerInfoResponse + +## Properties + +Name | Type | Description | Notes +------------ | ------------- | ------------- | ------------- +**cluster_id** | [**uuid::Uuid**](uuid::Uuid.md) | | +**datacenter_id** | [**uuid::Uuid**](uuid::Uuid.md) | | +**name** | **String** | | +**server_id** | [**uuid::Uuid**](uuid::Uuid.md) | | +**vlan_ip** | **String** | | + +[[Back to Model list]](../README.md#documentation-for-models) [[Back to API list]](../README.md#documentation-for-api-endpoints) [[Back to README]](../README.md) + + diff --git a/sdks/full/rust/src/apis/admin_cluster_api.rs b/sdks/full/rust/src/apis/admin_cluster_api.rs new file mode 100644 index 0000000000..5d1d217a2b --- /dev/null +++ b/sdks/full/rust/src/apis/admin_cluster_api.rs @@ -0,0 +1,67 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`admin_cluster_get_server_ips`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum AdminClusterGetServerIpsError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + + +pub async fn admin_cluster_get_server_ips(configuration: &configuration::Configuration, server_id: Option<&str>, pool: Option) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/cluster/server_ips", 
local_var_configuration.base_path); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_str) = server_id { + local_var_req_builder = local_var_req_builder.query(&[("server_id", &local_var_str.to_string())]); + } + if let Some(ref local_var_str) = pool { + local_var_req_builder = local_var_req_builder.query(&[("pool", &local_var_str.to_string())]); + } + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + diff --git a/sdks/full/rust/src/apis/mod.rs b/sdks/full/rust/src/apis/mod.rs index b22b9678f0..c1427c12f5 100644 --- a/sdks/full/rust/src/apis/mod.rs +++ b/sdks/full/rust/src/apis/mod.rs @@ -91,6 +91,7 @@ pub fn parse_deep_object(prefix: &str, value: &serde_json::Value) -> Vec<(String } pub mod admin_api; +pub mod admin_cluster_api; pub mod auth_identity_access_token_api; pub mod auth_identity_email_api; pub mod auth_tokens_api; @@ -126,5 +127,6 @@ pub mod matchmaker_regions_api; pub mod module_api; pub mod portal_games_api; 
pub mod portal_notifications_api; +pub mod provision_servers_api; pub mod configuration; diff --git a/sdks/full/rust/src/apis/provision_servers_api.rs b/sdks/full/rust/src/apis/provision_servers_api.rs new file mode 100644 index 0000000000..69aab5a383 --- /dev/null +++ b/sdks/full/rust/src/apis/provision_servers_api.rs @@ -0,0 +1,61 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + +use reqwest; + +use crate::apis::ResponseContent; +use super::{Error, configuration}; + + +/// struct for typed errors of method [`provision_servers_get_server_info`] +#[derive(Debug, Clone, Serialize, Deserialize)] +#[serde(untagged)] +pub enum ProvisionServersGetServerInfoError { + Status400(crate::models::ErrorBody), + Status403(crate::models::ErrorBody), + Status404(crate::models::ErrorBody), + Status408(crate::models::ErrorBody), + Status429(crate::models::ErrorBody), + Status500(crate::models::ErrorBody), + UnknownValue(serde_json::Value), +} + + +pub async fn provision_servers_get_server_info(configuration: &configuration::Configuration, ip: &str) -> Result> { + let local_var_configuration = configuration; + + let local_var_client = &local_var_configuration.client; + + let local_var_uri_str = format!("{}/servers/{ip}/info", local_var_configuration.base_path, ip=crate::apis::urlencode(ip)); + let mut local_var_req_builder = local_var_client.request(reqwest::Method::GET, local_var_uri_str.as_str()); + + if let Some(ref local_var_user_agent) = local_var_configuration.user_agent { + local_var_req_builder = local_var_req_builder.header(reqwest::header::USER_AGENT, local_var_user_agent.clone()); + } + if let Some(ref local_var_token) = local_var_configuration.bearer_access_token { + local_var_req_builder = local_var_req_builder.bearer_auth(local_var_token.to_owned()); + }; + + let 
local_var_req = local_var_req_builder.build()?; + let local_var_resp = local_var_client.execute(local_var_req).await?; + + let local_var_status = local_var_resp.status(); + let local_var_content = local_var_resp.text().await?; + + if !local_var_status.is_client_error() && !local_var_status.is_server_error() { + serde_json::from_str(&local_var_content).map_err(Error::from) + } else { + let local_var_entity: Option = serde_json::from_str(&local_var_content).ok(); + let local_var_error = ResponseContent { status: local_var_status, content: local_var_content, entity: local_var_entity }; + Err(Error::ResponseError(local_var_error)) + } +} + diff --git a/sdks/full/rust/src/models/admin_cluster_get_server_ips_response.rs b/sdks/full/rust/src/models/admin_cluster_get_server_ips_response.rs new file mode 100644 index 0000000000..539777c28f --- /dev/null +++ b/sdks/full/rust/src/models/admin_cluster_get_server_ips_response.rs @@ -0,0 +1,28 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct AdminClusterGetServerIpsResponse { + #[serde(rename = "ips")] + pub ips: Vec, +} + +impl AdminClusterGetServerIpsResponse { + pub fn new(ips: Vec) -> AdminClusterGetServerIpsResponse { + AdminClusterGetServerIpsResponse { + ips, + } + } +} + + diff --git a/sdks/full/rust/src/models/admin_pool_type.rs b/sdks/full/rust/src/models/admin_pool_type.rs new file mode 100644 index 0000000000..0bb6980c1b --- /dev/null +++ b/sdks/full/rust/src/models/admin_pool_type.rs @@ -0,0 +1,42 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + 
*/ + + +/// +#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +pub enum AdminPoolType { + #[serde(rename = "job")] + Job, + #[serde(rename = "gg")] + Gg, + #[serde(rename = "ats")] + Ats, + +} + +impl ToString for AdminPoolType { + fn to_string(&self) -> String { + match self { + Self::Job => String::from("job"), + Self::Gg => String::from("gg"), + Self::Ats => String::from("ats"), + } + } +} + +impl Default for AdminPoolType { + fn default() -> AdminPoolType { + Self::Job + } +} + + + + diff --git a/sdks/full/rust/src/models/cloud_region_summary.rs b/sdks/full/rust/src/models/cloud_region_summary.rs index 397c9ceb8e..f75ea518aa 100644 --- a/sdks/full/rust/src/models/cloud_region_summary.rs +++ b/sdks/full/rust/src/models/cloud_region_summary.rs @@ -28,20 +28,17 @@ pub struct CloudRegionSummary { /// A human readable short identifier used to references resources. Different than a `rivet.common#Uuid` because this is intended to be human readable. Different than `rivet.common#DisplayName` because this should not include special characters and be short. #[serde(rename = "region_name_id")] pub region_name_id: String, - #[serde(rename = "universal_region")] - pub universal_region: crate::models::CloudUniversalRegion, } impl CloudRegionSummary { /// A region summary. 
- pub fn new(provider: String, provider_display_name: String, region_display_name: String, region_id: uuid::Uuid, region_name_id: String, universal_region: crate::models::CloudUniversalRegion) -> CloudRegionSummary { + pub fn new(provider: String, provider_display_name: String, region_display_name: String, region_id: uuid::Uuid, region_name_id: String) -> CloudRegionSummary { CloudRegionSummary { provider, provider_display_name, region_display_name, region_id, region_name_id, - universal_region, } } } diff --git a/sdks/full/rust/src/models/cloud_universal_region.rs b/sdks/full/rust/src/models/cloud_universal_region.rs deleted file mode 100644 index f709a6d0b6..0000000000 --- a/sdks/full/rust/src/models/cloud_universal_region.rs +++ /dev/null @@ -1,117 +0,0 @@ -/* - * Rivet API - * - * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) - * - * The version of the OpenAPI document: 0.0.1 - * - * Generated by: https://openapi-generator.tech - */ - - -/// -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] -pub enum CloudUniversalRegion { - #[serde(rename = "unknown")] - Unknown, - #[serde(rename = "local")] - Local, - #[serde(rename = "amsterdam")] - Amsterdam, - #[serde(rename = "atlanta")] - Atlanta, - #[serde(rename = "bangalore")] - Bangalore, - #[serde(rename = "dallas")] - Dallas, - #[serde(rename = "frankfurt")] - Frankfurt, - #[serde(rename = "london")] - London, - #[serde(rename = "mumbai")] - Mumbai, - #[serde(rename = "newark")] - Newark, - #[serde(rename = "new_york_city")] - NewYorkCity, - #[serde(rename = "san_francisco")] - SanFrancisco, - #[serde(rename = "singapore")] - Singapore, - #[serde(rename = "sydney")] - Sydney, - #[serde(rename = "tokyo")] - Tokyo, - #[serde(rename = "toronto")] - Toronto, - #[serde(rename = "washington_dc")] - WashingtonDc, - #[serde(rename = "chicago")] - Chicago, - #[serde(rename = "paris")] - Paris, - #[serde(rename = 
"seattle")] - Seattle, - #[serde(rename = "sao_paulo")] - SaoPaulo, - #[serde(rename = "stockholm")] - Stockholm, - #[serde(rename = "chennai")] - Chennai, - #[serde(rename = "osaka")] - Osaka, - #[serde(rename = "milan")] - Milan, - #[serde(rename = "miami")] - Miami, - #[serde(rename = "jakarta")] - Jakarta, - #[serde(rename = "los_angeles")] - LosAngeles, - -} - -impl ToString for CloudUniversalRegion { - fn to_string(&self) -> String { - match self { - Self::Unknown => String::from("unknown"), - Self::Local => String::from("local"), - Self::Amsterdam => String::from("amsterdam"), - Self::Atlanta => String::from("atlanta"), - Self::Bangalore => String::from("bangalore"), - Self::Dallas => String::from("dallas"), - Self::Frankfurt => String::from("frankfurt"), - Self::London => String::from("london"), - Self::Mumbai => String::from("mumbai"), - Self::Newark => String::from("newark"), - Self::NewYorkCity => String::from("new_york_city"), - Self::SanFrancisco => String::from("san_francisco"), - Self::Singapore => String::from("singapore"), - Self::Sydney => String::from("sydney"), - Self::Tokyo => String::from("tokyo"), - Self::Toronto => String::from("toronto"), - Self::WashingtonDc => String::from("washington_dc"), - Self::Chicago => String::from("chicago"), - Self::Paris => String::from("paris"), - Self::Seattle => String::from("seattle"), - Self::SaoPaulo => String::from("sao_paulo"), - Self::Stockholm => String::from("stockholm"), - Self::Chennai => String::from("chennai"), - Self::Osaka => String::from("osaka"), - Self::Milan => String::from("milan"), - Self::Miami => String::from("miami"), - Self::Jakarta => String::from("jakarta"), - Self::LosAngeles => String::from("los_angeles"), - } - } -} - -impl Default for CloudUniversalRegion { - fn default() -> CloudUniversalRegion { - Self::Unknown - } -} - - - - diff --git a/sdks/full/rust/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs 
b/sdks/full/rust/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs index 6b40c04314..27660b0f01 100644 --- a/sdks/full/rust/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs +++ b/sdks/full/rust/src/models/cloud_version_matchmaker_game_mode_runtime_docker_port.rs @@ -8,7 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerGameModeRuntimeDockerPort : A docker port. +/// CloudVersionMatchmakerGameModeRuntimeDockerPort : Port config for a docker build. @@ -21,7 +21,7 @@ pub struct CloudVersionMatchmakerGameModeRuntimeDockerPort { pub dev_port_range: Option>, #[serde(rename = "dev_protocol", skip_serializing_if = "Option::is_none")] pub dev_protocol: Option, - /// The port number to connect to. + /// The port number to connect to. ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind #[serde(rename = "port", skip_serializing_if = "Option::is_none")] pub port: Option, #[serde(rename = "port_range", skip_serializing_if = "Option::is_none")] @@ -33,7 +33,7 @@ pub struct CloudVersionMatchmakerGameModeRuntimeDockerPort { } impl CloudVersionMatchmakerGameModeRuntimeDockerPort { - /// A docker port. + /// Port config for a docker build. pub fn new() -> CloudVersionMatchmakerGameModeRuntimeDockerPort { CloudVersionMatchmakerGameModeRuntimeDockerPort { dev_port: None, diff --git a/sdks/full/rust/src/models/cloud_version_matchmaker_port_protocol.rs b/sdks/full/rust/src/models/cloud_version_matchmaker_port_protocol.rs index 6d539cb495..50d9809f63 100644 --- a/sdks/full/rust/src/models/cloud_version_matchmaker_port_protocol.rs +++ b/sdks/full/rust/src/models/cloud_version_matchmaker_port_protocol.rs @@ -8,9 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerPortProtocol : Type of network traffic to allow access to this port. Configuring `https` or `tcp_tls` will provide TLS termination for you via Game Guard. 
`https` and `tcp_tls` must have `proxy_kind` set to `game_guard`. +/// CloudVersionMatchmakerPortProtocol : Signifies the protocol of the port. Note that when proxying through GameGuard (via `ProxyKind`), the port number returned by `/find`, `/join`, and `/create` will not be the same as the port number configured in the config: - With HTTP, the port will always be 80. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - With HTTPS, the port will always be 443. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - Using TCP/UDP, the port will be a random number between 26000 and 31999. This gets automatically routed to the correct port being used by the game server. ### Related - cloud.version.matchmaker.GameModeRuntimeDockerPort - cloud.version.matchmaker.ProxyKind - /docs/dynamic-servers/concepts/game-guard - matchmaker.lobbies.find -/// Type of network traffic to allow access to this port. Configuring `https` or `tcp_tls` will provide TLS termination for you via Game Guard. `https` and `tcp_tls` must have `proxy_kind` set to `game_guard`. +/// Signifies the protocol of the port. Note that when proxying through GameGuard (via `ProxyKind`), the port number returned by `/find`, `/join`, and `/create` will not be the same as the port number configured in the config: - With HTTP, the port will always be 80. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - With HTTPS, the port will always be 443. The hostname of the port correctly routes the incoming connection to the correct port being used by the game server. - Using TCP/UDP, the port will be a random number between 26000 and 31999. This gets automatically routed to the correct port being used by the game server. 
### Related - cloud.version.matchmaker.GameModeRuntimeDockerPort - cloud.version.matchmaker.ProxyKind - /docs/dynamic-servers/concepts/game-guard - matchmaker.lobbies.find #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum CloudVersionMatchmakerPortProtocol { #[serde(rename = "http")] diff --git a/sdks/full/rust/src/models/cloud_version_matchmaker_port_range.rs b/sdks/full/rust/src/models/cloud_version_matchmaker_port_range.rs index 0dc6966322..80f8fbd6f6 100644 --- a/sdks/full/rust/src/models/cloud_version_matchmaker_port_range.rs +++ b/sdks/full/rust/src/models/cloud_version_matchmaker_port_range.rs @@ -8,7 +8,7 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerPortRange : Range of ports that can be connected to. If configured, `network_mode` must equal `host`. Port ranges may overlap between containers, it is the responsibility of the developer to ensure ports are available before using. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. +/// CloudVersionMatchmakerPortRange : Range of ports that can be connected to. Note that the port range values returned by /find ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind @@ -23,7 +23,7 @@ pub struct CloudVersionMatchmakerPortRange { } impl CloudVersionMatchmakerPortRange { - /// Range of ports that can be connected to. If configured, `network_mode` must equal `host`. Port ranges may overlap between containers, it is the responsibility of the developer to ensure ports are available before using. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. + /// Range of ports that can be connected to. 
Note that the port range values returned by /find ### Related - cloud.version.matchmaker.PortProtocol - cloud.version.matchmaker.ProxyKind pub fn new(max: i32, min: i32) -> CloudVersionMatchmakerPortRange { CloudVersionMatchmakerPortRange { max, diff --git a/sdks/full/rust/src/models/cloud_version_matchmaker_proxy_kind.rs b/sdks/full/rust/src/models/cloud_version_matchmaker_proxy_kind.rs index d4c2f20eac..004a94ffe0 100644 --- a/sdks/full/rust/src/models/cloud_version_matchmaker_proxy_kind.rs +++ b/sdks/full/rust/src/models/cloud_version_matchmaker_proxy_kind.rs @@ -8,9 +8,9 @@ * Generated by: https://openapi-generator.tech */ -/// CloudVersionMatchmakerProxyKind : Range of ports that can be connected to. `game_guard` (default) proxies all traffic through [Game Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) to mitigate DDoS attacks and provide TLS termination. `none` sends traffic directly to the game server. If configured, `network_mode` must equal `host`. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. +/// CloudVersionMatchmakerProxyKind : Denotes what type of proxying to use for ports. Rivet GameGuard adds DoS and DDoS mitigation to incoming connections. ### Related - /docs/dynamic-servers/concepts/game-guard - cloud.version.matchmaker.PortProtocol -/// Range of ports that can be connected to. `game_guard` (default) proxies all traffic through [Game Guard](https://rivet.gg/docs/dynamic-servers/concepts/game-guard) to mitigate DDoS attacks and provide TLS termination. `none` sends traffic directly to the game server. If configured, `network_mode` must equal `host`. Read more about host networking [here](https://rivet.gg/docs/dynamic-servers/concepts/host-bridge-networking). Only available on Rivet Open Source & Enterprise. +/// Denotes what type of proxying to use for ports. 
Rivet GameGuard adds DoS and DDoS mitigation to incoming connections. ### Related - /docs/dynamic-servers/concepts/game-guard - cloud.version.matchmaker.PortProtocol #[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] pub enum CloudVersionMatchmakerProxyKind { #[serde(rename = "none")] diff --git a/sdks/full/rust/src/models/mod.rs b/sdks/full/rust/src/models/mod.rs index a1fbbba440..968c806f3e 100644 --- a/sdks/full/rust/src/models/mod.rs +++ b/sdks/full/rust/src/models/mod.rs @@ -1,7 +1,11 @@ +pub mod admin_cluster_get_server_ips_response; +pub use self::admin_cluster_get_server_ips_response::AdminClusterGetServerIpsResponse; pub mod admin_login_request; pub use self::admin_login_request::AdminLoginRequest; pub mod admin_login_response; pub use self::admin_login_response::AdminLoginResponse; +pub mod admin_pool_type; +pub use self::admin_pool_type::AdminPoolType; pub mod auth_complete_status; pub use self::auth_complete_status::AuthCompleteStatus; pub mod auth_identity_complete_access_token_verification_request; @@ -234,8 +238,6 @@ pub mod cloud_svc_metrics; pub use self::cloud_svc_metrics::CloudSvcMetrics; pub mod cloud_svc_perf; pub use self::cloud_svc_perf::CloudSvcPerf; -pub mod cloud_universal_region; -pub use self::cloud_universal_region::CloudUniversalRegion; pub mod cloud_validate_group_request; pub use self::cloud_validate_group_request::CloudValidateGroupRequest; pub mod cloud_validate_group_response; @@ -572,6 +574,8 @@ pub mod portal_notification_unregister_service; pub use self::portal_notification_unregister_service::PortalNotificationUnregisterService; pub mod portal_register_notifications_request; pub use self::portal_register_notifications_request::PortalRegisterNotificationsRequest; +pub mod provision_servers_get_server_info_response; +pub use self::provision_servers_get_server_info_response::ProvisionServersGetServerInfoResponse; pub mod upload_prepare_file; pub use 
self::upload_prepare_file::UploadPrepareFile; pub mod upload_presigned_request; diff --git a/sdks/full/rust/src/models/provision_servers_get_server_info_response.rs b/sdks/full/rust/src/models/provision_servers_get_server_info_response.rs new file mode 100644 index 0000000000..46f05efd50 --- /dev/null +++ b/sdks/full/rust/src/models/provision_servers_get_server_info_response.rs @@ -0,0 +1,40 @@ +/* + * Rivet API + * + * No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) + * + * The version of the OpenAPI document: 0.0.1 + * + * Generated by: https://openapi-generator.tech + */ + + + + +#[derive(Clone, Debug, PartialEq, Default, Serialize, Deserialize)] +pub struct ProvisionServersGetServerInfoResponse { + #[serde(rename = "cluster_id")] + pub cluster_id: uuid::Uuid, + #[serde(rename = "datacenter_id")] + pub datacenter_id: uuid::Uuid, + #[serde(rename = "name")] + pub name: String, + #[serde(rename = "server_id")] + pub server_id: uuid::Uuid, + #[serde(rename = "vlan_ip")] + pub vlan_ip: String, +} + +impl ProvisionServersGetServerInfoResponse { + pub fn new(cluster_id: uuid::Uuid, datacenter_id: uuid::Uuid, name: String, server_id: uuid::Uuid, vlan_ip: String) -> ProvisionServersGetServerInfoResponse { + ProvisionServersGetServerInfoResponse { + cluster_id, + datacenter_id, + name, + server_id, + vlan_ip, + } + } +} + + diff --git a/sdks/full/typescript/archive.tgz b/sdks/full/typescript/archive.tgz index f259e6543f..ff6d24c4ad 100644 --- a/sdks/full/typescript/archive.tgz +++ b/sdks/full/typescript/archive.tgz @@ -1,3 +1,3 @@ version https://git-lfs.github.com/spec/v1 -oid sha256:7af7ef55fa7de4e210995e5285ec9b0bda5cac7b36a4aa66ea89f93963ee223e -size 620901 +oid sha256:aa1ed530c1aaf93d16f537e48995340f17208e8db00a1c5dc796703900a4cd07 +size 621697 diff --git a/sdks/full/typescript/src/Client.ts b/sdks/full/typescript/src/Client.ts index 573a2d47bf..25e0f83e73 100644 --- a/sdks/full/typescript/src/Client.ts +++ 
b/sdks/full/typescript/src/Client.ts @@ -10,6 +10,7 @@ import { Group } from "./api/resources/group/client/Client"; import { Identity } from "./api/resources/identity/client/Client"; import { Kv } from "./api/resources/kv/client/Client"; import { Module } from "./api/resources/module/client/Client"; +import { Provision } from "./api/resources/provision/client/Client"; import { Auth } from "./api/resources/auth/client/Client"; import { Job } from "./api/resources/job/client/Client"; import { Matchmaker } from "./api/resources/matchmaker/client/Client"; @@ -67,6 +68,12 @@ export class RivetClient { return (this._module ??= new Module(this._options)); } + protected _provision: Provision | undefined; + + public get provision(): Provision { + return (this._provision ??= new Provision(this._options)); + } + protected _auth: Auth | undefined; public get auth(): Auth { diff --git a/sdks/full/typescript/src/api/resources/admin/client/Client.ts b/sdks/full/typescript/src/api/resources/admin/client/Client.ts index e06079e150..d165278709 100644 --- a/sdks/full/typescript/src/api/resources/admin/client/Client.ts +++ b/sdks/full/typescript/src/api/resources/admin/client/Client.ts @@ -8,6 +8,7 @@ import * as Rivet from "../../.."; import * as serializers from "../../../../serialization"; import urlJoin from "url-join"; import * as errors from "../../../../errors"; +import { Cluster } from "../resources/cluster/client/Client"; export declare namespace Admin { interface Options { @@ -146,6 +147,12 @@ export class Admin { } } + protected _cluster: Cluster | undefined; + + public get cluster(): Cluster { + return (this._cluster ??= new Cluster(this._options)); + } + protected async _getAuthorizationHeader() { const bearer = await core.Supplier.get(this._options.token); if (bearer != null) { diff --git a/sdks/full/typescript/src/api/resources/admin/index.ts b/sdks/full/typescript/src/api/resources/admin/index.ts index c9240f83b4..a931b36375 100644 --- 
a/sdks/full/typescript/src/api/resources/admin/index.ts +++ b/sdks/full/typescript/src/api/resources/admin/index.ts @@ -1,2 +1,3 @@ export * from "./types"; +export * from "./resources"; export * from "./client"; diff --git a/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/Client.ts b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/Client.ts new file mode 100644 index 0000000000..2a14016646 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/Client.ts @@ -0,0 +1,168 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../.."; +import urlJoin from "url-join"; +import * as serializers from "../../../../../../serialization"; +import * as errors from "../../../../../../errors"; + +export declare namespace Cluster { + interface Options { + environment?: core.Supplier; + token?: core.Supplier; + fetcher?: core.FetchFunction; + } + + interface RequestOptions { + timeoutInSeconds?: number; + maxRetries?: number; + } +} + +export class Cluster { + constructor(protected readonly _options: Cluster.Options = {}) {} + + /** + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + */ + public async getServerIps( + request: Rivet.admin.cluster.GetServerIpsRequest = {}, + requestOptions?: Cluster.RequestOptions + ): Promise { + const { serverId, pool } = request; + const _queryParams: Record = {}; + if (serverId != null) { + _queryParams["server_id"] = serverId; + } + + if (pool != null) { + _queryParams["pool"] = pool; + } + + const _response = await (this._options.fetcher ?? 
core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, + "/cluster/server_ips" + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + }, + contentType: "application/json", + queryParameters: _queryParams, + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + }); + if (_response.ok) { + return await serializers.admin.cluster.GetServerIpsResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 429: + throw new Rivet.RateLimitError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 403: + throw new Rivet.ForbiddenError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 408: + throw new Rivet.UnauthorizedError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + 
allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 404: + throw new Rivet.NotFoundError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 400: + throw new Rivet.BadRequestError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError(); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader() { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/index.ts b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/index.ts new file mode 100644 index 0000000000..415726b7fe --- /dev/null +++ b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/index.ts @@ -0,0 +1 @@ +export * from "./requests"; diff --git a/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/requests/GetServerIpsRequest.ts b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/requests/GetServerIpsRequest.ts new file mode 100644 index 0000000000..9be545aa46 --- 
/dev/null +++ b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/requests/GetServerIpsRequest.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../../.."; + +export interface GetServerIpsRequest { + serverId?: string; + pool?: Rivet.admin.PoolType; +} diff --git a/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/requests/index.ts b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/requests/index.ts new file mode 100644 index 0000000000..e849a980a0 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/admin/resources/cluster/client/requests/index.ts @@ -0,0 +1 @@ +export { GetServerIpsRequest } from "./GetServerIpsRequest"; diff --git a/sdks/full/typescript/src/api/resources/admin/resources/cluster/index.ts b/sdks/full/typescript/src/api/resources/admin/resources/cluster/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/admin/resources/cluster/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/full/typescript/src/api/resources/admin/resources/common/index.ts b/sdks/full/typescript/src/api/resources/admin/resources/common/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/admin/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/full/typescript/src/api/resources/admin/resources/index.ts b/sdks/full/typescript/src/api/resources/admin/resources/index.ts new file mode 100644 index 0000000000..d5e4281b66 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/admin/resources/index.ts @@ -0,0 +1,3 @@ +export * as cluster from "./cluster"; +export * as common from "./common"; +export * from "./common/types"; diff --git a/sdks/full/typescript/src/api/resources/index.ts 
b/sdks/full/typescript/src/api/resources/index.ts index 3715de8a32..fa48c0f4ac 100644 --- a/sdks/full/typescript/src/api/resources/index.ts +++ b/sdks/full/typescript/src/api/resources/index.ts @@ -4,6 +4,7 @@ export * as group from "./group"; export * as identity from "./identity"; export * as kv from "./kv"; export * as module_ from "./module"; +export * as provision from "./provision"; export * as auth from "./auth"; export * as captcha from "./captcha"; export * as common from "./common"; diff --git a/sdks/full/typescript/src/api/resources/provision/client/Client.ts b/sdks/full/typescript/src/api/resources/provision/client/Client.ts new file mode 100644 index 0000000000..2c10c4e170 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/provision/client/Client.ts @@ -0,0 +1,30 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../environments"; +import * as core from "../../../../core"; +import { Servers } from "../resources/servers/client/Client"; + +export declare namespace Provision { + interface Options { + environment?: core.Supplier; + token?: core.Supplier; + fetcher?: core.FetchFunction; + } + + interface RequestOptions { + timeoutInSeconds?: number; + maxRetries?: number; + } +} + +export class Provision { + constructor(protected readonly _options: Provision.Options = {}) {} + + protected _servers: Servers | undefined; + + public get servers(): Servers { + return (this._servers ??= new Servers(this._options)); + } +} diff --git a/sdks/full/typescript/src/api/resources/provision/client/index.ts b/sdks/full/typescript/src/api/resources/provision/client/index.ts new file mode 100644 index 0000000000..cb0ff5c3b5 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/provision/client/index.ts @@ -0,0 +1 @@ +export {}; diff --git a/sdks/full/typescript/src/api/resources/provision/index.ts b/sdks/full/typescript/src/api/resources/provision/index.ts new file mode 100644 index 
0000000000..4ce0f39077 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/provision/index.ts @@ -0,0 +1,2 @@ +export * from "./resources"; +export * from "./client"; diff --git a/sdks/full/typescript/src/api/resources/provision/resources/index.ts b/sdks/full/typescript/src/api/resources/provision/resources/index.ts new file mode 100644 index 0000000000..ab7c2d0930 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/provision/resources/index.ts @@ -0,0 +1 @@ +export * as servers from "./servers"; diff --git a/sdks/full/typescript/src/api/resources/provision/resources/servers/client/Client.ts b/sdks/full/typescript/src/api/resources/provision/resources/servers/client/Client.ts new file mode 100644 index 0000000000..66dfcc6858 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/provision/resources/servers/client/Client.ts @@ -0,0 +1,157 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as environments from "../../../../../../environments"; +import * as core from "../../../../../../core"; +import * as Rivet from "../../../../.."; +import urlJoin from "url-join"; +import * as serializers from "../../../../../../serialization"; +import * as errors from "../../../../../../errors"; + +export declare namespace Servers { + interface Options { + environment?: core.Supplier; + token?: core.Supplier; + fetcher?: core.FetchFunction; + } + + interface RequestOptions { + timeoutInSeconds?: number; + maxRetries?: number; + } +} + +export class Servers { + constructor(protected readonly _options: Servers.Options = {}) {} + + /** + * @throws {@link Rivet.InternalError} + * @throws {@link Rivet.RateLimitError} + * @throws {@link Rivet.ForbiddenError} + * @throws {@link Rivet.UnauthorizedError} + * @throws {@link Rivet.NotFoundError} + * @throws {@link Rivet.BadRequestError} + */ + public async getServerInfo( + ip: string, + requestOptions?: Servers.RequestOptions + ): Promise { + const _response = await 
(this._options.fetcher ?? core.fetcher)({ + url: urlJoin( + (await core.Supplier.get(this._options.environment)) ?? environments.RivetEnvironment.Production, + `/servers/${ip}/info` + ), + method: "GET", + headers: { + Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", + }, + contentType: "application/json", + timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, + maxRetries: requestOptions?.maxRetries, + }); + if (_response.ok) { + return await serializers.provision.servers.GetServerInfoResponse.parseOrThrow(_response.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }); + } + + if (_response.error.reason === "status-code") { + switch (_response.error.statusCode) { + case 500: + throw new Rivet.InternalError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 429: + throw new Rivet.RateLimitError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 403: + throw new Rivet.ForbiddenError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 408: + throw new Rivet.UnauthorizedError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + 
allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 404: + throw new Rivet.NotFoundError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + case 400: + throw new Rivet.BadRequestError( + await serializers.ErrorBody.parseOrThrow(_response.error.body, { + unrecognizedObjectKeys: "passthrough", + allowUnrecognizedUnionMembers: true, + allowUnrecognizedEnumValues: true, + skipValidation: true, + breadcrumbsPrefix: ["response"], + }) + ); + default: + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.body, + }); + } + } + + switch (_response.error.reason) { + case "non-json": + throw new errors.RivetError({ + statusCode: _response.error.statusCode, + body: _response.error.rawBody, + }); + case "timeout": + throw new errors.RivetTimeoutError(); + case "unknown": + throw new errors.RivetError({ + message: _response.error.errorMessage, + }); + } + } + + protected async _getAuthorizationHeader() { + const bearer = await core.Supplier.get(this._options.token); + if (bearer != null) { + return `Bearer ${bearer}`; + } + + return undefined; + } +} diff --git a/sdks/full/typescript/src/api/resources/provision/resources/servers/client/index.ts b/sdks/full/typescript/src/api/resources/provision/resources/servers/client/index.ts new file mode 100644 index 0000000000..cb0ff5c3b5 --- /dev/null +++ b/sdks/full/typescript/src/api/resources/provision/resources/servers/client/index.ts @@ -0,0 +1 @@ +export {}; diff --git a/sdks/full/typescript/src/api/resources/provision/resources/servers/index.ts b/sdks/full/typescript/src/api/resources/provision/resources/servers/index.ts new file mode 100644 index 0000000000..c9240f83b4 --- /dev/null +++ 
b/sdks/full/typescript/src/api/resources/provision/resources/servers/index.ts @@ -0,0 +1,2 @@ +export * from "./types"; +export * from "./client"; diff --git a/sdks/full/typescript/src/serialization/resources/admin/index.ts b/sdks/full/typescript/src/serialization/resources/admin/index.ts index eea524d655..3ce0a3e38e 100644 --- a/sdks/full/typescript/src/serialization/resources/admin/index.ts +++ b/sdks/full/typescript/src/serialization/resources/admin/index.ts @@ -1 +1,2 @@ export * from "./types"; +export * from "./resources"; diff --git a/sdks/full/typescript/src/serialization/resources/admin/resources/cluster/index.ts b/sdks/full/typescript/src/serialization/resources/admin/resources/cluster/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/full/typescript/src/serialization/resources/admin/resources/cluster/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/full/typescript/src/serialization/resources/admin/resources/common/index.ts b/sdks/full/typescript/src/serialization/resources/admin/resources/common/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/full/typescript/src/serialization/resources/admin/resources/common/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/full/typescript/src/serialization/resources/admin/resources/index.ts b/sdks/full/typescript/src/serialization/resources/admin/resources/index.ts new file mode 100644 index 0000000000..d5e4281b66 --- /dev/null +++ b/sdks/full/typescript/src/serialization/resources/admin/resources/index.ts @@ -0,0 +1,3 @@ +export * as cluster from "./cluster"; +export * as common from "./common"; +export * from "./common/types"; diff --git a/sdks/full/typescript/src/serialization/resources/index.ts b/sdks/full/typescript/src/serialization/resources/index.ts index fb4ec320f6..8b81230cbb 100644 --- a/sdks/full/typescript/src/serialization/resources/index.ts +++ 
b/sdks/full/typescript/src/serialization/resources/index.ts @@ -4,6 +4,7 @@ export * as group from "./group"; export * as identity from "./identity"; export * as kv from "./kv"; export * as module_ from "./module"; +export * as provision from "./provision"; export * as auth from "./auth"; export * as captcha from "./captcha"; export * as common from "./common"; diff --git a/sdks/full/typescript/src/serialization/resources/provision/index.ts b/sdks/full/typescript/src/serialization/resources/provision/index.ts new file mode 100644 index 0000000000..3e5335fe42 --- /dev/null +++ b/sdks/full/typescript/src/serialization/resources/provision/index.ts @@ -0,0 +1 @@ +export * from "./resources"; diff --git a/sdks/full/typescript/src/serialization/resources/provision/resources/index.ts b/sdks/full/typescript/src/serialization/resources/provision/resources/index.ts new file mode 100644 index 0000000000..ab7c2d0930 --- /dev/null +++ b/sdks/full/typescript/src/serialization/resources/provision/resources/index.ts @@ -0,0 +1 @@ +export * as servers from "./servers"; diff --git a/sdks/full/typescript/src/serialization/resources/provision/resources/servers/index.ts b/sdks/full/typescript/src/serialization/resources/provision/resources/servers/index.ts new file mode 100644 index 0000000000..eea524d655 --- /dev/null +++ b/sdks/full/typescript/src/serialization/resources/provision/resources/servers/index.ts @@ -0,0 +1 @@ +export * from "./types"; diff --git a/sdks/runtime/go/matchmaker/lobbies/client.go b/sdks/runtime/go/matchmaker/lobbies/client.go index a67a635bbe..b06528d216 100644 --- a/sdks/runtime/go/matchmaker/lobbies/client.go +++ b/sdks/runtime/go/matchmaker/lobbies/client.go @@ -36,7 +36,6 @@ func NewClient(opts ...core.ClientOption) *Client { } // Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. 
-// This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. func (c *Client) Ready(ctx context.Context) error { baseURL := "https://api.rivet.gg" if c.baseURL != "" { @@ -116,11 +115,6 @@ func (c *Client) Ready(ctx context.Context) error { // join using the /join endpoint (this can be disabled by the developer by rejecting all new connections // after setting the lobby to closed). // Does not shutdown the lobby. -// -// This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for -// authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) -// for mock responses. When running on Rivet servers, you can access the given lobby token from the -// [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. func (c *Client) SetClosed(ctx context.Context, request *matchmaker.SetLobbyClosedRequest) error { baseURL := "https://api.rivet.gg" if c.baseURL != "" { @@ -197,12 +191,6 @@ func (c *Client) SetClosed(ctx context.Context, request *matchmaker.SetLobbyClos return nil } -// Sets the state JSON of the current lobby. -// -// This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for -// authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) -// for mock responses. When running on Rivet servers, you can access the given lobby token from the -// [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. 
func (c *Client) SetState(ctx context.Context, request interface{}) error { baseURL := "https://api.rivet.gg" if c.baseURL != "" { @@ -279,12 +267,6 @@ func (c *Client) SetState(ctx context.Context, request interface{}) error { return nil } -// Get the state of any lobby. -// -// This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for -// authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) -// for mock responses. When running on Rivet servers, you can access the given lobby token from the -// [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. func (c *Client) GetState(ctx context.Context, lobbyId uuid.UUID) (interface{}, error) { baseURL := "https://api.rivet.gg" if c.baseURL != "" { @@ -366,12 +348,6 @@ func (c *Client) GetState(ctx context.Context, lobbyId uuid.UUID) (interface{}, // Finds a lobby based on the given criteria. // If a lobby is not found and `prevent_auto_create_lobby` is `false`, // a new lobby will be created. -// -// When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in -// your game namespace, this endpoint does not require a token to authenticate. Otherwise, a -// [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used -// for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) -// can be used for general authentication. func (c *Client) Find(ctx context.Context, request *matchmaker.FindLobbyRequest) (*matchmaker.FindLobbyResponse, error) { baseURL := "https://api.rivet.gg" if c.baseURL != "" { @@ -458,12 +434,6 @@ func (c *Client) Find(ctx context.Context, request *matchmaker.FindLobbyRequest) // Joins a specific lobby. // This request will use the direct player count configured for the // lobby group. 
-// -// When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in -// your game namespace, this endpoint does not require a token to authenticate. Otherwise, a -// [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used -// for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) -// can be used for general authentication. func (c *Client) Join(ctx context.Context, request *matchmaker.JoinLobbyRequest) (*matchmaker.JoinLobbyResponse, error) { baseURL := "https://api.rivet.gg" if c.baseURL != "" { @@ -543,12 +513,6 @@ func (c *Client) Join(ctx context.Context, request *matchmaker.JoinLobbyRequest) } // Creates a custom lobby. -// -// When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in -// your game namespace, this endpoint does not require a token to authenticate. Otherwise, a -// [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used -// for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) -// can be used for general authentication. func (c *Client) Create(ctx context.Context, request *matchmaker.CreateLobbyRequest) (*matchmaker.CreateLobbyResponse, error) { baseURL := "https://api.rivet.gg" if c.baseURL != "" { @@ -628,12 +592,6 @@ func (c *Client) Create(ctx context.Context, request *matchmaker.CreateLobbyRequ } // Lists all open lobbies. -// -// When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in -// your game namespace, this endpoint does not require a token to authenticate. Otherwise, a -// [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used -// for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) -// can be used for general authentication. 
func (c *Client) List(ctx context.Context, request *matchmaker.ListLobbiesRequest) (*matchmaker.ListLobbiesResponse, error) { baseURL := "https://api.rivet.gg" if c.baseURL != "" { diff --git a/sdks/runtime/go/matchmaker/types.go b/sdks/runtime/go/matchmaker/types.go index b172126aeb..3fac4ac025 100644 --- a/sdks/runtime/go/matchmaker/types.go +++ b/sdks/runtime/go/matchmaker/types.go @@ -112,10 +112,10 @@ type JoinPort struct { Host *string `json:"host,omitempty"` Hostname string `json:"hostname"` // The port number for this lobby. Will be null if using a port range. - Port *int `json:"port,omitempty"` - PortRange *JoinPortRange `json:"port_range,omitempty"` + Port *int `json:"port,omitempty"` // Whether or not this lobby port uses TLS. You cannot mix a non-TLS and TLS ports. - IsTls bool `json:"is_tls"` + PortRange *JoinPortRange `json:"port_range,omitempty"` + IsTls bool `json:"is_tls"` _rawJSON json.RawMessage } diff --git a/sdks/runtime/openapi/openapi.yml b/sdks/runtime/openapi/openapi.yml index b81c9b33c5..8b4e09855f 100644 --- a/sdks/runtime/openapi/openapi.yml +++ b/sdks/runtime/openapi/openapi.yml @@ -425,15 +425,6 @@ paths: description: >- Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. - - This endpoint requires a [lobby - token](/docs/general/concepts/token-types#matchmaker-lobby) for - authentication, or a [development namespace - token](/docs/general/concepts/token-types#namespace-development) for - mock responses. When running on Rivet servers, you can access the given - lobby token from the - [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment - variable. operationId: matchmaker_lobbies_ready tags: - MatchmakerLobbies @@ -490,19 +481,6 @@ paths: after setting the lobby to closed). Does not shutdown the lobby. 
- - - This endpoint requires a [lobby - token](/docs/general/concepts/token-types#matchmaker-lobby) for - - authentication, or a [development namespace - token](/docs/general/concepts/token-types#namespace-development) - - for mock responses. When running on Rivet servers, you can access the - given lobby token from the - - [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment - variable. operationId: matchmaker_lobbies_setClosed tags: - MatchmakerLobbies @@ -560,21 +538,6 @@ paths: - is_closed /matchmaker/lobbies/state: put: - description: >- - Sets the state JSON of the current lobby. - - - This endpoint requires a [lobby - token](/docs/general/concepts/token-types#matchmaker-lobby) for - - authentication, or a [development namespace - token](/docs/general/concepts/token-types#namespace-development) - - for mock responses. When running on Rivet servers, you can access the - given lobby token from the - - [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment - variable. operationId: matchmaker_lobbies_setState tags: - MatchmakerLobbies @@ -626,21 +589,6 @@ paths: schema: {} /matchmaker/lobbies/{lobby_id}/state: get: - description: >- - Get the state of any lobby. - - - This endpoint requires a [lobby - token](/docs/general/concepts/token-types#matchmaker-lobby) for - - authentication, or a [development namespace - token](/docs/general/concepts/token-types#namespace-development) - - for mock responses. When running on Rivet servers, you can access the - given lobby token from the - - [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment - variable. operationId: matchmaker_lobbies_getState tags: - MatchmakerLobbies @@ -696,29 +644,10 @@ paths: security: *ref_0 /matchmaker/lobbies/find: post: - description: >- + description: |- Finds a lobby based on the given criteria. - If a lobby is not found and `prevent_auto_create_lobby` is `false`, - a new lobby will be created. 
- - - When [tokenless - authentication](/docs/general/concepts/tokenless-authentication/web) is - enabled in - - your game namespace, this endpoint does not require a token to - authenticate. Otherwise, a - - [development namespace - token](/docs/general/concepts/token-types#namespace-development) can be - used - - for mock responses and a [public namespace - token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication. operationId: matchmaker_lobbies_find tags: - MatchmakerLobbies @@ -802,29 +731,10 @@ paths: - game_modes /matchmaker/lobbies/join: post: - description: >- + description: |- Joins a specific lobby. - This request will use the direct player count configured for the - lobby group. - - - When [tokenless - authentication](/docs/general/concepts/tokenless-authentication/web) is - enabled in - - your game namespace, this endpoint does not require a token to - authenticate. Otherwise, a - - [development namespace - token](/docs/general/concepts/token-types#namespace-development) can be - used - - for mock responses and a [public namespace - token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication. operationId: matchmaker_lobbies_join tags: - MatchmakerLobbies @@ -889,25 +799,7 @@ paths: - lobby_id /matchmaker/lobbies/create: post: - description: >- - Creates a custom lobby. - - - When [tokenless - authentication](/docs/general/concepts/tokenless-authentication/web) is - enabled in - - your game namespace, this endpoint does not require a token to - authenticate. Otherwise, a - - [development namespace - token](/docs/general/concepts/token-types#namespace-development) can be - used - - for mock responses and a [public namespace - token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication. + description: Creates a custom lobby. 
operationId: matchmaker_lobbies_create tags: - MatchmakerLobbies @@ -983,25 +875,7 @@ paths: - game_mode /matchmaker/lobbies/list: get: - description: >- - Lists all open lobbies. - - - When [tokenless - authentication](/docs/general/concepts/tokenless-authentication/web) is - enabled in - - your game namespace, this endpoint does not require a token to - authenticate. Otherwise, a - - [development namespace - token](/docs/general/concepts/token-types#namespace-development) can be - used - - for mock responses and a [public namespace - token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication. + description: Lists all open lobbies. operationId: matchmaker_lobbies_list tags: - MatchmakerLobbies @@ -1654,11 +1528,11 @@ components: description: The port number for this lobby. Will be null if using a port range. port_range: $ref: '#/components/schemas/MatchmakerJoinPortRange' - is_tls: - type: boolean description: >- Whether or not this lobby port uses TLS. You cannot mix a non-TLS and TLS ports. + is_tls: + type: boolean required: - hostname - is_tls diff --git a/sdks/runtime/openapi_compat/openapi.yml b/sdks/runtime/openapi_compat/openapi.yml index b3c953aa49..97e238f325 100644 --- a/sdks/runtime/openapi_compat/openapi.yml +++ b/sdks/runtime/openapi_compat/openapi.yml @@ -307,8 +307,6 @@ components: hostname: type: string is_tls: - description: Whether or not this lobby port uses TLS. You cannot mix a non-TLS - and TLS ports. type: boolean port: description: The port number for this lobby. Will be null if using a port @@ -316,6 +314,8 @@ components: type: integer port_range: $ref: '#/components/schemas/MatchmakerJoinPortRange' + description: Whether or not this lobby port uses TLS. You cannot mix a non-TLS + and TLS ports. required: - hostname - is_tls @@ -875,18 +875,7 @@ paths: after setting the lobby to closed). - Does not shutdown the lobby. 
- - - This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) - for - - authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) - - for mock responses. When running on Rivet servers, you can access the given - lobby token from the - - [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable.' + Does not shutdown the lobby.' operationId: matchmaker_lobbies_setClosed parameters: [] requestBody: @@ -944,21 +933,7 @@ paths: - MatchmakerLobbies /matchmaker/lobbies/create: post: - description: 'Creates a custom lobby. - - - When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) - is enabled in - - your game namespace, this endpoint does not require a token to authenticate. - Otherwise, a - - [development namespace token](/docs/general/concepts/token-types#namespace-development) - can be used - - for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication.' + description: Creates a custom lobby. operationId: matchmaker_lobbies_create parameters: [] requestBody: @@ -1038,21 +1013,7 @@ paths: If a lobby is not found and `prevent_auto_create_lobby` is `false`, - a new lobby will be created. - - - When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) - is enabled in - - your game namespace, this endpoint does not require a token to authenticate. - Otherwise, a - - [development namespace token](/docs/general/concepts/token-types#namespace-development) - can be used - - for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication.' + a new lobby will be created.' operationId: matchmaker_lobbies_find parameters: - in: header @@ -1140,21 +1101,7 @@ paths: This request will use the direct player count configured for the - lobby group. 
- - - When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) - is enabled in - - your game namespace, this endpoint does not require a token to authenticate. - Otherwise, a - - [development namespace token](/docs/general/concepts/token-types#namespace-development) - can be used - - for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication.' + lobby group.' operationId: matchmaker_lobbies_join parameters: [] requestBody: @@ -1219,21 +1166,7 @@ paths: - MatchmakerLobbies /matchmaker/lobbies/list: get: - description: 'Lists all open lobbies. - - - When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) - is enabled in - - your game namespace, this endpoint does not require a token to authenticate. - Otherwise, a - - [development namespace token](/docs/general/concepts/token-types#namespace-development) - can be used - - for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - - can be used for general authentication.' + description: Lists all open lobbies. operationId: matchmaker_lobbies_list parameters: - in: query @@ -1289,14 +1222,8 @@ paths: - MatchmakerLobbies /matchmaker/lobbies/ready: post: - description: 'Marks the current lobby as ready to accept connections. Players + description: Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. - - This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) - for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) - for mock responses. When running on Rivet servers, you can access the given - lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) - environment variable.' 
operationId: matchmaker_lobbies_ready parameters: [] responses: @@ -1343,18 +1270,6 @@ paths: - MatchmakerLobbies /matchmaker/lobbies/state: put: - description: 'Sets the state JSON of the current lobby. - - - This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) - for - - authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) - - for mock responses. When running on Rivet servers, you can access the given - lobby token from the - - [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable.' operationId: matchmaker_lobbies_setState parameters: [] requestBody: @@ -1406,18 +1321,6 @@ paths: - MatchmakerLobbies /matchmaker/lobbies/{lobby_id}/state: get: - description: 'Get the state of any lobby. - - - This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) - for - - authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) - - for mock responses. When running on Rivet servers, you can access the given - lobby token from the - - [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable.' operationId: matchmaker_lobbies_getState parameters: - in: path diff --git a/sdks/runtime/rust/docs/MatchmakerJoinPort.md b/sdks/runtime/rust/docs/MatchmakerJoinPort.md index d32c534f6b..48dcaeb9f9 100644 --- a/sdks/runtime/rust/docs/MatchmakerJoinPort.md +++ b/sdks/runtime/rust/docs/MatchmakerJoinPort.md @@ -6,7 +6,7 @@ Name | Type | Description | Notes ------------ | ------------- | ------------- | ------------- **host** | Option<**String**> | The host for the given port. Will be null if using a port range. | [optional] **hostname** | **String** | | -**is_tls** | **bool** | Whether or not this lobby port uses TLS. You cannot mix a non-TLS and TLS ports. | +**is_tls** | **bool** | | **port** | Option<**i32**> | The port number for this lobby. Will be null if using a port range. 
| [optional] **port_range** | Option<[**crate::models::MatchmakerJoinPortRange**](MatchmakerJoinPortRange.md)> | | [optional] diff --git a/sdks/runtime/rust/docs/MatchmakerLobbiesApi.md b/sdks/runtime/rust/docs/MatchmakerLobbiesApi.md index cbecd8d12f..985aac05ba 100644 --- a/sdks/runtime/rust/docs/MatchmakerLobbiesApi.md +++ b/sdks/runtime/rust/docs/MatchmakerLobbiesApi.md @@ -20,7 +20,7 @@ Method | HTTP request | Description > crate::models::MatchmakerCreateLobbyResponse matchmaker_lobbies_create(matchmaker_lobbies_create_request) -Creates a custom lobby. When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. +Creates a custom lobby. ### Parameters @@ -50,7 +50,7 @@ Name | Type | Description | Required | Notes > crate::models::MatchmakerFindLobbyResponse matchmaker_lobbies_find(matchmaker_lobbies_find_request, origin) -Finds a lobby based on the given criteria. If a lobby is not found and `prevent_auto_create_lobby` is `false`, a new lobby will be created. When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. +Finds a lobby based on the given criteria. If a lobby is not found and `prevent_auto_create_lobby` is `false`, a new lobby will be created. 
### Parameters @@ -81,8 +81,6 @@ Name | Type | Description | Required | Notes > serde_json::Value matchmaker_lobbies_get_state(lobby_id) -Get the state of any lobby. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. - ### Parameters @@ -111,7 +109,7 @@ Name | Type | Description | Required | Notes > crate::models::MatchmakerJoinLobbyResponse matchmaker_lobbies_join(matchmaker_lobbies_join_request) -Joins a specific lobby. This request will use the direct player count configured for the lobby group. When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. +Joins a specific lobby. This request will use the direct player count configured for the lobby group. ### Parameters @@ -141,7 +139,7 @@ Name | Type | Description | Required | Notes > crate::models::MatchmakerListLobbiesResponse matchmaker_lobbies_list(include_state) -Lists all open lobbies. When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. 
+Lists all open lobbies. ### Parameters @@ -171,7 +169,7 @@ Name | Type | Description | Required | Notes > matchmaker_lobbies_ready() -Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. +Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. ### Parameters @@ -198,7 +196,7 @@ This endpoint does not need any parameter. > matchmaker_lobbies_set_closed(matchmaker_lobbies_set_closed_request) -If `is_closed` is `true`, the matchmaker will no longer route players to the lobby. Players can still join using the /join endpoint (this can be disabled by the developer by rejecting all new connections after setting the lobby to closed). Does not shutdown the lobby. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. +If `is_closed` is `true`, the matchmaker will no longer route players to the lobby. Players can still join using the /join endpoint (this can be disabled by the developer by rejecting all new connections after setting the lobby to closed). Does not shutdown the lobby. 
### Parameters @@ -228,8 +226,6 @@ Name | Type | Description | Required | Notes > matchmaker_lobbies_set_state(body) -Sets the state JSON of the current lobby. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. - ### Parameters diff --git a/sdks/runtime/rust/src/apis/matchmaker_lobbies_api.rs b/sdks/runtime/rust/src/apis/matchmaker_lobbies_api.rs index 50c2f0f0c6..8b67ff6563 100644 --- a/sdks/runtime/rust/src/apis/matchmaker_lobbies_api.rs +++ b/sdks/runtime/rust/src/apis/matchmaker_lobbies_api.rs @@ -120,7 +120,7 @@ pub enum MatchmakerLobbiesSetStateError { } -/// Creates a custom lobby. When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. +/// Creates a custom lobby. pub async fn matchmaker_lobbies_create(configuration: &configuration::Configuration, matchmaker_lobbies_create_request: crate::models::MatchmakerLobbiesCreateRequest) -> Result> { let local_var_configuration = configuration; @@ -152,7 +152,7 @@ pub async fn matchmaker_lobbies_create(configuration: &configuration::Configurat } } -/// Finds a lobby based on the given criteria. If a lobby is not found and `prevent_auto_create_lobby` is `false`, a new lobby will be created. 
When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. +/// Finds a lobby based on the given criteria. If a lobby is not found and `prevent_auto_create_lobby` is `false`, a new lobby will be created. pub async fn matchmaker_lobbies_find(configuration: &configuration::Configuration, matchmaker_lobbies_find_request: crate::models::MatchmakerLobbiesFindRequest, origin: Option<&str>) -> Result> { let local_var_configuration = configuration; @@ -187,7 +187,6 @@ pub async fn matchmaker_lobbies_find(configuration: &configuration::Configuratio } } -/// Get the state of any lobby. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. pub async fn matchmaker_lobbies_get_state(configuration: &configuration::Configuration, lobby_id: &str) -> Result> { let local_var_configuration = configuration; @@ -218,7 +217,7 @@ pub async fn matchmaker_lobbies_get_state(configuration: &configuration::Configu } } -/// Joins a specific lobby. This request will use the direct player count configured for the lobby group. When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. 
Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. +/// Joins a specific lobby. This request will use the direct player count configured for the lobby group. pub async fn matchmaker_lobbies_join(configuration: &configuration::Configuration, matchmaker_lobbies_join_request: crate::models::MatchmakerLobbiesJoinRequest) -> Result> { let local_var_configuration = configuration; @@ -250,7 +249,7 @@ pub async fn matchmaker_lobbies_join(configuration: &configuration::Configuratio } } -/// Lists all open lobbies. When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in your game namespace, this endpoint does not require a token to authenticate. Otherwise, a [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) can be used for general authentication. +/// Lists all open lobbies. pub async fn matchmaker_lobbies_list(configuration: &configuration::Configuration, include_state: Option) -> Result> { let local_var_configuration = configuration; @@ -284,7 +283,7 @@ pub async fn matchmaker_lobbies_list(configuration: &configuration::Configuratio } } -/// Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. 
+/// Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. pub async fn matchmaker_lobbies_ready(configuration: &configuration::Configuration, ) -> Result<(), Error> { let local_var_configuration = configuration; @@ -315,7 +314,7 @@ pub async fn matchmaker_lobbies_ready(configuration: &configuration::Configurati } } -/// If `is_closed` is `true`, the matchmaker will no longer route players to the lobby. Players can still join using the /join endpoint (this can be disabled by the developer by rejecting all new connections after setting the lobby to closed). Does not shutdown the lobby. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. +/// If `is_closed` is `true`, the matchmaker will no longer route players to the lobby. Players can still join using the /join endpoint (this can be disabled by the developer by rejecting all new connections after setting the lobby to closed). Does not shutdown the lobby. pub async fn matchmaker_lobbies_set_closed(configuration: &configuration::Configuration, matchmaker_lobbies_set_closed_request: crate::models::MatchmakerLobbiesSetClosedRequest) -> Result<(), Error> { let local_var_configuration = configuration; @@ -347,7 +346,6 @@ pub async fn matchmaker_lobbies_set_closed(configuration: &configuration::Config } } -/// Sets the state JSON of the current lobby. This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. 
When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. pub async fn matchmaker_lobbies_set_state(configuration: &configuration::Configuration, body: Option) -> Result<(), Error> { let local_var_configuration = configuration; diff --git a/sdks/runtime/rust/src/models/matchmaker_join_port.rs b/sdks/runtime/rust/src/models/matchmaker_join_port.rs index 86e07f170a..900b3980e7 100644 --- a/sdks/runtime/rust/src/models/matchmaker_join_port.rs +++ b/sdks/runtime/rust/src/models/matchmaker_join_port.rs @@ -18,7 +18,6 @@ pub struct MatchmakerJoinPort { pub host: Option, #[serde(rename = "hostname")] pub hostname: String, - /// Whether or not this lobby port uses TLS. You cannot mix a non-TLS and TLS ports. #[serde(rename = "is_tls")] pub is_tls: bool, /// The port number for this lobby. Will be null if using a port range. diff --git a/sdks/runtime/typescript/.gitignore b/sdks/runtime/typescript/.gitignore deleted file mode 100644 index 169a00539f..0000000000 --- a/sdks/runtime/typescript/.gitignore +++ /dev/null @@ -1,14 +0,0 @@ -node_modules -.DS_Store -*.d.ts -dist/ -types/ - -# yarn berry -.pnp.* -.yarn/* -!.yarn/patches -!.yarn/plugins -!.yarn/releases -!.yarn/sdks -!.yarn/versions diff --git a/sdks/runtime/typescript/.prettierrc.yml b/sdks/runtime/typescript/.prettierrc.yml deleted file mode 100644 index 0c06786bf5..0000000000 --- a/sdks/runtime/typescript/.prettierrc.yml +++ /dev/null @@ -1,2 +0,0 @@ -tabWidth: 4 -printWidth: 120 diff --git a/sdks/runtime/typescript/archive.tgz b/sdks/runtime/typescript/archive.tgz deleted file mode 100644 index 19dd5aed7f..0000000000 --- a/sdks/runtime/typescript/archive.tgz +++ /dev/null @@ -1,3 +0,0 @@ -version https://git-lfs.github.com/spec/v1 -oid sha256:c6a6ebe3724f08a24f0338753a426262cff0af22c5ce0035a2dbc097d9ec40f8 -size 371621 diff --git a/sdks/runtime/typescript/build.js b/sdks/runtime/typescript/build.js deleted file mode 
100644 index ea2d841bbe..0000000000 --- a/sdks/runtime/typescript/build.js +++ /dev/null @@ -1,62 +0,0 @@ -const { build } = require("esbuild"); - -void main(); - -async function main() { - await bundle({ - platform: "node", - target: "node14", - format: "cjs", - outdir: "node", - }); - await bundle({ - platform: "browser", - format: "esm", - outdir: "browser/esm", - }); - await bundle({ - platform: "browser", - format: "cjs", - outdir: "browser/cjs", - }); -} - -async function bundle({ platform, target, format, outdir }) { - await runEsbuild({ - platform, - target, - format, - entryPoint: "./src/index.ts", - outfile: `./dist/${outdir}/index.js`, - }); - await runEsbuild({ - platform, - target, - format, - entryPoint: "./src/core/index.ts", - outfile: `./dist/${outdir}/core.js`, - }); - await runEsbuild({ - platform, - target, - format, - entryPoint: "./src/serialization/index.ts", - outfile: `./dist/${outdir}/serialization.js`, - }); -} - -async function runEsbuild({ platform, target, format, entryPoint, outfile }) { - await build({ - platform, - target, - format, - entryPoints: [entryPoint], - outfile, - bundle: true, - alias: { - // matches up with tsconfig paths - "@rivet-gg/api": "./src", - }, - external: ['node-fetch'], - }).catch(() => process.exit(1)); -} diff --git a/sdks/runtime/typescript/package.json b/sdks/runtime/typescript/package.json deleted file mode 100644 index 37a7d00d3a..0000000000 --- a/sdks/runtime/typescript/package.json +++ /dev/null @@ -1,61 +0,0 @@ -{ - "name": "@rivet-gg/api", - "version": "0.0.1", - "private": false, - "repository": "https://github.com/rivet-gg/rivet/tree/main/sdks/runtime/typescript", - "publishConfig": { - "registry": "https://registry.npmjs.org/" - }, - "files": [ - "dist", - "types", - "core.d.ts", - "serialization.d.ts" - ], - "exports": { - ".": { - "node": "./dist/node/index.js", - "import": "./dist/browser/esm/index.js", - "require": "./dist/browser/cjs/index.js", - "default": "./dist/browser/cjs/index.js", - 
"types": "./types/index.d.ts" - }, - "./core": { - "node": "./dist/node/core.js", - "import": "./dist/browser/esm/core.js", - "require": "./dist/browser/cjs/core.js", - "default": "./dist/browser/cjs/core.js", - "types": "./types/core/index.d.ts" - }, - "./serialization": { - "node": "./dist/node/serialization.js", - "import": "./dist/browser/esm/serialization.js", - "require": "./dist/browser/cjs/serialization.js", - "default": "./dist/browser/cjs/serialization.js", - "types": "./types/serialization/index.d.ts" - } - }, - "types": "./types/index.d.ts", - "scripts": { - "format": "prettier --write 'src/**/*.ts'", - "compile": "tsc", - "bundle": "node build.js", - "build": "yarn compile && yarn bundle", - "prepack": "yarn run build" - }, - "dependencies": { - "form-data": "^4.0.0", - "js-base64": "^3.7.5", - "node-fetch": "2", - "qs": "^6.11.2", - "url-join": "^5.0.0" - }, - "devDependencies": { - "@types/node": "17.0.33", - "@types/qs": "6.9.8", - "@types/url-join": "4.0.1", - "esbuild": "^0.19.11", - "prettier": "2.7.1", - "typescript": "4.6.4" - } -} diff --git a/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/Config.ts b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/Config.ts new file mode 100644 index 0000000000..ad2a83861d --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/Config.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../.."; + +/** + * Methods to verify a captcha + */ +export interface Config { + hcaptcha?: Rivet.captcha.ConfigHcaptcha; + turnstile?: Rivet.captcha.ConfigTurnstile; +} diff --git a/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/ConfigHcaptcha.ts b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/ConfigHcaptcha.ts new file mode 100644 index 0000000000..d0b57ca2f2 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/ConfigHcaptcha.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Captcha configuration. + */ +export interface ConfigHcaptcha { + clientResponse: string; +} diff --git a/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/ConfigTurnstile.ts b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/ConfigTurnstile.ts new file mode 100644 index 0000000000..2a55d75c5d --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/ConfigTurnstile.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Captcha configuration. 
+ */ +export interface ConfigTurnstile { + clientResponse: string; +} diff --git a/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/index.ts b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/index.ts new file mode 100644 index 0000000000..e3d7f9330e --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/captcha/resources/config/types/index.ts @@ -0,0 +1,3 @@ +export * from "./Config"; +export * from "./ConfigHcaptcha"; +export * from "./ConfigTurnstile"; diff --git a/sdks/runtime/typescript/src/api/resources/common/types/DisplayName.ts b/sdks/runtime/typescript/src/api/resources/common/types/DisplayName.ts new file mode 100644 index 0000000000..213d3d6dee --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/common/types/DisplayName.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type DisplayName = string; diff --git a/sdks/runtime/typescript/src/api/resources/common/types/ErrorBody.ts b/sdks/runtime/typescript/src/api/resources/common/types/ErrorBody.ts new file mode 100644 index 0000000000..8e7c2514d4 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/common/types/ErrorBody.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../.."; + +export interface ErrorBody { + code: string; + message: string; + documentation?: string; + metadata?: Rivet.ErrorMetadata | undefined; +} diff --git a/sdks/runtime/typescript/src/api/resources/common/types/ErrorMetadata.ts b/sdks/runtime/typescript/src/api/resources/common/types/ErrorMetadata.ts new file mode 100644 index 0000000000..d236d1f19d --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/common/types/ErrorMetadata.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Unstructured metadata relating to an error. Must be manually parsed. 
+ */ +export type ErrorMetadata = unknown; diff --git a/sdks/runtime/typescript/src/api/resources/common/types/Identifier.ts b/sdks/runtime/typescript/src/api/resources/common/types/Identifier.ts new file mode 100644 index 0000000000..616b2523d0 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/common/types/Identifier.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * A human readable short identifier used to references resources. Different than a `uuid` because this is intended to be human readable. Different than `DisplayName` because this should not include special characters and be short. + */ +export type Identifier = string; diff --git a/sdks/runtime/typescript/src/api/resources/common/types/Jwt.ts b/sdks/runtime/typescript/src/api/resources/common/types/Jwt.ts new file mode 100644 index 0000000000..f2ef4c535d --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/common/types/Jwt.ts @@ -0,0 +1,8 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Documentation at https://jwt.io/ + */ +export type Jwt = string; diff --git a/sdks/runtime/typescript/src/api/resources/common/types/WatchResponse.ts b/sdks/runtime/typescript/src/api/resources/common/types/WatchResponse.ts new file mode 100644 index 0000000000..9b74a009b3 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/common/types/WatchResponse.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Provided by watchable endpoints used in blocking loops. + */ +export interface WatchResponse { + /** + * Index indicating the version of the data responded. + * Pass this to `WatchQuery` to block and wait for the next response. 
+ */ + index: string; +} diff --git a/sdks/runtime/typescript/src/api/resources/common/types/index.ts b/sdks/runtime/typescript/src/api/resources/common/types/index.ts new file mode 100644 index 0000000000..3731e6ed3a --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/common/types/index.ts @@ -0,0 +1,6 @@ +export * from "./Identifier"; +export * from "./Jwt"; +export * from "./WatchResponse"; +export * from "./DisplayName"; +export * from "./ErrorMetadata"; +export * from "./ErrorBody"; diff --git a/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/Coord.ts b/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/Coord.ts new file mode 100644 index 0000000000..30efaf6eb9 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/Coord.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Geographical coordinates for a location on Planet Earth. + */ +export interface Coord { + latitude: number; + longitude: number; +} diff --git a/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/Distance.ts b/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/Distance.ts new file mode 100644 index 0000000000..2775c9cc42 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/Distance.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Distance available in multiple units. 
+ */ +export interface Distance { + kilometers: number; + miles: number; +} diff --git a/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/index.ts b/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/index.ts new file mode 100644 index 0000000000..9bedc827df --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/geo/resources/common/types/index.ts @@ -0,0 +1,2 @@ +export * from "./Coord"; +export * from "./Distance"; diff --git a/sdks/runtime/typescript/src/api/resources/kv/client/Client.ts b/sdks/runtime/typescript/src/api/resources/kv/client/Client.ts index 2b8d581aae..75096067ba 100644 --- a/sdks/runtime/typescript/src/api/resources/kv/client/Client.ts +++ b/sdks/runtime/typescript/src/api/resources/kv/client/Client.ts @@ -57,6 +57,7 @@ export class Kv { method: "GET", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", queryParameters: _queryParams, @@ -176,6 +177,7 @@ export class Kv { method: "PUT", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: await serializers.kv.PutRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), @@ -296,6 +298,7 @@ export class Kv { method: "DELETE", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", queryParameters: _queryParams, @@ -416,6 +419,7 @@ export class Kv { method: "GET", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", queryParameters: _queryParams, @@ -554,6 +558,7 @@ export class Kv { method: "GET", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", queryParameters: _queryParams, @@ -673,6 +678,7 @@ export class Kv { method: "PUT", 
headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: await serializers.kv.PutBatchRequest.jsonOrThrow(request, { unrecognizedObjectKeys: "strip" }), @@ -798,6 +804,7 @@ export class Kv { method: "DELETE", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", queryParameters: _queryParams, diff --git a/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Directory.ts b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Directory.ts new file mode 100644 index 0000000000..d556fdf0dd --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Directory.ts @@ -0,0 +1,5 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type Directory = string; diff --git a/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Entry.ts b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Entry.ts new file mode 100644 index 0000000000..9a8b949d2b --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Entry.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +/** + * A key-value entry. + */ +export interface Entry { + key: Rivet.kv.Key; + value?: Rivet.kv.Value; + deleted?: boolean; +} diff --git a/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Key.ts b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Key.ts new file mode 100644 index 0000000000..362cc5df58 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Key.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * A string representing a key in the key-value database. 
+ * Maximum length of 512 characters. + * _Recommended Key Path Format_ + * Key path components are split by a slash (e.g. `a/b/c` has the path components `["a", "b", "c"]`). Slashes can be escaped by using a backslash (e.g. `a/b\/c/d` has the path components `["a", "b/c", "d"]`). + * This format is not enforced by Rivet, but the tools built around Rivet KV work better if this format is used. + */ +export type Key = string; diff --git a/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/PutEntry.ts b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/PutEntry.ts new file mode 100644 index 0000000000..147aa69584 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/PutEntry.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +/** + * A new entry to insert into the key-value database. + */ +export interface PutEntry { + key: Rivet.kv.Key; + value?: Rivet.kv.Value; +} diff --git a/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Value.ts b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Value.ts new file mode 100644 index 0000000000..4f1f92d335 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/Value.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * A JSON object stored in the KV database. + * A `null` value indicates the entry is deleted. + * Maximum length of 262,144 bytes when encoded. 
+ */ +export type Value = unknown; diff --git a/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/index.ts b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/index.ts new file mode 100644 index 0000000000..f88e3ccae9 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/resources/common/types/index.ts @@ -0,0 +1,5 @@ +export * from "./Key"; +export * from "./Directory"; +export * from "./Value"; +export * from "./Entry"; +export * from "./PutEntry"; diff --git a/sdks/runtime/typescript/src/api/resources/kv/types/GetBatchResponse.ts b/sdks/runtime/typescript/src/api/resources/kv/types/GetBatchResponse.ts new file mode 100644 index 0000000000..33d7ab924e --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/types/GetBatchResponse.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../.."; + +export interface GetBatchResponse { + entries: Rivet.kv.Entry[]; + watch: Rivet.WatchResponse; +} diff --git a/sdks/runtime/typescript/src/api/resources/kv/types/GetResponse.ts b/sdks/runtime/typescript/src/api/resources/kv/types/GetResponse.ts new file mode 100644 index 0000000000..4042b12f97 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/types/GetResponse.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../.."; + +export interface GetResponse { + value?: Rivet.kv.Value; + /** Whether or not the entry has been deleted. Only set when watching this endpoint. 
*/ + deleted?: boolean; + watch: Rivet.WatchResponse; +} diff --git a/sdks/runtime/typescript/src/api/resources/kv/types/ListResponse.ts b/sdks/runtime/typescript/src/api/resources/kv/types/ListResponse.ts new file mode 100644 index 0000000000..b8e4f8b637 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/types/ListResponse.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../.."; + +export interface ListResponse { + entries: Rivet.kv.Entry[]; +} diff --git a/sdks/runtime/typescript/src/api/resources/kv/types/PutBatchRequest.ts b/sdks/runtime/typescript/src/api/resources/kv/types/PutBatchRequest.ts new file mode 100644 index 0000000000..d67cf356d0 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/types/PutBatchRequest.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../.."; + +export interface PutBatchRequest { + namespaceId?: string; + entries: Rivet.kv.PutEntry[]; +} diff --git a/sdks/runtime/typescript/src/api/resources/kv/types/PutRequest.ts b/sdks/runtime/typescript/src/api/resources/kv/types/PutRequest.ts new file mode 100644 index 0000000000..bfe06966cb --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/types/PutRequest.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../.."; + +export interface PutRequest { + namespaceId?: string; + key: Rivet.kv.Key; + value?: Rivet.kv.Value; +} diff --git a/sdks/runtime/typescript/src/api/resources/kv/types/index.ts b/sdks/runtime/typescript/src/api/resources/kv/types/index.ts new file mode 100644 index 0000000000..e4596df6de --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/kv/types/index.ts @@ -0,0 +1,5 @@ +export * from "./GetResponse"; +export * from "./PutRequest"; +export * from "./ListResponse"; +export * from "./GetBatchResponse"; +export * from "./PutBatchRequest"; diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/CustomLobbyPublicity.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/CustomLobbyPublicity.ts new file mode 100644 index 0000000000..bc4b6f7af0 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/CustomLobbyPublicity.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +export type CustomLobbyPublicity = "public" | "private"; + +export const CustomLobbyPublicity = { + Public: "public", + Private: "private", +} as const; diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/GameModeInfo.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/GameModeInfo.ts new file mode 100644 index 0000000000..f28cd48373 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/GameModeInfo.ts @@ -0,0 +1,12 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +/** + * A game mode that the player can join. 
+ */ +export interface GameModeInfo { + gameModeId: Rivet.Identifier; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinLobby.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinLobby.ts new file mode 100644 index 0000000000..eae328d5af --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinLobby.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +/** + * A matchmaker lobby. + */ +export interface JoinLobby { + lobbyId: string; + region: Rivet.matchmaker.JoinRegion; + /** **Deprecated** */ + ports: Record; + /** **Deprecated** */ + player: Rivet.matchmaker.JoinPlayer; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPlayer.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPlayer.ts new file mode 100644 index 0000000000..07797eaffa --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPlayer.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +/** + * A matchmaker lobby player. + */ +export interface JoinPlayer { + /** Pass this token through the socket to the lobby server. The lobby server will validate this token with `PlayerConnected.player_token` */ + token: Rivet.Jwt; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPort.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPort.ts new file mode 100644 index 0000000000..281ab8f1d3 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPort.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../.."; + +export interface JoinPort { + /** The host for the given port. Will be null if using a port range. */ + host?: string; + hostname: string; + /** The port number for this lobby. Will be null if using a port range. */ + port?: number; + /** Whether or not this lobby port uses TLS. You cannot mix a non-TLS and TLS ports. */ + portRange?: Rivet.matchmaker.JoinPortRange; + isTls: boolean; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPortRange.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPortRange.ts new file mode 100644 index 0000000000..ecb7e81ba2 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinPortRange.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * Inclusive range of ports that can be connected to. + */ +export interface JoinPortRange { + /** Minimum port that can be connected to. Inclusive range. */ + min: number; + /** Maximum port that can be connected to. Inclusive range. */ + max: number; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinRegion.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinRegion.ts new file mode 100644 index 0000000000..2995a7a5da --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/JoinRegion.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +/** + * A matchmaker lobby region. 
+ */ +export interface JoinRegion { + regionId: Rivet.Identifier; + displayName: Rivet.DisplayName; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/LobbyInfo.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/LobbyInfo.ts new file mode 100644 index 0000000000..31018c92d8 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/LobbyInfo.ts @@ -0,0 +1,17 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +/** + * A public lobby in the lobby list. + */ +export interface LobbyInfo { + regionId: string; + gameModeId: string; + lobbyId: string; + maxPlayersNormal: number; + maxPlayersDirect: number; + maxPlayersParty: number; + totalPlayerCount: number; + state?: unknown; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/RegionInfo.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/RegionInfo.ts new file mode 100644 index 0000000000..17451ad3ab --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/RegionInfo.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +/** + * A region that the player can connect to. 
+ */ +export interface RegionInfo { + regionId: Rivet.Identifier; + providerDisplayName: string; + regionDisplayName: string; + datacenterCoord: Rivet.geo.Coord; + datacenterDistanceFromClient: Rivet.geo.Distance; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/index.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/index.ts new file mode 100644 index 0000000000..5519ce0ef7 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/common/types/index.ts @@ -0,0 +1,9 @@ +export * from "./LobbyInfo"; +export * from "./GameModeInfo"; +export * from "./RegionInfo"; +export * from "./JoinLobby"; +export * from "./JoinRegion"; +export * from "./JoinPort"; +export * from "./JoinPortRange"; +export * from "./JoinPlayer"; +export * from "./CustomLobbyPublicity"; diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/client/Client.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/client/Client.ts index 728b7aa4e1..bf27db7338 100644 --- a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/client/Client.ts +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/client/Client.ts @@ -27,7 +27,6 @@ export class Lobbies { /** * Marks the current lobby as ready to accept connections. Players will not be able to connect to this lobby until the lobby is flagged as ready. - * This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) for mock responses. When running on Rivet servers, you can access the given lobby token from the [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. 
* @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -44,6 +43,7 @@ export class Lobbies { method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, @@ -143,11 +143,6 @@ export class Lobbies { * join using the /join endpoint (this can be disabled by the developer by rejecting all new connections * after setting the lobby to closed). * Does not shutdown the lobby. - * - * This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for - * authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) - * for mock responses. When running on Rivet servers, you can access the given lobby token from the - * [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. * @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -167,6 +162,7 @@ export class Lobbies { method: "PUT", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: await serializers.matchmaker.SetLobbyClosedRequest.jsonOrThrow(request, { @@ -265,12 +261,6 @@ export class Lobbies { } /** - * Sets the state JSON of the current lobby. - * - * This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for - * authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) - * for mock responses. When running on Rivet servers, you can access the given lobby token from the - * [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. 
* @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -287,6 +277,7 @@ export class Lobbies { method: "PUT", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: @@ -388,12 +379,6 @@ export class Lobbies { } /** - * Get the state of any lobby. - * - * This endpoint requires a [lobby token](/docs/general/concepts/token-types#matchmaker-lobby) for - * authentication, or a [development namespace token](/docs/general/concepts/token-types#namespace-development) - * for mock responses. When running on Rivet servers, you can access the given lobby token from the - * [`RIVET_TOKEN`](/docs/matchmaker/concepts/lobby-env) environment variable. * @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -410,6 +395,7 @@ export class Lobbies { method: "GET", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, @@ -514,12 +500,6 @@ export class Lobbies { * Finds a lobby based on the given criteria. * If a lobby is not found and `prevent_auto_create_lobby` is `false`, * a new lobby will be created. - * - * When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in - * your game namespace, this endpoint does not require a token to authenticate. Otherwise, a - * [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used - * for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - * can be used for general authentication. 
* @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -540,6 +520,7 @@ export class Lobbies { method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", origin: origin != null ? origin : undefined, }, contentType: "application/json", @@ -646,12 +627,6 @@ export class Lobbies { * Joins a specific lobby. * This request will use the direct player count configured for the * lobby group. - * - * When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in - * your game namespace, this endpoint does not require a token to authenticate. Otherwise, a - * [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used - * for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - * can be used for general authentication. * @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -671,6 +646,7 @@ export class Lobbies { method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: await serializers.matchmaker.JoinLobbyRequest.jsonOrThrow(request, { @@ -776,12 +752,6 @@ export class Lobbies { /** * Creates a custom lobby. - * - * When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in - * your game namespace, this endpoint does not require a token to authenticate. Otherwise, a - * [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used - * for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - * can be used for general authentication. 
* @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -801,6 +771,7 @@ export class Lobbies { method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: await serializers.matchmaker.CreateLobbyRequest.jsonOrThrow(request, { @@ -906,12 +877,6 @@ export class Lobbies { /** * Lists all open lobbies. - * - * When [tokenless authentication](/docs/general/concepts/tokenless-authentication/web) is enabled in - * your game namespace, this endpoint does not require a token to authenticate. Otherwise, a - * [development namespace token](/docs/general/concepts/token-types#namespace-development) can be used - * for mock responses and a [public namespace token](/docs/general/concepts/token-types#namespace-public) - * can be used for general authentication. * @throws {@link Rivet.InternalError} * @throws {@link Rivet.RateLimitError} * @throws {@link Rivet.ForbiddenError} @@ -937,6 +902,7 @@ export class Lobbies { method: "GET", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", queryParameters: _queryParams, diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/CreateLobbyResponse.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/CreateLobbyResponse.ts new file mode 100644 index 0000000000..b28272cde9 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/CreateLobbyResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../.."; + +export interface CreateLobbyResponse { + lobby: Rivet.matchmaker.JoinLobby; + ports: Record; + player: Rivet.matchmaker.JoinPlayer; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/FindLobbyResponse.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/FindLobbyResponse.ts new file mode 100644 index 0000000000..67f8f31f33 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/FindLobbyResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +export interface FindLobbyResponse { + lobby: Rivet.matchmaker.JoinLobby; + ports: Record; + player: Rivet.matchmaker.JoinPlayer; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/JoinLobbyResponse.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/JoinLobbyResponse.ts new file mode 100644 index 0000000000..526d94df42 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/JoinLobbyResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +export interface JoinLobbyResponse { + lobby: Rivet.matchmaker.JoinLobby; + ports: Record; + player: Rivet.matchmaker.JoinPlayer; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/ListLobbiesResponse.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/ListLobbiesResponse.ts new file mode 100644 index 0000000000..f0fe668fd0 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/ListLobbiesResponse.ts @@ -0,0 +1,11 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../.."; + +export interface ListLobbiesResponse { + gameModes: Rivet.matchmaker.GameModeInfo[]; + regions: Rivet.matchmaker.RegionInfo[]; + lobbies: Rivet.matchmaker.LobbyInfo[]; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/index.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/index.ts new file mode 100644 index 0000000000..13491dd556 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/lobbies/types/index.ts @@ -0,0 +1,4 @@ +export * from "./FindLobbyResponse"; +export * from "./JoinLobbyResponse"; +export * from "./CreateLobbyResponse"; +export * from "./ListLobbiesResponse"; diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/client/Client.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/client/Client.ts index 7d32e0edd6..613fd16304 100644 --- a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/client/Client.ts +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/client/Client.ts @@ -80,6 +80,7 @@ export class Players { method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: await serializers.matchmaker.PlayerConnectedRequest.jsonOrThrow(request, { @@ -198,6 +199,7 @@ export class Players { method: "POST", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", body: await serializers.matchmaker.PlayerDisconnectedRequest.jsonOrThrow(request, { @@ -315,6 +317,7 @@ export class Players { method: "GET", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", timeoutMs: requestOptions?.timeoutInSeconds != null ? 
requestOptions.timeoutInSeconds * 1000 : 180000, diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/GameModeStatistics.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/GameModeStatistics.ts new file mode 100644 index 0000000000..432caa5ae6 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/GameModeStatistics.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +export interface GameModeStatistics { + playerCount: number; + regions: Record; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/GetStatisticsResponse.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/GetStatisticsResponse.ts new file mode 100644 index 0000000000..d67b3dbab2 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/GetStatisticsResponse.ts @@ -0,0 +1,10 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as Rivet from "../../../../.."; + +export interface GetStatisticsResponse { + playerCount: number; + gameModes: Record; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/RegionStatistics.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/RegionStatistics.ts new file mode 100644 index 0000000000..bf547e60c4 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/RegionStatistics.ts @@ -0,0 +1,7 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +export interface RegionStatistics { + playerCount: number; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/index.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/index.ts new file mode 100644 index 0000000000..e6a2666580 --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/players/types/index.ts @@ -0,0 +1,3 @@ +export * from "./GetStatisticsResponse"; +export * from "./GameModeStatistics"; +export * from "./RegionStatistics"; diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/client/Client.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/client/Client.ts index aa6e05cbaa..da34c7981a 100644 --- a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/client/Client.ts +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/client/Client.ts @@ -45,6 +45,7 @@ export class Regions { method: "GET", headers: { Authorization: await this._getAuthorizationHeader(), + "X-Fern-Language": "JavaScript", }, contentType: "application/json", timeoutMs: requestOptions?.timeoutInSeconds != null ? requestOptions.timeoutInSeconds * 1000 : 180000, diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/types/ListRegionsResponse.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/types/ListRegionsResponse.ts new file mode 100644 index 0000000000..655fa4f9dd --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/types/ListRegionsResponse.ts @@ -0,0 +1,9 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as Rivet from "../../../../.."; + +export interface ListRegionsResponse { + regions: Rivet.matchmaker.RegionInfo[]; +} diff --git a/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/types/index.ts b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/types/index.ts new file mode 100644 index 0000000000..dd60cc81be --- /dev/null +++ b/sdks/runtime/typescript/src/api/resources/matchmaker/resources/regions/types/index.ts @@ -0,0 +1 @@ +export * from "./ListRegionsResponse"; diff --git a/sdks/runtime/typescript/src/core/fetcher/Fetcher.ts b/sdks/runtime/typescript/src/core/fetcher/Fetcher.ts index 19de5d475e..e25819afbd 100644 --- a/sdks/runtime/typescript/src/core/fetcher/Fetcher.ts +++ b/sdks/runtime/typescript/src/core/fetcher/Fetcher.ts @@ -2,6 +2,10 @@ import { default as FormData } from "form-data"; import qs from "qs"; import { APIResponse } from "./APIResponse"; +if (typeof window === "undefined") { + global.fetch = require("node-fetch"); +} + export type FetchFunction = (args: Fetcher.Args) => Promise>; export declare namespace Fetcher { @@ -73,20 +77,18 @@ async function fetcherImpl(args: Fetcher.Args): Promise => { const controller = new AbortController(); let abortId = undefined; if (args.timeoutMs != null) { abortId = setTimeout(() => controller.abort(), args.timeoutMs); } - const response = await fetchFn(url, { + const response = await fetch(url, { method: args.method, headers, body, signal: controller.signal, - credentials: args.withCredentials ? "include" : undefined, + credentials: args.withCredentials ? 
"same-origin" : undefined, }); if (abortId != null) { clearTimeout(abortId); @@ -117,21 +119,18 @@ async function fetcherImpl(args: Fetcher.Args): Promise 0) { - try { - body = JSON.parse(text); - } catch (err) { - return { - ok: false, - error: { - reason: "non-json", - statusCode: response.status, - rawBody: text, - }, - }; - } + } else if (response.body != null) { + try { + body = await response.json(); + } catch (err) { + return { + ok: false, + error: { + reason: "non-json", + statusCode: response.status, + rawBody: await response.text(), + }, + }; } } diff --git a/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/Config.ts b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/Config.ts new file mode 100644 index 0000000000..8497e68361 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/Config.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const Config: core.serialization.ObjectSchema = + core.serialization.object({ + hcaptcha: core.serialization + .lazyObject(async () => (await import("../../../../..")).captcha.ConfigHcaptcha) + .optional(), + turnstile: core.serialization + .lazyObject(async () => (await import("../../../../..")).captcha.ConfigTurnstile) + .optional(), + }); + +export declare namespace Config { + interface Raw { + hcaptcha?: serializers.captcha.ConfigHcaptcha.Raw | null; + turnstile?: serializers.captcha.ConfigTurnstile.Raw | null; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/ConfigHcaptcha.ts b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/ConfigHcaptcha.ts new file mode 100644 index 0000000000..7c60ba5bfa --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/ConfigHcaptcha.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const ConfigHcaptcha: core.serialization.ObjectSchema< + serializers.captcha.ConfigHcaptcha.Raw, + Rivet.captcha.ConfigHcaptcha +> = core.serialization.object({ + clientResponse: core.serialization.property("client_response", core.serialization.string()), +}); + +export declare namespace ConfigHcaptcha { + interface Raw { + client_response: string; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/ConfigTurnstile.ts b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/ConfigTurnstile.ts new file mode 100644 index 0000000000..b7614ac985 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/ConfigTurnstile.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const ConfigTurnstile: core.serialization.ObjectSchema< + serializers.captcha.ConfigTurnstile.Raw, + Rivet.captcha.ConfigTurnstile +> = core.serialization.object({ + clientResponse: core.serialization.property("client_response", core.serialization.string()), +}); + +export declare namespace ConfigTurnstile { + interface Raw { + client_response: string; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/index.ts new file mode 100644 index 0000000000..e3d7f9330e --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/captcha/resources/config/types/index.ts @@ -0,0 +1,3 @@ +export * from "./Config"; +export * from "./ConfigHcaptcha"; +export * from "./ConfigTurnstile"; 
diff --git a/sdks/runtime/typescript/src/serialization/resources/common/types/DisplayName.ts b/sdks/runtime/typescript/src/serialization/resources/common/types/DisplayName.ts new file mode 100644 index 0000000000..f315a99305 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/common/types/DisplayName.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const DisplayName: core.serialization.Schema = + core.serialization.string(); + +export declare namespace DisplayName { + type Raw = string; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/common/types/ErrorBody.ts b/sdks/runtime/typescript/src/serialization/resources/common/types/ErrorBody.ts new file mode 100644 index 0000000000..522f9d39b0 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/common/types/ErrorBody.ts @@ -0,0 +1,24 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const ErrorBody: core.serialization.ObjectSchema = + core.serialization.object({ + code: core.serialization.string(), + message: core.serialization.string(), + documentation: core.serialization.string().optional(), + metadata: core.serialization.lazy(async () => (await import("../../..")).ErrorMetadata).optional(), + }); + +export declare namespace ErrorBody { + interface Raw { + code: string; + message: string; + documentation?: string | null; + metadata?: (serializers.ErrorMetadata.Raw | undefined) | null; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/common/types/ErrorMetadata.ts b/sdks/runtime/typescript/src/serialization/resources/common/types/ErrorMetadata.ts new file mode 100644 index 0000000000..c060927b7b --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/common/types/ErrorMetadata.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const ErrorMetadata: core.serialization.Schema = + core.serialization.unknown(); + +export declare namespace ErrorMetadata { + type Raw = unknown; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/common/types/Identifier.ts b/sdks/runtime/typescript/src/serialization/resources/common/types/Identifier.ts new file mode 100644 index 0000000000..043a639c46 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/common/types/Identifier.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const Identifier: core.serialization.Schema = + core.serialization.string(); + +export declare namespace Identifier { + type Raw = string; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/common/types/Jwt.ts b/sdks/runtime/typescript/src/serialization/resources/common/types/Jwt.ts new file mode 100644 index 0000000000..936f045bfa --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/common/types/Jwt.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const Jwt: core.serialization.Schema = core.serialization.string(); + +export declare namespace Jwt { + type Raw = string; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/common/types/WatchResponse.ts b/sdks/runtime/typescript/src/serialization/resources/common/types/WatchResponse.ts new file mode 100644 index 0000000000..c19fb79c53 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/common/types/WatchResponse.ts @@ -0,0 +1,18 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const WatchResponse: core.serialization.ObjectSchema = + core.serialization.object({ + index: core.serialization.string(), + }); + +export declare namespace WatchResponse { + interface Raw { + index: string; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/common/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/common/types/index.ts new file mode 100644 index 0000000000..3731e6ed3a --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/common/types/index.ts @@ -0,0 +1,6 @@ +export * from "./Identifier"; +export * from "./Jwt"; +export * from "./WatchResponse"; +export * from "./DisplayName"; +export * from "./ErrorMetadata"; +export * from "./ErrorBody"; diff --git a/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/Coord.ts b/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/Coord.ts new file mode 100644 index 0000000000..c62dbff1b5 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/Coord.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const Coord: core.serialization.ObjectSchema = + core.serialization.object({ + latitude: core.serialization.number(), + longitude: core.serialization.number(), + }); + +export declare namespace Coord { + interface Raw { + latitude: number; + longitude: number; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/Distance.ts b/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/Distance.ts new file mode 100644 index 0000000000..1f842b82a7 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/Distance.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const Distance: core.serialization.ObjectSchema = + core.serialization.object({ + kilometers: core.serialization.number(), + miles: core.serialization.number(), + }); + +export declare namespace Distance { + interface Raw { + kilometers: number; + miles: number; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/index.ts new file mode 100644 index 0000000000..9bedc827df --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/geo/resources/common/types/index.ts @@ -0,0 +1,2 @@ +export * from "./Coord"; +export * from "./Distance"; diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Directory.ts b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Directory.ts new file mode 100644 index 0000000000..551cfbd892 --- /dev/null +++ 
b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Directory.ts @@ -0,0 +1,14 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const Directory: core.serialization.Schema = + core.serialization.string(); + +export declare namespace Directory { + type Raw = string; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Entry.ts b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Entry.ts new file mode 100644 index 0000000000..d621b55722 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Entry.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const Entry: core.serialization.ObjectSchema = + core.serialization.object({ + key: core.serialization.lazy(async () => (await import("../../../../..")).kv.Key), + value: core.serialization.lazy(async () => (await import("../../../../..")).kv.Value), + deleted: core.serialization.boolean().optional(), + }); + +export declare namespace Entry { + interface Raw { + key: serializers.kv.Key.Raw; + value?: serializers.kv.Value.Raw; + deleted?: boolean | null; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Key.ts b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Key.ts new file mode 100644 index 0000000000..f1c6b42a71 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Key.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const Key: core.serialization.Schema = core.serialization.string(); + +export declare namespace Key { + type Raw = string; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/PutEntry.ts b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/PutEntry.ts new file mode 100644 index 0000000000..a309e573c0 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/PutEntry.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const PutEntry: core.serialization.ObjectSchema = + core.serialization.object({ + key: core.serialization.lazy(async () => (await import("../../../../..")).kv.Key), + value: core.serialization.lazy(async () => (await import("../../../../..")).kv.Value), + }); + +export declare namespace PutEntry { + interface Raw { + key: serializers.kv.Key.Raw; + value?: serializers.kv.Value.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Value.ts b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Value.ts new file mode 100644 index 0000000000..2e5d3b8267 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/Value.ts @@ -0,0 +1,13 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const Value: core.serialization.Schema = core.serialization.unknown(); + +export declare namespace Value { + type Raw = unknown; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/index.ts new file mode 100644 index 0000000000..f88e3ccae9 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/resources/common/types/index.ts @@ -0,0 +1,5 @@ +export * from "./Key"; +export * from "./Directory"; +export * from "./Value"; +export * from "./Entry"; +export * from "./PutEntry"; diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/types/GetBatchResponse.ts b/sdks/runtime/typescript/src/serialization/resources/kv/types/GetBatchResponse.ts new file mode 100644 index 0000000000..59e09e1c57 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/types/GetBatchResponse.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const GetBatchResponse: core.serialization.ObjectSchema< + serializers.kv.GetBatchResponse.Raw, + Rivet.kv.GetBatchResponse +> = core.serialization.object({ + entries: core.serialization.list(core.serialization.lazyObject(async () => (await import("../../..")).kv.Entry)), + watch: core.serialization.lazyObject(async () => (await import("../../..")).WatchResponse), +}); + +export declare namespace GetBatchResponse { + interface Raw { + entries: serializers.kv.Entry.Raw[]; + watch: serializers.WatchResponse.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/types/GetResponse.ts b/sdks/runtime/typescript/src/serialization/resources/kv/types/GetResponse.ts new file mode 100644 index 0000000000..4ca3a825a6 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/types/GetResponse.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const GetResponse: core.serialization.ObjectSchema = + core.serialization.object({ + value: core.serialization.lazy(async () => (await import("../../..")).kv.Value), + deleted: core.serialization.boolean().optional(), + watch: core.serialization.lazyObject(async () => (await import("../../..")).WatchResponse), + }); + +export declare namespace GetResponse { + interface Raw { + value?: serializers.kv.Value.Raw; + deleted?: boolean | null; + watch: serializers.WatchResponse.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/types/ListResponse.ts b/sdks/runtime/typescript/src/serialization/resources/kv/types/ListResponse.ts new file mode 100644 index 0000000000..ddf5176c0c --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/types/ListResponse.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const ListResponse: core.serialization.ObjectSchema = + core.serialization.object({ + entries: core.serialization.list( + core.serialization.lazyObject(async () => (await import("../../..")).kv.Entry) + ), + }); + +export declare namespace ListResponse { + interface Raw { + entries: serializers.kv.Entry.Raw[]; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/types/PutBatchRequest.ts b/sdks/runtime/typescript/src/serialization/resources/kv/types/PutBatchRequest.ts new file mode 100644 index 0000000000..e98c4d5acc --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/types/PutBatchRequest.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const PutBatchRequest: core.serialization.ObjectSchema< + serializers.kv.PutBatchRequest.Raw, + Rivet.kv.PutBatchRequest +> = core.serialization.object({ + namespaceId: core.serialization.property("namespace_id", core.serialization.string().optional()), + entries: core.serialization.list(core.serialization.lazyObject(async () => (await import("../../..")).kv.PutEntry)), +}); + +export declare namespace PutBatchRequest { + interface Raw { + namespace_id?: string | null; + entries: serializers.kv.PutEntry.Raw[]; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/types/PutRequest.ts b/sdks/runtime/typescript/src/serialization/resources/kv/types/PutRequest.ts new file mode 100644 index 0000000000..578bfed0b0 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/types/PutRequest.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../.."; +import * as Rivet from "../../../../api"; +import * as core from "../../../../core"; + +export const PutRequest: core.serialization.ObjectSchema = + core.serialization.object({ + namespaceId: core.serialization.property("namespace_id", core.serialization.string().optional()), + key: core.serialization.lazy(async () => (await import("../../..")).kv.Key), + value: core.serialization.lazy(async () => (await import("../../..")).kv.Value), + }); + +export declare namespace PutRequest { + interface Raw { + namespace_id?: string | null; + key: serializers.kv.Key.Raw; + value?: serializers.kv.Value.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/kv/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/kv/types/index.ts new file mode 100644 index 0000000000..e4596df6de --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/kv/types/index.ts @@ -0,0 +1,5 @@ +export * from "./GetResponse"; +export * from "./PutRequest"; +export * from "./ListResponse"; +export * from "./GetBatchResponse"; +export * from "./PutBatchRequest"; diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/CustomLobbyPublicity.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/CustomLobbyPublicity.ts new file mode 100644 index 0000000000..a82627c1d5 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/CustomLobbyPublicity.ts @@ -0,0 +1,16 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const CustomLobbyPublicity: core.serialization.Schema< + serializers.matchmaker.CustomLobbyPublicity.Raw, + Rivet.matchmaker.CustomLobbyPublicity +> = core.serialization.enum_(["public", "private"]); + +export declare namespace CustomLobbyPublicity { + type Raw = "public" | "private"; +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/GameModeInfo.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/GameModeInfo.ts new file mode 100644 index 0000000000..d84d040283 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/GameModeInfo.ts @@ -0,0 +1,23 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const GameModeInfo: core.serialization.ObjectSchema< + serializers.matchmaker.GameModeInfo.Raw, + Rivet.matchmaker.GameModeInfo +> = core.serialization.object({ + gameModeId: core.serialization.property( + "game_mode_id", + core.serialization.lazy(async () => (await import("../../../../..")).Identifier) + ), +}); + +export declare namespace GameModeInfo { + interface Raw { + game_mode_id: serializers.Identifier.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinLobby.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinLobby.ts new file mode 100644 index 0000000000..ac863e7e43 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinLobby.ts @@ -0,0 +1,29 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const JoinLobby: core.serialization.ObjectSchema< + serializers.matchmaker.JoinLobby.Raw, + Rivet.matchmaker.JoinLobby +> = core.serialization.object({ + lobbyId: core.serialization.property("lobby_id", core.serialization.string()), + region: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinRegion), + ports: core.serialization.record( + core.serialization.string(), + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPort) + ), + player: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPlayer), +}); + +export declare namespace JoinLobby { + interface Raw { + lobby_id: string; + region: serializers.matchmaker.JoinRegion.Raw; + ports: Record; + player: serializers.matchmaker.JoinPlayer.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPlayer.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPlayer.ts new file mode 100644 index 0000000000..b671fb8b3e --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPlayer.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const JoinPlayer: core.serialization.ObjectSchema< + serializers.matchmaker.JoinPlayer.Raw, + Rivet.matchmaker.JoinPlayer +> = core.serialization.object({ + token: core.serialization.lazy(async () => (await import("../../../../..")).Jwt), +}); + +export declare namespace JoinPlayer { + interface Raw { + token: serializers.Jwt.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPort.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPort.ts new file mode 100644 index 0000000000..6f9413c750 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPort.ts @@ -0,0 +1,31 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const JoinPort: core.serialization.ObjectSchema = + core.serialization.object({ + host: core.serialization.string().optional(), + hostname: core.serialization.string(), + port: core.serialization.number().optional(), + portRange: core.serialization.property( + "port_range", + core.serialization + .lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPortRange) + .optional() + ), + isTls: core.serialization.property("is_tls", core.serialization.boolean()), + }); + +export declare namespace JoinPort { + interface Raw { + host?: string | null; + hostname: string; + port?: number | null; + port_range?: serializers.matchmaker.JoinPortRange.Raw | null; + is_tls: boolean; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPortRange.ts 
b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPortRange.ts new file mode 100644 index 0000000000..f970256c39 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinPortRange.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const JoinPortRange: core.serialization.ObjectSchema< + serializers.matchmaker.JoinPortRange.Raw, + Rivet.matchmaker.JoinPortRange +> = core.serialization.object({ + min: core.serialization.number(), + max: core.serialization.number(), +}); + +export declare namespace JoinPortRange { + interface Raw { + min: number; + max: number; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinRegion.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinRegion.ts new file mode 100644 index 0000000000..1bd0c52094 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/JoinRegion.ts @@ -0,0 +1,28 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const JoinRegion: core.serialization.ObjectSchema< + serializers.matchmaker.JoinRegion.Raw, + Rivet.matchmaker.JoinRegion +> = core.serialization.object({ + regionId: core.serialization.property( + "region_id", + core.serialization.lazy(async () => (await import("../../../../..")).Identifier) + ), + displayName: core.serialization.property( + "display_name", + core.serialization.lazy(async () => (await import("../../../../..")).DisplayName) + ), +}); + +export declare namespace JoinRegion { + interface Raw { + region_id: serializers.Identifier.Raw; + display_name: serializers.DisplayName.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/LobbyInfo.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/LobbyInfo.ts new file mode 100644 index 0000000000..0e89aadff0 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/LobbyInfo.ts @@ -0,0 +1,34 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const LobbyInfo: core.serialization.ObjectSchema< + serializers.matchmaker.LobbyInfo.Raw, + Rivet.matchmaker.LobbyInfo +> = core.serialization.object({ + regionId: core.serialization.property("region_id", core.serialization.string()), + gameModeId: core.serialization.property("game_mode_id", core.serialization.string()), + lobbyId: core.serialization.property("lobby_id", core.serialization.string()), + maxPlayersNormal: core.serialization.property("max_players_normal", core.serialization.number()), + maxPlayersDirect: core.serialization.property("max_players_direct", core.serialization.number()), + maxPlayersParty: core.serialization.property("max_players_party", core.serialization.number()), + totalPlayerCount: core.serialization.property("total_player_count", core.serialization.number()), + state: core.serialization.unknown().optional(), +}); + +export declare namespace LobbyInfo { + interface Raw { + region_id: string; + game_mode_id: string; + lobby_id: string; + max_players_normal: number; + max_players_direct: number; + max_players_party: number; + total_player_count: number; + state?: unknown | null; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/RegionInfo.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/RegionInfo.ts new file mode 100644 index 0000000000..6f320c36ca --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/RegionInfo.ts @@ -0,0 +1,37 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const RegionInfo: core.serialization.ObjectSchema< + serializers.matchmaker.RegionInfo.Raw, + Rivet.matchmaker.RegionInfo +> = core.serialization.object({ + regionId: core.serialization.property( + "region_id", + core.serialization.lazy(async () => (await import("../../../../..")).Identifier) + ), + providerDisplayName: core.serialization.property("provider_display_name", core.serialization.string()), + regionDisplayName: core.serialization.property("region_display_name", core.serialization.string()), + datacenterCoord: core.serialization.property( + "datacenter_coord", + core.serialization.lazyObject(async () => (await import("../../../../..")).geo.Coord) + ), + datacenterDistanceFromClient: core.serialization.property( + "datacenter_distance_from_client", + core.serialization.lazyObject(async () => (await import("../../../../..")).geo.Distance) + ), +}); + +export declare namespace RegionInfo { + interface Raw { + region_id: serializers.Identifier.Raw; + provider_display_name: string; + region_display_name: string; + datacenter_coord: serializers.geo.Coord.Raw; + datacenter_distance_from_client: serializers.geo.Distance.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/index.ts new file mode 100644 index 0000000000..5519ce0ef7 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/common/types/index.ts @@ -0,0 +1,9 @@ +export * from "./LobbyInfo"; +export * from "./GameModeInfo"; +export * from "./RegionInfo"; +export * from "./JoinLobby"; +export * from "./JoinRegion"; +export * from "./JoinPort"; +export * from "./JoinPortRange"; +export * from "./JoinPlayer"; +export * from "./CustomLobbyPublicity"; diff 
--git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/CreateLobbyResponse.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/CreateLobbyResponse.ts new file mode 100644 index 0000000000..cb70e64ac3 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/CreateLobbyResponse.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const CreateLobbyResponse: core.serialization.ObjectSchema< + serializers.matchmaker.CreateLobbyResponse.Raw, + Rivet.matchmaker.CreateLobbyResponse +> = core.serialization.object({ + lobby: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinLobby), + ports: core.serialization.record( + core.serialization.string(), + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPort) + ), + player: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPlayer), +}); + +export declare namespace CreateLobbyResponse { + interface Raw { + lobby: serializers.matchmaker.JoinLobby.Raw; + ports: Record; + player: serializers.matchmaker.JoinPlayer.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/FindLobbyResponse.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/FindLobbyResponse.ts new file mode 100644 index 0000000000..9eae87d8c5 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/FindLobbyResponse.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const FindLobbyResponse: core.serialization.ObjectSchema< + serializers.matchmaker.FindLobbyResponse.Raw, + Rivet.matchmaker.FindLobbyResponse +> = core.serialization.object({ + lobby: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinLobby), + ports: core.serialization.record( + core.serialization.string(), + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPort) + ), + player: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPlayer), +}); + +export declare namespace FindLobbyResponse { + interface Raw { + lobby: serializers.matchmaker.JoinLobby.Raw; + ports: Record; + player: serializers.matchmaker.JoinPlayer.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/JoinLobbyResponse.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/JoinLobbyResponse.ts new file mode 100644 index 0000000000..289cf984c4 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/JoinLobbyResponse.ts @@ -0,0 +1,27 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const JoinLobbyResponse: core.serialization.ObjectSchema< + serializers.matchmaker.JoinLobbyResponse.Raw, + Rivet.matchmaker.JoinLobbyResponse +> = core.serialization.object({ + lobby: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinLobby), + ports: core.serialization.record( + core.serialization.string(), + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPort) + ), + player: core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.JoinPlayer), +}); + +export declare namespace JoinLobbyResponse { + interface Raw { + lobby: serializers.matchmaker.JoinLobby.Raw; + ports: Record; + player: serializers.matchmaker.JoinPlayer.Raw; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/ListLobbiesResponse.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/ListLobbiesResponse.ts new file mode 100644 index 0000000000..5de03f5fd7 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/ListLobbiesResponse.ts @@ -0,0 +1,33 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const ListLobbiesResponse: core.serialization.ObjectSchema< + serializers.matchmaker.ListLobbiesResponse.Raw, + Rivet.matchmaker.ListLobbiesResponse +> = core.serialization.object({ + gameModes: core.serialization.property( + "game_modes", + core.serialization.list( + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.GameModeInfo) + ) + ), + regions: core.serialization.list( + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.RegionInfo) + ), + lobbies: core.serialization.list( + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.LobbyInfo) + ), +}); + +export declare namespace ListLobbiesResponse { + interface Raw { + game_modes: serializers.matchmaker.GameModeInfo.Raw[]; + regions: serializers.matchmaker.RegionInfo.Raw[]; + lobbies: serializers.matchmaker.LobbyInfo.Raw[]; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/index.ts new file mode 100644 index 0000000000..13491dd556 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/lobbies/types/index.ts @@ -0,0 +1,4 @@ +export * from "./FindLobbyResponse"; +export * from "./JoinLobbyResponse"; +export * from "./CreateLobbyResponse"; +export * from "./ListLobbiesResponse"; diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/GameModeStatistics.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/GameModeStatistics.ts new file mode 100644 index 0000000000..37ce994b1d --- /dev/null +++ 
b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/GameModeStatistics.ts @@ -0,0 +1,25 @@ +/** + * This file was auto-generated by Fern from our API Definition. + */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const GameModeStatistics: core.serialization.ObjectSchema< + serializers.matchmaker.GameModeStatistics.Raw, + Rivet.matchmaker.GameModeStatistics +> = core.serialization.object({ + playerCount: core.serialization.property("player_count", core.serialization.number()), + regions: core.serialization.record( + core.serialization.lazy(async () => (await import("../../../../..")).Identifier), + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.RegionStatistics) + ), +}); + +export declare namespace GameModeStatistics { + interface Raw { + player_count: number; + regions: Record; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/GetStatisticsResponse.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/GetStatisticsResponse.ts new file mode 100644 index 0000000000..625d73d4a4 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/GetStatisticsResponse.ts @@ -0,0 +1,28 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const GetStatisticsResponse: core.serialization.ObjectSchema< + serializers.matchmaker.GetStatisticsResponse.Raw, + Rivet.matchmaker.GetStatisticsResponse +> = core.serialization.object({ + playerCount: core.serialization.property("player_count", core.serialization.number()), + gameModes: core.serialization.property( + "game_modes", + core.serialization.record( + core.serialization.lazy(async () => (await import("../../../../..")).Identifier), + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.GameModeStatistics) + ) + ), +}); + +export declare namespace GetStatisticsResponse { + interface Raw { + player_count: number; + game_modes: Record; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/RegionStatistics.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/RegionStatistics.ts new file mode 100644 index 0000000000..da7ec7253e --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/RegionStatistics.ts @@ -0,0 +1,20 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const RegionStatistics: core.serialization.ObjectSchema< + serializers.matchmaker.RegionStatistics.Raw, + Rivet.matchmaker.RegionStatistics +> = core.serialization.object({ + playerCount: core.serialization.property("player_count", core.serialization.number()), +}); + +export declare namespace RegionStatistics { + interface Raw { + player_count: number; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/index.ts new file mode 100644 index 0000000000..e6a2666580 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/players/types/index.ts @@ -0,0 +1,3 @@ +export * from "./GetStatisticsResponse"; +export * from "./GameModeStatistics"; +export * from "./RegionStatistics"; diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/regions/types/ListRegionsResponse.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/regions/types/ListRegionsResponse.ts new file mode 100644 index 0000000000..608d29cf57 --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/regions/types/ListRegionsResponse.ts @@ -0,0 +1,22 @@ +/** + * This file was auto-generated by Fern from our API Definition. 
+ */ + +import * as serializers from "../../../../.."; +import * as Rivet from "../../../../../../api"; +import * as core from "../../../../../../core"; + +export const ListRegionsResponse: core.serialization.ObjectSchema< + serializers.matchmaker.ListRegionsResponse.Raw, + Rivet.matchmaker.ListRegionsResponse +> = core.serialization.object({ + regions: core.serialization.list( + core.serialization.lazyObject(async () => (await import("../../../../..")).matchmaker.RegionInfo) + ), +}); + +export declare namespace ListRegionsResponse { + interface Raw { + regions: serializers.matchmaker.RegionInfo.Raw[]; + } +} diff --git a/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/regions/types/index.ts b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/regions/types/index.ts new file mode 100644 index 0000000000..dd60cc81be --- /dev/null +++ b/sdks/runtime/typescript/src/serialization/resources/matchmaker/resources/regions/types/index.ts @@ -0,0 +1 @@ +export * from "./ListRegionsResponse"; diff --git a/sdks/runtime/typescript/tsconfig.json b/sdks/runtime/typescript/tsconfig.json deleted file mode 100644 index 6386dc3821..0000000000 --- a/sdks/runtime/typescript/tsconfig.json +++ /dev/null @@ -1,27 +0,0 @@ -{ - "compilerOptions": { - "extendedDiagnostics": true, - "strict": true, - "target": "esnext", - "module": "esnext", - "moduleResolution": "node", - "esModuleInterop": true, - "skipLibCheck": true, - "declaration": true, - "emitDeclarationOnly": true, - "sourceMap": true, - "noUnusedParameters": true, - "outDir": "types", - "rootDir": "src", - "baseUrl": "src", - "paths": { - "@rivet-gg/api": [ - "." 
- ] - } - }, - "include": [ - "src" - ], - "exclude": [] -} \ No newline at end of file diff --git a/sdks/runtime/typescript/yarn.lock b/sdks/runtime/typescript/yarn.lock deleted file mode 100644 index 834dd40aed..0000000000 --- a/sdks/runtime/typescript/yarn.lock +++ /dev/null @@ -1,340 +0,0 @@ -# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. -# yarn lockfile v1 - - -"@esbuild/aix-ppc64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/aix-ppc64/-/aix-ppc64-0.19.11.tgz#2acd20be6d4f0458bc8c784103495ff24f13b1d3" - integrity sha512-FnzU0LyE3ySQk7UntJO4+qIiQgI7KoODnZg5xzXIrFJlKd2P2gwHsHY4927xj9y5PJmJSzULiUCWmv7iWnNa7g== - -"@esbuild/android-arm64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm64/-/android-arm64-0.19.11.tgz#b45d000017385c9051a4f03e17078abb935be220" - integrity sha512-aiu7K/5JnLj//KOnOfEZ0D90obUkRzDMyqd/wNAUQ34m4YUPVhRZpnqKV9uqDGxT7cToSDnIHsGooyIczu9T+Q== - -"@esbuild/android-arm@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/android-arm/-/android-arm-0.19.11.tgz#f46f55414e1c3614ac682b29977792131238164c" - integrity sha512-5OVapq0ClabvKvQ58Bws8+wkLCV+Rxg7tUVbo9xu034Nm536QTII4YzhaFriQ7rMrorfnFKUsArD2lqKbFY4vw== - -"@esbuild/android-x64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/android-x64/-/android-x64-0.19.11.tgz#bfc01e91740b82011ef503c48f548950824922b2" - integrity sha512-eccxjlfGw43WYoY9QgB82SgGgDbibcqyDTlk3l3C0jOVHKxrjdc9CTwDUQd0vkvYg5um0OH+GpxYvp39r+IPOg== - -"@esbuild/darwin-arm64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/darwin-arm64/-/darwin-arm64-0.19.11.tgz#533fb7f5a08c37121d82c66198263dcc1bed29bf" - integrity sha512-ETp87DRWuSt9KdDVkqSoKoLFHYTrkyz2+65fj9nfXsaV3bMhTCjtQfw3y+um88vGRKRiF7erPrh/ZuIdLUIVxQ== - -"@esbuild/darwin-x64@0.19.11": - version "0.19.11" - resolved 
"https://registry.yarnpkg.com/@esbuild/darwin-x64/-/darwin-x64-0.19.11.tgz#62f3819eff7e4ddc656b7c6815a31cf9a1e7d98e" - integrity sha512-fkFUiS6IUK9WYUO/+22omwetaSNl5/A8giXvQlcinLIjVkxwTLSktbF5f/kJMftM2MJp9+fXqZ5ezS7+SALp4g== - -"@esbuild/freebsd-arm64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-arm64/-/freebsd-arm64-0.19.11.tgz#d478b4195aa3ca44160272dab85ef8baf4175b4a" - integrity sha512-lhoSp5K6bxKRNdXUtHoNc5HhbXVCS8V0iZmDvyWvYq9S5WSfTIHU2UGjcGt7UeS6iEYp9eeymIl5mJBn0yiuxA== - -"@esbuild/freebsd-x64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/freebsd-x64/-/freebsd-x64-0.19.11.tgz#7bdcc1917409178257ca6a1a27fe06e797ec18a2" - integrity sha512-JkUqn44AffGXitVI6/AbQdoYAq0TEullFdqcMY/PCUZ36xJ9ZJRtQabzMA+Vi7r78+25ZIBosLTOKnUXBSi1Kw== - -"@esbuild/linux-arm64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm64/-/linux-arm64-0.19.11.tgz#58ad4ff11685fcc735d7ff4ca759ab18fcfe4545" - integrity sha512-LneLg3ypEeveBSMuoa0kwMpCGmpu8XQUh+mL8XXwoYZ6Be2qBnVtcDI5azSvh7vioMDhoJFZzp9GWp9IWpYoUg== - -"@esbuild/linux-arm@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-arm/-/linux-arm-0.19.11.tgz#ce82246d873b5534d34de1e5c1b33026f35e60e3" - integrity sha512-3CRkr9+vCV2XJbjwgzjPtO8T0SZUmRZla+UL1jw+XqHZPkPgZiyWvbDvl9rqAN8Zl7qJF0O/9ycMtjU67HN9/Q== - -"@esbuild/linux-ia32@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-ia32/-/linux-ia32-0.19.11.tgz#cbae1f313209affc74b80f4390c4c35c6ab83fa4" - integrity sha512-caHy++CsD8Bgq2V5CodbJjFPEiDPq8JJmBdeyZ8GWVQMjRD0sU548nNdwPNvKjVpamYYVL40AORekgfIubwHoA== - -"@esbuild/linux-loong64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-loong64/-/linux-loong64-0.19.11.tgz#5f32aead1c3ec8f4cccdb7ed08b166224d4e9121" - integrity sha512-ppZSSLVpPrwHccvC6nQVZaSHlFsvCQyjnvirnVjbKSHuE5N24Yl8F3UwYUUR1UEPaFObGD2tSvVKbvR+uT1Nrg== - 
-"@esbuild/linux-mips64el@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-mips64el/-/linux-mips64el-0.19.11.tgz#38eecf1cbb8c36a616261de858b3c10d03419af9" - integrity sha512-B5x9j0OgjG+v1dF2DkH34lr+7Gmv0kzX6/V0afF41FkPMMqaQ77pH7CrhWeR22aEeHKaeZVtZ6yFwlxOKPVFyg== - -"@esbuild/linux-ppc64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-ppc64/-/linux-ppc64-0.19.11.tgz#9c5725a94e6ec15b93195e5a6afb821628afd912" - integrity sha512-MHrZYLeCG8vXblMetWyttkdVRjQlQUb/oMgBNurVEnhj4YWOr4G5lmBfZjHYQHHN0g6yDmCAQRR8MUHldvvRDA== - -"@esbuild/linux-riscv64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-riscv64/-/linux-riscv64-0.19.11.tgz#2dc4486d474a2a62bbe5870522a9a600e2acb916" - integrity sha512-f3DY++t94uVg141dozDu4CCUkYW+09rWtaWfnb3bqe4w5NqmZd6nPVBm+qbz7WaHZCoqXqHz5p6CM6qv3qnSSQ== - -"@esbuild/linux-s390x@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-s390x/-/linux-s390x-0.19.11.tgz#4ad8567df48f7dd4c71ec5b1753b6f37561a65a8" - integrity sha512-A5xdUoyWJHMMlcSMcPGVLzYzpcY8QP1RtYzX5/bS4dvjBGVxdhuiYyFwp7z74ocV7WDc0n1harxmpq2ePOjI0Q== - -"@esbuild/linux-x64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/linux-x64/-/linux-x64-0.19.11.tgz#b7390c4d5184f203ebe7ddaedf073df82a658766" - integrity sha512-grbyMlVCvJSfxFQUndw5mCtWs5LO1gUlwP4CDi4iJBbVpZcqLVT29FxgGuBJGSzyOxotFG4LoO5X+M1350zmPA== - -"@esbuild/netbsd-x64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/netbsd-x64/-/netbsd-x64-0.19.11.tgz#d633c09492a1721377f3bccedb2d821b911e813d" - integrity sha512-13jvrQZJc3P230OhU8xgwUnDeuC/9egsjTkXN49b3GcS5BKvJqZn86aGM8W9pd14Kd+u7HuFBMVtrNGhh6fHEQ== - -"@esbuild/openbsd-x64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/openbsd-x64/-/openbsd-x64-0.19.11.tgz#17388c76e2f01125bf831a68c03a7ffccb65d1a2" - integrity 
sha512-ysyOGZuTp6SNKPE11INDUeFVVQFrhcNDVUgSQVDzqsqX38DjhPEPATpid04LCoUr2WXhQTEZ8ct/EgJCUDpyNw== - -"@esbuild/sunos-x64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/sunos-x64/-/sunos-x64-0.19.11.tgz#e320636f00bb9f4fdf3a80e548cb743370d41767" - integrity sha512-Hf+Sad9nVwvtxy4DXCZQqLpgmRTQqyFyhT3bZ4F2XlJCjxGmRFF0Shwn9rzhOYRB61w9VMXUkxlBy56dk9JJiQ== - -"@esbuild/win32-arm64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/win32-arm64/-/win32-arm64-0.19.11.tgz#c778b45a496e90b6fc373e2a2bb072f1441fe0ee" - integrity sha512-0P58Sbi0LctOMOQbpEOvOL44Ne0sqbS0XWHMvvrg6NE5jQ1xguCSSw9jQeUk2lfrXYsKDdOe6K+oZiwKPilYPQ== - -"@esbuild/win32-ia32@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/win32-ia32/-/win32-ia32-0.19.11.tgz#481a65fee2e5cce74ec44823e6b09ecedcc5194c" - integrity sha512-6YOrWS+sDJDmshdBIQU+Uoyh7pQKrdykdefC1avn76ss5c+RN6gut3LZA4E2cH5xUEp5/cA0+YxRaVtRAb0xBg== - -"@esbuild/win32-x64@0.19.11": - version "0.19.11" - resolved "https://registry.yarnpkg.com/@esbuild/win32-x64/-/win32-x64-0.19.11.tgz#a5d300008960bb39677c46bf16f53ec70d8dee04" - integrity sha512-vfkhltrjCAb603XaFhqhAF4LGDi2M4OrCRrFusyQ+iTLQ/o60QQXxc9cZC/FFpihBI9N1Grn6SMKVJ4KP7Fuiw== - -"@types/node@17.0.33": - version "17.0.33" - resolved "https://registry.yarnpkg.com/@types/node/-/node-17.0.33.tgz#3c1879b276dc63e73030bb91165e62a4509cd506" - integrity sha512-miWq2m2FiQZmaHfdZNcbpp9PuXg34W5JZ5CrJ/BaS70VuhoJENBEQybeiYSaPBRNq6KQGnjfEnc/F3PN++D+XQ== - -"@types/qs@6.9.8": - version "6.9.8" - resolved "https://registry.yarnpkg.com/@types/qs/-/qs-6.9.8.tgz#f2a7de3c107b89b441e071d5472e6b726b4adf45" - integrity sha512-u95svzDlTysU5xecFNTgfFG5RUWu1A9P0VzgpcIiGZA9iraHOdSzcxMxQ55DyeRaGCSxQi7LxXDI4rzq/MYfdg== - -"@types/url-join@4.0.1": - version "4.0.1" - resolved "https://registry.yarnpkg.com/@types/url-join/-/url-join-4.0.1.tgz#4989c97f969464647a8586c7252d97b449cdc045" - integrity 
sha512-wDXw9LEEUHyV+7UWy7U315nrJGJ7p1BzaCxDpEoLr789Dk1WDVMMlf3iBfbG2F8NdWnYyFbtTxUn2ZNbm1Q4LQ== - -asynckit@^0.4.0: - version "0.4.0" - resolved "https://registry.yarnpkg.com/asynckit/-/asynckit-0.4.0.tgz#c79ed97f7f34cb8f2ba1bc9790bcc366474b4b79" - integrity sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q== - -call-bind@^1.0.0: - version "1.0.5" - resolved "https://registry.yarnpkg.com/call-bind/-/call-bind-1.0.5.tgz#6fa2b7845ce0ea49bf4d8b9ef64727a2c2e2e513" - integrity sha512-C3nQxfFZxFRVoJoGKKI8y3MOEo129NQ+FgQ08iye+Mk4zNZZGdjfs06bVTr+DBSlA66Q2VEcMki/cUCP4SercQ== - dependencies: - function-bind "^1.1.2" - get-intrinsic "^1.2.1" - set-function-length "^1.1.1" - -combined-stream@^1.0.8: - version "1.0.8" - resolved "https://registry.yarnpkg.com/combined-stream/-/combined-stream-1.0.8.tgz#c3d45a8b34fd730631a110a8a2520682b31d5a7f" - integrity sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg== - dependencies: - delayed-stream "~1.0.0" - -define-data-property@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/define-data-property/-/define-data-property-1.1.1.tgz#c35f7cd0ab09883480d12ac5cb213715587800b3" - integrity sha512-E7uGkTzkk1d0ByLeSc6ZsFS79Axg+m1P/VsgYsxHgiuc3tFSj+MjMIwe90FC4lOAZzNBdY7kkO2P2wKdsQ1vgQ== - dependencies: - get-intrinsic "^1.2.1" - gopd "^1.0.1" - has-property-descriptors "^1.0.0" - -delayed-stream@~1.0.0: - version "1.0.0" - resolved "https://registry.yarnpkg.com/delayed-stream/-/delayed-stream-1.0.0.tgz#df3ae199acadfb7d440aaae0b29e2272b24ec619" - integrity sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ== - -esbuild@^0.19.11: - version "0.19.11" - resolved "https://registry.yarnpkg.com/esbuild/-/esbuild-0.19.11.tgz#4a02dca031e768b5556606e1b468fe72e3325d96" - integrity sha512-HJ96Hev2hX/6i5cDVwcqiJBBtuo9+FeIJOtZ9W1kA5M6AMJRHUZlpYZ1/SbEwtO0ioNAW8rUooVpC/WehY2SfA== - optionalDependencies: - 
"@esbuild/aix-ppc64" "0.19.11" - "@esbuild/android-arm" "0.19.11" - "@esbuild/android-arm64" "0.19.11" - "@esbuild/android-x64" "0.19.11" - "@esbuild/darwin-arm64" "0.19.11" - "@esbuild/darwin-x64" "0.19.11" - "@esbuild/freebsd-arm64" "0.19.11" - "@esbuild/freebsd-x64" "0.19.11" - "@esbuild/linux-arm" "0.19.11" - "@esbuild/linux-arm64" "0.19.11" - "@esbuild/linux-ia32" "0.19.11" - "@esbuild/linux-loong64" "0.19.11" - "@esbuild/linux-mips64el" "0.19.11" - "@esbuild/linux-ppc64" "0.19.11" - "@esbuild/linux-riscv64" "0.19.11" - "@esbuild/linux-s390x" "0.19.11" - "@esbuild/linux-x64" "0.19.11" - "@esbuild/netbsd-x64" "0.19.11" - "@esbuild/openbsd-x64" "0.19.11" - "@esbuild/sunos-x64" "0.19.11" - "@esbuild/win32-arm64" "0.19.11" - "@esbuild/win32-ia32" "0.19.11" - "@esbuild/win32-x64" "0.19.11" - -form-data@^4.0.0: - version "4.0.0" - resolved "https://registry.yarnpkg.com/form-data/-/form-data-4.0.0.tgz#93919daeaf361ee529584b9b31664dc12c9fa452" - integrity sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww== - dependencies: - asynckit "^0.4.0" - combined-stream "^1.0.8" - mime-types "^2.1.12" - -function-bind@^1.1.2: - version "1.1.2" - resolved "https://registry.yarnpkg.com/function-bind/-/function-bind-1.1.2.tgz#2c02d864d97f3ea6c8830c464cbd11ab6eab7a1c" - integrity sha512-7XHNxH7qX9xG5mIwxkhumTox/MIRNcOgDrxWsMt2pAr23WHp6MrRlN7FBSFpCpr+oVO0F744iUgR82nJMfG2SA== - -get-intrinsic@^1.0.2, get-intrinsic@^1.1.3, get-intrinsic@^1.2.1, get-intrinsic@^1.2.2: - version "1.2.2" - resolved "https://registry.yarnpkg.com/get-intrinsic/-/get-intrinsic-1.2.2.tgz#281b7622971123e1ef4b3c90fd7539306da93f3b" - integrity sha512-0gSo4ml/0j98Y3lngkFEot/zhiCeWsbYIlZ+uZOVgzLyLaUw7wxUL+nCTP0XJvJg1AXulJRI3UJi8GsbDuxdGA== - dependencies: - function-bind "^1.1.2" - has-proto "^1.0.1" - has-symbols "^1.0.3" - hasown "^2.0.0" - -gopd@^1.0.1: - version "1.0.1" - resolved 
"https://registry.yarnpkg.com/gopd/-/gopd-1.0.1.tgz#29ff76de69dac7489b7c0918a5788e56477c332c" - integrity sha512-d65bNlIadxvpb/A2abVdlqKqV563juRnZ1Wtk6s1sIR8uNsXR70xqIzVqxVf1eTqDunwT2MkczEeaezCKTZhwA== - dependencies: - get-intrinsic "^1.1.3" - -has-property-descriptors@^1.0.0: - version "1.0.1" - resolved "https://registry.yarnpkg.com/has-property-descriptors/-/has-property-descriptors-1.0.1.tgz#52ba30b6c5ec87fd89fa574bc1c39125c6f65340" - integrity sha512-VsX8eaIewvas0xnvinAe9bw4WfIeODpGYikiWYLH+dma0Jw6KHYqWiWfhQlgOVK8D6PvjubK5Uc4P0iIhIcNVg== - dependencies: - get-intrinsic "^1.2.2" - -has-proto@^1.0.1: - version "1.0.1" - resolved "https://registry.yarnpkg.com/has-proto/-/has-proto-1.0.1.tgz#1885c1305538958aff469fef37937c22795408e0" - integrity sha512-7qE+iP+O+bgF9clE5+UoBFzE65mlBiVj3tKCrlNQ0Ogwm0BjpT/gK4SlLYDMybDh5I3TCTKnPPa0oMG7JDYrhg== - -has-symbols@^1.0.3: - version "1.0.3" - resolved "https://registry.yarnpkg.com/has-symbols/-/has-symbols-1.0.3.tgz#bb7b2c4349251dce87b125f7bdf874aa7c8b39f8" - integrity sha512-l3LCuF6MgDNwTDKkdYGEihYjt5pRPbEg46rtlmnSPlUbgmB8LOIrKJbYYFBSbnPaJexMKtiPO8hmeRjRz2Td+A== - -hasown@^2.0.0: - version "2.0.0" - resolved "https://registry.yarnpkg.com/hasown/-/hasown-2.0.0.tgz#f4c513d454a57b7c7e1650778de226b11700546c" - integrity sha512-vUptKVTpIJhcczKBbgnS+RtcuYMB8+oNzPK2/Hp3hanz8JmpATdmmgLgSaadVREkDm+e2giHwY3ZRkyjSIDDFA== - dependencies: - function-bind "^1.1.2" - -js-base64@^3.7.5: - version "3.7.5" - resolved "https://registry.yarnpkg.com/js-base64/-/js-base64-3.7.5.tgz#21e24cf6b886f76d6f5f165bfcd69cc55b9e3fca" - integrity sha512-3MEt5DTINKqfScXKfJFrRbxkrnk2AxPWGBL/ycjz4dK8iqiSJ06UxD8jh8xuh6p10TX4t2+7FsBYVxxQbMg+qA== - -mime-db@1.52.0: - version "1.52.0" - resolved "https://registry.yarnpkg.com/mime-db/-/mime-db-1.52.0.tgz#bbabcdc02859f4987301c856e3387ce5ec43bf70" - integrity sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg== - -mime-types@^2.1.12: - version "2.1.35" - resolved 
"https://registry.yarnpkg.com/mime-types/-/mime-types-2.1.35.tgz#381a871b62a734450660ae3deee44813f70d959a" - integrity sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw== - dependencies: - mime-db "1.52.0" - -node-fetch@2: - version "2.7.0" - resolved "https://registry.yarnpkg.com/node-fetch/-/node-fetch-2.7.0.tgz#d0f0fa6e3e2dc1d27efcd8ad99d550bda94d187d" - integrity sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A== - dependencies: - whatwg-url "^5.0.0" - -object-inspect@^1.9.0: - version "1.13.1" - resolved "https://registry.yarnpkg.com/object-inspect/-/object-inspect-1.13.1.tgz#b96c6109324ccfef6b12216a956ca4dc2ff94bc2" - integrity sha512-5qoj1RUiKOMsCCNLV1CBiPYE10sziTsnmNxkAI/rZhiD63CF7IqdFGC/XzjWjpSgLf0LxXX3bDFIh0E18f6UhQ== - -prettier@2.7.1: - version "2.7.1" - resolved "https://registry.yarnpkg.com/prettier/-/prettier-2.7.1.tgz#e235806850d057f97bb08368a4f7d899f7760c64" - integrity sha512-ujppO+MkdPqoVINuDFDRLClm7D78qbDt0/NR+wp5FqEZOoTNAjPHWj17QRhu7geIHJfcNhRk1XVQmF8Bp3ye+g== - -qs@^6.11.2: - version "6.11.2" - resolved "https://registry.yarnpkg.com/qs/-/qs-6.11.2.tgz#64bea51f12c1f5da1bc01496f48ffcff7c69d7d9" - integrity sha512-tDNIz22aBzCDxLtVH++VnTfzxlfeK5CbqohpSqpJgj1Wg/cQbStNAz3NuqCs5vV+pjBsK4x4pN9HlVh7rcYRiA== - dependencies: - side-channel "^1.0.4" - -set-function-length@^1.1.1: - version "1.1.1" - resolved "https://registry.yarnpkg.com/set-function-length/-/set-function-length-1.1.1.tgz#4bc39fafb0307224a33e106a7d35ca1218d659ed" - integrity sha512-VoaqjbBJKiWtg4yRcKBQ7g7wnGnLV3M8oLvVWwOk2PdYY6PEFegR1vezXR0tw6fZGF9csVakIRjrJiy2veSBFQ== - dependencies: - define-data-property "^1.1.1" - get-intrinsic "^1.2.1" - gopd "^1.0.1" - has-property-descriptors "^1.0.0" - -side-channel@^1.0.4: - version "1.0.4" - resolved "https://registry.yarnpkg.com/side-channel/-/side-channel-1.0.4.tgz#efce5c8fdc104ee751b25c58d4290011fa5ea2cf" - integrity 
sha512-q5XPytqFEIKHkGdiMIrY10mvLRvnQh42/+GoBlFW3b2LXLE2xxJpZFdm94we0BaoV3RwJyGqg5wS7epxTv0Zvw== - dependencies: - call-bind "^1.0.0" - get-intrinsic "^1.0.2" - object-inspect "^1.9.0" - -tr46@~0.0.3: - version "0.0.3" - resolved "https://registry.yarnpkg.com/tr46/-/tr46-0.0.3.tgz#8184fd347dac9cdc185992f3a6622e14b9d9ab6a" - integrity sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw== - -typescript@4.6.4: - version "4.6.4" - resolved "https://registry.yarnpkg.com/typescript/-/typescript-4.6.4.tgz#caa78bbc3a59e6a5c510d35703f6a09877ce45e9" - integrity sha512-9ia/jWHIEbo49HfjrLGfKbZSuWo9iTMwXO+Ca3pRsSpbsMbc7/IU8NKdCZVRRBafVPGnoJeFL76ZOAA84I9fEg== - -url-join@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/url-join/-/url-join-5.0.0.tgz#c2f1e5cbd95fa91082a93b58a1f42fecb4bdbcf1" - integrity sha512-n2huDr9h9yzd6exQVnH/jU5mr+Pfx08LRXXZhkLLetAMESRj+anQsTAh940iMrIetKAmry9coFuZQ2jY8/p3WA== - -webidl-conversions@^3.0.0: - version "3.0.1" - resolved "https://registry.yarnpkg.com/webidl-conversions/-/webidl-conversions-3.0.1.tgz#24534275e2a7bc6be7bc86611cc16ae0a5654871" - integrity sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ== - -whatwg-url@^5.0.0: - version "5.0.0" - resolved "https://registry.yarnpkg.com/whatwg-url/-/whatwg-url-5.0.0.tgz#966454e8765462e37644d3626f6742ce8b70965d" - integrity sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw== - dependencies: - tr46 "~0.0.3" - webidl-conversions "^3.0.0" diff --git a/shell.nix b/shell.nix index b14b01d2d5..9c4d19fdef 100644 --- a/shell.nix +++ b/shell.nix @@ -94,8 +94,26 @@ in source <(kubectl completion bash) # Automatically connect to correct cluster - alias kubectl='KUBECONFIG=$(bolt output project-root)/gen/k8s/kubeconfig/$(bolt output namespace).yml kubectl' - alias helm='KUBECONFIG=$(bolt output project-root)/gen/k8s/kubeconfig/$(bolt output namespace).yml helm' + alias 
kubectl='KUBECONFIG=$(get_config) && export KUBECONFIG && kubectl' + alias helm='KUBECONFIG=$(get_config) && export KUBECONFIG && helm' + + get_config() { + ROOT=$(bolt output project-root) + + if [ $? -ne 0 ]; then + echo $ROOT + return 1 + fi + + NS=$(bolt output namespace) + + if [ $? -ne 0 ]; then + echo $NS + return 1 + fi + + echo "$ROOT/gen/k8s/kubeconfig/$NS.yml" + } # Fix dynamic library path to fix issue with Python export LD_LIBRARY_PATH="${pkgs.clang}/resource-root/lib:${pkgs.lib.strings.makeLibraryPath [ pkgs.openssl ]}" diff --git a/svc/Cargo.lock b/svc/Cargo.lock index 281f3e69d7..99b1f26aa8 100644 --- a/svc/Cargo.lock +++ b/svc/Cargo.lock @@ -30,9 +30,9 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.9" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d713b3834d76b85304d4d525563c1276e2e30dc97cc67bfb4585a4a29fc2c89f" +checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" dependencies = [ "cfg-if", "getrandom", @@ -43,9 +43,9 @@ dependencies = [ [[package]] name = "aho-corasick" -version = "1.1.2" +version = "1.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b2969dcb958b36655471fc61f7e416fa76033bdd4bfed0678d8fee1e2d07a1f0" +checksum = "8e60d3430d3a69478ad0993f19238d2df97c507009a52b3c10addcd7f6bcb916" dependencies = [ "memchr", ] @@ -73,9 +73,9 @@ dependencies = [ [[package]] name = "anyhow" -version = "1.0.80" +version = "1.0.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5ad32ce52e4161730f7098c077cd2ed6229b5804ccf99e5366be1ab72a98b4e1" +checksum = "0952808a6c2afd1aa8947271f3a60f1a6763c7b912d210184c5149b5cf147247" [[package]] name = "api-admin" @@ -86,7 +86,9 @@ dependencies = [ "async_once", "chirp-client", "chrono", - "http 0.2.11", + "cluster-server-get", + "cluster-server-list", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", @@ -95,6 +97,7 @@ dependencies = [ "rivet-cache", 
"rivet-claims", "rivet-connection", + "rivet-convert", "rivet-health-checks", "rivet-matchmaker", "rivet-operation", @@ -127,7 +130,7 @@ dependencies = [ "email-verification-create", "faker-user", "headers", - "http 0.2.11", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", @@ -171,7 +174,7 @@ dependencies = [ "chirp-client", "chrono", "faker-game", - "http 0.2.11", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", @@ -226,6 +229,7 @@ dependencies = [ "cloud-namespace-token-public-create", "cloud-version-get", "cloud-version-publish", + "cluster-datacenter-list", "custom-user-avatar-list-for-game", "custom-user-avatar-upload-complete", "faker-region", @@ -248,7 +252,7 @@ dependencies = [ "game-version-get", "game-version-list", "game-version-validate", - "http 0.2.11", + "http 0.2.12", "hyper", "job-log-read", "job-run-get", @@ -320,7 +324,7 @@ dependencies = [ "futures-util", "game-get", "game-resolve-namespace-id", - "http 0.2.11", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", @@ -377,7 +381,7 @@ dependencies = [ "futures-util", "global-error", "headers", - "http 0.2.11", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", @@ -441,7 +445,7 @@ dependencies = [ "game-user-link-get", "game-user-recent-session-list", "game-user-recommend", - "http 0.2.11", + "http 0.2.12", "hyper", "identity-config-version-get", "job-run-get", @@ -500,7 +504,7 @@ dependencies = [ "chirp-client", "chrono", "faker-job-run", - "http 0.2.11", + "http 0.2.12", "hyper", "job-run-get", "lazy_static", @@ -545,7 +549,7 @@ dependencies = [ "game-get", "game-namespace-get", "game-user-get", - "http 0.2.11", + "http 0.2.12", "hyper", "kv-config-version-get", "kv-get", @@ -610,7 +614,7 @@ dependencies = [ "game-namespace-get", "game-namespace-resolve-url", "game-user-get", - "http 0.2.11", + "http 0.2.12", "hyper", "job-run-get", "lazy_static", @@ -669,7 +673,7 @@ dependencies = [ "game-get", "game-namespace-get", "game-user-get", - "http 0.2.11", + "http 0.2.12", 
"hyper", "lazy_static", "mm-lobby-get", @@ -722,10 +726,11 @@ dependencies = [ "api-matchmaker", "api-module", "api-portal", + "api-provision", "api-status", "async-trait", "chirp-client", - "http 0.2.11", + "http 0.2.12", "hyper", "rivet-operation", "tokio", @@ -746,7 +751,7 @@ dependencies = [ "futures-util", "game-get", "game-resolve-name-id", - "http 0.2.11", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", @@ -779,6 +784,38 @@ dependencies = [ "uuid", ] +[[package]] +name = "api-provision" +version = "0.0.1" +dependencies = [ + "api-helper", + "async-trait", + "chirp-client", + "chrono", + "cluster-datacenter-get", + "cluster-server-get", + "cluster-server-resolve-for-ip", + "http 0.2.12", + "hyper", + "lazy_static", + "prost 0.10.4", + "rivet-api", + "rivet-cache", + "rivet-claims", + "rivet-health-checks", + "rivet-operation", + "rivet-pools", + "rivet-util-cluster", + "serde", + "serde_json", + "thiserror", + "tokio", + "tracing", + "tracing-subscriber", + "url", + "uuid", +] + [[package]] name = "api-route" version = "0.0.1" @@ -798,11 +835,10 @@ dependencies = [ "faker-job-run", "faker-region", "game-get", - "http 0.2.11", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", - "region-resolve", "reqwest", "rivet-cache", "rivet-claims", @@ -819,7 +855,6 @@ dependencies = [ "serde_json", "thiserror", "tokio", - "toml 0.5.11", "tracing", "tracing-subscriber", "url", @@ -839,7 +874,7 @@ dependencies = [ "futures-util", "game-namespace-resolve-name-id", "game-resolve-name-id", - "http 0.2.11", + "http 0.2.12", "hyper", "lazy_static", "prost 0.10.4", @@ -873,9 +908,9 @@ dependencies = [ [[package]] name = "arc-swap" -version = "1.6.0" +version = "1.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bddcadddf5e9015d310179a59bb28c4d4b9920ad0f11e8e14dbadf654890c9a6" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" [[package]] name = "async-nats" @@ -886,7 +921,7 @@ dependencies = [ "base64 
0.21.7", "bytes", "futures", - "http 0.2.11", + "http 0.2.12", "memchr", "nkeys", "nuid", @@ -926,13 +961,13 @@ dependencies = [ [[package]] name = "async-trait" -version = "0.1.77" +version = "0.1.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c980ee35e870bd1a4d2c8294d4c04d0499e67bca1e4b5cefcc693c2fa00caea9" +checksum = "a507401cad91ec6a857ed5513a2073c82a9b9048762b885bb98655b306964681" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -950,21 +985,11 @@ dependencies = [ "num-traits", ] -[[package]] -name = "atomic-write-file" -version = "0.1.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "edcdbedc2236483ab103a53415653d6b4442ea6141baf1ffa85df29635e88436" -dependencies = [ - "nix", - "rand", -] - [[package]] name = "autocfg" -version = "1.1.0" +version = "1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d468802bab17cbc0cc575e9b053f41e72aa36bfa6b7f55e3529ffa43161b97fa" +checksum = "f1fdabc7756949593fe60f30ec81974b613357de856987752631dea1e3394c80" [[package]] name = "aws-endpoint" @@ -975,7 +1000,7 @@ dependencies = [ "aws-smithy-http 0.52.0", "aws-smithy-types 0.52.0", "aws-types", - "http 0.2.11", + "http 0.2.12", "regex", "tracing", ] @@ -990,7 +1015,7 @@ dependencies = [ "aws-smithy-types 0.52.0", "aws-types", "bytes", - "http 0.2.11", + "http 0.2.12", "http-body", "lazy_static", "percent-encoding", @@ -1020,7 +1045,7 @@ dependencies = [ "bytes", "bytes-utils", "fastrand 1.9.0", - "http 0.2.11", + "http 0.2.12", "http-body", "tokio-stream", "tower", @@ -1037,7 +1062,7 @@ dependencies = [ "aws-smithy-eventstream", "aws-smithy-http 0.52.0", "aws-types", - "http 0.2.11", + "http 0.2.12", "tracing", ] @@ -1053,7 +1078,7 @@ dependencies = [ "form_urlencoded", "hex", "hmac", - "http 0.2.11", + "http 0.2.12", "once_cell", "percent-encoding", "regex", @@ -1098,7 +1123,7 @@ dependencies = [ "crc32c", "crc32fast", "hex", - "http 
0.2.11", + "http 0.2.12", "http-body", "md-5", "pin-project-lite", @@ -1119,7 +1144,7 @@ dependencies = [ "aws-smithy-types 0.41.0", "bytes", "fastrand 1.9.0", - "http 0.2.11", + "http 0.2.12", "http-body", "hyper", "hyper-rustls 0.22.1", @@ -1143,7 +1168,7 @@ dependencies = [ "aws-smithy-types 0.52.0", "bytes", "fastrand 1.9.0", - "http 0.2.11", + "http 0.2.12", "http-body", "hyper", "hyper-rustls 0.23.2", @@ -1175,7 +1200,7 @@ dependencies = [ "bytes", "bytes-utils", "futures-core", - "http 0.2.11", + "http 0.2.12", "http-body", "hyper", "once_cell", @@ -1197,7 +1222,7 @@ dependencies = [ "bytes", "bytes-utils", "futures-core", - "http 0.2.11", + "http 0.2.12", "http-body", "hyper", "once_cell", @@ -1217,7 +1242,7 @@ checksum = "8017959786cce64e690214d303d062c97fcd38a68df7cb444255e534c9bbce49" dependencies = [ "aws-smithy-http 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "http-body", "pin-project", "tower", @@ -1233,7 +1258,7 @@ dependencies = [ "aws-smithy-http 0.52.0", "aws-smithy-types 0.52.0", "bytes", - "http 0.2.11", + "http 0.2.12", "http-body", "pin-project-lite", "tower", @@ -1255,7 +1280,7 @@ version = "0.41.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c7f957a2250cc0fa4ccf155e00aeac9a81f600df7cd4ecc910c75030e6534f5" dependencies = [ - "itoa 1.0.10", + "itoa 1.0.11", "num-integer", "ryu", "time", @@ -1268,7 +1293,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "987b1e37febb9bd409ca0846e82d35299e572ad8279bc404778caeb5fc05ad56" dependencies = [ "base64-simd", - "itoa 1.0.10", + "itoa 1.0.11", "num-integer", "ryu", "time", @@ -1293,7 +1318,7 @@ dependencies = [ "aws-smithy-client 0.52.0", "aws-smithy-http 0.52.0", "aws-smithy-types 0.52.0", - "http 0.2.11", + "http 0.2.12", "rustc_version", "tracing", "zeroize", @@ -1310,10 +1335,10 @@ dependencies = [ "bitflags 1.3.2", "bytes", "futures-util", - "http 0.2.11", + "http 0.2.12", "http-body", "hyper", - "itoa 1.0.10", + "itoa 1.0.11", 
"matchit", "memchr", "mime", @@ -1336,7 +1361,7 @@ dependencies = [ "async-trait", "bytes", "futures-util", - "http 0.2.11", + "http 0.2.12", "http-body", "mime", "rustversion", @@ -1346,9 +1371,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.69" +version = "0.3.71" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2089b7e3f35b9dd2d0ed921ead4f6d318c27680d4a5bd167b3ee120edb105837" +checksum = "26b05800d2e817c8b3b4b54abd461726265fa9789ae34330622f2db9ee696f9d" dependencies = [ "addr2line", "cc", @@ -1359,6 +1384,12 @@ dependencies = [ "rustc-demangle", ] +[[package]] +name = "base16ct" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4c7f02d4ea65f2c1853089ffd8d2787bdbc63de2f0d29dedbcf8ccdfa0ccd4cf" + [[package]] name = "base64" version = "0.13.1" @@ -1412,9 +1443,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.4.2" +version = "2.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ed570934406eb16438a4e976b1b4500774099c13b8cb96eec99f620f05090ddf" +checksum = "cf4b9d6a944f767f8e5e0db018570623c85f3d925ac718db4e06d0187adb21c1" dependencies = [ "serde", ] @@ -1453,9 +1484,9 @@ dependencies = [ [[package]] name = "bstr" -version = "1.9.0" +version = "1.9.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c48f0051a4b4c5e0b6d365cd04af53aeaa209e3cc15ec2cdb69e73cc87fbd0dc" +checksum = "05efc5cfd9110c8416e471df0e96702d58690178e206e61b7173706673c93706" dependencies = [ "memchr", ] @@ -1530,9 +1561,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.15.1" +version = "3.15.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c764d619ca78fccbf3069b37bd7af92577f044bb15236036662d79b6559f25b7" +checksum = "7ff69b9dd49fd426c69a0db9fc04dd934cdb6645ff000864d98f7e2af8830eaa" [[package]] name = "byteorder" @@ -1542,9 
+1573,9 @@ checksum = "1fd0f2584146f6f2ef48085050886acf353beff7305ebd1ae69500e27c67f64b" [[package]] name = "bytes" -version = "1.5.0" +version = "1.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2bd12c1caf447e69cd4528f47f94d203fd2582878ecb9e9465484c4148a8223" +checksum = "514de17de45fdb8dc022b1a7975556c53c86f9f0aa5f534b98977b171857c2c9" dependencies = [ "serde", ] @@ -1638,9 +1669,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.0.86" +version = "1.0.90" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f9fa1897e4325be0d68d48df6aa1a71ac2ed4d27723887e7754192705350730" +checksum = "8cd6604a82acf3039f1144f54b8eb34e91ffba622051189e71b781822d5ee1f5" [[package]] name = "cdn-namespace-auth-user-remove" @@ -2026,14 +2057,14 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] name = "chrono" -version = "0.4.34" +version = "0.4.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bc015644b92d5890fab7489e49d21f879d5c990186827d42ec511919404f38b" +checksum = "8a0d04d43504c61aa6c7531f1871dd0d418d91130162063b789da00fd7057a5e" dependencies = [ "android-tzdata", "iana-time-zone", @@ -2041,7 +2072,7 @@ dependencies = [ "num-traits", "serde", "wasm-bindgen", - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -2289,6 +2320,205 @@ dependencies = [ "rivet-runtime", ] +[[package]] +name = "cloudflare" +version = "0.10.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0778f99ea7ad39b49b758eb418da7117b93232a5f6a09f9b79a094b77ac88cc2" +dependencies = [ + "anyhow", + "async-trait", + "base64 0.13.1", + "cfg-if", + "chrono", + "http 0.2.12", + "percent-encoding", + "reqwest", + "serde", + "serde_json", + "serde_qs", + "serde_with", + "url", + "uuid", +] + +[[package]] +name = "cluster-datacenter-get" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + 
"prost 0.10.4", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-datacenter-list" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "prost 0.10.4", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-datacenter-location-get" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "ip-info", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-datacenter-resolve-for-name-id" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "prost 0.10.4", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-datacenter-topology-get" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "lazy_static", + "nomad-util", + "nomad_client", + "prost 0.10.4", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-default-update" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "cluster-datacenter-get", + "cluster-datacenter-list", + "cluster-get", + "prost 0.10.4", + "reqwest", + "rivet-connection", + "rivet-operation", + "rivet-pools", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", + "uuid", +] + +[[package]] +name = "cluster-gc" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "cluster-datacenter-get", + "rivet-connection", + "rivet-health-checks", + "rivet-metrics", + "rivet-operation", + "rivet-runtime", + "rivet-util-cluster", + "sqlx", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "cluster-get" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "prost 0.10.4", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-server-get" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "prost 0.10.4", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-server-list" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "prost 0.10.4", + 
"rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-server-resolve-for-ip" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "prost 0.10.4", + "rivet-operation", + "sqlx", +] + +[[package]] +name = "cluster-worker" +version = "0.0.1" +dependencies = [ + "anyhow", + "chirp-client", + "chirp-worker", + "chrono", + "cloudflare", + "cluster-datacenter-get", + "cluster-datacenter-list", + "cluster-datacenter-topology-get", + "include_dir", + "indoc", + "lazy_static", + "linode-instance-type-get", + "linode-server-destroy", + "linode-server-provision", + "maplit", + "nomad-util", + "nomad_client", + "rivet-convert", + "rivet-health-checks", + "rivet-metrics", + "rivet-runtime", + "rivet-util-cluster", + "s3-util", + "serde_yaml", + "sqlx", + "ssh2", + "thiserror", + "token-create", +] + [[package]] name = "combine" version = "4.6.6" @@ -2415,9 +2645,9 @@ dependencies = [ [[package]] name = "crossbeam-channel" -version = "0.5.11" +version = "0.5.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "176dc175b78f56c0f321911d9c8eb2b77a78a4860b9c19db83835fea1a46649b" +checksum = "ab3db02a9c5b5121e1e42fbdb1aeb65f5e02624cc58c43f2884c6ccac0b82f95" dependencies = [ "crossbeam-utils", ] @@ -2437,6 +2667,18 @@ version = "0.8.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "248e3bacc7dc6baa3b21e405ee045c3047101a49145e7e9eca583ab4c2ca5345" +[[package]] +name = "crypto-bigint" +version = "0.5.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0dc92fb57ca44df6db8059111ab3af99a63d5d0f8375d9972e319a379c6bab76" +dependencies = [ + "generic-array", + "rand_core", + "subtle", + "zeroize", +] + [[package]] name = "crypto-common" version = "0.1.6" @@ -2454,7 +2696,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ac574ff4d437a7b5ad237ef331c17ccca63c46479e5b5453eb8e10bb99a759fe" dependencies = [ "csv-core", - "itoa 1.0.10", + "itoa 
1.0.11", "ryu", "serde", ] @@ -2501,7 +2743,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -2536,9 +2778,9 @@ dependencies = [ [[package]] name = "darling" -version = "0.20.6" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c376d08ea6aa96aafe61237c7200d1241cb177b7d3a542d791f2d118e9cbb955" +checksum = "54e36fcd13ed84ffdfda6f5be89b31287cbb80c439841fe69e04841435464391" dependencies = [ "darling_core", "darling_macro", @@ -2546,27 +2788,27 @@ dependencies = [ [[package]] name = "darling_core" -version = "0.20.6" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33043dcd19068b8192064c704b3f83eb464f91f1ff527b44a4e2b08d9cdb8855" +checksum = "9c2cf1c23a687a1feeb728783b993c4e1ad83d99f351801977dd809b48d0a70f" dependencies = [ "fnv", "ident_case", "proc-macro2", "quote", "strsim", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] name = "darling_macro" -version = "0.20.6" +version = "0.20.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5a91391accf613803c2a9bf9abccdbaa07c54b4244a5b64883f9c3c137c86be" +checksum = "a668eda54683121533a393014d8692171709ff57a7d61f187b6e782719f8933f" dependencies = [ "darling_core", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -2579,7 +2821,7 @@ dependencies = [ "hashbrown 0.14.3", "lock_api", "once_cell", - "parking_lot_core", + "parking_lot_core 0.9.9", ] [[package]] @@ -2604,9 +2846,9 @@ dependencies = [ [[package]] name = "der" -version = "0.7.8" +version = "0.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fffa369a668c8af7dbf8b5e56c9f744fbd399949ed171606040001947de40b1c" +checksum = "f55bf8e7b65898637379c1b74eb1551107c8294ed26d855ceb9fd1a09cfc9bc0" dependencies = [ "const-oid", "pem-rfc7468", @@ -2641,6 +2883,20 @@ version = "0.15.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" checksum = "1aaf95b3e5c8f23aa320147307562d361db0ae0d51242340f558153b4eb2439b" +[[package]] +name = "ecdsa" +version = "0.16.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ee27f32b5c5292967d2d4a9d7f1e0b0aed2c15daded5a60300e4abb9d8020bca" +dependencies = [ + "der", + "digest", + "elliptic-curve", + "rfc6979", + "signature", + "spki", +] + [[package]] name = "ed25519" version = "2.2.3" @@ -2672,6 +2928,25 @@ dependencies = [ "serde", ] +[[package]] +name = "elliptic-curve" +version = "0.13.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b5e6043086bf7973472e0c7dff2142ea0b680d30e18d9cc40f267efbf222bd47" +dependencies = [ + "base16ct", + "crypto-bigint", + "digest", + "ff", + "generic-array", + "group", + "pkcs8", + "rand_core", + "sec1", + "subtle", + "zeroize", +] + [[package]] name = "email-address-parser" version = "1.0.3" @@ -2753,7 +3028,7 @@ dependencies = [ "heck 0.4.1", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -2805,7 +3080,7 @@ dependencies = [ "chirp-client", "chirp-worker", "chrono", - "http 0.2.11", + "http 0.2.12", "prost 0.10.4", "reqwest", "rivet-operation", @@ -3016,9 +3291,9 @@ dependencies = [ [[package]] name = "fastrand" -version = "2.0.1" +version = "2.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "25cbce373ec4653f1a01a31e8a5e5ec0c622dc27ff9c4e6606eefef5cbbed4a5" +checksum = "658bd65b1cf4c852a3cc96f18a8ce7b5640f6b703f905c7d74532294c2a63984" [[package]] name = "fcm" @@ -3034,11 +3309,21 @@ dependencies = [ "serde_json", ] +[[package]] +name = "ff" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ded41244b729663b1e574f1b4fb731469f69f79c17667b5d776b16cda0479449" +dependencies = [ + "rand_core", + "subtle", +] + [[package]] name = "fiat-crypto" -version = "0.2.6" +version = "0.2.7" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "1676f435fc1dadde4d03e43f5d62b259e1ce5f40bd4ffb21db2b42ebe59c1382" +checksum = "c007b1ae3abe1cb6f85a16305acd418b7ca6343b953633fee2b76d8f108b830f" [[package]] name = "finl_unicode" @@ -3109,7 +3394,7 @@ version = "0.1.0" dependencies = [ "gray_matter", "hashbrown 0.12.3", - "http 0.2.11", + "http 0.2.12", "indoc", "lazy_static", "serde", @@ -3171,7 +3456,7 @@ checksum = "1d930c203dd0b6ff06e0201a4a2fe9149b43c684fd4420555b26d21b1a02956f" dependencies = [ "futures-core", "lock_api", - "parking_lot", + "parking_lot 0.12.1", ] [[package]] @@ -3188,7 +3473,7 @@ checksum = "87750cf4b7a4c0625b1529e4c543c2182106e4dedc60a2a6455e00d212c489ac" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -3205,9 +3490,9 @@ checksum = "38d84fa142264698cdce1a9f9172cf383a0c82de1bddcf3092901442c4097004" [[package]] name = "futures-timer" -version = "3.0.2" +version = "3.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e64b03909df88034c26dc1547e8970b91f98bdb65165d6a4e9110d94263dbb2c" +checksum = "f288b0a4f20f9a56b5d1da57e2227c661b7b16168e2f72365f57b63326e29b24" [[package]] name = "futures-util" @@ -3673,6 +3958,7 @@ checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a" dependencies = [ "typenum", "version_check", + "zeroize", ] [[package]] @@ -3703,7 +3989,7 @@ name = "global-error" version = "0.1.5" dependencies = [ "formatted-error", - "http 0.2.11", + "http 0.2.12", "serde", "serde_json", "thiserror", @@ -3722,7 +4008,7 @@ dependencies = [ "futures-timer", "no-std-compat", "nonzero_ext", - "parking_lot", + "parking_lot 0.12.1", "portable-atomic", "quanta", "rand", @@ -3742,19 +4028,30 @@ dependencies = [ "yaml-rust", ] +[[package]] +name = "group" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f0f9ef7462f7c099f518d754361858f86d8a07af53ba9af0fe635bbccb151a63" +dependencies = [ + 
"ff", + "rand_core", + "subtle", +] + [[package]] name = "h2" -version = "0.3.24" +version = "0.3.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb2c4422095b67ee78da96fbb51a4cc413b3b25883c7717ff7ca1ab31022c9c9" +checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" dependencies = [ "bytes", "fnv", "futures-core", "futures-sink", "futures-util", - "http 0.2.11", - "indexmap 2.2.3", + "http 0.2.12", + "indexmap 2.2.6", "slab", "tokio", "tokio-util 0.7.10", @@ -3776,7 +4073,7 @@ version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "290f1a1d9242c78d09ce40a5e87e7554ee637af1351968159f4952f028f75604" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.11", "allocator-api2", ] @@ -3811,7 +4108,7 @@ dependencies = [ "base64 0.21.7", "bytes", "headers-core", - "http 0.2.11", + "http 0.2.12", "httpdate", "mime", "sha1", @@ -3823,7 +4120,7 @@ version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7f66481bfee273957b1f20485a4ff3362987f85b2c236580d81b4eb7a326429" dependencies = [ - "http 0.2.11", + "http 0.2.12", ] [[package]] @@ -3846,9 +4143,9 @@ dependencies = [ [[package]] name = "hermit-abi" -version = "0.3.6" +version = "0.3.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd5256b483761cd23699d0da46cc6fd2ee3be420bbe6d020ae4a091e70b7e9fd" +checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hex" @@ -3896,24 +4193,24 @@ dependencies = [ [[package]] name = "http" -version = "0.2.11" +version = "0.2.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8947b1a6fad4393052c7ba1f4cd97bed3e953a95c79c92ad9b051a04611d9fbb" +checksum = "601cbb57e577e2f5ef5be8e7b83f0f63994f25aa94d673e54a92d5c516d101f1" dependencies = [ "bytes", "fnv", - "itoa 1.0.10", + "itoa 1.0.11", ] [[package]] name = "http" -version = "1.0.0" +version = "1.1.0" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "b32afd38673a8016f7c9ae69e5af41a58f81b1d31689040f2f1959594ce194ea" +checksum = "21b9ddb458710bc376481b842f5da65cdf31522de232c1ca8146abce2a358258" dependencies = [ "bytes", "fnv", - "itoa 1.0.10", + "itoa 1.0.11", ] [[package]] @@ -3923,7 +4220,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7ceab25649e9960c0311ea418d17bee82c0dcec1bd053b5f9a66e265a693bed2" dependencies = [ "bytes", - "http 0.2.11", + "http 0.2.12", "pin-project-lite", ] @@ -3956,13 +4253,13 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body", "httparse", "httpdate", - "itoa 1.0.10", + "itoa 1.0.11", "pin-project-lite", - "socket2 0.5.5", + "socket2 0.5.6", "tokio", "tower-service", "tracing", @@ -3992,7 +4289,7 @@ version = "0.23.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1788965e61b367cd03a62950836d5cd41560c3577d90e40e0819373194d1661c" dependencies = [ - "http 0.2.11", + "http 0.2.12", "hyper", "log", "rustls 0.20.9", @@ -4009,7 +4306,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec3efd23720e2049821a693cbc7e65ea87c72f1c58ff2f9522ff332b1491e590" dependencies = [ "futures-util", - "http 0.2.11", + "http 0.2.12", "hyper", "rustls 0.21.10", "tokio", @@ -4150,6 +4447,25 @@ dependencies = [ "unicode-normalization", ] +[[package]] +name = "include_dir" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "18762faeff7122e89e0857b02f7ce6fcc0d101d5e9ad2ad7846cc01d61b7f19e" +dependencies = [ + "include_dir_macros", +] + +[[package]] +name = "include_dir_macros" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b139284b5cf57ecfa712bcc66950bb635b31aff41c188e8a4cfc758eca374a3f" +dependencies = [ + "proc-macro2", + "quote", +] + [[package]] name = "indexmap" version = "1.9.3" @@ -4163,9 +4479,9 @@ 
dependencies = [ [[package]] name = "indexmap" -version = "2.2.3" +version = "2.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "233cf39063f058ea2caae4091bf4a3ef70a653afbc026f5c4a4135d114e3c177" +checksum = "168fb715dda47215e360912c096649d23d58bf392ac62f73919e831745e40f26" dependencies = [ "equivalent", "hashbrown 0.14.3", @@ -4216,7 +4532,7 @@ version = "0.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b58db92f96b720de98181bbbe63c831e87005ab460c1bf306eb2622b4707997f" dependencies = [ - "socket2 0.5.5", + "socket2 0.5.6", "widestring", "windows-sys 0.48.0", "winreg", @@ -4227,6 +4543,9 @@ name = "ipnet" version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8f518f335dce6725a761382244631d86cf0ccb2863413590b31338feb467f9c3" +dependencies = [ + "serde", +] [[package]] name = "itertools" @@ -4254,9 +4573,9 @@ checksum = "b71991ff56294aa922b450139ee08b3bfc70982c6b2c7562771375cf73542dd4" [[package]] name = "itoa" -version = "1.0.10" +version = "1.0.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b1a46d1a171d865aa5f83f92695765caa047a9b4cbae2cbf37dbd613a793fd4c" +checksum = "49f1f14873335454500d59611f1cf4a4b0f786f9ac11f4312a78e4cf2566695b" [[package]] name = "job-gc" @@ -4346,7 +4665,6 @@ dependencies = [ "indoc", "job-run-get", "lazy_static", - "prost 0.10.4", "reqwest", "rivet-operation", "rivet-util-job", @@ -4354,33 +4672,6 @@ dependencies = [ "serde_urlencoded", ] -[[package]] -name = "job-run-nomad-monitor" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "chrono", - "futures-util", - "indoc", - "lazy_static", - "nomad-client", - "nomad-util", - "prost 0.10.4", - "rivet-connection", - "rivet-health-checks", - "rivet-metrics", - "rivet-operation", - "rivet-pools", - "rivet-runtime", - "rivet-util-job", - "serde", - "serde_json", - "tokio", - "tracing", - "tracing-subscriber", -] - [[package]] name = 
"job-run-worker" version = "0.0.1" @@ -4397,6 +4688,7 @@ dependencies = [ "lazy_static", "nomad-client", "nomad-util", + "nomad_client", "rand", "region-get", "reqwest", @@ -4415,9 +4707,9 @@ dependencies = [ [[package]] name = "js-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "406cda4b368d531c842222cf9d2600a9a4acce8d29423695379c6868a143a9ee" +checksum = "29c15563dc2726973df627357ce0c9ddddbea194836909d655df6a75d2cf296d" dependencies = [ "wasm-bindgen", ] @@ -4563,12 +4855,115 @@ dependencies = [ "vcpkg", ] -[[package]] -name = "linked-hash-map" -version = "0.5.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" - +[[package]] +name = "libssh2-sys" +version = "0.3.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2dc8a030b787e2119a731f1951d6a773e2280c660f8ec4b0f5e1505a386e71ee" +dependencies = [ + "cc", + "libc", + "libz-sys", + "openssl-sys", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "libz-sys" +version = "1.1.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5e143b5e666b2695d28f6bca6497720813f699c9602dd7f5cac91008b8ada7f9" +dependencies = [ + "cc", + "libc", + "pkg-config", + "vcpkg", +] + +[[package]] +name = "linked-hash-map" +version = "0.5.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0717cef1bc8b636c6e1c1bbdefc09e6322da8a9321966e8928ef80d20f7f770f" + +[[package]] +name = "linode-gc" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "chrono", + "reqwest", + "rivet-connection", + "rivet-health-checks", + "rivet-metrics", + "rivet-operation", + "rivet-runtime", + "rivet-util-linode", + "serde", + "serde_json", + "sqlx", + "tokio", + "tracing", + "tracing-subscriber", +] + +[[package]] +name = "linode-instance-type-get" +version = "0.0.1" +dependencies = [ + 
"chirp-client", + "chirp-worker", + "rivet-operation", + "rivet-util-cluster", + "rivet-util-linode", + "sqlx", +] + +[[package]] +name = "linode-server-destroy" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "linode-server-provision", + "reqwest", + "rivet-operation", + "rivet-util-cluster", + "rivet-util-linode", + "sqlx", +] + +[[package]] +name = "linode-server-provision" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "linode-server-destroy", + "reqwest", + "rivet-operation", + "rivet-util-cluster", + "rivet-util-linode", + "sqlx", +] + +[[package]] +name = "linode-worker" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "rivet-convert", + "rivet-health-checks", + "rivet-metrics", + "rivet-runtime", + "rivet-util-cluster", + "rivet-util-linode", + "sqlx", +] + [[package]] name = "linux-raw-sys" version = "0.4.13" @@ -4704,9 +5099,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.20" +version = "0.4.21" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" +checksum = "90ed8c1e510134f979dbc4f070f87d4313098b704861a105fe34231c70a3901c" [[package]] name = "lru-cache" @@ -4776,9 +5171,9 @@ dependencies = [ [[package]] name = "memchr" -version = "2.7.1" +version = "2.7.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "523dc4f511e55ab87b694dc30d0f820d60906ef06413f93d4d7a1385599cc149" +checksum = "6c8640c5d730cb13ebd907d8d04b52f55ac9a2eec55b440c8892f40d56c76c1d" [[package]] name = "mime" @@ -4813,9 +5208,9 @@ dependencies = [ [[package]] name = "mio" -version = "0.8.10" +version = "0.8.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f3d0b296e374a4e6f3c7b0a1f5a51d748a0d34c85e7dc48fc3fa9a87657fe09" +checksum = "a4a650543ca06a924e8b371db273b2756685faae30f8487da1b56505a8f78b0c" dependencies = [ "libc", "wasi", @@ -4970,6 
+5365,7 @@ dependencies = [ "rivet-util-job", "rivet-util-mm", "s3-util", + "sqlx", "tier-list", "upload-get", ] @@ -5237,7 +5633,7 @@ dependencies = [ "game-namespace-version-set", "game-version-get", "heck 0.3.3", - "http 0.2.11", + "http 0.2.12", "job-run-get", "lazy_static", "maplit", @@ -5314,7 +5710,6 @@ dependencies = [ "prost 0.10.4", "rivet-operation", "sqlx", - "unzip-n", ] [[package]] @@ -5414,11 +5809,13 @@ dependencies = [ "cf-custom-hostname-worker", "chirp-client", "cloud-worker", + "cluster-worker", "external-worker", "game-user-worker", "job-log-worker", "job-run-worker", "kv-worker", + "linode-worker", "mm-worker", "module-worker", "push-notification-worker", @@ -5464,17 +5861,6 @@ dependencies = [ "tempfile", ] -[[package]] -name = "nix" -version = "0.27.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2eb04e9c688eff1c89d72b407f168cf79bb9e867a9d3323ed6c01519eb9cc053" -dependencies = [ - "bitflags 2.4.2", - "cfg-if", - "libc", -] - [[package]] name = "nkeys" version = "0.3.2" @@ -5520,6 +5906,33 @@ dependencies = [ "url", ] +[[package]] +name = "nomad-monitor" +version = "0.0.1" +dependencies = [ + "chirp-client", + "chirp-worker", + "chrono", + "futures-util", + "indoc", + "lazy_static", + "nomad-util", + "nomad_client", + "prost 0.10.4", + "rivet-connection", + "rivet-health-checks", + "rivet-metrics", + "rivet-operation", + "rivet-pools", + "rivet-runtime", + "rivet-util-job", + "serde", + "serde_json", + "tokio", + "tracing", + "tracing-subscriber", +] + [[package]] name = "nomad-util" version = "0.1.0" @@ -5528,6 +5941,7 @@ dependencies = [ "bytes", "futures-util", "nomad-client", + "nomad_client", "reqwest", "rivet-pools", "serde", @@ -5537,6 +5951,18 @@ dependencies = [ "tracing", ] +[[package]] +name = "nomad_client" +version = "1.1.4" +source = "git+https://github.com/rivet-gg/nomad-client?rev=abb66bf0c30c7ff5b0c695dae952481c33e538b5#abb66bf0c30c7ff5b0c695dae952481c33e538b5" +dependencies = [ + "reqwest", 
+ "serde", + "serde_derive", + "serde_json", + "url", +] + [[package]] name = "nonzero_ext" version = "0.3.0" @@ -5669,7 +6095,7 @@ version = "0.10.64" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "95a0481286a310808298130d22dd1fef0fa571e05a8f44ec801801e84b216b1f" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "cfg-if", "foreign-types", "libc", @@ -5686,7 +6112,7 @@ checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -5697,9 +6123,9 @@ checksum = "ff011a302c396a5197692431fc1948019154afc178baf7d8e37367442a4601cf" [[package]] name = "openssl-sys" -version = "0.9.100" +version = "0.9.102" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae94056a791d0e1217d18b6cbdccb02c61e3054fc69893607f4067e3bb0b1fd1" +checksum = "c597637d56fbc83893a35eb0dd04b2b8e7a50c91e64e9493e398b5df4fb45fa2" dependencies = [ "cc", "libc", @@ -5719,6 +6145,55 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p256" +version = "0.13.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c9863ad85fa8f4460f9c48cb909d38a0d689dba1f6f6988a5e3e0d31071bcd4b" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + +[[package]] +name = "p384" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70786f51bcc69f6a4c0360e063a4cac5419ef7c5cd5b3c99ad70f3be5ba79209" +dependencies = [ + "ecdsa", + "elliptic-curve", + "primeorder", + "sha2", +] + +[[package]] +name = "p521" +version = "0.13.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0fc9e2161f1f215afdfce23677034ae137bbd45016a880c2eb3ba8eb95f085b2" +dependencies = [ + "base16ct", + "ecdsa", + "elliptic-curve", + 
"primeorder", + "rand_core", + "sha2", +] + +[[package]] +name = "parking_lot" +version = "0.11.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7d17b78036a60663b797adeaee46f5c9dfebb86948d1255007a1d6be0271ff99" +dependencies = [ + "instant", + "lock_api", + "parking_lot_core 0.8.6", +] + [[package]] name = "parking_lot" version = "0.12.1" @@ -5726,7 +6201,21 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3742b2c103b9f06bc9fff0a37ff4912935851bee6d36f3c02bcc755bcfec228f" dependencies = [ "lock_api", - "parking_lot_core", + "parking_lot_core 0.9.9", +] + +[[package]] +name = "parking_lot_core" +version = "0.8.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "60a2cfe6f0ad2bfc16aefa463b497d5c7a5ecd44a23efa72aa342d90177356dc" +dependencies = [ + "cfg-if", + "instant", + "libc", + "redox_syscall 0.2.16", + "smallvec", + "winapi", ] [[package]] @@ -5737,7 +6226,7 @@ checksum = "4c42a9226546d68acdd9c0a280d17ce19bfe27a46bf68784e4066115788d008e" dependencies = [ "cfg-if", "libc", - "redox_syscall", + "redox_syscall 0.4.1", "smallvec", "windows-targets 0.48.5", ] @@ -5784,9 +6273,9 @@ dependencies = [ [[package]] name = "pest" -version = "2.7.7" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "219c0dcc30b6a27553f9cc242972b67f75b60eb0db71f0b5462f38b058c41546" +checksum = "311fb059dee1a7b802f036316d790138c613a4e8b180c822e3925a662e9f0c95" dependencies = [ "memchr", "thiserror", @@ -5795,9 +6284,9 @@ dependencies = [ [[package]] name = "pest_derive" -version = "2.7.7" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22e1288dbd7786462961e69bfd4df7848c1e37e8b74303dbdab82c3a9cdd2809" +checksum = "f73541b156d32197eecda1a4014d7f868fd2bcb3c550d5386087cfba442bf69c" dependencies = [ "pest", "pest_generator", @@ -5805,22 +6294,22 @@ dependencies = [ [[package]] name = "pest_generator" -version = 
"2.7.7" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1381c29a877c6d34b8c176e734f35d7f7f5b3adaefe940cb4d1bb7af94678e2e" +checksum = "c35eeed0a3fab112f75165fdc026b3913f4183133f19b49be773ac9ea966e8bd" dependencies = [ "pest", "pest_meta", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] name = "pest_meta" -version = "2.7.7" +version = "2.7.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0934d6907f148c22a3acbda520c7eed243ad7487a30f51f6ce52b58b7077a8a" +checksum = "2adbf29bb9776f28caece835398781ab24435585fe0d4dc1374a61db5accedca" dependencies = [ "once_cell", "pest", @@ -5834,34 +6323,34 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e1d3afd2628e69da2be385eb6f2fd57c8ac7977ceeff6dc166ff1657b0e386a9" dependencies = [ "fixedbitset", - "indexmap 2.2.3", + "indexmap 2.2.6", ] [[package]] name = "pin-project" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0302c4a0442c456bd56f841aee5c3bfd17967563f6fadc9ceb9f9c23cf3807e0" +checksum = "b6bf43b791c5b9e34c3d182969b4abb522f9343702850a2e57f460d00d09b4b3" dependencies = [ "pin-project-internal", ] [[package]] name = "pin-project-internal" -version = "1.1.4" +version = "1.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "266c042b60c9c76b8d53061e52b2e0d1116abc57cefc8c5cd671619a56ac3690" +checksum = "2f38a4412a78282e09a2cf38d195ea5420d15ba0602cb375210efbc877243965" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] name = "pin-project-lite" -version = "0.2.13" +version = "0.2.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8afb450f006bf6385ca15ef45d71d2288452bc3683ce2e2cacc0d18e4be60b58" +checksum = "bda66fc9667c18cb2758a2ac84d1167245054bcf85d5d1aaa6923f45801bdd02" [[package]] name = "pin-utils" @@ -5898,9 +6387,9 @@ checksum = 
"d231b230927b5e4ad203db57bbcbee2802f6bce620b1e4a9024a07d94e2907ec" [[package]] name = "platforms" -version = "3.3.0" +version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "626dec3cac7cc0e1577a2ec3fc496277ec2baa084bebad95bb6fdbfae235f84c" +checksum = "db23d408679286588f4d4644f965003d056e3dd5abcaaa938116871d7ce2fee7" [[package]] name = "portable-atomic" @@ -5941,6 +6430,15 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "primeorder" +version = "0.13.6" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "353e1ca18966c16d9deb1c69278edbc5f194139612772bd9537af60ac231e1e6" +dependencies = [ + "elliptic-curve", +] + [[package]] name = "proc-macro-error" version = "1.0.4" @@ -5967,9 +6465,9 @@ dependencies = [ [[package]] name = "proc-macro2" -version = "1.0.78" +version = "1.0.79" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e2422ad645d89c99f8f3e6b88a9fdeca7fabeac836b1002371c4367c8f984aae" +checksum = "e835ff2298f5721608eb1a980ecaee1aef2c132bf95ecc026a11b7bf3c01c02e" dependencies = [ "unicode-ident", ] @@ -5996,7 +6494,7 @@ dependencies = [ "fnv", "lazy_static", "memchr", - "parking_lot", + "parking_lot 0.12.1", "protobuf", "thiserror", ] @@ -6110,9 +6608,9 @@ dependencies = [ [[package]] name = "quanta" -version = "0.12.2" +version = "0.12.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9ca0b7bac0b97248c40bb77288fc52029cf1459c0461ea1b05ee32ccf011de2c" +checksum = "8e5167a477619228a0b284fac2674e3c388cba90631d7b7de620e6f1fcd08da5" dependencies = [ "crossbeam-utils", "libc", @@ -6183,7 +6681,7 @@ version = "11.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9d86a7c4638d42c44551f4791a20e687dbb4c3de1f33c43dd71e355cd429def1" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", ] [[package]] @@ -6197,7 +6695,7 @@ dependencies = [ "combine", "futures", "futures-util", - "itoa 1.0.10", + "itoa 
1.0.11", "native-tls", "percent-encoding", "pin-project-lite", @@ -6220,6 +6718,15 @@ dependencies = [ "regex", ] +[[package]] +name = "redox_syscall" +version = "0.2.16" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "fb5a58c1855b4b6819d59012155603f0b22ad30cad752600aadfcb695265519a" +dependencies = [ + "bitflags 1.3.2", +] + [[package]] name = "redox_syscall" version = "0.4.1" @@ -6231,14 +6738,14 @@ dependencies = [ [[package]] name = "regex" -version = "1.10.3" +version = "1.10.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b62dbe01f0b06f9d8dc7d49e05a0785f153b00b2c227856282f671e0318c9b15" +checksum = "c117dbdfde9c8308975b6a18d71f3f385c89461f7b3fb054288ecf2a2058ba4c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.5", - "regex-syntax 0.8.2", + "regex-automata 0.4.6", + "regex-syntax 0.8.3", ] [[package]] @@ -6252,13 +6759,13 @@ dependencies = [ [[package]] name = "regex-automata" -version = "0.4.5" +version = "0.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5bb987efffd3c6d0d8f5f89510bb458559eab11e4f869acb20bf845e016259cd" +checksum = "86b83b8b9847f9bf95ef68afb0b8e6cdb80f498442f5179a29fad448fcc1eaea" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.2", + "regex-syntax 0.8.3", ] [[package]] @@ -6269,20 +6776,9 @@ checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" [[package]] name = "regex-syntax" -version = "0.8.2" +version = "0.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c08c74e62047bb2de4ff487b251e4a92e24f48745648451635cec7d591162d9f" - -[[package]] -name = "region-config-get" -version = "0.0.1" -dependencies = [ - "chirp-client", - "chirp-worker", - "rivet-operation", - "serde", - "sqlx", -] +checksum = "adad44e29e4c806119491a7f06f03de4d1af22c3a680dd47f1e6e179439d1f56" [[package]] name = "region-get" @@ -6290,9 +6786,10 @@ version = "0.0.1" dependencies = [ 
"chirp-client", "chirp-worker", + "cluster-datacenter-get", + "cluster-datacenter-location-get", "faker-region", "prost 0.10.4", - "region-config-get", "rivet-operation", "sqlx", ] @@ -6303,9 +6800,9 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", + "cluster-datacenter-list", "faker-region", "prost 0.10.4", - "region-config-get", "rivet-operation", "sqlx", ] @@ -6331,9 +6828,10 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", + "cluster-datacenter-get", + "cluster-datacenter-list", "faker-region", "prost 0.10.4", - "region-config-get", "region-get", "rivet-operation", "sqlx", @@ -6341,9 +6839,9 @@ dependencies = [ [[package]] name = "reqwest" -version = "0.11.24" +version = "0.11.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c6920094eb85afde5e4a138be3f2de8bbdf28000f0029e72c45025a56b042251" +checksum = "dd67538700a17451e7cba03ac727fb961abb7607553461627b97de0b89cf4a62" dependencies = [ "base64 0.21.7", "bytes", @@ -6351,7 +6849,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body", "hyper", "hyper-rustls 0.24.2", @@ -6396,6 +6894,16 @@ dependencies = [ "quick-error", ] +[[package]] +name = "rfc6979" +version = "0.4.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8dd2a808d456c4a54e300a23e9f5a67e122c3024119acbfd73e3bf664491cb2" +dependencies = [ + "hmac", + "subtle", +] + [[package]] name = "ring" version = "0.16.20" @@ -6448,7 +6956,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6499,7 +7007,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6595,7 +7103,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6632,7 +7140,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 
0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6645,7 +7153,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6667,7 +7175,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6680,7 +7188,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6735,7 +7243,7 @@ version = "0.1.0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -6770,7 +7278,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6792,7 +7300,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6818,7 +7326,7 @@ dependencies = [ "aws-smithy-json", "aws-smithy-types 0.41.0", "bytes", - "http 0.2.11", + "http 0.2.12", "tower", ] @@ -6841,6 +7349,7 @@ dependencies = [ "chrono", "futures-util", "global-error", + "ipnet", "lazy_static", "rand", "regex", @@ -6873,6 +7382,16 @@ dependencies = [ name = "rivet-util-cdn" version = "0.1.0" +[[package]] +name = "rivet-util-cluster" +version = "0.1.0" +dependencies = [ + "rivet-util", + "tokio", + "types", + "uuid", +] + [[package]] name = "rivet-util-env" version = "0.1.0" @@ -6903,6 +7422,19 @@ dependencies = [ name = "rivet-util-kv" version = "0.1.0" +[[package]] +name = "rivet-util-linode" +version = "0.1.0" +dependencies = [ + "chrono", + "rand", + "reqwest", + "rivet-operation", + "serde", + "serde_json", + "ssh-key", +] + [[package]] name = "rivet-util-macros" version = "0.1.0" @@ -6919,7 +7451,7 @@ dependencies = [ "bit-vec", "chirp-client", "heck 0.3.3", - "http 0.2.11", + "http 0.2.12", "ip-info", "mm-lobby-list-for-user-id", "region-get", @@ -6974,6 +7506,7 @@ dependencies = [ "pkcs1", "pkcs8", "rand_core", + "sha2", "signature", 
"spki", "subtle", @@ -6997,11 +7530,11 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.31" +version = "0.38.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ea3e1a662af26cd7a3ba09c0297a31af215563ecf42817c98df621387f4e949" +checksum = "65e04861e65f21776e67888bfbea442b3642beaa0138fdb1dd7a84a52dffdb89" dependencies = [ - "bitflags 2.4.2", + "bitflags 2.5.0", "errno", "libc", "linux-raw-sys", @@ -7108,7 +7641,7 @@ dependencies = [ "aws-smithy-async 0.52.0", "aws-smithy-http 0.52.0", "aws-smithy-types 0.52.0", - "http 0.2.11", + "http 0.2.12", "thiserror", "tokio", "tracing", @@ -7171,11 +7704,25 @@ dependencies = [ "syn 1.0.109", ] +[[package]] +name = "sec1" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d3e97a565f76233a6003f9f5c54be1d9c5bdfa3eccfb189469f11ec4901c47dc" +dependencies = [ + "base16ct", + "der", + "generic-array", + "pkcs8", + "subtle", + "zeroize", +] + [[package]] name = "security-framework" -version = "2.9.2" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05b64fb303737d99b81884b2c63433e9ae28abebe5eb5045dcdd175dc2ecf4de" +checksum = "770452e37cad93e0a50d5abc3990d2bc351c36d0328f86cefec2f2fb206eaef6" dependencies = [ "bitflags 1.3.2", "core-foundation", @@ -7186,9 +7733,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.9.1" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e932934257d3b408ed8f30db49d85ea163bfe74961f017f405b025af298f0c7a" +checksum = "41f3cc463c0ef97e11c3461a9d3787412d30e8e7eb907c79180c4a57bf7c04ef" dependencies = [ "core-foundation-sys", "libc", @@ -7226,7 +7773,7 @@ checksum = "7eb0b34b42edc17f6b7cac84a52a1c5f0e1bb2227e997ca9011ea3dd34e8610b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -7242,22 +7789,33 @@ dependencies = [ [[package]] name = "serde_json" -version = 
"1.0.114" +version = "1.0.115" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5f09b1bd632ef549eaa9f60a1f8de742bdbc698e6cee2095fc84dde5f549ae0" +checksum = "12dc5c46daa8e9fdf4f5e71b6cf9a53f2487da0e86e55808e2d35539666497dd" dependencies = [ - "itoa 1.0.10", + "itoa 1.0.11", "ryu", "serde", ] [[package]] name = "serde_nanos" -version = "0.1.3" +version = "0.1.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a93142f0367a4cc53ae0fead1bcda39e85beccfad3dcd717656cacab94b12985" +dependencies = [ + "serde", +] + +[[package]] +name = "serde_qs" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae801b7733ca8d6a2b580debe99f67f36826a0f5b8a36055dc6bc40f8d6bc71" +checksum = "8cac3f1e2ca2fe333923a1ae72caca910b98ed0630bb35ef6f8c8517d6e81afa" dependencies = [ + "percent-encoding", "serde", + "thiserror", ] [[package]] @@ -7268,7 +7826,7 @@ checksum = "0b2e6b945e9d3df726b65d6ee24060aff8e3533d431f677a9695db04eff9dfdb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -7287,7 +7845,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d3491c14715ca2294c4d6a88f15e84739788c1d030eed8c110436aafdaa2f3fd" dependencies = [ "form_urlencoded", - "itoa 1.0.10", + "itoa 1.0.11", "ryu", "serde", ] @@ -7317,7 +7875,20 @@ dependencies = [ "darling", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", +] + +[[package]] +name = "serde_yaml" +version = "0.9.34+deprecated" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "6a8b1a1a2ebf674015cc02edccce75287f1a0130d394307b36743c2f5d504b47" +dependencies = [ + "indexmap 2.2.6", + "itoa 1.0.11", + "ryu", + "serde", + "unsafe-libyaml", ] [[package]] @@ -7420,9 +7991,9 @@ dependencies = [ [[package]] name = "smallvec" -version = "1.13.1" +version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"e6ecd384b10a64542d77071bd64bd7b231f4ed5940fba55e98c3de13824cf3d7" +checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" [[package]] name = "socket2" @@ -7436,12 +8007,12 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.5" +version = "0.5.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7b5fac59a5cb5dd637972e5fca70daf0523c9067fcdc4842f053dae04a18f8e9" +checksum = "05ffd9c0a93b7543e062e759284fcf5f5e3b098501104bfbdde4d404db792871" dependencies = [ "libc", - "windows-sys 0.48.0", + "windows-sys 0.52.0", ] [[package]] @@ -7491,9 +8062,9 @@ dependencies = [ [[package]] name = "sqlx" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dba03c279da73694ef99763320dea58b51095dfe87d001b1d4b5fe78ba8763cf" +checksum = "c9a2ccff1a000a5a59cd33da541d9f2fdcd9e6e8229cc200565942bff36d0aaa" dependencies = [ "sqlx-core", "sqlx-macros", @@ -7504,18 +8075,17 @@ dependencies = [ [[package]] name = "sqlx-core" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d84b0a3c3739e220d94b3239fd69fb1f74bc36e16643423bd99de3b43c21bfbd" +checksum = "24ba59a9342a3d9bab6c56c118be528b27c9b60e490080e9711a04dccac83ef6" dependencies = [ - "ahash 0.8.9", + "ahash 0.8.11", "atoi", "bit-vec", "byteorder", "bytes", "crc", "crossbeam-queue", - "dotenvy", "either", "event-listener", "futures-channel", @@ -7525,7 +8095,7 @@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap 2.2.3", + "indexmap 2.2.6", "log", "memchr", "native-tls", @@ -7547,9 +8117,9 @@ dependencies = [ [[package]] name = "sqlx-macros" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89961c00dc4d7dffb7aee214964b065072bff69e36ddb9e2c107541f75e4f2a5" +checksum = "4ea40e2345eb2faa9e1e5e326db8c34711317d2b5e08d0d5741619048a803127" dependencies = [ "proc-macro2", "quote", @@ 
-7560,11 +8130,10 @@ dependencies = [ [[package]] name = "sqlx-macros-core" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d0bd4519486723648186a08785143599760f7cc81c52334a55d6a83ea1e20841" +checksum = "5833ef53aaa16d860e92123292f1f6a3d53c34ba8b1969f152ef1a7bb803f3c8" dependencies = [ - "atomic-write-file", "dotenvy", "either", "heck 0.4.1", @@ -7587,13 +8156,13 @@ dependencies = [ [[package]] name = "sqlx-mysql" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e37195395df71fd068f6e2082247891bc11e3289624bbc776a0cdfa1ca7f1ea4" +checksum = "1ed31390216d20e538e447a7a9b959e06ed9fc51c37b514b46eb758016ecd418" dependencies = [ "atoi", "base64 0.21.7", - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", "bytes", "crc", @@ -7608,7 +8177,7 @@ dependencies = [ "hex", "hkdf", "hmac", - "itoa 1.0.10", + "itoa 1.0.11", "log", "md-5", "memchr", @@ -7630,14 +8199,14 @@ dependencies = [ [[package]] name = "sqlx-postgres" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d6ac0ac3b7ccd10cc96c7ab29791a7dd236bd94021f31eec7ba3d46a74aa1c24" +checksum = "7c824eb80b894f926f89a0b9da0c7f435d27cdd35b8c655b114e58223918577e" dependencies = [ "atoi", "base64 0.21.7", "bit-vec", - "bitflags 2.4.2", + "bitflags 2.5.0", "byteorder", "crc", "dotenvy", @@ -7650,7 +8219,7 @@ dependencies = [ "hkdf", "hmac", "home", - "itoa 1.0.10", + "itoa 1.0.11", "log", "md-5", "memchr", @@ -7658,7 +8227,6 @@ dependencies = [ "rand", "serde", "serde_json", - "sha1", "sha2", "smallvec", "sqlx-core", @@ -7671,9 +8239,9 @@ dependencies = [ [[package]] name = "sqlx-sqlite" -version = "0.7.3" +version = "0.7.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "210976b7d948c7ba9fced8ca835b11cbb2d677c59c79de41ac0d397e14547490" +checksum = 
"b244ef0a8414da0bed4bb1910426e890b19e5e9bccc27ada6b797d05c55ae0aa" dependencies = [ "atoi", "flume", @@ -7693,6 +8261,59 @@ dependencies = [ "uuid", ] +[[package]] +name = "ssh-cipher" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "caac132742f0d33c3af65bfcde7f6aa8f62f0e991d80db99149eb9d44708784f" +dependencies = [ + "cipher", + "ssh-encoding", +] + +[[package]] +name = "ssh-encoding" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "eb9242b9ef4108a78e8cd1a2c98e193ef372437f8c22be363075233321dd4a15" +dependencies = [ + "base64ct", + "pem-rfc7468", + "sha2", +] + +[[package]] +name = "ssh-key" +version = "0.6.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3b71299a724c8d84956caaf8fc3b3ea57c3587fe2d0b800cd0dc1f3599905d7e" +dependencies = [ + "p256", + "p384", + "p521", + "rand_core", + "rsa", + "sec1", + "sha2", + "signature", + "ssh-cipher", + "ssh-encoding", + "subtle", + "zeroize", +] + +[[package]] +name = "ssh2" +version = "0.9.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e7fe461910559f6d5604c3731d00d2aafc4a83d1665922e280f42f9a168d5455" +dependencies = [ + "bitflags 1.3.2", + "libc", + "libssh2-sys", + "parking_lot 0.11.2", +] + [[package]] name = "static_assertions" version = "1.1.0" @@ -7754,7 +8375,7 @@ dependencies = [ "proc-macro2", "quote", "rustversion", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -7776,9 +8397,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.50" +version = "2.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74f1bdc9872430ce9b75da68329d1c1746faf50ffac5f19e02b71e37ff881ffb" +checksum = "44cfb93f38070beee36b3fef7d4f5a16f27751d94b187b666a5cc5e9b0d30687" dependencies = [ "proc-macro2", "quote", @@ -8057,34 +8678,34 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.10.0" +version = "3.10.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "a365e8cd18e44762ef95d87f284f4b5cd04107fec2ff3052bd6a3e6069669e67" +checksum = "85b77fafb263dd9d05cbeac119526425676db3784113aa9295c88498cbf8bff1" dependencies = [ "cfg-if", - "fastrand 2.0.1", + "fastrand 2.0.2", "rustix", "windows-sys 0.52.0", ] [[package]] name = "thiserror" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e45bcbe8ed29775f228095caf2cd67af7a4ccf756ebff23a306bf3e8b47b24b" +checksum = "03468839009160513471e86a034bb2c5c0e4baae3b43f79ffc55c4a5427b3297" dependencies = [ "thiserror-impl", ] [[package]] name = "thiserror-impl" -version = "1.0.57" +version = "1.0.58" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a953cb265bef375dae3de6663da4d3804eee9682ea80d8e2542529b73c531c81" +checksum = "c61f3ba182994efc43764a46c018c347bc492c79f024e705f46567b418f6d4f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -8103,8 +8724,12 @@ version = "0.0.1" dependencies = [ "chirp-client", "chirp-worker", + "cluster-datacenter-get", + "cluster-datacenter-list", + "linode-instance-type-get", "prost 0.10.4", "rivet-operation", + "rivet-util-cluster", ] [[package]] @@ -8114,7 +8739,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c8248b6521bb14bc45b4067159b9b6ad792e2d6d754d6c41fb50e29fefe38749" dependencies = [ "deranged", - "itoa 1.0.10", + "itoa 1.0.11", "num-conv", "powerfmt", "serde", @@ -8210,19 +8835,19 @@ dependencies = [ [[package]] name = "tokio" -version = "1.36.0" +version = "1.37.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61285f6515fa018fb2d1e46eb21223fff441ee8db5d0f1435e8ab4f5cdb80931" +checksum = "1adbebffeca75fcfd058afa480fb6c0b81e165a0323f9c9d39c9697e37c46787" dependencies = [ "backtrace", "bytes", "libc", "mio", "num_cpus", - "parking_lot", + "parking_lot 0.12.1", 
"pin-project-lite", "signal-hook-registry", - "socket2 0.5.5", + "socket2 0.5.6", "tokio-macros", "tracing", "windows-sys 0.48.0", @@ -8246,7 +8871,7 @@ checksum = "5b8a1e28f2deaa14e508979454cb3a223b10b938b45af148bc0986de36f1923b" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -8304,9 +8929,9 @@ dependencies = [ [[package]] name = "tokio-stream" -version = "0.1.14" +version = "0.1.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "397c988d37662c7dda6d2208364a706264bf3d6138b11d436cbac0ad38832842" +checksum = "267ac89e0bec6e691e5813911606935d77c476ff49024f98abcea3e7b15e37af" dependencies = [ "futures-core", "pin-project-lite", @@ -8391,7 +9016,7 @@ version = "0.19.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1b5bb770da30e5cbfde35a2d7b9b8a2c4b8ef89548a7a6aeab5c9a576e3e7421" dependencies = [ - "indexmap 2.2.3", + "indexmap 2.2.6", "serde", "serde_spanned", "toml_datetime", @@ -8411,7 +9036,7 @@ dependencies = [ "futures-core", "futures-util", "h2", - "http 0.2.11", + "http 0.2.12", "http-body", "hyper", "hyper-timeout", @@ -8478,7 +9103,7 @@ checksum = "34704c8d6ebcbc939824180af020566b01a7c01f80641264eba0999f6c2b6be7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] @@ -8566,7 +9191,7 @@ dependencies = [ "ipconfig", "lru-cache", "once_cell", - "parking_lot", + "parking_lot 0.12.1", "rand", "resolv-conf", "smallvec", @@ -8591,7 +9216,7 @@ dependencies = [ "byteorder", "bytes", "data-encoding", - "http 1.0.0", + "http 1.1.0", "httparse", "log", "native-tls", @@ -8613,7 +9238,7 @@ name = "types" version = "0.1.0" dependencies = [ "chirp-types", - "http 0.2.11", + "http 0.2.12", "prost 0.10.4", "prost-types 0.10.1", "serde", @@ -8685,6 +9310,12 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "39ec24b3121d976906ece63c9daad25b85969647682eee313cb5779fdd69e14e" +[[package]] +name = 
"unsafe-libyaml" +version = "0.2.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "673aac59facbab8a9007c7f6108d11f63b603f7cabff99fabf650fea5c32b861" + [[package]] name = "untrusted" version = "0.7.1" @@ -9275,9 +9906,9 @@ checksum = "09cc8ee72d2a9becf2f2febe0205bbed8fc6615b7cb429ad062dc7b7ddd036a9" [[package]] name = "uuid" -version = "1.7.0" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f00cc9702ca12d3c81455259621e676d0f7251cec66a21e98fe2e9a37db93b2a" +checksum = "a183cf7feeba97b4dd1c0d46788634f6221d87fa961b305bed08c851829efcc0" dependencies = [ "getrandom", "serde", @@ -9316,11 +9947,17 @@ version = "0.11.0+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +[[package]] +name = "wasite" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" + [[package]] name = "wasm-bindgen" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c1e124130aee3fb58c5bdd6b639a0509486b0338acaaae0c84a5124b0f588b7f" +checksum = "4be2531df63900aeb2bca0daaaddec08491ee64ceecbee5076636a3b026795a8" dependencies = [ "cfg-if", "wasm-bindgen-macro", @@ -9328,24 +9965,24 @@ dependencies = [ [[package]] name = "wasm-bindgen-backend" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e7e1900c352b609c8488ad12639a311045f40a35491fb69ba8c12f758af70b" +checksum = "614d787b966d3989fa7bb98a654e369c762374fd3213d212cfc0251257e747da" dependencies = [ "bumpalo", "log", "once_cell", "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.41" +version = "0.4.42" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "877b9c3f61ceea0e56331985743b13f3d25c406a7098d45180fb5f09bc19ed97" +checksum = "76bc14366121efc8dbb487ab05bcc9d346b3b5ec0eaa76e46594cabbe51762c0" dependencies = [ "cfg-if", "js-sys", @@ -9355,9 +9992,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b30af9e2d358182b5c7449424f017eba305ed32a7010509ede96cdc4696c46ed" +checksum = "a1f8823de937b71b9460c0c34e25f3da88250760bec0ebac694b49997550d726" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -9365,22 +10002,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "642f325be6301eb8107a83d12a8ac6c1e1c54345a7ef1a9261962dfefda09e66" +checksum = "e94f17b526d0a461a191c78ea52bbce64071ed5c04c9ffe424dcb38f74171bb7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.91" +version = "0.2.92" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f186bd2dcf04330886ce82d6f33dd75a7bfcf69ecf5763b89fcde53b6ac9838" +checksum = "af190c94f2773fdb3729c55b007a722abb5384da03bc0986df4c289bf5567e96" [[package]] name = "wasm-streams" @@ -9397,9 +10034,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.68" +version = "0.3.69" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "96565907687f7aceb35bc5fc03770a8a0471d82e479f25832f54a0e3f4b28446" +checksum = "77afa9a11836342370f4817622a2f0f418b134426d91a82dfb48f532d2ec13ef" dependencies = [ "js-sys", "wasm-bindgen", @@ -9454,9 +10091,13 @@ dependencies = [ [[package]] name = "whoami" -version = "1.4.1" +version = "1.5.1" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "22fc3756b8a9133049b26c7f61ab35416c130e8c09b660f5b3958b446f52cc50" +checksum = "a44ab49fad634e88f55bf8f9bb3abd2f27d7204172a112c7c9987e01c1c94ea9" +dependencies = [ + "redox_syscall 0.4.1", + "wasite", +] [[package]] name = "widestring" @@ -9492,7 +10133,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "33ab640c8d7e35bf8ba19b884ba838ceb4fba93a4e8c65a9059d08afcfc683d9" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -9510,7 +10151,7 @@ version = "0.52.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "282be5f36a8ce781fad8c8ae18fa3f9beff57ec1b52cb3de0789201425d9a33d" dependencies = [ - "windows-targets 0.52.0", + "windows-targets 0.52.4", ] [[package]] @@ -9530,17 +10171,17 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a18201040b24831fbb9e4eb208f8892e1f50a37feb53cc7ff887feb8f50e7cd" +checksum = "7dd37b7e5ab9018759f893a1952c9420d060016fc19a472b4bb20d1bdd694d1b" dependencies = [ - "windows_aarch64_gnullvm 0.52.0", - "windows_aarch64_msvc 0.52.0", - "windows_i686_gnu 0.52.0", - "windows_i686_msvc 0.52.0", - "windows_x86_64_gnu 0.52.0", - "windows_x86_64_gnullvm 0.52.0", - "windows_x86_64_msvc 0.52.0", + "windows_aarch64_gnullvm 0.52.4", + "windows_aarch64_msvc 0.52.4", + "windows_i686_gnu 0.52.4", + "windows_i686_msvc 0.52.4", + "windows_x86_64_gnu 0.52.4", + "windows_x86_64_gnullvm 0.52.4", + "windows_x86_64_msvc 0.52.4", ] [[package]] @@ -9551,9 +10192,9 @@ checksum = "2b38e32f0abccf9987a4e3079dfb67dcd799fb61361e53e2882c3cbaf0d905d8" [[package]] name = "windows_aarch64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"cb7764e35d4db8a7921e09562a0304bf2f93e0a51bfccee0bd0bb0b666b015ea" +checksum = "bcf46cf4c365c6f2d1cc93ce535f2c8b244591df96ceee75d8e83deb70a9cac9" [[package]] name = "windows_aarch64_msvc" @@ -9563,9 +10204,9 @@ checksum = "dc35310971f3b2dbbf3f0690a219f40e2d9afcf64f9ab7cc1be722937c26b4bc" [[package]] name = "windows_aarch64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbaa0368d4f1d2aaefc55b6fcfee13f41544ddf36801e793edbbfd7d7df075ef" +checksum = "da9f259dd3bcf6990b55bffd094c4f7235817ba4ceebde8e6d11cd0c5633b675" [[package]] name = "windows_i686_gnu" @@ -9575,9 +10216,9 @@ checksum = "a75915e7def60c94dcef72200b9a8e58e5091744960da64ec734a6c6e9b3743e" [[package]] name = "windows_i686_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a28637cb1fa3560a16915793afb20081aba2c92ee8af57b4d5f28e4b3e7df313" +checksum = "b474d8268f99e0995f25b9f095bc7434632601028cf86590aea5c8a5cb7801d3" [[package]] name = "windows_i686_msvc" @@ -9587,9 +10228,9 @@ checksum = "8f55c233f70c4b27f66c523580f78f1004e8b5a8b659e05a4eb49d4166cca406" [[package]] name = "windows_i686_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ffe5e8e31046ce6230cc7215707b816e339ff4d4d67c65dffa206fd0f7aa7b9a" +checksum = "1515e9a29e5bed743cb4415a9ecf5dfca648ce85ee42e15873c3cd8610ff8e02" [[package]] name = "windows_x86_64_gnu" @@ -9599,9 +10240,9 @@ checksum = "53d40abd2583d23e4718fddf1ebec84dbff8381c07cae67ff7768bbf19c6718e" [[package]] name = "windows_x86_64_gnu" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d6fa32db2bc4a2f5abeacf2b69f7992cd09dca97498da74a151a3132c26befd" +checksum = "5eee091590e89cc02ad514ffe3ead9eb6b660aedca2183455434b93546371a03" [[package]] name = "windows_x86_64_gnullvm" @@ -9611,9 +10252,9 @@ checksum = 
"0b7b52767868a23d5bab768e390dc5f5c55825b6d30b86c844ff2dc7414044cc" [[package]] name = "windows_x86_64_gnullvm" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a657e1e9d3f514745a572a6846d3c7aa7dbe1658c056ed9c3344c4109a6949e" +checksum = "77ca79f2451b49fa9e2af39f0747fe999fcda4f5e241b2898624dca97a1f2177" [[package]] name = "windows_x86_64_msvc" @@ -9623,9 +10264,9 @@ checksum = "ed94fce61571a4006852b7389a063ab983c02eb1bb37b47f8272ce92d06d9538" [[package]] name = "windows_x86_64_msvc" -version = "0.52.0" +version = "0.52.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dff9641d1cd4be8d1a070daf9e3773c5f67e78b4d9d42263020c057706765c04" +checksum = "32b752e52a2da0ddfbdbcc6fceadfeede4c939ed16d13e648833a61dfb611ed8" [[package]] name = "winnow" @@ -9678,7 +10319,7 @@ checksum = "9ce1b18ccd8e73a9321186f97e46f9f04b778851177567b1975109d26a08d2a6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.50", + "syn 2.0.58", ] [[package]] diff --git a/svc/Cargo.toml b/svc/Cargo.toml index cc5674b226..5bad3bb65d 100644 --- a/svc/Cargo.toml +++ b/svc/Cargo.toml @@ -15,6 +15,7 @@ members = [ "api/module", "api/monolith", "api/portal", + "api/provision", "api/route", "api/status", "pkg/build/ops/create", @@ -58,6 +59,18 @@ members = [ "pkg/cloud/ops/version-get", "pkg/cloud/ops/version-publish", "pkg/cloud/worker", + "pkg/cluster/ops/datacenter-get", + "pkg/cluster/ops/datacenter-list", + "pkg/cluster/ops/datacenter-location-get", + "pkg/cluster/ops/datacenter-resolve-for-name-id", + "pkg/cluster/ops/datacenter-topology-get", + "pkg/cluster/ops/get", + "pkg/cluster/ops/server-get", + "pkg/cluster/ops/server-list", + "pkg/cluster/ops/server-resolve-for-ip", + "pkg/cluster/standalone/default-update", + "pkg/cluster/standalone/gc", + "pkg/cluster/worker", "pkg/custom-user-avatar/ops/list-for-game", "pkg/custom-user-avatar/ops/upload-complete", "pkg/debug/ops/email-res", @@ -120,7 +133,6 @@ 
members = [ "pkg/job-log/worker", "pkg/job-run/ops/get", "pkg/job-run/ops/metrics-log", - "pkg/job-run/standalone/nomad-monitor", "pkg/job-run/worker", "pkg/job/standalone/gc", "pkg/kv-config/ops/namespace-create", @@ -131,6 +143,11 @@ members = [ "pkg/kv/ops/get", "pkg/kv/ops/list", "pkg/kv/worker", + "pkg/linode/ops/instance-type-get", + "pkg/linode/ops/server-destroy", + "pkg/linode/ops/server-provision", + "pkg/linode/standalone/gc", + "pkg/linode/worker", "pkg/load-test/standalone/api-cloud", "pkg/load-test/standalone/mm", "pkg/load-test/standalone/mm-sustain", @@ -175,11 +192,11 @@ members = [ "pkg/module/ops/version-get", "pkg/module/worker", "pkg/monolith/standalone/worker", + "pkg/nomad/standalone/monitor", "pkg/nsfw/ops/image-score", "pkg/perf/ops/log-get", "pkg/profanity/ops/check", "pkg/push-notification/worker", - "pkg/region/ops/config-get", "pkg/region/ops/get", "pkg/region/ops/list", "pkg/region/ops/recommend", diff --git a/svc/api/admin/Cargo.toml b/svc/api/admin/Cargo.toml index 7107e809d0..10c777d26e 100644 --- a/svc/api/admin/Cargo.toml +++ b/svc/api/admin/Cargo.toml @@ -6,6 +6,7 @@ edition = "2021" license = "Apache-2.0" [dependencies] +rivet-convert = { path = "../../../lib/convert" } api-helper = { path = "../../../lib/api-helper/build" } async_once = "0.2" async-trait = "0.1" @@ -39,6 +40,8 @@ url = "2.2.2" uuid = { version = "1", features = ["v4"] } util-mm = { package = "rivet-util-mm", path = "../../pkg/mm/util" } +cluster-server-get = { path = "../../pkg/cluster/ops/server-get" } +cluster-server-list = { path = "../../pkg/cluster/ops/server-list" } token-create = { path = "../../pkg/token/ops/create" } [dev-dependencies] diff --git a/svc/api/admin/src/route/cluster.rs b/svc/api/admin/src/route/cluster.rs new file mode 100644 index 0000000000..06dcea96a4 --- /dev/null +++ b/svc/api/admin/src/route/cluster.rs @@ -0,0 +1,72 @@ +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; + +use proto::backend; +use rivet_api::models; +use 
rivet_convert::ApiInto; +use rivet_operation::prelude::*; +use serde::Deserialize; + +use crate::auth::Auth; + +// MARK: GET /server_ip +#[derive(Debug, Clone, Deserialize)] +pub struct ServerIpsQuery { + server_id: Option, + pool: Option, +} + +pub async fn server_ips( + ctx: Ctx, + watch_index: WatchIndexQuery, + query: ServerIpsQuery, +) -> GlobalResult { + if query.server_id.is_none() && query.pool.is_none() { + bail_with!( + API_BAD_QUERY, + error = "expected one of: `server_id`, `pool`" + ); + } + + let ips = match (query.server_id, query.pool) { + (Some(server_id), _) => { + let servers_res = op!([ctx] cluster_server_get { + server_ids: vec![server_id.into()], + }) + .await?; + let public_ip = servers_res + .servers + .first() + .and_then(|server| server.public_ip.clone()); + + public_ip.into_iter().collect::>() + } + (_, Some(pool)) => { + let pool_type = Some(ApiInto::::api_into(pool)); + + let cluster_id = util::env::default_cluster_id(); + let server_list_res = op!([ctx] cluster_server_list { + cluster_ids: vec![cluster_id.into()], + }) + .await?; + let cluster = unwrap!(server_list_res.clusters.first()); + + let servers_res = op!([ctx] cluster_server_get { + server_ids: cluster.server_ids.clone(), + }) + .await?; + + servers_res + .servers + .iter() + .filter(|server| { + backend::cluster::PoolType::from_i32(server.pool_type) == pool_type + }) + .filter_map(|server| server.public_ip.clone()) + .collect::>() + } + // Handled earlier + (None, None) => unreachable!(), + }; + + Ok(models::AdminClusterGetServerIpsResponse { ips }) +} diff --git a/svc/api/admin/src/route/mod.rs b/svc/api/admin/src/route/mod.rs index eea2c3876e..22b4eb4096 100644 --- a/svc/api/admin/src/route/mod.rs +++ b/svc/api/admin/src/route/mod.rs @@ -2,6 +2,7 @@ use api_helper::define_router; use hyper::{Body, Request, Response}; use rivet_api::models; +pub mod cluster; pub mod login; pub async fn handle( @@ -19,6 +20,12 @@ pub async fn handle( define_router! 
{ routes: { + "cluster" / "server_ips": { + GET: cluster::server_ips( + query: cluster::ServerIpsQuery, + ), + }, + "login": { POST: login::login( body: models::AdminLoginRequest, diff --git a/svc/api/cloud/Cargo.toml b/svc/api/cloud/Cargo.toml index 9093068299..7c6bea3eb3 100644 --- a/svc/api/cloud/Cargo.toml +++ b/svc/api/cloud/Cargo.toml @@ -63,6 +63,7 @@ cloud-namespace-token-development-create = { path = "../../pkg/cloud/ops/namespa cloud-namespace-token-public-create = { path = "../../pkg/cloud/ops/namespace-token-public-create" } cloud-version-get = { path = "../../pkg/cloud/ops/version-get" } cloud-version-publish = { path = "../../pkg/cloud/ops/version-publish" } +cluster-datacenter-list = { path = "../../pkg/cluster/ops/datacenter-list" } custom-user-avatar-list-for-game = { path = "../../pkg/custom-user-avatar/ops/list-for-game" } custom-user-avatar-upload-complete = { path = "../../pkg/custom-user-avatar/ops/upload-complete" } game-banner-upload-complete = { path = "../../pkg/game/ops/banner-upload-complete" } diff --git a/svc/api/cloud/src/route/tiers.rs b/svc/api/cloud/src/route/tiers.rs index 6613d4d77d..3c7669c81b 100644 --- a/svc/api/cloud/src/route/tiers.rs +++ b/svc/api/cloud/src/route/tiers.rs @@ -10,9 +10,14 @@ pub async fn list_tiers( ctx: Ctx, _watch_index: WatchIndexQuery, ) -> GlobalResult { - // TODO: fill in user regions. 
`region_ids` doesn't actually do anything for now so its not important + let datacenters_res = op!([ctx] cluster_datacenter_list { + cluster_ids: vec![util::env::default_cluster_id().into()], + }) + .await?; + let cluster = unwrap!(datacenters_res.clusters.first()); + let res = op!([ctx] tier_list { - region_ids: vec![Uuid::new_v4().into()], + region_ids: cluster.datacenter_ids.clone(), }) .await?; diff --git a/svc/api/identity/src/route/activities.rs b/svc/api/identity/src/route/activities.rs index 20f2eaef86..3e31e1b58d 100644 --- a/svc/api/identity/src/route/activities.rs +++ b/svc/api/identity/src/route/activities.rs @@ -142,7 +142,7 @@ pub async fn activities( } }, fetch::identity::users(ctx.op_ctx(), user_ids.clone()), - fetch::identity::presence_data(ctx.op_ctx(), current_user_id, user_ids, true), + fetch::identity::presence_data(ctx.op_ctx(), user_ids, true), fetch_recent_games(ctx.op_ctx(), current_user_id, &game_user), fetch_suggested_groups(ctx.op_ctx(), current_user_id), )?; diff --git a/svc/api/matchmaker/src/route/lobbies.rs b/svc/api/matchmaker/src/route/lobbies.rs index 32b7d18fcb..4481a40852 100644 --- a/svc/api/matchmaker/src/route/lobbies.rs +++ b/svc/api/matchmaker/src/route/lobbies.rs @@ -510,7 +510,7 @@ pub async fn list( .regions .iter() .map(|(region, recommend)| utils::build_region_openapi(region, recommend.as_ref())) - .collect(); + .collect::>>()?; let game_modes = meta .lobby_groups @@ -662,8 +662,10 @@ async fn fetch_lobby_list_meta( if let Some((lat, long)) = coords { let res = op!([ctx] region_recommend { region_ids: region_ids_proto.clone(), - latitude: Some(lat), - longitude: Some(long), + coords: Some(backend::net::Coordinates { + latitude: lat, + longitude: long, + }), ..Default::default() }) .await?; @@ -1218,9 +1220,11 @@ async fn resolve_region_ids( // Auto-select the closest region if let Some((lat, long)) = coords { let recommend_res = op!([ctx] region_recommend { - latitude: Some(lat), - longitude: Some(long), region_ids: 
enabled_region_ids, + coords: Some(backend::net::Coordinates { + latitude: lat, + longitude: long, + }), ..Default::default() }) .await?; diff --git a/svc/api/matchmaker/src/route/regions.rs b/svc/api/matchmaker/src/route/regions.rs index 5e8b16384a..c2dc3d3b29 100644 --- a/svc/api/matchmaker/src/route/regions.rs +++ b/svc/api/matchmaker/src/route/regions.rs @@ -1,4 +1,5 @@ use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use proto::backend; use rivet_matchmaker_server::models; use rivet_operation::prelude::*; use std::collections::HashSet; @@ -69,8 +70,10 @@ pub async fn list( if let Some((lat, long)) = coords { let res = op!([ctx] region_recommend { region_ids: enabled_region_ids.clone(), - latitude: Some(lat), - longitude: Some(long), + coords: Some(backend::net::Coordinates { + latitude: lat, + longitude: long, + }), ..Default::default() }) .await?; @@ -94,7 +97,7 @@ pub async fn list( None }; - Ok(utils::build_region(region, recommend)) + utils::build_region(region, recommend) }) .collect::>>()?; diff --git a/svc/api/matchmaker/src/utils.rs b/svc/api/matchmaker/src/utils.rs index f1baa4c0d5..14fe7a97d1 100644 --- a/svc/api/matchmaker/src/utils.rs +++ b/svc/api/matchmaker/src/utils.rs @@ -5,14 +5,16 @@ use rivet_operation::prelude::*; pub fn build_region( region: &backend::region::Region, recommend: Option<®ion::recommend::response::Region>, -) -> models::RegionInfo { - models::RegionInfo { +) -> GlobalResult { + let coords = unwrap_ref!(region.coords); + + Ok(models::RegionInfo { region_id: region.name_id.clone(), provider_display_name: region.provider_display_name.clone(), region_display_name: region.region_display_name.clone(), datacenter_coord: models::Coord { - latitude: region.latitude, - longitude: region.longitude, + latitude: coords.latitude, + longitude: coords.longitude, }, datacenter_distance_from_client: if let Some(recommend) = recommend { models::Distance { @@ -25,20 +27,22 @@ pub fn build_region( miles: 0.0, } }, - } + }) } pub fn 
build_region_openapi( region: &backend::region::Region, recommend: Option<®ion::recommend::response::Region>, -) -> rivet_api::models::MatchmakerRegionInfo { - rivet_api::models::MatchmakerRegionInfo { +) -> GlobalResult { + let coords = unwrap_ref!(region.coords); + + Ok(rivet_api::models::MatchmakerRegionInfo { region_id: region.name_id.clone(), provider_display_name: region.provider_display_name.clone(), region_display_name: region.region_display_name.clone(), datacenter_coord: Box::new(rivet_api::models::GeoCoord { - latitude: region.latitude, - longitude: region.longitude, + latitude: coords.latitude, + longitude: coords.longitude, }), datacenter_distance_from_client: Box::new(if let Some(recommend) = recommend { rivet_api::models::GeoDistance { @@ -51,5 +55,5 @@ pub fn build_region_openapi( miles: 0.0, } }), - } + }) } diff --git a/svc/api/matchmaker/tests/common.rs b/svc/api/matchmaker/tests/common.rs index f05f842925..30f42ef03f 100644 --- a/svc/api/matchmaker/tests/common.rs +++ b/svc/api/matchmaker/tests/common.rs @@ -439,8 +439,8 @@ pub async fn assert_lobby_state( { let p = ports.get("test-5051-tcp").unwrap(); assert!( - p.port.unwrap() >= util_job::consts::MIN_INGRESS_PORT_TCP as i32 - && p.port.unwrap() <= util_job::consts::MAX_INGRESS_PORT_TCP as i32 + p.port.unwrap() >= util::net::job::MIN_INGRESS_PORT_TCP as i32 + && p.port.unwrap() <= util::net::job::MAX_INGRESS_PORT_TCP as i32 ); assert!(!p.is_tls); } @@ -448,8 +448,8 @@ pub async fn assert_lobby_state( { let p = ports.get("test-5051-tls").unwrap(); assert!( - p.port.unwrap() >= util_job::consts::MIN_INGRESS_PORT_TCP as i32 - && p.port.unwrap() <= util_job::consts::MAX_INGRESS_PORT_TCP as i32 + p.port.unwrap() >= util::net::job::MIN_INGRESS_PORT_TCP as i32 + && p.port.unwrap() <= util::net::job::MAX_INGRESS_PORT_TCP as i32 ); assert!(p.is_tls); } @@ -457,8 +457,8 @@ pub async fn assert_lobby_state( { let p = ports.get("test-5052-udp").unwrap(); assert!( - p.port.unwrap() >= 
util_job::consts::MIN_INGRESS_PORT_UDP as i32 - && p.port.unwrap() <= util_job::consts::MAX_INGRESS_PORT_UDP as i32 + p.port.unwrap() >= util::net::job::MIN_INGRESS_PORT_UDP as i32 + && p.port.unwrap() <= util::net::job::MAX_INGRESS_PORT_UDP as i32 ); assert!(!p.is_tls); } @@ -605,8 +605,8 @@ pub async fn assert_lobby_state( // { // let p = ports.get("test-5051-tcp").unwrap(); // assert!( -// p.port().unwrap() >= util_job::consts::MIN_INGRESS_PORT_TCP as i32 -// && p.port().unwrap() <= util_job::consts::MAX_INGRESS_PORT_TCP as i32 +// p.port().unwrap() >= util::net::job::MIN_INGRESS_PORT_TCP as i32 +// && p.port().unwrap() <= util::net::job::MAX_INGRESS_PORT_TCP as i32 // ); // assert!(!p.is_tls().unwrap()); // } @@ -614,8 +614,8 @@ pub async fn assert_lobby_state( // { // let p = ports.get("test-5051-tls").unwrap(); // assert!( -// p.port().unwrap() >= util_job::consts::MIN_INGRESS_PORT_TCP as i32 -// && p.port().unwrap() <= util_job::consts::MAX_INGRESS_PORT_TCP as i32 +// p.port().unwrap() >= util::net::job::MIN_INGRESS_PORT_TCP as i32 +// && p.port().unwrap() <= util::net::job::MAX_INGRESS_PORT_TCP as i32 // ); // assert!(p.is_tls().unwrap()); // } @@ -623,8 +623,8 @@ pub async fn assert_lobby_state( // { // let p = ports.get("test-5052-udp").unwrap(); // assert!( -// p.port().unwrap() >= util_job::consts::MIN_INGRESS_PORT_UDP as i32 -// && p.port().unwrap() <= util_job::consts::MAX_INGRESS_PORT_UDP as i32 +// p.port().unwrap() >= util::net::job::MIN_INGRESS_PORT_UDP as i32 +// && p.port().unwrap() <= util::net::job::MAX_INGRESS_PORT_UDP as i32 // ); // assert!(!p.is_tls().unwrap()); // } diff --git a/svc/api/monolith/Cargo.toml b/svc/api/monolith/Cargo.toml index 40a107ecf3..b7ff224107 100644 --- a/svc/api/monolith/Cargo.toml +++ b/svc/api/monolith/Cargo.toml @@ -32,4 +32,5 @@ api-kv = { path = "../kv" } api-matchmaker = { path = "../matchmaker" } api-module = { path = "../module" } api-portal = { path = "../portal" } +api-provision = { path = "../provision" } 
api-status = { path = "../status" } diff --git a/svc/api/monolith/Service.toml b/svc/api/monolith/Service.toml index cf956340c1..9a66673aeb 100644 --- a/svc/api/monolith/Service.toml +++ b/svc/api/monolith/Service.toml @@ -73,6 +73,12 @@ path = "/v1" subdomain = "portal.api" add-path = "/portal" +[[api.router.mounts]] +deprecated = true +path = "/v1" +subdomain = "provision.api" +add-path = "/provision" + [[api.router.mounts]] deprecated = true path = "/v1" diff --git a/svc/api/monolith/src/route/mod.rs b/svc/api/monolith/src/route/mod.rs index 175fcd815a..5c74c4d930 100644 --- a/svc/api/monolith/src/route/mod.rs +++ b/svc/api/monolith/src/route/mod.rs @@ -62,6 +62,10 @@ define_router! { path: api_portal::route::Router, prefix: "portal" }, + { + path: api_provision::route::Router, + prefix: "provision" + }, { path: api_status::route::Router, prefix: "status" diff --git a/svc/api/provision/Cargo.toml b/svc/api/provision/Cargo.toml new file mode 100644 index 0000000000..e5b0f5ad7d --- /dev/null +++ b/svc/api/provision/Cargo.toml @@ -0,0 +1,36 @@ +[package] +name = "api-provision" +version = "0.0.1" +authors = ["Rivet Gaming, LLC "] +edition = "2018" +license = "Apache-2.0" + +[dependencies] +api-helper = { path = "../../../lib/api-helper/build" } +async-trait = "0.1" +chirp-client = { path = "../../../lib/chirp/client" } +rivet-operation = { path = "../../../lib/operation/core" } +chrono = "0.4" +http = "0.2" +hyper = { version = "0.14", features = ["server", "http1", "stream", "tcp"] } +lazy_static = "1.4" +prost = "0.10" +rivet-api = { path = "../../../sdks/full/rust" } +rivet-cache = { path = "../../../lib/cache/build" } +rivet-claims = { path = "../../../lib/claims" } +rivet-health-checks = { path = "../../../lib/health-checks" } +rivet-pools = { path = "../../../lib/pools" } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +thiserror = "1.0" +tokio = { version = "1.29" } +tracing = "0.1" +tracing-subscriber = { version = "0.3", 
default-features = false, features = ["fmt", "json", "ansi"] } +url = "2.2.2" +uuid = { version = "1", features = ["v4"] } +util-cluster = { package = "rivet-util-cluster", path = "../../pkg/cluster/util" } + +cluster-datacenter-get = { path = "../../pkg/cluster/ops/datacenter-get" } +cluster-server-get = { path = "../../pkg/cluster/ops/server-get" } +cluster-server-resolve-for-ip = { path = "../../pkg/cluster/ops/server-resolve-for-ip" } + diff --git a/svc/api/provision/Service.toml b/svc/api/provision/Service.toml new file mode 100644 index 0000000000..e57c40a9ef --- /dev/null +++ b/svc/api/provision/Service.toml @@ -0,0 +1,8 @@ +[service] +name = "api-provision" +essential = true + +[runtime] +kind = "rust" + +[api-routes] diff --git a/svc/api/provision/src/auth.rs b/svc/api/provision/src/auth.rs new file mode 100644 index 0000000000..fc3d2aa971 --- /dev/null +++ b/svc/api/provision/src/auth.rs @@ -0,0 +1,46 @@ +use api_helper::{ + auth::{ApiAuth, AuthRateLimitCtx}, + util::as_auth_expired, +}; +use proto::claims::Claims; +use rivet_claims::ClaimsDecode; +use rivet_operation::prelude::*; + +/// Information derived from the authentication middleware. +pub struct Auth { + claims: Option, +} + +#[async_trait] +impl ApiAuth for Auth { + async fn new( + api_token: Option, + rate_limit_ctx: AuthRateLimitCtx<'_>, + ) -> GlobalResult { + Self::rate_limit(rate_limit_ctx).await?; + + Ok(Auth { + claims: if let Some(api_token) = api_token { + Some(as_auth_expired(rivet_claims::decode(&api_token)?)?) 
+ } else { + None + }, + }) + } + + async fn rate_limit(_rate_limit_ctx: AuthRateLimitCtx<'_>) -> GlobalResult<()> { + Ok(()) + } +} + +impl Auth { + pub fn claims(&self) -> GlobalResult<&Claims> { + self.claims + .as_ref() + .ok_or_else(|| err_code!(API_UNAUTHORIZED, reason = "No bearer token provided.")) + } + + pub fn server(&self) -> GlobalResult { + self.claims()?.as_server() + } +} diff --git a/svc/api/provision/src/lib.rs b/svc/api/provision/src/lib.rs new file mode 100644 index 0000000000..eeaeaaafb7 --- /dev/null +++ b/svc/api/provision/src/lib.rs @@ -0,0 +1,2 @@ +pub mod auth; +pub mod route; diff --git a/svc/api/provision/src/main.rs b/svc/api/provision/src/main.rs new file mode 100644 index 0000000000..4e48ceab13 --- /dev/null +++ b/svc/api/provision/src/main.rs @@ -0,0 +1,5 @@ +use api_helper::start; + +fn main() { + start(api_provision::route::handle); +} diff --git a/svc/api/provision/src/route/mod.rs b/svc/api/provision/src/route/mod.rs new file mode 100644 index 0000000000..f83bc10c04 --- /dev/null +++ b/svc/api/provision/src/route/mod.rs @@ -0,0 +1,29 @@ +use std::net::Ipv4Addr; + +use api_helper::define_router; +use hyper::{Body, Request, Response}; + +pub mod servers; + +pub async fn handle( + shared_client: chirp_client::SharedClientHandle, + pools: rivet_pools::Pools, + cache: rivet_cache::Cache, + ray_id: uuid::Uuid, + request: Request, +) -> Result, http::Error> { + let response = Response::builder(); + + // Handle route + Router::handle(shared_client, pools, cache, ray_id, request, response).await +} + +define_router! 
{ + routes: { + "servers" / Ipv4Addr / "info": { + GET: servers::info( + internal_endpoint: true, + ), + }, + }, +} diff --git a/svc/api/provision/src/route/servers.rs b/svc/api/provision/src/route/servers.rs new file mode 100644 index 0000000000..b6047e7270 --- /dev/null +++ b/svc/api/provision/src/route/servers.rs @@ -0,0 +1,55 @@ +use std::net::Ipv4Addr; + +use api_helper::{anchor::WatchIndexQuery, ctx::Ctx}; +use proto::backend; +use rivet_api::models; +use rivet_operation::prelude::*; + +use crate::auth::Auth; + +// MARK: GET /servers/{}/info +pub async fn info( + ctx: Ctx, + public_ip: Ipv4Addr, + _watch_index: WatchIndexQuery, +) -> GlobalResult { + ctx.auth().server()?; + + // Find server based on public ip + let servers_res = op!([ctx] cluster_server_resolve_for_ip { + ips: vec![public_ip.to_string()], + }) + .await?; + let server = unwrap!(servers_res.servers.first(), "server not found"); + let server_id = unwrap!(server.server_id); + + // Get server info + let server_res = op!([ctx] cluster_server_get { + server_ids: vec![server_id], + }) + .await?; + let server = unwrap!(server_res.servers.first(), "server not found"); + + // Get datacenter info + let datacenter_id = unwrap!(server.datacenter_id); + let datacenter_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![datacenter_id], + }) + .await?; + let datacenter = unwrap!(datacenter_res.datacenters.first()); + + let pool_type = unwrap!(backend::cluster::PoolType::from_i32(server.pool_type)); + let name = util_cluster::server_name( + &datacenter.provider_datacenter_id, + pool_type, + server_id.as_uuid(), + ); + + Ok(models::ProvisionServersGetServerInfoResponse { + name, + server_id: server_id.as_uuid(), + datacenter_id: datacenter_id.as_uuid(), + cluster_id: unwrap_ref!(server.cluster_id).as_uuid(), + vlan_ip: unwrap_ref!(server.vlan_ip, "server should have vlan ip by now").clone(), + }) +} diff --git a/svc/api/provision/tests/basic.rs b/svc/api/provision/tests/basic.rs new file mode 100644 
index 0000000000..6c8ea4d0f2 --- /dev/null +++ b/svc/api/provision/tests/basic.rs @@ -0,0 +1 @@ +// TODO: diff --git a/svc/api/route/Cargo.toml b/svc/api/route/Cargo.toml index 17938e933f..cd1220f9f1 100644 --- a/svc/api/route/Cargo.toml +++ b/svc/api/route/Cargo.toml @@ -26,7 +26,6 @@ serde = { version = "1.0", features = ["derive"] } serde_json = "1.0" thiserror = "1.0" tokio = { version = "1.29" } -toml = "0.5" tracing = "0.1" tracing-subscriber = { version = "0.3", default-features = false, features = [ "fmt", @@ -38,8 +37,6 @@ util-cdn = { package = "rivet-util-cdn", path = "../../pkg/cdn/util" } util-job = { package = "rivet-util-job", path = "../../pkg/job/util" } uuid = { version = "1", features = ["v4"] } -region-resolve = { path = "../../pkg/region/ops/resolve" } - [dev-dependencies] rivet-connection = { path = "../../../lib/connection" } rivet-route = { path = "../../../lib/smithy-output/api-route/rust" } diff --git a/svc/api/route/src/route/traefik/game_guard.rs b/svc/api/route/src/route/traefik/game_guard.rs index e3b38ef905..e94bb79264 100644 --- a/svc/api/route/src/route/traefik/game_guard.rs +++ b/svc/api/route/src/route/traefik/game_guard.rs @@ -15,14 +15,14 @@ use crate::{auth::Auth, route::traefik}; #[serde(deny_unknown_fields)] pub struct ConfigQuery { token: String, - region: String, + datacenter: Uuid, } #[tracing::instrument(skip(ctx))] pub async fn config( ctx: Ctx, _watch_index: WatchIndexQuery, - ConfigQuery { token, region }: ConfigQuery, + ConfigQuery { token, datacenter }: ConfigQuery, ) -> GlobalResult { ensure_eq_with!( token, @@ -32,7 +32,7 @@ pub async fn config( ); // Fetch configs and catch any errors - let config = build_job(&ctx, ®ion).await?; + let config = build_job(&ctx, datacenter).await?; // tracing::info!( // http_services = ?config.http.services.len(), @@ -58,20 +58,12 @@ pub async fn config( #[tracing::instrument(skip(ctx))] pub async fn build_job( ctx: &Ctx, - region: &str, + region_id: Uuid, ) -> GlobalResult { let mut 
config = traefik::TraefikConfigResponse::default(); - // TODO: Cache this - // Determine the region from the query - let region_resolve_res = op!([ctx] region_resolve { - name_ids: vec![region.to_string()], - }) - .await?; - let region_id = unwrap_ref!(unwrap_ref!(region_resolve_res.regions.first()).region_id); - let redis_job = ctx.op_ctx().redis_job().await?; - let job_runs_fetch = fetch_job_runs(redis_job, region_id.as_uuid()).await?; + let job_runs_fetch = fetch_job_runs(redis_job, region_id).await?; config.http.middlewares.insert( "job-rate-limit".to_owned(), diff --git a/svc/pkg/cluster/db/cluster/Service.toml b/svc/pkg/cluster/db/cluster/Service.toml new file mode 100644 index 0000000000..ebd618b178 --- /dev/null +++ b/svc/pkg/cluster/db/cluster/Service.toml @@ -0,0 +1,7 @@ +[service] +name = "db-cluster" + +[runtime] +kind = "crdb" + +[database] diff --git a/svc/pkg/cluster/db/cluster/migrations/20231201000927_init.down.sql b/svc/pkg/cluster/db/cluster/migrations/20231201000927_init.down.sql new file mode 100644 index 0000000000..e69de29bb2 diff --git a/svc/pkg/cluster/db/cluster/migrations/20231201000927_init.up.sql b/svc/pkg/cluster/db/cluster/migrations/20231201000927_init.up.sql new file mode 100644 index 0000000000..e6394d5b2a --- /dev/null +++ b/svc/pkg/cluster/db/cluster/migrations/20231201000927_init.up.sql @@ -0,0 +1,85 @@ +CREATE TABLE clusters ( + cluster_id UUID PRIMARY KEY, + name_id TEXT NOT NULL, + owner_team_id UUID, + create_ts INT NOT NULL +); + +CREATE TABLE datacenters ( + datacenter_id UUID PRIMARY KEY, + cluster_id UUID NOT NULL REFERENCES clusters (cluster_id), + name_id TEXT NOT NULL, + display_name TEXT NOT NULL, + provider INT NOT NULL, + provider_datacenter_id TEXT NOT NULL, + provider_api_token TEXT, + pools BYTES NOT NULL, + build_delivery_method INT NOT NULL, + drain_timeout INT NOT NULL, + + UNIQUE (cluster_id, name_id), + INDEX (cluster_id) +); + +CREATE TABLE servers ( + server_id UUID PRIMARY KEY, + datacenter_id UUID NOT 
NULL, + cluster_id UUID NOT NULL REFERENCES clusters (cluster_id), + pool_type INT NOT NULL, + + -- Null until actual server is provisioned + provider_server_id TEXT, + provider_hardware TEXT, + vlan_ip TEXT, + network_idx INT, + public_ip TEXT, + + -- Null until nomad node successfully registers + nomad_node_id TEXT, + + create_ts INT NOT NULL, + nomad_join_ts INT, + -- Null if not draining + drain_ts INT, + -- When the server was marked to be deleted by rivet + cloud_destroy_ts INT, + taint_ts INT, + + -- Used when determining which server this ip belongs to + INDEX (public_ip) +); + +-- Stores data for destroying linode resources +CREATE TABLE linode_misc ( + server_id UUID PRIMARY KEY REFERENCES servers (server_id), + ssh_key_id INT NOT NULL, + linode_id INT, + firewall_id INT +); + +-- Stores data for destroying cloudflare resources +CREATE TABLE cloudflare_misc ( + server_id UUID PRIMARY KEY REFERENCES servers (server_id), + dns_record_id TEXT NOT NULL, + secondary_dns_record_id TEXT +); + +CREATE TABLE server_images ( + -- A string denoting what type of image this is (ex. 
"linode-us-southeast-job") + variant TEXT PRIMARY KEY, + create_ts INT NOT NULL, + image_id TEXT +); + +CREATE TABLE server_images_linode_misc ( + variant TEXT PRIMARY KEY, + ssh_key_id INT NOT NULL, + linode_id INT, + firewall_id INT, + disk_id INT, + public_ip TEXT, + image_id TEXT, + + INDEX (public_ip), + INDEX (image_id) +); diff --git a/svc/pkg/cluster/ops/datacenter-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-get/Cargo.toml new file mode 100644 index 0000000000..aa7d3bbbb8 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-get/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cluster-datacenter-get" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +prost = "0.10" +rivet-operation = { path = "../../../../../lib/operation/core" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-get/Service.toml b/svc/pkg/cluster/ops/datacenter-get/Service.toml new file mode 100644 index 0000000000..a0f9d3cb55 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-get/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-datacenter-get" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-get/src/lib.rs new file mode 100644 index 0000000000..cc414987e0 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-get/src/lib.rs @@ -0,0 +1,76 @@ +use std::convert::{TryFrom, TryInto}; + +use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Datacenter { + datacenter_id: Uuid, + cluster_id: Uuid, + name_id: String, + display_name: String, + provider: i64, + provider_datacenter_id: String, + pools: Vec, + build_delivery_method: i64, + drain_timeout: i64, +} + 
+impl TryFrom for backend::cluster::Datacenter { + type Error = GlobalError; + + fn try_from(value: Datacenter) -> GlobalResult { + let pools = cluster::msg::datacenter_create::Pools::decode(value.pools.as_slice())?.pools; + + Ok(backend::cluster::Datacenter { + datacenter_id: Some(value.datacenter_id.into()), + cluster_id: Some(value.cluster_id.into()), + name_id: value.name_id, + display_name: value.display_name, + provider: value.provider as i32, + provider_datacenter_id: value.provider_datacenter_id, + pools, + build_delivery_method: value.build_delivery_method as i32, + drain_timeout: value.drain_timeout as u64, + }) + } +} + +#[operation(name = "cluster-datacenter-get")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let datacenter_ids = ctx + .datacenter_ids + .iter() + .map(common::Uuid::as_uuid) + .collect::>(); + + let configs = sql_fetch_all!( + [ctx, Datacenter] + " + SELECT + datacenter_id, + cluster_id, + name_id, + display_name, + provider, + provider_datacenter_id, + provider_api_token, + pools, + build_delivery_method, + drain_timeout + FROM db_cluster.datacenters + WHERE datacenter_id = ANY($1) + ", + datacenter_ids, + ) + .await?; + + Ok(cluster::datacenter_get::Response { + datacenters: configs + .into_iter() + .map(TryInto::try_into) + .collect::>>()?, + }) +} diff --git a/svc/pkg/cluster/ops/datacenter-get/tests/integration.rs b/svc/pkg/cluster/ops/datacenter-get/tests/integration.rs new file mode 100644 index 0000000000..00d49be6a0 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-get/tests/integration.rs @@ -0,0 +1,46 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn empty(ctx: TestCtx) { + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let 
dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: Vec::new(), + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + let res = op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![datacenter_id.into()], + }) + .await + .unwrap(); + let datacenter = res.datacenters.first().expect("datacenter not found"); + + assert_eq!(datacenter_id, datacenter.datacenter_id.unwrap().as_uuid()); +} diff --git a/svc/pkg/cluster/ops/datacenter-list/Cargo.toml b/svc/pkg/cluster/ops/datacenter-list/Cargo.toml new file mode 100644 index 0000000000..544a88793d --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-list/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cluster-datacenter-list" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +prost = "0.10" +rivet-operation = { path = "../../../../../lib/operation/core" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-list/Service.toml b/svc/pkg/cluster/ops/datacenter-list/Service.toml new file mode 100644 index 0000000000..ebad6361d3 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-list/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-datacenter-list" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-list/src/lib.rs 
b/svc/pkg/cluster/ops/datacenter-list/src/lib.rs new file mode 100644 index 0000000000..674e76562c --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-list/src/lib.rs @@ -0,0 +1,62 @@ +use std::collections::HashMap; + +use proto::backend::pkg::*; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Datacenter { + cluster_id: Uuid, + datacenter_id: Uuid, +} + +#[operation(name = "cluster-datacenter-list")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let cluster_ids = ctx + .cluster_ids + .iter() + .map(common::Uuid::as_uuid) + .collect::>(); + + let datacenters = sql_fetch_all!( + [ctx, Datacenter] + " + SELECT + cluster_id, + datacenter_id + FROM db_cluster.datacenters + WHERE cluster_id = ANY($1) + ", + &cluster_ids + ) + .await?; + + // Fill in empty clusters + let mut dcs_by_cluster_id = cluster_ids + .iter() + .map(|cluster_id| (*cluster_id, Vec::new())) + .collect::>>(); + + for dc in datacenters { + dcs_by_cluster_id + .entry(dc.cluster_id) + .or_default() + .push(dc.datacenter_id); + } + + Ok(cluster::datacenter_list::Response { + clusters: dcs_by_cluster_id + .into_iter() + .map( + |(cluster_id, datacenter_ids)| cluster::datacenter_list::response::Cluster { + cluster_id: Some(cluster_id.into()), + datacenter_ids: datacenter_ids + .into_iter() + .map(Into::into) + .collect::>(), + }, + ) + .collect::>(), + }) +} diff --git a/svc/pkg/cluster/ops/datacenter-list/tests/integration.rs b/svc/pkg/cluster/ops/datacenter-list/tests/integration.rs new file mode 100644 index 0000000000..54c447e182 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-list/tests/integration.rs @@ -0,0 +1,50 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn empty(ctx: TestCtx) { + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: 
util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: Vec::new(), + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + let res = op!([ctx] cluster_datacenter_list { + cluster_ids: vec![cluster_id.into()], + }) + .await + .unwrap(); + let cluster = res.clusters.first().unwrap(); + + assert_eq!(1, cluster.datacenter_ids.len()); + assert_eq!( + datacenter_id, + cluster.datacenter_ids.first().unwrap().as_uuid(), + ); +} diff --git a/svc/pkg/region/ops/config-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml similarity index 82% rename from svc/pkg/region/ops/config-get/Cargo.toml rename to svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml index b899bce41f..3a307b2550 100644 --- a/svc/pkg/region/ops/config-get/Cargo.toml +++ b/svc/pkg/cluster/ops/datacenter-location-get/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "region-config-get" +name = "cluster-datacenter-location-get" version = "0.0.1" edition = "2021" authors = ["Rivet Gaming, LLC "] @@ -8,7 +8,8 @@ license = "Apache-2.0" [dependencies] chirp-client = { path = "../../../../../lib/chirp/client" } rivet-operation = { path = "../../../../../lib/operation/core" } -serde = { version = "1.0", features = ["derive"] } + +ip-info = { path = "../../../ip/ops/info" } [dependencies.sqlx] version = "0.7" diff --git a/svc/pkg/region/ops/config-get/Service.toml b/svc/pkg/cluster/ops/datacenter-location-get/Service.toml similarity index 53% rename from 
svc/pkg/region/ops/config-get/Service.toml rename to svc/pkg/cluster/ops/datacenter-location-get/Service.toml index 26bf93c9d7..f6c3656b99 100644 --- a/svc/pkg/region/ops/config-get/Service.toml +++ b/svc/pkg/cluster/ops/datacenter-location-get/Service.toml @@ -1,5 +1,5 @@ [service] -name = "region-config-get" +name = "cluster-datacenter-location-get" [runtime] kind = "rust" diff --git a/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs new file mode 100644 index 0000000000..151a165710 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-location-get/src/lib.rs @@ -0,0 +1,89 @@ +use std::collections::HashMap; + +use futures_util::{StreamExt, TryStreamExt}; +use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; + +#[operation(name = "cluster-datacenter-location-get")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let datacenter_ids = ctx + .datacenter_ids + .iter() + .map(common::Uuid::as_uuid) + .collect::>(); + + // Fetch the gg node public ip for each datacenter (there may be more than one, hence `DISTINCT`) + let gg_node_rows = sql_fetch_all!( + [ctx, (Uuid, Option,)] + " + SELECT DISTINCT + datacenter_id, public_ip + FROM db_cluster.servers + WHERE + datacenter_id = ANY($1) AND + pool_type = $2 + ", + &datacenter_ids, + backend::cluster::PoolType::Gg as i64, + ) + .await? 
+ .into_iter() + .filter_map(|(datacenter_id, public_ip)| public_ip.map(|ip| (datacenter_id, ip))); + + let coords_res = futures_util::stream::iter(gg_node_rows) + .map(|(datacenter_id, public_ip)| { + let ctx = ctx.base(); + + async move { + // Fetch IP info of GG node (this is cached inside `ip_info`) + let ip_info_res = op!([ctx] ip_info { + ip: public_ip, + provider: ip::info::Provider::IpInfoIo as i32, + }) + .await?; + + GlobalResult::Ok(( + datacenter_id, + ip_info_res + .ip_info + .as_ref() + .and_then(|info| info.coords.clone()), + )) + } + }) + .buffer_unordered(8) + .try_collect::>() + .await?; + + // Fill in default values + let mut datacenter_locations = datacenter_ids + .into_iter() + .map(|datacenter_id| { + ( + datacenter_id, + cluster::datacenter_location_get::response::Datacenter { + datacenter_id: Some(datacenter_id.into()), + coords: None, + }, + ) + }) + .collect::>(); + + // Insert coords + for (datacenter_id, coords) in coords_res { + let entry = datacenter_locations + .entry(datacenter_id) + .or_insert_with(|| cluster::datacenter_location_get::response::Datacenter { + datacenter_id: Some(datacenter_id.into()), + coords: None, + }); + + entry.coords = coords; + } + + Ok(cluster::datacenter_location_get::Response { + datacenters: datacenter_locations.into_values().collect::>(), + }) +} diff --git a/svc/pkg/region/ops/config-get/tests/integration.rs b/svc/pkg/cluster/ops/datacenter-location-get/tests/integration.rs similarity index 58% rename from svc/pkg/region/ops/config-get/tests/integration.rs rename to svc/pkg/cluster/ops/datacenter-location-get/tests/integration.rs index ef41c6dcbb..d7d641e21e 100644 --- a/svc/pkg/region/ops/config-get/tests/integration.rs +++ b/svc/pkg/cluster/ops/datacenter-location-get/tests/integration.rs @@ -2,5 +2,5 @@ use chirp_worker::prelude::*; #[worker_test] async fn basic(ctx: TestCtx) { - let _ = op!([ctx] region_config_get {}).await.unwrap(); + // TODO: } diff --git 
a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml new file mode 100644 index 0000000000..6d2da13280 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cluster-datacenter-resolve-for-name-id" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +prost = "0.10" +rivet-operation = { path = "../../../../../lib/operation/core" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml new file mode 100644 index 0000000000..aa845fc9af --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-datacenter-resolve-for-name-id" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs new file mode 100644 index 0000000000..95706c5148 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/src/lib.rs @@ -0,0 +1,41 @@ +use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Datacenter { + datacenter_id: Uuid, + name_id: String, +} + +#[operation(name = "cluster-datacenter-resolve-for-name-id")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); + + let datacenters = sql_fetch_all!( + [ctx, Datacenter] + " + SELECT + datacenter_id, + name_id + FROM db_cluster.datacenters + WHERE + cluster_id = $1 AND + name_id = ANY($2) + ", + 
&cluster_id, + &ctx.name_ids, + ) + .await? + .into_iter() + .map( + |dc| cluster::datacenter_resolve_for_name_id::response::Datacenter { + datacenter_id: Some(dc.datacenter_id.into()), + name_id: dc.name_id, + }, + ) + .collect::>(); + + Ok(cluster::datacenter_resolve_for_name_id::Response { datacenters }) +} diff --git a/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/tests/integration.rs b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/tests/integration.rs new file mode 100644 index 0000000000..6b32a72b0c --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-resolve-for-name-id/tests/integration.rs @@ -0,0 +1,52 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn empty(ctx: TestCtx) { + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + let dc_name_id = util::faker::ident(); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: dc_name_id.clone(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: Vec::new(), + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + let res = op!([ctx] cluster_datacenter_resolve_for_name_id { + cluster_id: Some(cluster_id.into()), + name_ids: vec![dc_name_id], + }) + .await + .unwrap(); + + let datacenter = res.datacenters.first().expect("datacenter not found"); + assert_eq!( + datacenter_id, + datacenter.datacenter_id.unwrap().as_uuid(), + "wrong datacenter 
returned" + ); +} diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml b/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml new file mode 100644 index 0000000000..eeba6022b4 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-topology-get/Cargo.toml @@ -0,0 +1,24 @@ +[package] +name = "cluster-datacenter-topology-get" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +lazy_static = "1.4" +nomad-util = { path = "../../../../../lib/nomad-util" } +prost = "0.10" +rivet-operation = { path = "../../../../../lib/operation/core" } + +[dependencies.nomad_client] +git = "https://github.com/rivet-gg/nomad-client" +rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/README.md b/svc/pkg/cluster/ops/datacenter-topology-get/README.md new file mode 100644 index 0000000000..b24df20683 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-topology-get/README.md @@ -0,0 +1,3 @@ +# datacenter-topology-get + +Fetch the nomad topology for all job servers in a datacenter diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml b/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml new file mode 100644 index 0000000000..3c31348cfd --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-topology-get/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-datacenter-topology-get" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs b/svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs new file mode 100644 index 0000000000..e5d4bfb0ee --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-topology-get/src/lib.rs @@ -0,0 +1,173 
@@ +use std::collections::HashMap; + +use nomad_client::apis::{allocations_api, configuration::Configuration, nodes_api}; +use proto::backend::pkg::*; +use rivet_operation::prelude::*; + +lazy_static::lazy_static! { + static ref NOMAD_CONFIG: Configuration = + nomad_util::new_config_from_env().unwrap(); +} + +#[derive(sqlx::FromRow)] +struct Server { + server_id: Uuid, + datacenter_id: Uuid, + nomad_node_id: String, +} + +#[operation(name = "cluster-datacenter-topology-get")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let datacenter_ids = ctx + .datacenter_ids + .iter() + .map(common::Uuid::as_uuid) + .collect::>(); + + let servers = sql_fetch_all!( + [ctx, Server] + " + SELECT + server_id, datacenter_id, nomad_node_id + FROM db_cluster.servers + WHERE + datacenter_id = ANY($1) AND + nomad_node_id IS NOT NULL AND + cloud_destroy_ts IS NULL AND + taint_ts IS NULL + ", + &datacenter_ids, + ) + .await?; + + // Fetch batch data from nomad + let (allocation_info, node_info) = tokio::try_join!( + async { + allocations_api::get_allocations( + &NOMAD_CONFIG, + None, + None, + None, + None, + None, + None, + None, + None, + None, + Some(true), + None, + ) + .await + .map_err(Into::::into) + }, + async { + nodes_api::get_nodes( + &NOMAD_CONFIG, + None, + None, + None, + None, + None, + None, + None, + None, + None, + Some(true), + ) + .await + .map_err(Into::::into) + }, + )?; + + // Fill in empty datacenters + let mut datacenters = datacenter_ids + .iter() + .map(|datacenter_id| { + ( + *datacenter_id, + cluster::datacenter_topology_get::response::Datacenter { + datacenter_id: Some((*datacenter_id).into()), + servers: Vec::new(), + }, + ) + }) + .collect::>(); + + for server in servers { + let mut usage = cluster::datacenter_topology_get::response::Stats { + cpu: 0, + memory: 0, + disk: 0, + }; + + // Aggregate all allocated resources for this node + for alloc in &allocation_info { + let alloc_node_id = unwrap_ref!(alloc.node_id); + + if 
alloc_node_id == &server.nomad_node_id { + let resources = unwrap_ref!(alloc.allocated_resources); + let shared_resources = unwrap_ref!(resources.shared); + + // Task states don't exist until a task starts + if let Some(task_states) = &alloc.task_states { + let tasks = unwrap_ref!(resources.tasks); + + for (task_name, task) in tasks { + let task_state = unwrap!(task_states.get(task_name)); + let state = unwrap_ref!(task_state.state); + + // Only count pending, running, or failed tasks + if state != "pending" && state != "running" && state != "failed" { + continue; + } + + let cpu = unwrap_ref!(task.cpu); + let memory = unwrap_ref!(task.memory); + + usage.cpu += unwrap!(cpu.cpu_shares) as u64; + usage.memory += unwrap!(memory.memory_mb) as u64; + } + } + + usage.disk += unwrap!(shared_resources.disk_mb) as u64; + } + } + + // Get node resource limits + let node = unwrap!( + node_info.iter().find(|node| node + .ID + .as_ref() + .map_or(false, |node_id| node_id == &server.nomad_node_id)), + format!("node not found {}", server.nomad_node_id) + ); + let resources = unwrap_ref!(node.node_resources); + let limits = cluster::datacenter_topology_get::response::Stats { + cpu: unwrap!(unwrap_ref!(resources.cpu).cpu_shares) as u64, + memory: unwrap!(unwrap_ref!(resources.memory).memory_mb) as u64, + disk: unwrap!(unwrap_ref!(resources.disk).disk_mb) as u64, + }; + + let datacenter = datacenters.entry(server.datacenter_id).or_insert_with(|| { + cluster::datacenter_topology_get::response::Datacenter { + datacenter_id: Some(server.datacenter_id.into()), + servers: Vec::new(), + } + }); + + datacenter + .servers + .push(cluster::datacenter_topology_get::response::Server { + server_id: Some(server.server_id.into()), + node_id: server.nomad_node_id, + usage: Some(usage), + limits: Some(limits), + }); + } + + Ok(cluster::datacenter_topology_get::Response { + datacenters: datacenters.into_values().collect::>(), + }) +} diff --git 
a/svc/pkg/cluster/ops/datacenter-topology-get/tests/integration.rs b/svc/pkg/cluster/ops/datacenter-topology-get/tests/integration.rs new file mode 100644 index 0000000000..c31c959da6 --- /dev/null +++ b/svc/pkg/cluster/ops/datacenter-topology-get/tests/integration.rs @@ -0,0 +1,10 @@ +use chirp_worker::prelude::*; + +#[worker_test] +async fn empty(ctx: TestCtx) { + op!([ctx] cluster_datacenter_topology_get { + datacenter_ids: vec![], + }) + .await + .unwrap(); +} diff --git a/svc/pkg/cluster/ops/get/Cargo.toml b/svc/pkg/cluster/ops/get/Cargo.toml new file mode 100644 index 0000000000..b7a9ccd9d2 --- /dev/null +++ b/svc/pkg/cluster/ops/get/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cluster-get" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +prost = "0.10" +rivet-operation = { path = "../../../../../lib/operation/core" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/get/Service.toml b/svc/pkg/cluster/ops/get/Service.toml new file mode 100644 index 0000000000..06f53f69b5 --- /dev/null +++ b/svc/pkg/cluster/ops/get/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-get" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/get/src/lib.rs b/svc/pkg/cluster/ops/get/src/lib.rs new file mode 100644 index 0000000000..e4892bc357 --- /dev/null +++ b/svc/pkg/cluster/ops/get/src/lib.rs @@ -0,0 +1,53 @@ +use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Cluster { + cluster_id: Uuid, + name_id: String, + owner_team_id: Option, + create_ts: i64, +} + +impl From for backend::cluster::Cluster { + fn from(value: Cluster) -> Self { + backend::cluster::Cluster { + cluster_id: Some(value.cluster_id.into()), 
+ name_id: value.name_id, + owner_team_id: value.owner_team_id.map(Into::into), + create_ts: value.create_ts, + } + } +} + +#[operation(name = "cluster-get")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let crdb = ctx.crdb().await?; + let cluster_ids = ctx + .cluster_ids + .iter() + .map(common::Uuid::as_uuid) + .collect::>(); + + let clusters = sql_fetch_all!( + [ctx, Cluster, &crdb] + " + SELECT + cluster_id, + name_id, + owner_team_id, + create_ts + FROM db_cluster.clusters + WHERE cluster_id = ANY($1) + ", + cluster_ids + ) + .await? + .into_iter() + .map(Into::into) + .collect::>(); + + Ok(cluster::get::Response { clusters }) +} diff --git a/svc/pkg/cluster/ops/get/tests/integration.rs b/svc/pkg/cluster/ops/get/tests/integration.rs new file mode 100644 index 0000000000..c273681e3a --- /dev/null +++ b/svc/pkg/cluster/ops/get/tests/integration.rs @@ -0,0 +1,24 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn empty(ctx: TestCtx) { + let cluster_id = Uuid::new_v4(); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let res = op!([ctx] cluster_get { + cluster_ids: vec![cluster_id.into()], + }) + .await + .unwrap(); + let cluster = res.clusters.first().expect("cluster not found"); + + assert_eq!(cluster_id, cluster.cluster_id.unwrap().as_uuid()); +} diff --git a/svc/pkg/cluster/ops/server-get/Cargo.toml b/svc/pkg/cluster/ops/server-get/Cargo.toml new file mode 100644 index 0000000000..8bad1a8c56 --- /dev/null +++ b/svc/pkg/cluster/ops/server-get/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cluster-server-get" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +prost = "0.10" +rivet-operation = { path = 
"../../../../../lib/operation/core" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-get/Service.toml b/svc/pkg/cluster/ops/server-get/Service.toml new file mode 100644 index 0000000000..496afacad3 --- /dev/null +++ b/svc/pkg/cluster/ops/server-get/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-server-get" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-get/src/lib.rs b/svc/pkg/cluster/ops/server-get/src/lib.rs new file mode 100644 index 0000000000..73d8c0059d --- /dev/null +++ b/svc/pkg/cluster/ops/server-get/src/lib.rs @@ -0,0 +1,60 @@ +use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Server { + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, + pool_type: i64, + vlan_ip: Option, + public_ip: Option, + cloud_destroy_ts: Option, +} + +impl From for backend::cluster::Server { + fn from(value: Server) -> Self { + backend::cluster::Server { + server_id: Some(value.server_id.into()), + datacenter_id: Some(value.datacenter_id.into()), + cluster_id: Some(value.cluster_id.into()), + pool_type: value.pool_type as i32, + vlan_ip: value.vlan_ip, + public_ip: value.public_ip, + cloud_destroy_ts: value.cloud_destroy_ts, + } + } +} + +#[operation(name = "cluster-server-get")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let server_ids = ctx + .server_ids + .iter() + .map(common::Uuid::as_uuid) + .collect::>(); + + let servers = sql_fetch_all!( + [ctx, Server] + " + SELECT + server_id, + datacenter_id, + cluster_id, + pool_type, + vlan_ip, + public_ip, + cloud_destroy_ts + FROM db_cluster.servers + WHERE server_id = ANY($1) + ", + server_ids + ) + .await?; + + Ok(cluster::server_get::Response { + servers: servers.into_iter().map(Into::into).collect::>(), + }) +} diff 
--git a/svc/pkg/cluster/ops/server-get/tests/integration.rs b/svc/pkg/cluster/ops/server-get/tests/integration.rs new file mode 100644 index 0000000000..e7e0b4b495 --- /dev/null +++ b/svc/pkg/cluster/ops/server-get/tests/integration.rs @@ -0,0 +1,7 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn empty(ctx: TestCtx) { + // TODO: +} diff --git a/svc/pkg/cluster/ops/server-list/Cargo.toml b/svc/pkg/cluster/ops/server-list/Cargo.toml new file mode 100644 index 0000000000..e7a984a2cc --- /dev/null +++ b/svc/pkg/cluster/ops/server-list/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cluster-server-list" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +prost = "0.10" +rivet-operation = { path = "../../../../../lib/operation/core" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-list/Service.toml b/svc/pkg/cluster/ops/server-list/Service.toml new file mode 100644 index 0000000000..f0def326f0 --- /dev/null +++ b/svc/pkg/cluster/ops/server-list/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-server-list" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-list/src/lib.rs b/svc/pkg/cluster/ops/server-list/src/lib.rs new file mode 100644 index 0000000000..c825d2f3a1 --- /dev/null +++ b/svc/pkg/cluster/ops/server-list/src/lib.rs @@ -0,0 +1,79 @@ +use std::collections::HashMap; + +use proto::backend::pkg::*; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Server { + cluster_id: Uuid, + server_id: Uuid, +} + +#[operation(name = "cluster-server-list")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let cluster_ids = ctx + .cluster_ids + .iter() + 
.map(common::Uuid::as_uuid) + .collect::>(); + + let servers = if ctx.include_destroyed { + sql_fetch_all!( + [ctx, Server] + " + SELECT + cluster_id, + server_id + FROM db_cluster.servers + WHERE + cluster_id = ANY($1) AND + taint_ts IS NULL + ", + &cluster_ids + ) + .await? + } else { + sql_fetch_all!( + [ctx, Server] + " + SELECT + cluster_id, + server_id + FROM db_cluster.servers + WHERE + cluster_id = ANY($1) AND + cloud_destroy_ts IS NULL AND + taint_ts IS NULL + ", + &cluster_ids + ) + .await? + }; + + // Fill in empty clusters + let mut dcs_by_cluster_id = cluster_ids + .iter() + .map(|cluster_id| (*cluster_id, Vec::new())) + .collect::>>(); + + for dc in servers { + dcs_by_cluster_id + .entry(dc.cluster_id) + .or_default() + .push(dc.server_id); + } + + Ok(cluster::server_list::Response { + clusters: dcs_by_cluster_id + .into_iter() + .map( + |(cluster_id, server_ids)| cluster::server_list::response::Cluster { + cluster_id: Some(cluster_id.into()), + server_ids: server_ids.into_iter().map(Into::into).collect::>(), + }, + ) + .collect::>(), + }) +} diff --git a/svc/pkg/cluster/ops/server-list/tests/integration.rs b/svc/pkg/cluster/ops/server-list/tests/integration.rs new file mode 100644 index 0000000000..e7e0b4b495 --- /dev/null +++ b/svc/pkg/cluster/ops/server-list/tests/integration.rs @@ -0,0 +1,7 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn empty(ctx: TestCtx) { + // TODO: +} diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml b/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml new file mode 100644 index 0000000000..5ebcc9c6f9 --- /dev/null +++ b/svc/pkg/cluster/ops/server-resolve-for-ip/Cargo.toml @@ -0,0 +1,18 @@ +[package] +name = "cluster-server-resolve-for-ip" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +prost = "0.10" +rivet-operation = { path = 
"../../../../../lib/operation/core" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml b/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml new file mode 100644 index 0000000000..0ad9fa42df --- /dev/null +++ b/svc/pkg/cluster/ops/server-resolve-for-ip/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "cluster-server-resolve-for-ip" + +[runtime] +kind = "rust" + +[operation] + +[databases] +db-cluster = {} diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs b/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs new file mode 100644 index 0000000000..8c5371e691 --- /dev/null +++ b/svc/pkg/cluster/ops/server-resolve-for-ip/src/lib.rs @@ -0,0 +1,38 @@ +use proto::backend::pkg::*; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Server { + server_id: Uuid, + public_ip: String, +} + +impl From for cluster::server_resolve_for_ip::response::Server { + fn from(value: Server) -> Self { + cluster::server_resolve_for_ip::response::Server { + server_id: Some(value.server_id.into()), + public_ip: value.public_ip, + } + } +} + +#[operation(name = "cluster-server-resolve-for-ip")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let servers = sql_fetch_all!( + [ctx, Server] + " + SELECT + server_id, public_ip + FROM db_cluster.servers + WHERE public_ip = ANY($1) + ", + &ctx.ips + ) + .await?; + + Ok(cluster::server_resolve_for_ip::Response { + servers: servers.into_iter().map(Into::into).collect::>(), + }) +} diff --git a/svc/pkg/cluster/ops/server-resolve-for-ip/tests/integration.rs b/svc/pkg/cluster/ops/server-resolve-for-ip/tests/integration.rs new file mode 100644 index 0000000000..e7e0b4b495 --- /dev/null +++ b/svc/pkg/cluster/ops/server-resolve-for-ip/tests/integration.rs @@ -0,0 +1,7 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + 
+#[worker_test] +async fn empty(ctx: TestCtx) { + // TODO: +} diff --git a/svc/pkg/cluster/standalone/default-update/Cargo.toml b/svc/pkg/cluster/standalone/default-update/Cargo.toml new file mode 100644 index 0000000000..efe135b218 --- /dev/null +++ b/svc/pkg/cluster/standalone/default-update/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "cluster-default-update" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +rivet-operation = { path = "../../../../../lib/operation/core" } +prost = "0.10" +rivet-connection = { path = "../../../../../lib/connection" } +reqwest = "0.11" +rivet-pools = { path = "../../../../../lib/pools" } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +tokio = { version = "1.29", features = ["full"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } +uuid = { version = "1", features = ["v4"] } + +cluster-get = { path = "../../ops/get" } +cluster-datacenter-get = { path = "../../ops/datacenter-get" } +cluster-datacenter-list = { path = "../../ops/datacenter-list" } + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/standalone/default-update/Service.toml b/svc/pkg/cluster/standalone/default-update/Service.toml new file mode 100644 index 0000000000..9da9ae8631 --- /dev/null +++ b/svc/pkg/cluster/standalone/default-update/Service.toml @@ -0,0 +1,11 @@ +[service] +name = "cluster-default-update" +essential = true + +[runtime] +kind = "rust" + +[oneshot] + +[databases] +db-build = {} diff --git a/svc/pkg/cluster/standalone/default-update/src/lib.rs b/svc/pkg/cluster/standalone/default-update/src/lib.rs new file mode 100644 index 0000000000..99f266520c --- /dev/null +++ b/svc/pkg/cluster/standalone/default-update/src/lib.rs @@ -0,0 +1,261 @@ +use std::collections::HashMap; + 
+use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; +use serde::Deserialize; +use uuid::Uuid; + +#[derive(Deserialize)] +struct Cluster { + name_id: String, + datacenters: HashMap, +} + +#[derive(Deserialize)] +struct Datacenter { + datacenter_id: Uuid, + display_name: String, + provider: Provider, + provider_datacenter_name: String, + pools: HashMap, + build_delivery_method: BuildDeliveryMethod, + drain_timeout: u64, +} + +#[derive(Deserialize)] +enum Provider { + #[serde(rename = "linode")] + Linode, +} + +impl From for backend::cluster::Provider { + fn from(value: Provider) -> backend::cluster::Provider { + match value { + Provider::Linode => backend::cluster::Provider::Linode, + } + } +} + +#[derive(Deserialize)] +struct Pool { + hardware: Vec, + desired_count: u32, + max_count: u32, +} + +#[derive(Deserialize, PartialEq, Eq, Hash)] +enum PoolType { + #[serde(rename = "job")] + Job, + #[serde(rename = "gg")] + Gg, + #[serde(rename = "ats")] + Ats, +} + +impl From for backend::cluster::PoolType { + fn from(value: PoolType) -> backend::cluster::PoolType { + match value { + PoolType::Job => backend::cluster::PoolType::Job, + PoolType::Gg => backend::cluster::PoolType::Gg, + PoolType::Ats => backend::cluster::PoolType::Ats, + } + } +} + +#[derive(Deserialize)] +struct Hardware { + name: String, +} + +impl From for backend::cluster::Hardware { + fn from(value: Hardware) -> backend::cluster::Hardware { + backend::cluster::Hardware { + provider_hardware: value.name, + } + } +} + +#[derive(Deserialize)] +enum BuildDeliveryMethod { + #[serde(rename = "traffic_server")] + TrafficServer, + #[serde(rename = "s3_direct")] + S3Direct, +} + +impl From for backend::cluster::BuildDeliveryMethod { + fn from(value: BuildDeliveryMethod) -> backend::cluster::BuildDeliveryMethod { + match value { + BuildDeliveryMethod::TrafficServer => { + backend::cluster::BuildDeliveryMethod::TrafficServer + } + BuildDeliveryMethod::S3Direct => 
backend::cluster::BuildDeliveryMethod::S3Direct, + } + } +} + +#[tracing::instrument] +pub async fn run_from_env(use_autoscaler: bool) -> GlobalResult<()> { + let pools = rivet_pools::from_env("cluster-default-update").await?; + let client = + chirp_client::SharedClient::from_env(pools.clone())?.wrap_new("cluster-default-update"); + let cache = rivet_cache::CacheInner::from_env(pools.clone())?; + let ctx = OperationContext::new( + "cluster-default-update".into(), + std::time::Duration::from_secs(60), + rivet_connection::Connection::new(client, pools, cache), + Uuid::new_v4(), + Uuid::new_v4(), + util::timestamp::now(), + util::timestamp::now(), + (), + Vec::new(), + ); + + // Read config from env + let Some(config_json) = util::env::var("RIVET_DEFAULT_CLUSTER_CONFIG").ok() else { + tracing::warn!("no cluster config set in namespace config"); + return Ok(()); + }; + let config = serde_json::from_str::(&config_json)?; + + let taint = util::env::var("RIVET_TAINT_DEFAULT_CLUSTER") + .ok() + .unwrap_or_else(|| "0".to_string()) + == "1"; + + // HACK: When deploying both monolith worker and this service for the first time, there is a race + // condition which might result in the message being published from here but not caught by + // monolith-worker, resulting in nothing happening. 
+ tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let cluster_id = util::env::default_cluster_id(); + + let (cluster_res, datacenter_list_res) = tokio::try_join!( + // Check if cluster already exists + op!([ctx] cluster_get { + cluster_ids: vec![cluster_id.into()], + }), + op!([ctx] cluster_datacenter_list { + cluster_ids: vec![cluster_id.into()], + }), + )?; + + // Get all datacenters + let cluster = unwrap!(datacenter_list_res.clusters.first()); + let datacenters_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: cluster.datacenter_ids.clone(), + }) + .await?; + + if cluster_res.clusters.is_empty() { + tracing::warn!("creating default cluster"); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: config.name_id.clone(), + owner_team_id: None, + }) + .await?; + } + + for existing_datacenter in &datacenters_res.datacenters { + let datacenter_id = unwrap_ref!(existing_datacenter.datacenter_id).as_uuid(); + + if !config + .datacenters + .iter() + .any(|(_, dc)| dc.datacenter_id == datacenter_id) + { + // TODO: Delete datacenters + } + } + + for (name_id, datacenter) in config.datacenters { + let datacenter_id_proto = Some(datacenter.datacenter_id.into()); + let existing_datacenter = datacenters_res + .datacenters + .iter() + .any(|dc| dc.datacenter_id == datacenter_id_proto); + + // Update existing datacenter + if existing_datacenter { + let new_pools = datacenter + .pools + .into_iter() + .map(|(pool_type, pool)| { + let desired_count = match pool_type { + PoolType::Ats => Some(pool.desired_count), + PoolType::Job | PoolType::Gg => { + if use_autoscaler { + None + } else { + Some(pool.desired_count) + } + } + }; + + cluster::msg::datacenter_update::PoolUpdate { + pool_type: Into::::into(pool_type) as i32, + hardware: pool + .hardware + .into_iter() + .map(Into::into) + .collect::>(), + desired_count, + max_count: Some(pool.max_count), + } + }) + .collect::>(); + 
+ msg!([ctx] @wait cluster::msg::datacenter_update(datacenter.datacenter_id) { + datacenter_id: datacenter_id_proto, + pools: new_pools, + // Convert from seconds to ms + drain_timeout: Some(datacenter.drain_timeout * 1000), + }) + .await?; + } + // Create new datacenter + else { + msg!([ctx] @wait cluster::msg::datacenter_create(datacenter.datacenter_id) { + config: Some(backend::cluster::Datacenter { + datacenter_id: datacenter_id_proto, + cluster_id: Some(cluster_id.into()), + name_id, + display_name: datacenter.display_name, + + provider: Into::::into(datacenter.provider) as i32, + provider_datacenter_id: datacenter.provider_datacenter_name, + + pools: datacenter.pools.into_iter().map(|(pool_type, pool)| { + backend::cluster::Pool { + pool_type: Into::::into(pool_type) as i32, + hardware: pool.hardware.into_iter().map(Into::into).collect::>(), + desired_count: pool.desired_count, + max_count: pool.max_count, + } + }).collect::>(), + + build_delivery_method: Into::::into(datacenter.build_delivery_method) as i32, + drain_timeout: datacenter.drain_timeout, + }), + }) + .await?; + } + + // TODO: Both this message and datacenter-create/datacenter-update (above) publish datacenter-scale. + // This results in double provisioning until datacenter-scale is published again, cleaning up the + // excess. 
+ // Taint datacenter + if taint { + msg!([ctx] @wait cluster::msg::datacenter_taint(datacenter.datacenter_id) { + datacenter_id: datacenter_id_proto, + }) + .await?; + } + } + + Ok(()) +} diff --git a/svc/pkg/cluster/standalone/default-update/src/main.rs b/svc/pkg/cluster/standalone/default-update/src/main.rs new file mode 100644 index 0000000000..8fad089303 --- /dev/null +++ b/svc/pkg/cluster/standalone/default-update/src/main.rs @@ -0,0 +1,17 @@ +use rivet_operation::prelude::*; + +#[tokio::main] +async fn main() -> GlobalResult<()> { + tracing_subscriber::fmt() + .json() + .with_max_level(tracing::Level::INFO) + .with_span_events(tracing_subscriber::fmt::format::FmtSpan::NONE) + .init(); + + // TODO: When running bolt up, this service gets created first before `cluster-worker` so the messages + // sent from here are received but effectively forgotten because `cluster-worker` gets restarted + // immediately afterwards. + tokio::time::sleep(std::time::Duration::from_secs(3)).await; + + cluster_default_update::run_from_env(false).await +} diff --git a/svc/pkg/cluster/standalone/default-update/tests/integration.rs b/svc/pkg/cluster/standalone/default-update/tests/integration.rs new file mode 100644 index 0000000000..83977fe299 --- /dev/null +++ b/svc/pkg/cluster/standalone/default-update/tests/integration.rs @@ -0,0 +1,10 @@ +#[tokio::test(flavor = "multi_thread")] +async fn basic() { + tracing_subscriber::fmt() + .json() + .with_max_level(tracing::Level::INFO) + .with_span_events(tracing_subscriber::fmt::format::FmtSpan::NONE) + .init(); + + cluster_default_update::run_from_env(false).await.unwrap(); +} diff --git a/svc/pkg/cluster/standalone/gc/Cargo.toml b/svc/pkg/cluster/standalone/gc/Cargo.toml new file mode 100644 index 0000000000..0fb3ee42fe --- /dev/null +++ b/svc/pkg/cluster/standalone/gc/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "cluster-gc" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + 
+[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +rivet-connection = { path = "../../../../../lib/connection" } +rivet-health-checks = { path = "../../../../../lib/health-checks" } +rivet-metrics = { path = "../../../../../lib/metrics" } +rivet-operation = { path = "../../../../../lib/operation/core" } +rivet-runtime = { path = "../../../../../lib/runtime" } +tokio = { version = "1.29", features = ["full"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } + +cluster-datacenter-get = { path = "../../ops/datacenter-get" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } +util-cluster = { package = "rivet-util-cluster", path = "../../util" } diff --git a/svc/pkg/cluster/standalone/gc/Service.toml b/svc/pkg/cluster/standalone/gc/Service.toml new file mode 100644 index 0000000000..97fb39c98e --- /dev/null +++ b/svc/pkg/cluster/standalone/gc/Service.toml @@ -0,0 +1,8 @@ +[service] +name = "cluster-gc" + +[runtime] +kind = "rust" + +[headless] +singleton = true diff --git a/svc/pkg/cluster/standalone/gc/src/lib.rs b/svc/pkg/cluster/standalone/gc/src/lib.rs new file mode 100644 index 0000000000..cdbeca620b --- /dev/null +++ b/svc/pkg/cluster/standalone/gc/src/lib.rs @@ -0,0 +1,108 @@ +use std::collections::HashMap; + +use proto::backend::{self, pkg::*}; +use rivet_operation::prelude::*; + +#[derive(sqlx::FromRow)] +struct Server { + datacenter_id: Uuid, + server_id: Uuid, + drain_ts: i64, +} + +#[tracing::instrument(skip_all)] +pub async fn run_from_env(ts: i64, pools: rivet_pools::Pools) -> GlobalResult<()> { + let client = chirp_client::SharedClient::from_env(pools.clone())?.wrap_new("cluster-gc"); + let cache = rivet_cache::CacheInner::from_env(pools.clone())?; + let ctx = OperationContext::new( + "cluster-gc".into(), + std::time::Duration::from_secs(60), + 
rivet_connection::Connection::new(client, pools, cache), + Uuid::new_v4(), + Uuid::new_v4(), + util::timestamp::now(), + util::timestamp::now(), + (), + Vec::new(), + ); + let crdb = ctx.crdb().await?; + + // Select all draining gg servers + let gg_servers = sql_fetch_all!( + [ctx, Server, &crdb] + " + SELECT datacenter_id, server_id, drain_ts + FROM db_cluster.servers + WHERE + pool_type = $1 AND + cloud_destroy_ts IS NULL AND + drain_ts IS NOT NULL + ", + backend::cluster::PoolType::Gg as i64 + ) + .await?; + + if gg_servers.is_empty() { + return Ok(()); + } + + let datacenters_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: gg_servers + .iter() + .map(|server| server.datacenter_id.into()) + .collect::>(), + }) + .await?; + + // Collect into hashmap for better reads + let datacenters = datacenters_res + .datacenters + .iter() + .map(|dc| Ok((unwrap_ref!(dc.datacenter_id).as_uuid(), dc))) + .collect::>>()?; + + // Filter all gg servers that are finished draining + let destroy_server_ids = gg_servers + .iter() + .map(|server| { + let datacenter_config = unwrap!(datacenters.get(&server.datacenter_id)); + let drain_cutoff = ts - datacenter_config.drain_timeout as i64; + + Ok((server, drain_cutoff)) + }) + .filter_map(|res| match res { + Ok((server, drain_cutoff)) => { + if server.drain_ts < drain_cutoff { + Some(Ok(server.server_id)) + } else { + None + } + } + Err(err) => Some(Err(err)), + }) + .collect::>>()?; + + // Mark as destroyed + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.servers + SET cloud_destroy_ts = $2 + WHERE + server_id = ANY($1) + ", + &destroy_server_ids, + util::timestamp::now(), + ) + .await?; + + for server_id in destroy_server_ids { + msg!([ctx] cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await?; + } + + Ok(()) +} diff --git a/svc/pkg/cluster/standalone/gc/src/main.rs b/svc/pkg/cluster/standalone/gc/src/main.rs new file mode 100644 index 0000000000..142bfd5105 --- 
/dev/null +++ b/svc/pkg/cluster/standalone/gc/src/main.rs @@ -0,0 +1,31 @@ +use std::time::Duration; + +use rivet_operation::prelude::*; + +fn main() -> GlobalResult<()> { + rivet_runtime::run(start()).unwrap() +} + +async fn start() -> GlobalResult<()> { + let pools = rivet_pools::from_env("cluster-gc").await?; + + tokio::task::Builder::new() + .name("cluster_gc::health_checks") + .spawn(rivet_health_checks::run_standalone( + rivet_health_checks::Config { + pools: Some(pools.clone()), + }, + ))?; + + tokio::task::Builder::new() + .name("cluster_gc::metrics") + .spawn(rivet_metrics::run_standalone())?; + + let mut interval = tokio::time::interval(Duration::from_secs(120)); + loop { + interval.tick().await; + + let ts = util::timestamp::now(); + cluster_gc::run_from_env(ts, pools.clone()).await?; + } +} diff --git a/svc/pkg/cluster/standalone/gc/tests/integration.rs b/svc/pkg/cluster/standalone/gc/tests/integration.rs new file mode 100644 index 0000000000..e3e3a5ec11 --- /dev/null +++ b/svc/pkg/cluster/standalone/gc/tests/integration.rs @@ -0,0 +1,171 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +use ::cluster_gc::run_from_env; + +const DRAIN_TIMEOUT: i64 = 1000 * 60 * 60; + +#[tokio::test(flavor = "multi_thread")] +async fn basic() { + if !util::feature::server_provision() { + return; + } + + tracing_subscriber::fmt() + .json() + .with_max_level(tracing::Level::INFO) + .with_span_events(tracing_subscriber::fmt::format::FmtSpan::NONE) + .init(); + + let ctx = TestCtx::from_env("cluster-gc-test").await.unwrap(); + let pools = rivet_pools::from_env("cluster-gc-test").await.unwrap(); + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] cluster::msg::server_provision(server_id) { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + 
pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.servers + WHERE + server_id = $1 AND + public_ip IS NOT NULL + ) + ", + server_id, + ) + .await + .unwrap(); + + if exists { + break; + } + } + + // Start drain + sql_execute!( + [ctx] + " + UPDATE db_cluster.servers + SET drain_ts = $2 + WHERE server_id = $1 + ", + server_id, + util::timestamp::now(), + ) + .await + .unwrap(); + msg!([ctx] @wait cluster::msg::server_drain(server_id) { + server_id: Some(server_id.into()), + }) + .await + .unwrap(); + + let mut sub = subscribe!([ctx] cluster::msg::server_destroy(server_id)) + .await + .unwrap(); + + // Run GC + let ts = util::timestamp::now() + DRAIN_TIMEOUT + 1; + run_from_env(ts, pools).await.unwrap(); + + // Check that destroy message was sent + sub.next().await.unwrap(); + + // Clean up afterwards so we don't litter + msg!([ctx] @wait cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Gg as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: 
vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: DRAIN_TIMEOUT as u64, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/types/datacenter-get.proto b/svc/pkg/cluster/types/datacenter-get.proto new file mode 100644 index 0000000000..eb992858a0 --- /dev/null +++ b/svc/pkg/cluster/types/datacenter-get.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.datacenter_get; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + repeated rivet.common.Uuid datacenter_ids = 1; +} + +message Response { + repeated rivet.backend.cluster.Datacenter datacenters = 1; +} diff --git a/svc/pkg/cluster/types/datacenter-list.proto b/svc/pkg/cluster/types/datacenter-list.proto new file mode 100644 index 0000000000..c774f704fa --- /dev/null +++ b/svc/pkg/cluster/types/datacenter-list.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.datacenter_list; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + repeated rivet.common.Uuid cluster_ids = 1; +} + +message Response { + message Cluster { + rivet.common.Uuid cluster_id = 1; + repeated rivet.common.Uuid datacenter_ids = 2; + } + + repeated Cluster clusters = 1; +} diff --git 
a/svc/pkg/cluster/types/datacenter-location-get.proto b/svc/pkg/cluster/types/datacenter-location-get.proto new file mode 100644 index 0000000000..5be47351d9 --- /dev/null +++ b/svc/pkg/cluster/types/datacenter-location-get.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.datacenter_location_get; + +import "proto/common.proto"; +import "proto/backend/net.proto"; + +message Request { + repeated rivet.common.Uuid datacenter_ids = 1; +} + +message Response { + message Datacenter { + rivet.common.Uuid datacenter_id = 1; + optional rivet.backend.net.Coordinates coords = 2; + } + + repeated Datacenter datacenters = 1; +} diff --git a/svc/pkg/cluster/types/datacenter-resolve-for-name-id.proto b/svc/pkg/cluster/types/datacenter-resolve-for-name-id.proto new file mode 100644 index 0000000000..6390d1cbeb --- /dev/null +++ b/svc/pkg/cluster/types/datacenter-resolve-for-name-id.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.datacenter_resolve_for_name_id; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + rivet.common.Uuid cluster_id = 1; + repeated string name_ids = 2; +} + +message Response { + message Datacenter { + rivet.common.Uuid datacenter_id = 1; + string name_id = 2; + } + + repeated Datacenter datacenters = 1; +} diff --git a/svc/pkg/cluster/types/datacenter-topology-get.proto b/svc/pkg/cluster/types/datacenter-topology-get.proto new file mode 100644 index 0000000000..be9a478628 --- /dev/null +++ b/svc/pkg/cluster/types/datacenter-topology-get.proto @@ -0,0 +1,32 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.datacenter_topology_get; + +import "proto/common.proto"; + +message Request { + repeated rivet.common.Uuid datacenter_ids = 1; +} + +message Response { + message Server { + rivet.common.Uuid server_id = 1; + string node_id = 2; + + Stats usage = 3; + Stats limits = 4; + } + + message Datacenter { + rivet.common.Uuid datacenter_id = 1; + 
repeated Server servers = 2; + } + + message Stats { + uint64 cpu = 1; // mhz + uint64 memory = 2; // mb + uint64 disk = 3; // mb + } + + repeated Datacenter datacenters = 1; +} diff --git a/svc/pkg/cluster/types/get.proto b/svc/pkg/cluster/types/get.proto new file mode 100644 index 0000000000..3d995108a3 --- /dev/null +++ b/svc/pkg/cluster/types/get.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.get; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + repeated rivet.common.Uuid cluster_ids = 1; +} + +message Response { + repeated rivet.backend.cluster.Cluster clusters = 1; +} diff --git a/svc/pkg/cluster/types/msg/create-complete.proto b/svc/pkg/cluster/types/msg/create-complete.proto new file mode 100644 index 0000000000..cd62379cac --- /dev/null +++ b/svc/pkg/cluster/types/msg/create-complete.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.create_complete; + +import "proto/common.proto"; + +/// name = "msg-cluster-create-complete" +/// parameters = [ +/// { name = "cluster_id" }, +/// ] +message Message { + rivet.common.Uuid cluster_id = 1; +} diff --git a/svc/pkg/cluster/types/msg/create.proto b/svc/pkg/cluster/types/msg/create.proto new file mode 100644 index 0000000000..2c24142cc5 --- /dev/null +++ b/svc/pkg/cluster/types/msg/create.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.create; + +import "proto/common.proto"; + +/// name = "msg-cluster-create" +/// parameters = [ +/// { name = "cluster_id" }, +/// ] +message Message { + rivet.common.Uuid cluster_id = 1; + string name_id = 2; + optional rivet.common.Uuid owner_team_id = 3; +} diff --git a/svc/pkg/cluster/types/msg/datacenter-create.proto b/svc/pkg/cluster/types/msg/datacenter-create.proto new file mode 100644 index 0000000000..f74a4d9731 --- /dev/null +++ b/svc/pkg/cluster/types/msg/datacenter-create.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package 
rivet.backend.pkg.cluster.msg.datacenter_create; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +/// name = "msg-cluster-datacenter-create" +/// parameters = [ +/// { name = "datacenter_id" }, +/// ] +message Message { + rivet.backend.cluster.Datacenter config = 1; +} + +// Helper proto for writing to sql +message Pools { + repeated rivet.backend.cluster.Pool pools = 1; +} diff --git a/svc/pkg/cluster/types/msg/datacenter-scale.proto b/svc/pkg/cluster/types/msg/datacenter-scale.proto new file mode 100644 index 0000000000..cbe7a468a7 --- /dev/null +++ b/svc/pkg/cluster/types/msg/datacenter-scale.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.datacenter_scale; + +import "proto/common.proto"; + +/// name = "msg-cluster-datacenter-scale" +/// parameters = [ +/// { name = "datacenter_id" }, +/// ] +message Message { + rivet.common.Uuid datacenter_id = 1; +} diff --git a/svc/pkg/cluster/types/msg/datacenter-taint.proto b/svc/pkg/cluster/types/msg/datacenter-taint.proto new file mode 100644 index 0000000000..410eabb2b1 --- /dev/null +++ b/svc/pkg/cluster/types/msg/datacenter-taint.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.datacenter_taint; + +import "proto/common.proto"; + +/// name = "msg-cluster-datacenter-taint" +/// parameters = [ +/// { name = "datacenter_id" }, +/// ] +message Message { + rivet.common.Uuid datacenter_id = 1; +} diff --git a/svc/pkg/cluster/types/msg/datacenter-update.proto b/svc/pkg/cluster/types/msg/datacenter-update.proto new file mode 100644 index 0000000000..39423f0893 --- /dev/null +++ b/svc/pkg/cluster/types/msg/datacenter-update.proto @@ -0,0 +1,25 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.datacenter_update; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +/// name = "msg-cluster-datacenter-update" +/// parameters = [ +/// { name = "datacenter_id" }, +/// ] +message Message { + 
rivet.common.Uuid datacenter_id = 1; + repeated PoolUpdate pools = 2; + optional uint64 drain_timeout = 3; +} + +message PoolUpdate { + rivet.backend.cluster.PoolType pool_type = 1; + + // Each can be optionally updated + repeated rivet.backend.cluster.Hardware hardware = 2; + optional uint32 desired_count = 3; + optional uint32 max_count = 4; +} diff --git a/svc/pkg/cluster/types/msg/server-destroy-complete.proto b/svc/pkg/cluster/types/msg/server-destroy-complete.proto new file mode 100644 index 0000000000..a580ab1dc5 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-destroy-complete.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_destroy_complete; + +import "proto/common.proto"; + +/// name = "msg-cluster-server-destroy-complete" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid server_id = 1; +} diff --git a/svc/pkg/cluster/types/msg/server-destroy.proto b/svc/pkg/cluster/types/msg/server-destroy.proto new file mode 100644 index 0000000000..7e65d04479 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-destroy.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_destroy; + +import "proto/common.proto"; + +/// name = "msg-cluster-server-destroy" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid server_id = 1; + // Destroys the server even if it isn't fully provisioned yet + bool force = 2; +} diff --git a/svc/pkg/cluster/types/msg/server-dns-create.proto b/svc/pkg/cluster/types/msg/server-dns-create.proto new file mode 100644 index 0000000000..2a19067c28 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-dns-create.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_dns_create; + +import "proto/common.proto"; + +/// name = "msg-cluster-server-dns-create" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid 
server_id = 1; +} diff --git a/svc/pkg/cluster/types/msg/server-dns-delete.proto b/svc/pkg/cluster/types/msg/server-dns-delete.proto new file mode 100644 index 0000000000..63786e8d56 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-dns-delete.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_dns_delete; + +import "proto/common.proto"; + +/// name = "msg-cluster-server-dns-delete" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid server_id = 1; +} diff --git a/svc/pkg/cluster/types/msg/server-drain.proto b/svc/pkg/cluster/types/msg/server-drain.proto new file mode 100644 index 0000000000..b4770d0d87 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-drain.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_drain; + +import "proto/common.proto"; + +/// name = "msg-cluster-server-drain" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid server_id = 1; +} diff --git a/svc/pkg/cluster/types/msg/server-install-complete.proto b/svc/pkg/cluster/types/msg/server-install-complete.proto new file mode 100644 index 0000000000..cd53811833 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-install-complete.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_install_complete; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +/// name = "msg-cluster-server-install-complete" +/// parameters = [ +/// { name = "public_ip" }, +/// ] +message Message { + string ip = 1; + // If set in server install message + optional rivet.common.Uuid server_id = 2; + rivet.backend.cluster.Provider provider = 3; +} diff --git a/svc/pkg/cluster/types/msg/server-install.proto b/svc/pkg/cluster/types/msg/server-install.proto new file mode 100644 index 0000000000..5f6d7fb811 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-install.proto @@ -0,0 +1,21 @@ +syntax = 
"proto3"; + +package rivet.backend.pkg.cluster.msg.server_install; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +/// name = "msg-cluster-server-install" +/// parameters = [ +/// { name = "public_ip" }, +/// ] +message Message { + string public_ip = 1; + rivet.backend.cluster.PoolType pool_type = 2; + // Only if installing on a "server" (see cluster database). Used to check if + // the server is currently being deleted + optional rivet.common.Uuid server_id = 3; + // Simply passed to the install complete message + rivet.backend.cluster.Provider provider = 4; + bool initialize_immediately = 5; +} diff --git a/svc/pkg/cluster/types/msg/server-provision.proto b/svc/pkg/cluster/types/msg/server-provision.proto new file mode 100644 index 0000000000..9eccbbbb15 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-provision.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_provision; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +/// name = "msg-cluster-server-provision" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid cluster_id = 1; + rivet.common.Uuid datacenter_id = 2; + rivet.common.Uuid server_id = 3; + rivet.backend.cluster.PoolType pool_type = 4; + rivet.backend.cluster.Provider provider = 5; + repeated string tags = 6; +} diff --git a/svc/pkg/cluster/types/msg/server-undrain.proto b/svc/pkg/cluster/types/msg/server-undrain.proto new file mode 100644 index 0000000000..cc8feb1525 --- /dev/null +++ b/svc/pkg/cluster/types/msg/server-undrain.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.msg.server_undrain; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +/// name = "msg-cluster-server-undrain" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid server_id = 1; +} diff --git a/svc/pkg/cluster/types/server-get.proto 
b/svc/pkg/cluster/types/server-get.proto new file mode 100644 index 0000000000..a994fd3dd9 --- /dev/null +++ b/svc/pkg/cluster/types/server-get.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.server_get; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + repeated rivet.common.Uuid server_ids = 1; +} + +message Response { + repeated rivet.backend.cluster.Server servers = 1; +} diff --git a/svc/pkg/cluster/types/server-list.proto b/svc/pkg/cluster/types/server-list.proto new file mode 100644 index 0000000000..e88bf382c7 --- /dev/null +++ b/svc/pkg/cluster/types/server-list.proto @@ -0,0 +1,20 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.server_list; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + repeated rivet.common.Uuid cluster_ids = 1; + bool include_destroyed = 2; +} + +message Response { + message Cluster { + rivet.common.Uuid cluster_id = 1; + repeated rivet.common.Uuid server_ids = 2; + } + + repeated Cluster clusters = 1; +} diff --git a/svc/pkg/cluster/types/server-resolve-for-ip.proto b/svc/pkg/cluster/types/server-resolve-for-ip.proto new file mode 100644 index 0000000000..19bd25ee5e --- /dev/null +++ b/svc/pkg/cluster/types/server-resolve-for-ip.proto @@ -0,0 +1,19 @@ +syntax = "proto3"; + +package rivet.backend.pkg.cluster.server_resolve_for_ip; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + repeated string ips = 1; +} + +message Response { + message Server { + string public_ip = 1; + rivet.common.Uuid server_id = 2; + } + + repeated Server servers = 1; +} diff --git a/svc/pkg/cluster/util/Cargo.toml b/svc/pkg/cluster/util/Cargo.toml new file mode 100644 index 0000000000..962aedb730 --- /dev/null +++ b/svc/pkg/cluster/util/Cargo.toml @@ -0,0 +1,16 @@ +[package] +name = "rivet-util-cluster" +version = "0.1.0" +edition = "2021" +authors = ["Rivet Gaming, LLC "] +license = 
"Apache-2.0" + +[dependencies] +rivet-util = { path = "../../../../lib/util/core" } +types = { path = "../../../../lib/types/core" } +uuid = { version = "1", features = ["v4", "serde"] } + +[build-dependencies] +hex = "0.4" +sha2 = "0.10" +tokio = { version = "1.29", features = ["full"] } diff --git a/svc/pkg/cluster/util/build.rs b/svc/pkg/cluster/util/build.rs new file mode 100644 index 0000000000..a2e32f0ec1 --- /dev/null +++ b/svc/pkg/cluster/util/build.rs @@ -0,0 +1,69 @@ +use tokio::{fs, process::Command}; +use sha2::{Digest, Sha256}; + +// NOTE: This only gets the hash of the folder. Any template variables changed in the install scripts +// will not update the hash. +// Get a hash of the server install worker +#[tokio::main] +async fn main() { + let current_dir = std::env::current_dir().unwrap(); + let server_install_path = { + let mut dir = current_dir.clone(); + dir.pop(); + + dir.join("worker") + .join("src") + .join("workers") + .join("server_install") + }; + + // Add rereun statement + println!("cargo:rerun-if-changed={}", server_install_path.display(),); + + let mut util_path = std::env::current_dir().unwrap(); + util_path.pop(); + let util_path = util_path.join("worker").join("src").join("workers").join("server_install"); + + // Compute the git diff between the current branch and the local changes + let mut cmd = Command::new("git").arg("diff").arg("--minimal").arg("HEAD").arg("--").arg(util_path).output() + .await + .unwrap(); + + if !cmd.status.success() { + panic!( + "failed to get git diff ({}):\n{}", + cmd.status, + String::from_utf8(cmd.stderr).unwrap() + ); + } + + let source_diff = String::from_utf8(cmd.stdout).unwrap(); + + // If there is no diff, use the git commit hash + let source_hash = if source_diff.is_empty() { + let cmd = Command::new("git") + .arg("rev-parse") + .arg("HEAD:svc/pkg/cluster/worker/src/workers/server_install") + .output() + .await + .unwrap(); + + if !cmd.status.success() { + panic!( + "failed to get git diff 
({}):\n{}", + cmd.status, + String::from_utf8(cmd.stderr).unwrap() + ); + } + + String::from_utf8(cmd.stdout).unwrap() + } else { + // Get hash of diff + hex::encode(Sha256::digest(source_diff.as_bytes())) + }; + + fs::create_dir_all(current_dir.join("gen")).await.unwrap(); + fs::write(current_dir.join("gen").join("hash.txt"), source_hash.trim()) + .await + .unwrap(); +} diff --git a/svc/pkg/cluster/util/src/lib.rs b/svc/pkg/cluster/util/src/lib.rs new file mode 100644 index 0000000000..c49883ace7 --- /dev/null +++ b/svc/pkg/cluster/util/src/lib.rs @@ -0,0 +1,109 @@ +use types::rivet::backend::{self, pkg::*}; +use uuid::Uuid; + +pub mod test; + +// NOTE: We don't reserve CPU because Nomad is running as a higher priority process than the rest and +// shouldn't be doing much heavy lifting. +const RESERVE_SYSTEM_MEMORY: u64 = 512; +// See module.traefik_job resources +const RESERVE_LB_MEMORY: u64 = 512; +const RESERVE_MEMORY: u64 = RESERVE_SYSTEM_MEMORY + RESERVE_LB_MEMORY; + +/// Provider agnostic hardware specs. 
+#[derive(Debug)] +pub struct JobNodeConfig { + pub cpu_cores: u64, + pub cpu: u64, + pub memory: u64, + pub disk: u64, + pub bandwidth: u64, +} + +impl JobNodeConfig { + pub fn from_linode(instance_type: &linode::instance_type_get::response::InstanceType) -> JobNodeConfig { + // Account for kernel memory overhead + // https://www.linode.com/community/questions/17791/why-doesnt-free-m-match-the-full-amount-of-ram-of-my-nanode-plan + let memory = instance_type.memory * 96 / 100; + // Remove reserved resources + let memory = memory - RESERVE_MEMORY; + + JobNodeConfig { + cpu_cores: instance_type.vcpus, + cpu: instance_type.vcpus * 1999, + memory, + disk: instance_type.disk, + bandwidth: instance_type.transfer * 1000, + } + } + + pub fn cpu_per_core(&self) -> u64 { + 1999 + } + + pub fn memory_per_core(&self) -> u64 { + self.memory / self.cpu_cores + } + + pub fn disk_per_core(&self) -> u64 { + self.disk / self.cpu_cores + } + + pub fn bandwidth_per_core(&self) -> u64 { + self.bandwidth / self.cpu_cores + } +} + +pub fn server_name( + provider_datacenter_id: &str, + pool_type: backend::cluster::PoolType, + server_id: Uuid, +) -> String { + let ns = rivet_util::env::namespace(); + let pool_type_str = match pool_type { + backend::cluster::PoolType::Job => "job", + backend::cluster::PoolType::Gg => "gg", + backend::cluster::PoolType::Ats => "ats", + }; + + format!( + "{ns}-{provider_datacenter_id}-{pool_type_str}-{server_id}", + ) +} + +// Use the hash of the server install script in the image variant so that if the install scripts are updated +// we wont be using the old image anymore +const CLUSTER_SERVER_INSTALL_HASH: &str = include_str!("../gen/hash.txt"); + +// Used for linode labels which have to be between 3 and 64 characters for some reason +pub fn simple_image_variant( + provider_datacenter_id: &str, + pool_type: backend::cluster::PoolType, +) -> String { + let ns = rivet_util::env::namespace(); + let pool_type_str = match pool_type { + 
backend::cluster::PoolType::Job => "job", + backend::cluster::PoolType::Gg => "gg", + backend::cluster::PoolType::Ats => "ats", + }; + + format!("{ns}-{provider_datacenter_id}-{pool_type_str}") +} + +pub fn image_variant( + provider: backend::cluster::Provider, + provider_datacenter_id: &str, + pool_type: backend::cluster::PoolType, +) -> String { + let ns = rivet_util::env::namespace(); + let provider_str = match provider { + backend::cluster::Provider::Linode => "linode", + }; + let pool_type_str = match pool_type { + backend::cluster::PoolType::Job => "job", + backend::cluster::PoolType::Gg => "gg", + backend::cluster::PoolType::Ats => "ats", + }; + + format!("{ns}-{CLUSTER_SERVER_INSTALL_HASH}-{provider_str}-{provider_datacenter_id}-{pool_type_str}") +} diff --git a/svc/pkg/cluster/util/src/test.rs b/svc/pkg/cluster/util/src/test.rs new file mode 100644 index 0000000000..93babf64cf --- /dev/null +++ b/svc/pkg/cluster/util/src/test.rs @@ -0,0 +1 @@ +pub const HARDWARE: &str = "g6-nanode-1"; diff --git a/svc/pkg/cluster/worker/Cargo.toml b/svc/pkg/cluster/worker/Cargo.toml new file mode 100644 index 0000000000..f540fc3b7d --- /dev/null +++ b/svc/pkg/cluster/worker/Cargo.toml @@ -0,0 +1,46 @@ +[package] +name = "cluster-worker" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +anyhow = "1.0" +chirp-client = { path = "../../../../lib/chirp/client" } +chirp-worker = { path = "../../../../lib/chirp/worker" } +chrono = "0.4" +cloudflare = "0.10.1" +include_dir = "0.7.3" +indoc = "1.0" +lazy_static = "1.4.0" +maplit = "1.0" +nomad-util = { path = "../../../../lib/nomad-util" } +rivet-convert = { path = "../../../../lib/convert" } +rivet-health-checks = { path = "../../../../lib/health-checks" } +rivet-metrics = { path = "../../../../lib/metrics" } +rivet-runtime = { path = "../../../../lib/runtime" } +s3-util = { path = "../../../../lib/s3-util" } +serde_yaml = "0.9" +ssh2 = "0.9.4" +thiserror = "1.0" 
+util-cluster = { package = "rivet-util-cluster", path = "../util" } + +cluster-datacenter-get = { path = "../ops/datacenter-get" } +cluster-datacenter-list = { path = "../ops/datacenter-list" } +cluster-datacenter-topology-get = { path = "../ops/datacenter-topology-get" } +linode-instance-type-get = { path = "../../linode/ops/instance-type-get" } +linode-server-destroy = { path = "../../linode/ops/server-destroy" } +linode-server-provision = { path = "../../linode/ops/server-provision" } +token-create = { path = "../../token/ops/create" } + +[dependencies.nomad_client] +git = "https://github.com/rivet-gg/nomad-client" +rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../lib/chirp/worker" } diff --git a/svc/pkg/cluster/worker/Service.toml b/svc/pkg/cluster/worker/Service.toml new file mode 100644 index 0000000000..e4296c0caa --- /dev/null +++ b/svc/pkg/cluster/worker/Service.toml @@ -0,0 +1,15 @@ +[service] +name = "cluster-worker" + +[runtime] +kind = "rust" + +[consumer] + +[secrets] +"rivet/api_route/token" = {} +"cloudflare/terraform/auth_token" = {} +"ssh/server/private_key_openssh" = {} + +[databases] +bucket-build = {} diff --git a/svc/pkg/cluster/worker/src/lib.rs b/svc/pkg/cluster/worker/src/lib.rs new file mode 100644 index 0000000000..c604a529b6 --- /dev/null +++ b/svc/pkg/cluster/worker/src/lib.rs @@ -0,0 +1,8 @@ +pub mod workers; + +#[derive(thiserror::Error, Debug)] +#[error("cloudflare: {source}")] +pub struct CloudflareError { + #[from] + source: anyhow::Error, +} diff --git a/svc/pkg/cluster/worker/src/workers/create.rs b/svc/pkg/cluster/worker/src/workers/create.rs new file mode 100644 index 0000000000..8f6aee608d --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/create.rs @@ -0,0 +1,33 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker(name = "cluster-create")] +async fn 
worker(ctx: &OperationContext) -> GlobalResult<()> { + let cluster_id = unwrap_ref!(ctx.cluster_id).as_uuid(); + let owner_team_id = ctx.owner_team_id.map(|id| id.as_uuid()); + + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.clusters ( + cluster_id, + name_id, + owner_team_id, + create_ts + ) + VALUES ($1, $2, $3, $4) + ", + cluster_id, + &ctx.name_id, + owner_team_id, + util::timestamp::now(), + ) + .await?; + + msg!([ctx] cluster::msg::create_complete(cluster_id) { + cluster_id: ctx.cluster_id + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_create.rs b/svc/pkg/cluster/worker/src/workers/datacenter_create.rs new file mode 100644 index 0000000000..5079618d9a --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/datacenter_create.rs @@ -0,0 +1,61 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker(name = "cluster-datacenter-create")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let mut config = unwrap_ref!(ctx.config).clone(); + let cluster_id = unwrap_ref!(config.cluster_id).as_uuid(); + let datacenter_id = unwrap_ref!(config.datacenter_id).as_uuid(); + + // Ensure that the desired count is below the max count + for pool in &mut config.pools { + if pool.desired_count > pool.max_count { + pool.desired_count = pool.max_count; + } + } + + // Copy pools config to write to db + let pools = cluster::msg::datacenter_create::Pools { + pools: config.pools.clone(), + }; + + let mut pools_buf = Vec::with_capacity(pools.encoded_len()); + pools.encode(&mut pools_buf)?; + + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.datacenters ( + datacenter_id, + cluster_id, + name_id, + display_name, + provider, + provider_datacenter_id, + pools, + build_delivery_method, + drain_timeout + ) + VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9) + ", + datacenter_id, + cluster_id, + &config.name_id, + &config.display_name, + config.provider as i64, + &config.provider_datacenter_id, + pools_buf, + 
config.build_delivery_method as i64, + config.drain_timeout as i64 + ) + .await?; + + msg!([ctx] cluster::msg::datacenter_scale(datacenter_id) { + datacenter_id: config.datacenter_id, + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_scale.rs b/svc/pkg/cluster/worker/src/workers/datacenter_scale.rs new file mode 100644 index 0000000000..5e4b642d78 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/datacenter_scale.rs @@ -0,0 +1,491 @@ +use std::{ + cmp::Ordering, + collections::HashMap, + iter::{DoubleEndedIterator, Iterator}, +}; + +use chirp_worker::prelude::*; +use futures_util::{StreamExt, TryStreamExt}; +use proto::backend::{self, pkg::*}; + +#[derive(sqlx::FromRow)] +struct ServerRow { + server_id: Uuid, + pool_type: i64, + nomad_node_id: Option, + drain_ts: Option, +} + +struct Server { + server_id: Uuid, + pool_type: backend::cluster::PoolType, + nomad_node_id: Option, + is_draining: bool, +} + +#[worker(name = "cluster-datacenter-scale")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let crdb = ctx.crdb().await?; + let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); + + let (servers, datacenter_res, topology_res) = tokio::try_join!( + // Get only ACTIVE servers + sql_fetch_all!( + [ctx, ServerRow] + " + SELECT + server_id, pool_type, nomad_node_id, drain_ts + FROM db_cluster.servers + WHERE + datacenter_id = $1 AND + -- Filters out servers that are being destroyed/already destroyed + cloud_destroy_ts IS NULL AND + taint_ts IS NULL + ", + datacenter_id, + ), + op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![datacenter_id.into()], + }), + op!([ctx] cluster_datacenter_topology_get { + datacenter_ids: vec![datacenter_id.into()], + }), + )?; + + let mut servers = servers + .into_iter() + .map(|row| { + Ok(Server { + server_id: row.server_id, + pool_type: unwrap!(backend::cluster::PoolType::from_i32(row.pool_type as i32)), + nomad_node_id: row.nomad_node_id, + is_draining: 
row.drain_ts.is_some(), + }) + }) + .collect::>>()?; + + let topology = unwrap!(topology_res.datacenters.first()); + let memory_by_server = topology + .servers + .iter() + .map(|server| { + Ok(( + unwrap_ref!(server.server_id).as_uuid(), + unwrap_ref!(server.usage).memory, + )) + }) + .collect::>>()?; + + // TODO: Sort gg servers by cpu usage + // Sort job servers by memory usage using cluster-datacenter-topology-get + servers.sort_by_key(|server| memory_by_server.get(&server.server_id)); + + let dc = unwrap!(datacenter_res.datacenters.first()); + let cluster_id = unwrap_ref!(dc.cluster_id).as_uuid(); + + for pool in &dc.pools { + scale_servers(ctx, &crdb, cluster_id, dc, &servers, pool).await?; + } + + Ok(()) +} + +async fn scale_servers( + ctx: &OperationContext, + crdb: &CrdbPool, + cluster_id: Uuid, + dc: &backend::cluster::Datacenter, + servers: &[Server], + pool: &backend::cluster::Pool, +) -> GlobalResult<()> { + let pool_type = unwrap!(backend::cluster::PoolType::from_i32(pool.pool_type)); + let desired_count = pool.desired_count.min(pool.max_count) as usize; + + let servers_in_pool = servers + .iter() + .filter(|server| server.pool_type == pool_type); + let draining_servers = servers_in_pool + .clone() + .filter(|server| server.is_draining) + .collect::>(); + let active_server_count = servers_in_pool.clone().count() - draining_servers.len(); + + match desired_count.cmp(&active_server_count) { + Ordering::Less => match pool_type { + backend::cluster::PoolType::Job => { + scale_down_job_servers( + ctx, + crdb, + dc, + servers_in_pool, + active_server_count, + desired_count, + ) + .await? + } + backend::cluster::PoolType::Gg => { + scale_down_gg_servers( + ctx, + crdb, + dc, + servers_in_pool, + active_server_count, + desired_count, + ) + .await? + } + backend::cluster::PoolType::Ats => { + scale_down_ats_servers( + ctx, + crdb, + dc, + servers_in_pool, + active_server_count, + desired_count, + ) + .await? 
+ } + }, + Ordering::Greater => { + scale_up_servers( + ctx, + crdb, + cluster_id, + dc, + draining_servers, + active_server_count, + desired_count, + pool_type, + ) + .await?; + } + Ordering::Equal => {} + } + + Ok(()) +} + +async fn scale_down_job_servers<'a, I: Iterator + Clone>( + ctx: &OperationContext, + crdb: &CrdbPool, + dc: &backend::cluster::Datacenter, + servers: I, + active_server_count: usize, + desired_count: usize, +) -> GlobalResult<()> { + let datacenter_id = unwrap_ref!(dc.datacenter_id).as_uuid(); + + tracing::info!( + ?datacenter_id, + active=%active_server_count, + desired=%desired_count, + "scaling down job" + ); + + let (nomad_servers, no_nomad_servers) = servers + .clone() + .partition::, _>(|server| server.nomad_node_id.is_some()); + + let destroy_count = (active_server_count - desired_count).min(no_nomad_servers.len()); + let drain_count = active_server_count - desired_count - destroy_count; + + // Destroy servers + if destroy_count != 0 { + tracing::info!(count=%destroy_count, "destroying servers"); + + let destroy_candidates = no_nomad_servers.iter().take(destroy_count); + + // Mark servers for destruction in db + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.servers + SET cloud_destroy_ts = $2 + WHERE server_id = ANY($1) + ", + destroy_candidates.clone() + .map(|server| server.server_id) + .collect::>(), + util::timestamp::now(), + ) + .await?; + + for server in destroy_candidates { + tracing::info!( + server_id=%server.server_id, + nomad_node_id=?server.nomad_node_id, + "destroying server" + ); + + msg!([ctx] cluster::msg::server_destroy(server.server_id) { + server_id: Some(server.server_id.into()), + force: false, + }) + .await?; + } + } + + // Drain servers + if drain_count != 0 { + tracing::info!(count=%drain_count, "draining servers"); + + let drain_candidates = nomad_servers.iter().rev().take(drain_count); + + // Mark servers as draining in db + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.servers + SET drain_ts = 
$2 + WHERE server_id = ANY($1) + ", + drain_candidates.clone() + .map(|server| server.server_id) + .collect::>(), + util::timestamp::now(), + ) + .await?; + + for server in drain_candidates { + tracing::info!( + server_id=%server.server_id, + nomad_node_id=?server.nomad_node_id, + "draining server" + ); + + msg!([ctx] cluster::msg::server_drain(server.server_id) { + server_id: Some(server.server_id.into()), + }) + .await?; + } + } + + Ok(()) +} + +async fn scale_down_gg_servers<'a, I: Iterator + DoubleEndedIterator + Clone>( + ctx: &OperationContext, + crdb: &CrdbPool, + dc: &backend::cluster::Datacenter, + servers: I, + active_server_count: usize, + desired_count: usize, +) -> GlobalResult<()> { + let datacenter_id = unwrap_ref!(dc.datacenter_id).as_uuid(); + + tracing::info!( + ?datacenter_id, + active=%active_server_count, + desired=%desired_count, + "scaling down gg" + ); + + let drain_count = active_server_count - desired_count; + + // Drain servers + if drain_count != 0 { + tracing::info!(count=%drain_count, "draining servers"); + + let drain_candidates = servers.rev().take(drain_count); + + // Mark servers as draining in db + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.servers + SET drain_ts = $2 + WHERE server_id = ANY($1) + ", + drain_candidates.clone() + .map(|server| server.server_id) + .collect::>(), + util::timestamp::now(), + ) + .await?; + + for server in drain_candidates { + tracing::info!( + server_id=%server.server_id, + "draining server" + ); + + msg!([ctx] cluster::msg::server_drain(server.server_id) { + server_id: Some(server.server_id.into()), + }) + .await?; + } + } + + Ok(()) +} + +async fn scale_down_ats_servers< + 'a, + I: Iterator + DoubleEndedIterator + Clone, +>( + ctx: &OperationContext, + crdb: &CrdbPool, + dc: &backend::cluster::Datacenter, + servers: I, + active_server_count: usize, + desired_count: usize, +) -> GlobalResult<()> { + let datacenter_id = unwrap_ref!(dc.datacenter_id).as_uuid(); + + tracing::info!( + 
?datacenter_id, + active=%active_server_count, + desired=%desired_count, + "scaling down ats" + ); + + let destroy_count = active_server_count - desired_count; + + // Destroy servers + if destroy_count != 0 { + tracing::info!(count=%destroy_count, "destroying servers"); + + let destroy_candidates = servers.take(destroy_count); + + // Mark servers for destruction in db + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.servers + SET cloud_destroy_ts = $2 + WHERE server_id = ANY($1) + ", + destroy_candidates.clone() + .map(|server| server.server_id) + .collect::>(), + util::timestamp::now(), + ) + .await?; + + for server in destroy_candidates { + tracing::info!( + server_id=%server.server_id, + "destroying server" + ); + + msg!([ctx] cluster::msg::server_destroy(server.server_id) { + server_id: Some(server.server_id.into()), + force: false, + }) + .await?; + } + } + + Ok(()) +} + +async fn scale_up_servers( + ctx: &OperationContext, + crdb: &CrdbPool, + cluster_id: Uuid, + dc: &backend::cluster::Datacenter, + draining_servers: Vec<&Server>, + active_server_count: usize, + desired_count: usize, + pool_type: backend::cluster::PoolType, +) -> GlobalResult<()> { + let datacenter_id = unwrap_ref!(dc.datacenter_id).as_uuid(); + + tracing::info!( + ?datacenter_id, + active=%active_server_count, + draining=%draining_servers.len(), + desired=%desired_count, + ?pool_type, + "scaling up" + ); + + let undrain_count = (desired_count - active_server_count).min(draining_servers.len()); + let provision_count = desired_count - active_server_count - undrain_count; + + // Undrain servers + if undrain_count != 0 { + tracing::info!(count=%undrain_count, "undraining servers"); + + // Because job servers are ordered by memory usage, this will undrain the servers with the most memory + // usage + let undrain_candidates = draining_servers.iter().take(undrain_count); + + // Mark servers as not draining in db + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.servers + SET drain_ts = 
NULL + WHERE server_id = ANY($1) + ", + undrain_candidates.clone() + .map(|server| server.server_id) + .collect::>(), + ) + .await?; + + for draining_server in undrain_candidates { + tracing::info!( + server_id=%draining_server.server_id, + nomad_node_id=?draining_server.nomad_node_id, + "undraining server" + ); + + msg!([ctx] cluster::msg::server_undrain(draining_server.server_id) { + server_id: Some(draining_server.server_id.into()), + }) + .await?; + } + } + + // Create new servers + if provision_count != 0 { + tracing::info!(count=%provision_count, "provisioning servers"); + + futures_util::stream::iter(0..provision_count) + .map(|_| async { + let server_id = Uuid::new_v4(); + + // Write new server to db + sql_execute!( + [ctx, &crdb] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await?; + + msg!([ctx] cluster::msg::server_provision(server_id) { + cluster_id: Some(cluster_id.into()), + datacenter_id: dc.datacenter_id, + server_id: Some(server_id.into()), + pool_type: pool_type as i32, + provider: dc.provider, + tags: Vec::new(), + }) + .await?; + + GlobalResult::Ok(()) + }) + .buffer_unordered(16) + .try_collect::>() + .await?; + } + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_taint.rs b/svc/pkg/cluster/worker/src/workers/datacenter_taint.rs new file mode 100644 index 0000000000..e3c3c59d7d --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/datacenter_taint.rs @@ -0,0 +1,32 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker(name = "cluster-datacenter-taint")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); + + // Taint server records + sql_execute!( + [ctx] + " + UPDATE db_cluster.servers + SET taint_ts = $2 + WHERE + 
datacenter_id = $1 AND + taint_ts IS NULL + ", + &datacenter_id, + util::timestamp::now(), + ) + .await?; + + // Trigger rescale + msg!([ctx] cluster::msg::datacenter_scale(datacenter_id) { + datacenter_id: Some(datacenter_id.into()), + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_taint_complete.rs b/svc/pkg/cluster/worker/src/workers/datacenter_taint_complete.rs new file mode 100644 index 0000000000..6a254c154e --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/datacenter_taint_complete.rs @@ -0,0 +1,53 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +// We wait until a nomad node is registered before destroying tainted servers +#[worker(name = "cluster-datacenter-taint-complete")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + + // NOTE: This does not set the drain ts even though job and gg servers will be drained + // Mark tainted servers of the same datacenter for destruction in db. 
+ let tainted_servers = sql_fetch_all!( + [ctx, (Uuid, i64)] + " + UPDATE db_cluster.servers as s2 + SET cloud_destroy_ts = $2 + FROM db_cluster.servers as s1 + WHERE + s1.datacenter_id = s2.datacenter_id AND + s1.server_id = $1 AND + s2.taint_ts IS NOT NULL AND + s2.cloud_destroy_ts IS NULL + RETURNING s2.server_id, s2.pool_type + ", + &server_id, + util::timestamp::now(), + ) + .await?; + + // Destroy all tainted servers + for (server_id, pool_type) in tainted_servers { + let pool_type = unwrap!(backend::cluster::PoolType::from_i32(pool_type as i32)); + + match pool_type { + backend::cluster::PoolType::Gg | backend::cluster::PoolType::Job => { + msg!([ctx] cluster::msg::server_drain(server_id) { + server_id: Some(server_id.into()), + }) + .await?; + } + backend::cluster::PoolType::Ats => { + msg!([ctx] cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await?; + } + } + } + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/datacenter_update.rs b/svc/pkg/cluster/worker/src/workers/datacenter_update.rs new file mode 100644 index 0000000000..3de36f0f59 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/datacenter_update.rs @@ -0,0 +1,70 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker(name = "cluster-datacenter-update")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); + + let datacenter_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![datacenter_id.into()], + }) + .await?; + let datacenter_config = unwrap!( + datacenter_res.datacenters.first(), + "datacenter does not exist" + ); + + // Update config + let mut new_config = datacenter_config.clone(); + + for pool in &ctx.pools { + let mut current_pool = unwrap!( + new_config + .pools + .iter_mut() + .find(|p| p.pool_type == pool.pool_type), + "attempting to update pool that doesn't exist in current config" + ); + + // Update pool 
config + if !pool.hardware.is_empty() { + current_pool.hardware = pool.hardware.clone(); + } + if let Some(desired_count) = pool.desired_count { + current_pool.desired_count = desired_count; + } + if let Some(max_count) = pool.max_count { + current_pool.max_count = max_count; + } + } + + if let Some(drain_timeout) = ctx.drain_timeout { + new_config.drain_timeout = drain_timeout; + } + + // Encode config + let mut config_buf = Vec::with_capacity(new_config.encoded_len()); + new_config.encode(&mut config_buf)?; + + // Write config + sql_execute!( + [ctx] + " + UPDATE db_cluster.datacenters + SET config = $2 + WHERE datacenter_id = $1 + ", + datacenter_id, + config_buf, + ) + .await?; + + msg!([ctx] cluster::msg::datacenter_scale(datacenter_id) { + datacenter_id: ctx.datacenter_id, + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/mod.rs b/svc/pkg/cluster/worker/src/workers/mod.rs new file mode 100644 index 0000000000..ad788e39f0 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/mod.rs @@ -0,0 +1,35 @@ +pub mod create; +pub mod datacenter_create; +pub mod datacenter_scale; +pub mod datacenter_taint; +pub mod datacenter_taint_complete; +pub mod datacenter_update; +pub mod nomad_node_drain_complete; +pub mod nomad_node_registered; +pub mod server_destroy; +pub mod server_dns_create; +pub mod server_dns_delete; +pub mod server_drain; +pub mod server_install; +pub mod server_install_complete; +pub mod server_provision; +pub mod server_undrain; + +chirp_worker::workers![ + server_dns_delete, + server_install_complete, + datacenter_taint, + datacenter_taint_complete, + server_dns_create, + nomad_node_drain_complete, + datacenter_update, + nomad_node_registered, + datacenter_create, + create, + server_destroy, + server_install, + server_drain, + server_provision, + datacenter_scale, + server_undrain, +]; diff --git a/svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs 
b/svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs new file mode 100644 index 0000000000..8bf7b78a96 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/nomad_node_drain_complete.rs @@ -0,0 +1,41 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker(name = "cluster-nomad-node-drain-complete")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + + let (datacenter_id,) = sql_fetch_one!( + [ctx, (Uuid,)] + " + UPDATE db_cluster.servers + SET cloud_destroy_ts = $2 + WHERE + server_id = $1 + RETURNING datacenter_id + ", + &server_id, + util::timestamp::now(), + ) + .await?; + + msg!([ctx] cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await?; + + // In the case of completely deleting an entire datacenter, we would first set the desired count of all + // of the pools to 0. However, we cannot delete all of the GG nodes if there are still job nodes draining + // because connections may still be open through it. So one GG node is left in this case (see + // `cluster-datacenter-scale`). This message is published so that `cluster-datacenter-scale` checks again + // if there are still job nodes active, and if not, deletes the last GG node. 
+ msg!([ctx] cluster::msg::datacenter_scale(datacenter_id) { + datacenter_id: Some(datacenter_id.into()), + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs b/svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs new file mode 100644 index 0000000000..ee469a526a --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/nomad_node_registered.rs @@ -0,0 +1,28 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker(name = "cluster-nomad-node-registered")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + + sql_execute!( + [ctx] + " + UPDATE db_cluster.servers + SET + nomad_node_id = $2, + nomad_join_ts = $3 + WHERE + server_id = $1 AND + nomad_node_id IS NULL + ", + &server_id, + &ctx.node_id, + util::timestamp::now(), + ) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_destroy.rs b/svc/pkg/cluster/worker/src/workers/server_destroy.rs new file mode 100644 index 0000000000..6326e9cba8 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_destroy.rs @@ -0,0 +1,69 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[derive(sqlx::FromRow)] +struct Server { + datacenter_id: Uuid, + pool_type: i64, + provider_server_id: Option, +} + +#[worker(name = "cluster-server-destroy")] +async fn worker(ctx: &OperationContext) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + let crdb = ctx.crdb().await?; + + let server = sql_fetch_one!( + [ctx, Server, &crdb] + " + SELECT + datacenter_id, pool_type, provider_server_id + FROM db_cluster.servers AS s + LEFT JOIN db_cluster.cloudflare_misc AS cf + ON s.server_id = cf.server_id + WHERE s.server_id = $1 + ", + &server_id, + util::timestamp::now(), + ) + .await?; + if server.provider_server_id.is_none() && !ctx.force { + bail!("server is not completely provisioned yet, retrying"); + } + + let 
datacenter_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![server.datacenter_id.into()], + }) + .await?; + let datacenter = unwrap!(datacenter_res.datacenters.first()); + let provider = unwrap!(backend::cluster::Provider::from_i32(datacenter.provider)); + + match provider { + backend::cluster::Provider::Linode => { + tracing::info!(?server_id, "destroying linode server"); + + op!([ctx] linode_server_destroy { + server_id: ctx.server_id, + }) + .await?; + } + } + + // Delete DNS record + let pool_type = unwrap!(backend::cluster::PoolType::from_i32( + server.pool_type as i32 + )); + if let backend::cluster::PoolType::Gg = pool_type { + msg!([ctx] cluster::msg::server_dns_delete(server_id) { + server_id: ctx.server_id, + }) + .await?; + } + + msg!([ctx] cluster::msg::server_destroy_complete(server_id) { + server_id: ctx.server_id, + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_dns_create.rs b/svc/pkg/cluster/worker/src/workers/server_dns_create.rs new file mode 100644 index 0000000000..48535c2383 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_dns_create.rs @@ -0,0 +1,112 @@ +use std::net::Ipv4Addr; + +use chirp_worker::prelude::*; +use cloudflare::{endpoints as cf, framework as cf_framework, framework::async_api::ApiClient}; +use proto::backend::pkg::*; + +#[derive(sqlx::FromRow)] +struct Server { + datacenter_id: Uuid, + public_ip: String, + cloud_destroy_ts: Option, +} + +#[worker(name = "cluster-server-dns-create")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + + let server = sql_fetch_one!( + [ctx, Server] + " + SELECT + datacenter_id, public_ip, cloud_destroy_ts + FROM db_cluster.servers + WHERE server_id = $1 + ", + server_id, + ) + .await?; + + if server.cloud_destroy_ts.is_some() { + tracing::info!("server marked for deletion, not creating dns record"); + return Ok(()); + } + + let cf_token = 
util::env::read_secret(&["cloudflare", "terraform", "auth_token"]).await?; + let zone_id = unwrap!(util::env::cloudflare::zone::job::id(), "dns not configured"); + let public_ip = server.public_ip.as_str().parse::()?; + + // Create cloudflare HTTP client + let client = cf_framework::async_api::Client::new( + cf_framework::auth::Credentials::UserAuthToken { token: cf_token }, + Default::default(), + cf_framework::Environment::Production, + ) + .map_err(crate::CloudflareError::from)?; + + let record_name = format!( + "*.lobby.{}.{}", + server.datacenter_id, + unwrap!(util::env::domain_job()), + ); + let create_record_res = client + .request(&cf::dns::CreateDnsRecord { + zone_identifier: zone_id, + params: cf::dns::CreateDnsRecordParams { + name: &record_name, + content: cf::dns::DnsContent::A { content: public_ip }, + proxied: Some(false), + ttl: Some(60), + priority: None, + }, + }) + .await?; + let record_id = create_record_res.result.id; + + let secondary_record_name = format!( + "lobby.{}.{}", + server.datacenter_id, + unwrap!(util::env::domain_job()), + ); + let create_secondary_record_res = client + .request(&cf::dns::CreateDnsRecord { + zone_identifier: zone_id, + params: cf::dns::CreateDnsRecordParams { + name: &secondary_record_name, + content: cf::dns::DnsContent::A { content: public_ip }, + proxied: Some(false), + ttl: Some(60), + priority: None, + }, + }) + .await; + + // Optionally get secondary record id + let secondary_dns_record_id = create_secondary_record_res + .as_ref() + .ok() + .map(|res| res.result.id.clone()); + + // Save record ids for deletion + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.cloudflare_misc ( + server_id, + dns_record_id, + secondary_dns_record_id + ) + VALUES ($1, $2, $3) + ", + server_id, + record_id, + secondary_dns_record_id, + ) + .await?; + + create_secondary_record_res?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_dns_delete.rs b/svc/pkg/cluster/worker/src/workers/server_dns_delete.rs new 
file mode 100644 index 0000000000..956bf982e4 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_dns_delete.rs @@ -0,0 +1,70 @@ +use chirp_worker::prelude::*; +use cloudflare::{endpoints as cf, framework as cf_framework, framework::async_api::ApiClient}; +use proto::backend::pkg::*; + +#[worker(name = "cluster-server-dns-delete")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + let crdb = ctx.crdb().await?; + + let row = sql_fetch_optional!( + [ctx, (String, Option), &crdb] + " + SELECT dns_record_id, secondary_dns_record_id + FROM db_cluster.cloudflare_misc + WHERE server_id = $1 + ", + &server_id, + util::timestamp::now(), + ) + .await?; + let Some((dns_record_id, secondary_dns_record_id)) = row else { + tracing::warn!(?server_id, "server has no dns record"); + + return Ok(()); + }; + + let cf_token = util::env::read_secret(&["cloudflare", "terraform", "auth_token"]).await?; + let zone_id = unwrap!(util::env::cloudflare::zone::job::id(), "dns not configured"); + + // Create cloudflare HTTP client + let client = cf_framework::async_api::Client::new( + cf_framework::auth::Credentials::UserAuthToken { token: cf_token }, + Default::default(), + cf_framework::Environment::Production, + ) + .map_err(crate::CloudflareError::from)?; + + // Delete main record + client + .request(&cf::dns::DeleteDnsRecord { + zone_identifier: zone_id, + identifier: &dns_record_id, + }) + .await?; + + // Delete secondary record + if let Some(secondary_dns_record_id) = secondary_dns_record_id { + client + .request(&cf::dns::DeleteDnsRecord { + zone_identifier: zone_id, + identifier: &secondary_dns_record_id, + }) + .await?; + } + + // Remove db record + sql_execute!( + [ctx, &crdb] + " + DELETE FROM db_cluster.cloudflare_misc + WHERE server_id = $1 + ", + server_id, + ) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_drain.rs 
b/svc/pkg/cluster/worker/src/workers/server_drain.rs new file mode 100644 index 0000000000..7aebb208f3 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_drain.rs @@ -0,0 +1,99 @@ +use chirp_worker::prelude::*; +use nomad_client::{ + apis::{configuration::Configuration, nodes_api}, + models, +}; +use proto::backend::{self, pkg::*}; + +lazy_static::lazy_static! { + static ref NOMAD_CONFIG: Configuration = + nomad_util::new_config_from_env().unwrap(); +} + +#[derive(sqlx::FromRow)] +struct Server { + datacenter_id: Uuid, + pool_type: i64, + nomad_node_id: Option, +} + +#[worker(name = "cluster-server-drain")] +async fn worker(ctx: &OperationContext) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + + let server = sql_fetch_one!( + [ctx, Server] + " + SELECT + datacenter_id, pool_type, nomad_node_id + FROM db_cluster.servers + WHERE server_id = $1 + ", + server_id, + ) + .await?; + + // Fetch datacenter config + let datacenter_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![server.datacenter_id.into()], + }) + .await?; + let datacenter = unwrap!(datacenter_res.datacenters.first()); + + let pool_type = unwrap!(backend::cluster::PoolType::from_i32( + server.pool_type as i32 + )); + match pool_type { + backend::cluster::PoolType::Job => { + let Some(nomad_node_id) = server.nomad_node_id else { + tracing::error!("server does not have nomad running, cannot drain"); + return Ok(()); + }; + + nodes_api::update_node_drain( + &NOMAD_CONFIG, + &nomad_node_id, + models::NodeUpdateDrainRequest { + drain_spec: Some(Box::new(models::DrainSpec { + deadline: Some(datacenter.drain_timeout as i64), + ignore_system_jobs: None, + })), + mark_eligible: None, + meta: None, + node_id: Some(nomad_node_id.clone()), + }, + None, + None, + None, + None, + None, + None, + None, + None, + None, + ) + .await?; + + // Prevent new matchmaker requests to the node running on this server + msg!([ctx] mm::msg::nomad_node_closed_set(&nomad_node_id) 
{ + datacenter_id: Some(server.datacenter_id.into()), + nomad_node_id: nomad_node_id.clone(), + is_closed: true, + }) + .await?; + } + backend::cluster::PoolType::Gg => { + // Delete DNS record + msg!([ctx] cluster::msg::server_dns_delete(server_id) { + server_id: ctx.server_id, + }) + .await?; + } + _ => { + // Gracefully fail + tracing::error!("cannot undrain this pool type: {:?}", pool_type); + } + } + + Ok(()) +} diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/components.rs b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components.rs similarity index 58% rename from lib/bolt/core/src/dep/terraform/install_scripts/components.rs rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components.rs index 76350046ff..16d9f3ab86 100644 --- a/lib/bolt/core/src/dep/terraform/install_scripts/components.rs +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/components.rs @@ -1,15 +1,10 @@ use std::collections::HashMap; -use anyhow::{Context, Result}; -use indexmap::IndexMap; +use chirp_worker::prelude::*; +use include_dir::{include_dir, Dir}; use indoc::{formatdoc, indoc}; -use serde_json::json; -use tokio::fs; - -use crate::{ - context::ProjectContext, - dep::terraform::{net, output::Cert, servers::Server}, -}; +use proto::backend; +use s3_util::Provider; /// Service that gets exposed from the Traefik tunnel. 
pub struct TunnelService { @@ -52,10 +47,12 @@ pub const TUNNEL_SERVICES: &[TunnelService] = &[ ]; pub fn common() -> String { - vec![ - format!("apt-get update -y"), - format!("apt-get install -y apt-transport-https ca-certificates gnupg2 software-properties-common curl jq unzip"), - ].join("\n") + indoc!( + " + apt-get update -y + apt-get install -y apt-transport-https ca-certificates gnupg2 software-properties-common curl jq unzip + " + ).to_string() } pub fn node_exporter() -> String { @@ -70,44 +67,45 @@ pub fn docker() -> String { } pub fn lz4() -> String { - format!("apt-get install -y lz4") + "apt-get install -y lz4".to_string() } pub fn skopeo() -> String { - format!("apt-get install -y skopeo") + "apt-get install -y skopeo".to_string() } pub fn umoci() -> String { - formatdoc!( + indoc!( r#" curl -Lf -o /usr/bin/umoci "https://github.com/opencontainers/umoci/releases/download/v0.4.7/umoci.amd64" chmod +x /usr/bin/umoci "# - ) + ).to_string() } pub fn cnitool() -> String { - formatdoc!( + indoc!( r#" curl -Lf -o /usr/bin/cnitool "https://github.com/rivet-gg/cni/releases/download/v1.1.2-build3/cnitool" chmod +x /usr/bin/cnitool "# - ) + ).to_string() } pub fn cni_plugins() -> String { include_str!("files/cni_plugins.sh").to_string() } -pub fn nomad(server: &Server) -> String { +pub fn nomad_install() -> String { + include_str!("files/nomad_install.sh").to_string() +} + +pub fn nomad_configure() -> String { let servers = &["127.0.0.1:5000", "127.0.0.1:5001", "127.0.0.1:5002"]; - include_str!("files/nomad.sh") - .replace("__REGION_ID__", &server.region_id) - .replace("__NODE_NAME__", &server.name) + include_str!("files/nomad_configure.sh") // HACK: Hardcoded to Linode .replace("__PUBLIC_IFACE__", "eth0") - .replace("__VLAN_IP__", &server.vlan_ip.to_string()) // HACK: Hardcoded to Linode .replace("__VLAN_IFACE__", "eth1") .replace( @@ -118,8 +116,14 @@ pub fn nomad(server: &Server) -> String { .collect::>() .join(", "), ) - .replace("__GG_VLAN_SUBNET__", 
&net::gg::vlan_ip_net().to_string()) - .replace("__ATS_VLAN_SUBNET__", &net::ats::vlan_ip_net().to_string()) + .replace( + "__GG_VLAN_SUBNET__", + &util::net::gg::vlan_ip_net().to_string(), + ) + .replace( + "__ATS_VLAN_SUBNET__", + &util::net::ats::vlan_ip_net().to_string(), + ) } /// Installs Traefik, but does not create the Traefik service. @@ -127,18 +131,23 @@ pub fn traefik() -> String { include_str!("files/traefik.sh").to_string() } +pub struct TlsCert { + pub cert_pem: String, + pub key_pem: String, +} + pub struct TraefikInstance { pub name: String, pub static_config: String, pub dynamic_config: String, - pub tls_certs: IndexMap, - pub tcp_server_transports: IndexMap, + pub tls_certs: HashMap, + pub tcp_server_transports: HashMap, } pub struct ServerTransport { pub server_name: String, pub root_cas: Vec, - pub certs: Vec, + pub certs: Vec, } /// Creates a Traefik instance. @@ -243,31 +252,32 @@ pub fn traefik_instance(config: TraefikInstance) -> String { script } -pub fn traefik_tunnel( - _ctx: &ProjectContext, - k8s_infra: &crate::dep::terraform::output::K8sInfra, - tls: &crate::dep::terraform::output::Tls, -) -> String { +pub fn traefik_tunnel() -> GlobalResult { // Build transports for each service - let mut tcp_server_transports = IndexMap::new(); + let mut tcp_server_transports = HashMap::new(); for TunnelService { name, .. 
} in TUNNEL_SERVICES { tcp_server_transports.insert( name.to_string(), ServerTransport { server_name: format!("{name}.tunnel.rivet.gg"), - root_cas: vec![(*tls.root_ca_cert_pem).clone()], - certs: vec![(*tls.tls_cert_locally_signed_job).clone()], + root_cas: vec![util::env::var("TLS_ROOT_CA_CERT_PEM")?], + certs: vec![TlsCert { + cert_pem: util::env::var("TLS_CERT_LOCALLY_SIGNED_JOB_CERT_PEM")?, + key_pem: util::env::var("TLS_CERT_LOCALLY_SIGNED_JOB_KEY_PEM")?, + }], }, ); } - traefik_instance(TraefikInstance { + Ok(traefik_instance(TraefikInstance { name: "tunnel".into(), static_config: tunnel_traefik_static_config(), - dynamic_config: tunnel_traefik_dynamic_config(&*k8s_infra.traefik_tunnel_external_ip), + dynamic_config: tunnel_traefik_dynamic_config(&util::env::var( + "K8S_TRAEFIK_TUNNEL_EXTERNAL_IP", + )?), tls_certs: Default::default(), tcp_server_transports, - }) + })) } fn tunnel_traefik_static_config() -> String { @@ -314,8 +324,12 @@ fn tunnel_traefik_dynamic_config(tunnel_external_ip: &str) -> String { config } +pub fn vector_install() -> String { + include_str!("files/vector_install.sh").to_string() +} + pub struct VectorConfig { - pub prometheus_targets: IndexMap, + pub prometheus_targets: HashMap, } pub struct VectorPrometheusTarget { @@ -323,7 +337,7 @@ pub struct VectorPrometheusTarget { pub scrape_interval: usize, } -pub fn vector(config: &VectorConfig) -> String { +pub fn vector_configure(config: &VectorConfig, pool_type: backend::cluster::PoolType) -> String { let sources = config .prometheus_targets .keys() @@ -331,18 +345,28 @@ pub fn vector(config: &VectorConfig) -> String { .collect::>() .join(", "); + let pool_type_str = match pool_type { + backend::cluster::PoolType::Job => "job", + backend::cluster::PoolType::Gg => "gg", + backend::cluster::PoolType::Ats => "ats", + }; + let mut config_str = formatdoc!( r#" [api] enabled = true - + [transforms.add_meta] type = "remap" inputs = [{sources}] source = ''' + .tags.server_id = "__SERVER_ID__" + 
.tags.datacenter_id = "__DATACENTER_ID__" + .tags.cluster_id = "__CLUSTER_ID__" + .tags.pool_type = "{pool_type_str}" .tags.public_ip = "${{PUBLIC_IP}}" ''' - + [sinks.vector_sink] type = "vector" inputs = ["add_meta"] @@ -370,12 +394,18 @@ pub fn vector(config: &VectorConfig) -> String { )); } - include_str!("files/vector.sh").replace("__VECTOR_CONFIG__", &config_str) + include_str!("files/vector_configure.sh").replace("__VECTOR_CONFIG__", &config_str) } -pub async fn traffic_server(ctx: &ProjectContext, server: &Server) -> Result { +const TRAFFIC_SERVER_IMAGE: &str = "ghcr.io/rivet-gg/apache-traffic-server:9934dc2"; + +pub fn traffic_server_install() -> String { + include_str!("files/traffic_server_install.sh").replace("__IMAGE__", TRAFFIC_SERVER_IMAGE) +} + +pub async fn traffic_server_configure() -> GlobalResult { // Write config to files - let config = traffic_server_config(ctx, server).await?; + let config = traffic_server_config().await?; let mut config_scripts = config .into_iter() .map(|(k, v)| format!("cat << 'EOF' > /etc/trafficserver/{k}\n{v}\nEOF\n")) @@ -393,45 +423,21 @@ pub async fn traffic_server(ctx: &ProjectContext, server: &Server) -> Result Result> { - let config_dir = ctx - .path() - .join("infra") - .join("misc") - .join("game_guard") - .join("traffic_server"); +static TRAFFIC_SERVER_CONFIG_DIR: Dir<'_> = include_dir!( + "$CARGO_MANIFEST_DIR/src/workers/server_install/install_scripts/files/traffic_server" +); +async fn traffic_server_config() -> GlobalResult> { // Static files - let mut config_files = Vec::<(String, String)>::new(); - let mut static_dir = fs::read_dir(config_dir.join("etc")).await?; - while let Some(entry) = static_dir.next_entry().await? { - let meta = entry.metadata().await?; - if meta.is_file() { - let key = entry - .path() - .file_name() - .context("path.file_name")? - .to_str() - .context("as_str")? 
- .to_string(); - let value = fs::read_to_string(entry.path()).await?; - let value = value.replace("__VLAN_IP__", &server.vlan_ip.to_string()); - config_files.push((key, value)); - } - } + let mut config_files = Vec::new(); + collect_config_files(&TRAFFIC_SERVER_CONFIG_DIR, &mut config_files)?; // Storage (default value of 64 gets overwritten in config script) let volume_size = 64; @@ -442,20 +448,19 @@ async fn traffic_server_config( // Remap & S3 let mut remap = String::new(); - let (default_s3_provider, _) = ctx.default_s3_provider()?; - if ctx.ns().s3.providers.minio.is_some() { - let output = gen_s3_provider(ctx, s3_util::Provider::Minio, default_s3_provider).await?; + let default_s3_provider = Provider::default()?; + if s3_util::s3_provider_active("bucket-build", Provider::Minio) { + let output = gen_s3_provider(Provider::Minio, default_s3_provider).await?; remap.push_str(&output.append_remap); config_files.extend(output.config_files); } - if ctx.ns().s3.providers.backblaze.is_some() { - let output = - gen_s3_provider(ctx, s3_util::Provider::Backblaze, default_s3_provider).await?; + if s3_util::s3_provider_active("bucket-build", Provider::Backblaze) { + let output = gen_s3_provider(Provider::Backblaze, default_s3_provider).await?; remap.push_str(&output.append_remap); config_files.extend(output.config_files); } - if ctx.ns().s3.providers.aws.is_some() { - let output = gen_s3_provider(ctx, s3_util::Provider::Aws, default_s3_provider).await?; + if s3_util::s3_provider_active("bucket-build", Provider::Aws) { + let output = gen_s3_provider(Provider::Aws, default_s3_provider).await?; remap.push_str(&output.append_remap); config_files.extend(output.config_files); } @@ -464,6 +469,25 @@ async fn traffic_server_config( Ok(config_files) } +fn collect_config_files( + dir: &include_dir::Dir, + config_files: &mut Vec<(String, String)>, +) -> GlobalResult<()> { + for entry in dir.entries() { + match entry { + include_dir::DirEntry::File(file) => { + let key = 
unwrap!(unwrap!(file.path().file_name()).to_str()).to_string(); + + let value = unwrap!(file.contents_utf8()); + config_files.push((key, value.to_string())); + } + include_dir::DirEntry::Dir(dir) => collect_config_files(dir, config_files)?, + } + } + + Ok(()) +} + struct GenRemapS3ProviderOutput { /// Append to remap.config append_remap: String, @@ -473,14 +497,14 @@ struct GenRemapS3ProviderOutput { } async fn gen_s3_provider( - ctx: &ProjectContext, - provider: s3_util::Provider, - default_s3_provider: s3_util::Provider, -) -> Result { + provider: Provider, + default_s3_provider: Provider, +) -> GlobalResult { let mut remap = String::new(); let provider_name = provider.as_str(); - let config = ctx.s3_config(provider).await?; - let creds = ctx.s3_credentials(provider).await?; + let endpoint_external = s3_util::s3_endpoint_external("bucket-build", provider)?; + let region = s3_util::s3_region("bucket-build", provider)?; + let (access_key_id, secret_access_key) = s3_util::s3_credentials("bucket-build", provider)?; // Build plugin chain let plugins = format!("@plugin=tslua.so @pparam=/etc/trafficserver/strip_headers.lua @plugin=s3_auth.so @pparam=--config @pparam=s3_auth_v4_{provider_name}.config"); @@ -488,15 +512,11 @@ async fn gen_s3_provider( // Add remap remap.push_str(&format!( "map /s3-cache/{provider_name} {endpoint_external} {plugins}\n", - endpoint_external = config.endpoint_external )); // Add default route if default_s3_provider == provider { - remap.push_str(&format!( - "map /s3-cache {endpoint_external} {plugins}\n", - endpoint_external = config.endpoint_external, - )); + remap.push_str(&format!("map /s3-cache {endpoint_external} {plugins}\n",)); } // Add credentials @@ -505,13 +525,11 @@ async fn gen_s3_provider( format!("s3_auth_v4_{provider_name}.config"), formatdoc!( r#" - access_key={access_key} - secret_key={secret_key} + access_key={access_key_id} + secret_key={secret_access_key} version=4 v4-region-map=s3_region_map_{provider_name}.config "#, - 
access_key = creds.access_key_id, - secret_key = creds.access_key_secret, ), )); config_files.push(( @@ -521,8 +539,8 @@ async fn gen_s3_provider( # Default region {s3_host}: {s3_region} "#, - s3_host = config.endpoint_external.split_once("://").unwrap().1, - s3_region = config.region, + s3_host = endpoint_external.split_once("://").unwrap().1, + s3_region = region, ), )); @@ -532,109 +550,22 @@ async fn gen_s3_provider( }) } -pub fn envoy() -> String { - include_str!("files/envoy.sh").to_string() -} +pub fn rivet_create_hook(initialize_immediately: bool) -> GlobalResult { + let domain_main_api = unwrap!(util::env::domain_main_api(), "no cdn"); + let mut script = + include_str!("files/rivet_create_hook.sh").replace("__DOMAIN_MAIN_API__", domain_main_api); -pub fn outbound_proxy(server: &Server, all_servers: &HashMap) -> Result { - // Build ATS endpoints - let mut ats_servers = all_servers - .values() - .filter(|x| server.region_id == x.region_id && x.pool_id == "ats") - .collect::>(); - // Use the same sorting as ATS for consistent Maglev hashing - ats_servers.sort_by_key(|x| x.index); - let ats_endpoints = ats_servers - .iter() - .map(|x| { - json!({ - "endpoint": { - "address": { - "socket_address": { - "address": x.vlan_ip.to_string(), - "port_value": 8080 - } - } - } - }) - }) - .collect::>(); + if initialize_immediately { + script.push_str("systemctl start rivet_hook\n"); + } - // Build config - let config = json!({ - "static_resources": { - "listeners": [{ - "name": "ats", - "address": { - "socket_address": { - "address": "0.0.0.0", - "port_value": 8080 - } - }, - "filter_chains": [{ - "filters": [{ - "name": "envoy.filters.network.http_connection_manager", - "typed_config": { - "@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager", - "stat_prefix": "ingress_http", - "route_config": { - "name": "local_route", - "virtual_hosts": [ - { - "name": "backend", - "domains": ["*"], - "routes": [{ - "match": { 
"prefix": "/" }, - "route": { - "cluster": "ats_backend", - "hash_policy": [{ - "header": { "header_name": ":path" } - }] - } - }] - } - ] - }, - "http_filters": [ - { - "name": "envoy.filters.http.router", - "typed_config": { - "@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router" - } - } - ] - } - }] - }] - }], - "clusters": [{ - "name": "ats_backend", - "connect_timeout": "1s", - // Use consistent hashing to reliably send the same request to the same server - // - // In order for this to work, the load balancer must be configured with the same: - // - Table size - // - List of backend nodes (in the same order) - // - Hash key for each endpoint (uses the host by default) - // - // See https://www.envoyproxy.io/docs/envoy/latest/intro/arch_overview/upstream/load_balancing/load_balancers#arch-overview-load-balancing-types-maglev - "lb_policy": "MAGLEV", - "maglev_lb_config": { - // Ensure the same table size for consistent hashing across load balancers - "table_size": 65537 - }, - "load_assignment": { - "cluster_name": "ats_backend", - "endpoints": [ - { - "lb_endpoints": ats_endpoints - } - ] - } - }] - } - }); + Ok(script) +} + +pub fn rivet_fetch_info(server_token: &str) -> GlobalResult { + let domain_main_api = unwrap!(util::env::domain_main_api(), "no cdn"); - let yaml_config = serde_yaml::to_string(&config)?; - Ok(include_str!("files/outbound_proxy.sh").replace("__ENVOY_CONFIG__", &yaml_config)) + Ok(include_str!("files/rivet_fetch_info.sh") + .replace("__SERVER_TOKEN__", server_token) + .replace("__DOMAIN_MAIN_API__", domain_main_api)) } diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/cni_plugins.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/cni_plugins.sh similarity index 100% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/cni_plugins.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/cni_plugins.sh diff --git 
a/lib/bolt/core/src/dep/terraform/install_scripts/files/docker.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/docker.sh similarity index 94% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/docker.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/docker.sh index 513812a9fd..bd54002eae 100644 --- a/lib/bolt/core/src/dep/terraform/install_scripts/files/docker.sh +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/docker.sh @@ -4,9 +4,8 @@ curl -fsSL https://download.docker.com/linux/debian/gpg | apt-key add - # Add Docker repository echo 'deb [arch=amd64] https://download.docker.com/linux/debian bullseye stable' > /etc/apt/sources.list.d/docker.list -# Install Docker -apt-get update -y -apt-get install -y docker-ce docker-ce-cli containerd.io +# Create directories +mkdir -p /etc/docker # Add daemon.json # @@ -26,6 +25,9 @@ cat << 'EOF' > /etc/docker/daemon.json } EOF +# Install Docker +apt-get update -y +apt-get install -y docker-ce docker-ce-cli containerd.io + # Test Docker installation docker run hello-world - diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/node_exporter.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/node_exporter.sh similarity index 100% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/node_exporter.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/node_exporter.sh diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/nomad.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_configure.sh similarity index 89% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/nomad.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_configure.sh index cd06f5e87e..75264cc868 100644 --- a/lib/bolt/core/src/dep/terraform/install_scripts/files/nomad.sh +++ 
b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_configure.sh @@ -1,43 +1,5 @@ -# !!!!!!!!!!!!!!!!!!!!! -# -# DO NOT UPGRADE -# -# This is the last MPL -# licensed version of -# Nomad. -# -# !!!!!!!!!!!!!!!!!!!!!! -version="1.6.0" - PUBLIC_IP=$(ip -4 route get 1.0.0.0 | awk '{print $7; exit}') -# Allow container traffic to be routed through IP tables -# -# See https://developer.hashicorp.com/nomad/docs/install#post-installation-steps -cat << 'EOF' > /etc/sysctl.d/20-nomad.conf -net.bridge.bridge-nf-call-arptables = 1 -net.bridge.bridge-nf-call-ip6tables = 1 -net.bridge.bridge-nf-call-iptables = 1 -EOF - -sysctl --system - -# Download and unzip nomad -mkdir -p /opt/nomad-$version -curl -L -o /tmp/nomad.zip https://releases.hashicorp.com/nomad/$version/nomad_${version}_linux_amd64.zip -unzip -o /tmp/nomad.zip -d /opt/nomad-$version/ - -# TODO: Verify hash - -# Create symlink in /usr/local/bin -ln -sf /opt/nomad-$version/nomad /usr/local/bin/nomad - -# Test nomad installation -if ! 
nomad version | grep -q "Nomad v$version"; then - echo "Nomad version mismatch" - exit 1 -fi - # Create admin chain that only accepts traffic from the GG subnet # # See Nomad equivalent: https://github.com/hashicorp/nomad/blob/a8f0f2612ef9d283ed903721f8453a0c0c3f51c5/client/allocrunner/networking_bridge_linux.go#L73 @@ -330,7 +292,7 @@ mkdir -p /etc/nomad.d # Copy HCL files cat << EOF > /etc/nomad.d/common.hcl region = "global" -datacenter = "__REGION_ID__" +datacenter = "__DATACENTER_ID__" data_dir = "/opt/nomad/data" name = "__NODE_NAME__" @@ -370,6 +332,9 @@ client { min_dynamic_port = 20000 max_dynamic_port = 25999 + # Intentionally high kill timeout, killing allocations is handled manually + max_kill_timeout = "86400s" + server_join { retry_join = [ __SERVER_JOIN__ @@ -379,21 +344,18 @@ client { } meta { - # See https://github.com/hashicorp/nomad/issues/9887 - "connect.sidecar_image" = "envoyproxy/envoy:v1.18.3" + "pool-type" = "job" + "server-id" = "__SERVER_ID__" + "datacenter-id" = "__DATACENTER_ID__" + "cluster-id" = "__CLUSTER_ID__" - "pool-id" = "job" "network-vlan-ipv4" = "__VLAN_IP__" "network-public-ipv4" = "${PUBLIC_IP}" } - # TODO: This is disabled on job nodes for now because this prevents - # scheduling full cores at max capacity reserved { - # See tier_list::RESERVE_SYSTEM_CPU - # cpu = 500 - # See tier_list::RESERVE_SYSTEM_MEMORY - # memory = 512 + # See tier_list::RESERVE_MEMORY + memory = 1024 disk = 10000 } } diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_install.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_install.sh new file mode 100644 index 0000000000..418b9effb9 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/nomad_install.sh @@ -0,0 +1,37 @@ +# !!!!!!!!!!!!!!!!!!!!! +# +# DO NOT UPGRADE +# +# This is the last MPL +# licensed version of +# Nomad. +# +# !!!!!!!!!!!!!!!!!!!!!! 
+version="1.6.0" + +# Allow container traffic to be routed through IP tables +# +# See https://developer.hashicorp.com/nomad/docs/install#post-installation-steps +cat << 'EOF' > /etc/sysctl.d/20-nomad.conf +net.bridge.bridge-nf-call-arptables = 1 +net.bridge.bridge-nf-call-ip6tables = 1 +net.bridge.bridge-nf-call-iptables = 1 +EOF + +sysctl --system + +# Download and unzip nomad +mkdir -p /opt/nomad-$version +curl -L -o /tmp/nomad.zip https://releases.hashicorp.com/nomad/$version/nomad_${version}_linux_amd64.zip +unzip -o /tmp/nomad.zip -d /opt/nomad-$version/ + +# TODO: Verify hash + +# Create symlink in /usr/local/bin +ln -sf /opt/nomad-$version/nomad /usr/local/bin/nomad + +# Test nomad installation +if ! nomad version | grep -q "Nomad v$version"; then + echo "Nomad version mismatch" + exit 1 +fi diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_create_hook.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_create_hook.sh new file mode 100644 index 0000000000..906dc860db --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_create_hook.sh @@ -0,0 +1,22 @@ +# Create systemd service file +cat << 'EOF' > /etc/systemd/system/rivet_hook.service +[Unit] +Description=Rivet Hook +Requires=network-online.target +After=network-online.target +ConditionPathExists=!/var/tmp/rivet_hook.completed + +[Service] +User=root +Group=root +Type=oneshot +ExecStart=/usr/bin/rivet_hook.sh +ExecStartPost=/bin/touch /var/tmp/rivet_hook.completed + +[Install] +WantedBy=multi-user.target +EOF + +# Enable initialze script to run on reboot +systemctl daemon-reload +systemctl enable rivet_hook diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_info.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_info.sh new file mode 100644 index 0000000000..c9f3219386 --- /dev/null +++ 
b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/rivet_fetch_info.sh @@ -0,0 +1,26 @@ +PUBLIC_IP=$(ip -4 route get 1.0.0.0 | awk '{print $7; exit}') + +# Get server info from rivet +response=$( + curl -f \ + -H "Authorization: Bearer __SERVER_TOKEN__" \ + "https://__DOMAIN_MAIN_API__/provision/servers/$PUBLIC_IP/info" +) + +# Fetch data +name=$(echo $response | jq -r '.name') +server_id=$(echo $response | jq -r '.server_id') +datacenter_id=$(echo $response | jq -r '.datacenter_id') +cluster_id=$(echo $response | jq -r '.cluster_id') +vlan_ip=$(echo $response | jq -r '.vlan_ip') + +# Template initialize script +initialize_script="/usr/bin/rivet_initialize.sh" +sed -i "s/__NODE_NAME__/$name/g" $initialize_script +sed -i "s/__SERVER_ID__/$server_id/g" $initialize_script +sed -i "s/__DATACENTER_ID__/$datacenter_id/g" $initialize_script +sed -i "s/__CLUSTER_ID__/$cluster_id/g" $initialize_script +sed -i "s/__VLAN_IP__/$vlan_ip/g" $initialize_script + +# Run initialize script +"$initialize_script" diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/sysctl.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/sysctl.sh similarity index 100% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/sysctl.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/sysctl.sh diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/traefik.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik.sh similarity index 100% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/traefik.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik.sh diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/traefik_instance.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik_instance.sh similarity index 100% rename from 
lib/bolt/core/src/dep/terraform/install_scripts/files/traefik_instance.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traefik_instance.sh diff --git a/infra/misc/game_guard/traffic_server/etc/cache.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/cache.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/cache.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/cache.config diff --git a/infra/misc/game_guard/traffic_server/etc/hosting.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/hosting.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/hosting.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/hosting.config diff --git a/infra/misc/game_guard/traffic_server/etc/ip_allow.yaml b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ip_allow.yaml similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/ip_allow.yaml rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ip_allow.yaml diff --git a/infra/misc/game_guard/traffic_server/etc/logging.yaml b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/logging.yaml similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/logging.yaml rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/logging.yaml diff --git a/infra/misc/game_guard/traffic_server/etc/parent.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/parent.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/parent.config rename to 
svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/parent.config diff --git a/infra/misc/game_guard/traffic_server/etc/plugin.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/plugin.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/plugin.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/plugin.config diff --git a/infra/misc/game_guard/traffic_server/etc/records.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/records.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/records.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/records.config diff --git a/infra/misc/game_guard/traffic_server/etc/sni.yaml b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/sni.yaml similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/sni.yaml rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/sni.yaml diff --git a/infra/misc/game_guard/traffic_server/etc/socks.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/socks.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/socks.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/socks.config diff --git a/infra/misc/game_guard/traffic_server/etc/splitdns.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/splitdns.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/splitdns.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/splitdns.config diff --git 
a/infra/misc/game_guard/traffic_server/etc/ssl_multicert.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ssl_multicert.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/ssl_multicert.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/ssl_multicert.config diff --git a/infra/misc/game_guard/traffic_server/etc/strategies.yaml b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strategies.yaml similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/strategies.yaml rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strategies.yaml diff --git a/infra/misc/game_guard/traffic_server/etc/strip_headers.lua b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strip_headers.lua similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/strip_headers.lua rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/strip_headers.lua diff --git a/infra/misc/game_guard/traffic_server/etc/trafficserver-release b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/trafficserver-release similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/trafficserver-release rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/trafficserver-release diff --git a/infra/misc/game_guard/traffic_server/etc/volume.config b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/volume.config similarity index 100% rename from infra/misc/game_guard/traffic_server/etc/volume.config rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server/etc/volume.config diff --git 
a/lib/bolt/core/src/dep/terraform/install_scripts/files/traffic_server.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_configure.sh similarity index 91% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/traffic_server.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_configure.sh index beeebf054c..c2e2214766 100644 --- a/lib/bolt/core/src/dep/terraform/install_scripts/files/traffic_server.sh +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_configure.sh @@ -9,6 +9,7 @@ mkdir -p /etc/trafficserver /var/cache/trafficserver /run/trafficserver /var/log # Write config __CONFIG__ +# NOTE: the /run directory is often mounted as a tmpfs and thus will not retain its permissions after a reboot. # Change owner chown -R trafficserver:trafficserver /etc/trafficserver /var/cache/trafficserver /run/trafficserver /var/log/trafficserver diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_install.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_install.sh new file mode 100644 index 0000000000..d112aa7062 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/traffic_server_install.sh @@ -0,0 +1 @@ +/usr/bin/docker pull "__IMAGE__" diff --git a/lib/bolt/core/src/dep/terraform/install_scripts/files/vector.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_configure.sh similarity index 61% rename from lib/bolt/core/src/dep/terraform/install_scripts/files/vector.sh rename to svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_configure.sh index 429ed6076d..8284d4ab0e 100644 --- a/lib/bolt/core/src/dep/terraform/install_scripts/files/vector.sh +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_configure.sh @@ -1,33 
+1,13 @@ PUBLIC_IP=$(ip -4 route get 1.0.0.0 | awk '{print $7; exit}') -version="0.34.1" - -# Create vector user -if ! id -u "vector" &>/dev/null; then - useradd -r -s /bin/false vector -fi - -# Install vector -mkdir -p "/opt/vector-${version}" -curl -L "https://github.com/vectordotdev/vector/releases/download/v${version}/vector-${version}-x86_64-unknown-linux-gnu.tar.gz" -o "/tmp/vector_${version}.tar.gz" -tar zxvf "/tmp/vector_${version}.tar.gz" -C "/opt/vector-${version}" -install -o vector -g vector "/opt/vector-${version}/vector-x86_64-unknown-linux-gnu/bin/vector" /usr/bin/ - -# Check vector version -if vector --version | grep "vector ${version}"; then - echo "Successfully installed Vector ${version}" -else - echo "Vector version mismatch" - exit 1 -fi - # Write config mkdir -p /etc/vector /var/lib/vector -cat << 'EOF' > /etc/vector/vector.toml +cat << EOF > /etc/vector/vector.toml __VECTOR_CONFIG__ EOF +# Vector user created in vector_install.sh script chown -R vector:vector /etc/vector /var/lib/vector # Systemd service diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_install.sh b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_install.sh new file mode 100644 index 0000000000..e0b76b7909 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/files/vector_install.sh @@ -0,0 +1,20 @@ +version="0.34.1" + +# Create vector user +if ! 
id -u "vector" &>/dev/null; then + useradd -r -s /bin/false vector +fi + +# Install vector +mkdir -p "/opt/vector-${version}" +curl -L "https://github.com/vectordotdev/vector/releases/download/v${version}/vector-${version}-x86_64-unknown-linux-gnu.tar.gz" -o "/tmp/vector_${version}.tar.gz" +tar zxvf "/tmp/vector_${version}.tar.gz" -C "/opt/vector-${version}" +install -o vector -g vector "/opt/vector-${version}/vector-x86_64-unknown-linux-gnu/bin/vector" /usr/bin/ + +# Check vector version +if vector --version | grep "vector ${version}"; then + echo "Successfully installed Vector ${version}" +else + echo "Vector version mismatch" + exit 1 +fi diff --git a/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/mod.rs b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/mod.rs new file mode 100644 index 0000000000..694dc34016 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_install/install_scripts/mod.rs @@ -0,0 +1,196 @@ +use std::collections::HashMap; + +use chirp_worker::prelude::*; +use indoc::formatdoc; +use maplit::hashmap; +use proto::backend; + +pub mod components; + +// This script installs all of the software that doesn't need to know anything about the server running +// it (doesn't need to know server id, datacenter id, vlan ip, etc) +pub async fn gen_install( + pool_type: backend::cluster::PoolType, + initialize_immediately: bool, +) -> GlobalResult { + // MARK: Common (pre) + let mut script = vec![ + components::common(), + components::node_exporter(), + components::sysctl(), + components::traefik(), + components::traefik_tunnel()?, + components::vector_install(), + ]; + + // MARK: Specific pool components + match pool_type { + backend::cluster::PoolType::Job => { + script.push(components::docker()); + script.push(components::lz4()); + script.push(components::skopeo()); + script.push(components::umoci()); + script.push(components::cnitool()); + script.push(components::cni_plugins()); + 
script.push(components::nomad_install()); + } + backend::cluster::PoolType::Gg => {} + backend::cluster::PoolType::Ats => { + script.push(components::docker()); + script.push(components::traffic_server_install()); + } + } + + // MARK: Common (post) + script.push(components::rivet_create_hook(initialize_immediately)?); + + let joined = script.join("\n\necho \"======\"\n\n"); + Ok(format!("#!/usr/bin/env bash\nset -eu\n\n{joined}")) +} + +// This script is run by systemd on startup and gets the server's data from the Rivet API +pub async fn gen_hook(server_token: &str) -> GlobalResult { + let mut script = vec![components::rivet_fetch_info(server_token)?]; + + let joined = script.join("\n\necho \"======\"\n\n"); + Ok(format!("#!/usr/bin/env bash\nset -eu\n\n{joined}")) +} + +// This script is templated on the server itself after fetching server data from the Rivet API +// (see gen_hook) After being templated, it is run. +pub async fn gen_initialize(pool_type: backend::cluster::PoolType) -> GlobalResult { + let mut script = Vec::new(); + + let mut prometheus_targets = HashMap::new(); + + // MARK: Common (pre) + prometheus_targets.insert( + "node_exporter".into(), + components::VectorPrometheusTarget { + endpoint: "http://127.0.0.1:9100/metrics".into(), + scrape_interval: 15, + }, + ); + + // MARK: Specific pool components + match pool_type { + backend::cluster::PoolType::Job => { + script.push(components::nomad_configure()); + + prometheus_targets.insert( + "nomad".into(), + components::VectorPrometheusTarget { + endpoint: "http://127.0.0.1:4646/v1/metrics?format=prometheus".into(), + scrape_interval: 15, + }, + ); + } + backend::cluster::PoolType::Gg => { + script.push(components::traefik_instance(components::TraefikInstance { + name: "game_guard".into(), + static_config: gg_traefik_static_config().await?, + dynamic_config: String::new(), + tls_certs: hashmap! 
{ + "letsencrypt_rivet_job".into() => components::TlsCert { + cert_pem: util::env::var("TLS_CERT_LETSENCRYPT_RIVET_JOB_CERT_PEM")?, + key_pem: util::env::var("TLS_CERT_LETSENCRYPT_RIVET_JOB_KEY_PEM")?, + }, + }, + tcp_server_transports: Default::default(), + })); + + prometheus_targets.insert( + "game_guard".into(), + components::VectorPrometheusTarget { + endpoint: "http://127.0.0.1:9980/metrics".into(), + scrape_interval: 15, + }, + ); + } + backend::cluster::PoolType::Ats => { + script.push(components::traffic_server_configure().await?); + } + } + + // MARK: Common (post) + if !prometheus_targets.is_empty() { + script.push(components::vector_configure( + &components::VectorConfig { prometheus_targets }, + pool_type, + )); + } + + let joined = script.join("\n\necho \"======\"\n\n"); + Ok(format!("#!/usr/bin/env bash\nset -eu\n\n{joined}")) +} + +async fn gg_traefik_static_config() -> GlobalResult { + let api_route_token = &util::env::read_secret(&["rivet", "api_route", "token"]).await?; + let http_provider_endpoint = format!( + "http://127.0.0.1:{port}/traefik/config/game-guard?token={api_route_token}&datacenter=__DATACENTER_ID__", + port = components::TUNNEL_API_ROUTE_PORT, + ); + + let mut config = formatdoc!( + r#" + [entryPoints] + [entryPoints.traefik] + address = "127.0.0.1:9980" + + [entryPoints.lb-80] + address = ":80" + + [entryPoints.lb-443] + address = ":443" + + [api] + insecure = true + + [metrics.prometheus] + # See lib/chirp/metrics/src/buckets.rs + buckets = [0.001, 0.0025, 0.005, 0.01, 0.025, 0.05, 0.1, 0.25, 0.5, 1.0, 2.5, 5.0, 10.0, 25.0, 50.0, 100.0] + addEntryPointsLabels = true + addRoutersLabels = true + addServicesLabels = true + + [providers] + [providers.file] + directory = "/etc/game_guard/dynamic" + + [providers.http] + endpoint = "{http_provider_endpoint}" + pollInterval = "0.5s" + "# + ); + + // TCP ports + for port in util::net::job::MIN_INGRESS_PORT_TCP..=util::net::job::MAX_INGRESS_PORT_TCP { + config.push_str(&formatdoc!( + r#" + 
[entryPoints.lb-{port}-tcp] + address = ":{port}/tcp" + + [entryPoints.lb-{port}-tcp.transport.respondingTimeouts] + readTimeout = "12h" + writeTimeout = "12h" + idleTimeout = "30s" + + "# + )); + } + + // UDP ports + for port in util::net::job::MIN_INGRESS_PORT_UDP..=util::net::job::MAX_INGRESS_PORT_UDP { + config.push_str(&formatdoc!( + r#" + [entryPoints.lb-{port}-udp] + address = ":{port}/udp" + + [entryPoints.lb-{port}-udp.udp] + timeout = "15s" + "# + )); + } + + Ok(config) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_install/mod.rs b/svc/pkg/cluster/worker/src/workers/server_install/mod.rs new file mode 100644 index 0000000000..f24aff5b11 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_install/mod.rs @@ -0,0 +1,177 @@ +use std::{ + io::{Read, Write}, + net::TcpStream, + path::Path, +}; + +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; +use ssh2::Session; + +mod install_scripts; + +// 6 months +pub const TOKEN_TTL: i64 = util::duration::days(30 * 6); + +#[worker(name = "cluster-server-install", timeout = 200)] +async fn worker(ctx: &OperationContext) -> GlobalResult<()> { + // Check for stale message + if ctx.req_dt() > util::duration::hours(1) { + tracing::warn!("discarding stale message"); + + return Ok(()); + } + + if let Some(server_id) = ctx.server_id { + let (is_destroying,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS( + SELECT 1 + FROM db_cluster.servers + WHERE server_id = $1 AND + cloud_destroy_ts IS NOT NULL + ) + ", + server_id.as_uuid(), + ) + .await?; + + if is_destroying { + tracing::info!("server marked for deletion, not installing"); + return Ok(()); + } + } + + let public_ip = ctx.public_ip.clone(); + let pool_type = unwrap!(backend::cluster::PoolType::from_i32(ctx.pool_type)); + let private_key_openssh = + util::env::read_secret(&["ssh", "server", "private_key_openssh"]).await?; + + // Create server token for authenticating API calls from the server + let token_res = op!([ctx] 
token_create { + token_config: Some(token::create::request::TokenConfig { + ttl: TOKEN_TTL, + }), + refresh_token_config: None, + issuer: "cluster-worker-server-install".to_owned(), + client: None, + kind: Some(token::create::request::Kind::New(token::create::request::KindNew { + entitlements: vec![ + proto::claims::Entitlement { + kind: Some( + proto::claims::entitlement::Kind::Server(proto::claims::entitlement::Server { }) + ) + } + ], + })), + label: Some("srv".to_owned()), + ..Default::default() + }) + .await?; + let server_token = &unwrap_ref!(token_res.token).token; + + let install_script = + install_scripts::gen_install(pool_type, ctx.initialize_immediately).await?; + let hook_script = install_scripts::gen_hook(server_token).await?; + let initialize_script = install_scripts::gen_initialize(pool_type).await?; + + // Spawn blocking thread for ssh (no async support) + tokio::task::spawn_blocking(move || { + tracing::info!(%public_ip, "connecting over ssh"); + let tcp = TcpStream::connect((public_ip.as_str(), 22))?; + let mut sess = Session::new()?; + sess.set_tcp_stream(tcp); + + if let Err(err) = sess.handshake() { + tracing::error!(%public_ip, ?err, "failed to connect over ssh"); + retry_bail!("failed to connect over ssh"); + } + + tracing::info!(%public_ip, "connected"); + + sess.userauth_pubkey_memory("root", None, &private_key_openssh, None)?; + + tracing::info!("authenticated"); + + tracing::info!("writing scripts"); + + write_script(&sess, "rivet_install", &install_script)?; + write_script(&sess, "rivet_hook", &hook_script)?; + write_script(&sess, "rivet_initialize", &initialize_script)?; + + tracing::info!("executing install script"); + + let mut channel = sess.channel_session()?; + + // Cannot run more than one command at a time in a channel, simply combine them + let script = [ + "chmod +x /usr/bin/rivet_install.sh", + "chmod +x /usr/bin/rivet_hook.sh", + "chmod +x /usr/bin/rivet_initialize.sh", + "/usr/bin/rivet_install.sh", + ] + .join(" && "); + + 
channel.exec(&script)?; + + let mut stdout = String::new(); + channel.read_to_string(&mut stdout)?; + let mut stderr = String::new(); + channel.stderr().read_to_string(&mut stderr)?; + + channel.wait_close()?; + + if channel.exit_status()? != 0 { + tracing::error!(%stdout, %stderr, "failed to run script"); + bail!("failed to run script"); + } + + tracing::info!("install successful"); + + GlobalResult::Ok(()) + }) + .await??; + + msg!([ctx] cluster::msg::server_install_complete(&ctx.public_ip) { + ip: ctx.public_ip.clone(), + server_id: ctx.server_id, + provider: ctx.provider, + }) + .await?; + + Ok(()) +} + +fn write_script(sess: &Session, script_name: &str, content: &str) -> GlobalResult<()> { + let bytes = content.as_bytes(); + + let mut script_file = sess.scp_send( + Path::new(&format!("/usr/bin/{script_name}.sh")), + 0o644, + bytes.len() as u64, + None, + )?; + + // Write script in chunks + let mut idx = 0; + loop { + let start = idx; + let end = (idx + 1024).min(bytes.len()); + + script_file.write_all(&bytes[start..end])?; + + idx = end; + if idx >= bytes.len() { + break; + } + } + + // Close the channel and wait for the whole content to be transferred + script_file.send_eof()?; + script_file.wait_eof()?; + script_file.close()?; + script_file.wait_close()?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_install_complete.rs b/svc/pkg/cluster/worker/src/workers/server_install_complete.rs new file mode 100644 index 0000000000..58ddea9e7b --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_install_complete.rs @@ -0,0 +1,22 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker(name = "cluster-server-install-complete")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let provider = unwrap!(backend::cluster::Provider::from_i32(ctx.provider)); + + match provider { + backend::cluster::Provider::Linode => { + if ctx.server_id.is_none() { + msg!([ctx] 
linode::msg::prebake_install_complete(&ctx.ip) { + ip: ctx.ip.clone(), + }) + .await?; + } + } + } + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_provision.rs b/svc/pkg/cluster/worker/src/workers/server_provision.rs new file mode 100644 index 0000000000..6351f2bcb0 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_provision.rs @@ -0,0 +1,232 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, cluster::PoolType, pkg::*}; +use rand::Rng; + +struct ProvisionResponse { + provider_server_id: String, + provider_hardware: String, + public_ip: String, + already_installed: bool, +} + +#[worker(name = "cluster-server-provision", timeout = 150)] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let crdb = ctx.crdb().await?; + + let datacenter_id = unwrap!(ctx.datacenter_id); + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + let pool_type = unwrap!(backend::cluster::PoolType::from_i32(ctx.pool_type)); + let provider = unwrap!(backend::cluster::Provider::from_i32(ctx.provider)); + + // Check if server is already provisioned + // NOTE: sql record already exists before this worker is called + let (provider_server_id, destroyed) = sql_fetch_one!( + [ctx, (Option, bool), &crdb] + " + SELECT + provider_server_id, cloud_destroy_ts IS NOT NULL + FROM db_cluster.servers + WHERE server_id = $1 + ", + server_id, + ) + .await?; + if let Some(provider_server_id) = provider_server_id { + tracing::warn!( + ?server_id, + ?provider_server_id, + "server is already provisioned" + ); + return Ok(()); + } + if destroyed { + tracing::warn!(?server_id, "attempting to provision a destroyed server"); + return Ok(()); + } + + // Fetch datacenter config + let datacenter_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![datacenter_id], + }) + .await?; + let datacenter = unwrap!(datacenter_res.datacenters.first()); + let pool = unwrap!( + datacenter + .pools + .iter() + .find(|p| p.pool_type == ctx.pool_type), + 
"datacenter does not have this type of pool configured" + ); + + // Get a new vlan ip + let vlan_ip = get_vlan_ip(ctx, &crdb, server_id, pool_type).await?; + + let provision_res = match provider { + backend::cluster::Provider::Linode => { + let mut hardware_list = pool.hardware.iter(); + + // Iterate through list of hardware and attempt to schedule a server. Goes to the next + // hardware if an error happens during provisioning + loop { + // List exhausted + let Some(hardware) = hardware_list.next() else { + break None; + }; + + tracing::info!( + "attempting to provision hardware: {}", + hardware.provider_hardware + ); + + let res = op!([ctx] linode_server_provision { + server_id: ctx.server_id, + provider_datacenter_id: datacenter.provider_datacenter_id.clone(), + hardware: Some(hardware.clone()), + pool_type: ctx.pool_type, + vlan_ip: vlan_ip.clone(), + tags: ctx.tags.clone(), + }) + .await; + + match res { + Ok(res) => { + break Some(ProvisionResponse { + provider_server_id: res.provider_server_id.clone(), + provider_hardware: hardware.provider_hardware.clone(), + public_ip: res.public_ip.clone(), + already_installed: res.already_installed, + }) + } + Err(err) => { + tracing::error!( + ?err, + ?server_id, + "failed to provision server, cleaning up" + ); + + cleanup(ctx, server_id).await?; + } + } + } + } + }; + + // Update DB regardless of success (have to set vlan_ip) + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.servers + SET + provider_server_id = $2, + provider_hardware = $3, + vlan_ip = $4, + public_ip = $5 + WHERE server_id = $1 + ", + server_id, + provision_res.as_ref().map(|res| &res.provider_server_id), + provision_res.as_ref().map(|res| &res.provider_hardware), + vlan_ip, + provision_res.as_ref().map(|res| &res.public_ip), + ) + .await?; + + if let Some(provision_res) = provision_res { + // Install components + if !provision_res.already_installed { + msg!([ctx] cluster::msg::server_install(&provision_res.public_ip) { + public_ip: 
provision_res.public_ip, + pool_type: ctx.pool_type, + server_id: ctx.server_id, + provider: ctx.provider, + initialize_immediately: true, + }) + .await?; + } + + // Create DNS record + if let backend::cluster::PoolType::Gg = pool_type { + msg!([ctx] cluster::msg::server_dns_create(server_id) { + server_id: ctx.server_id, + }) + .await?; + } + } else { + tracing::error!(?server_id, hardware_options=?pool.hardware.len(), "failed all attempts to provision server"); + bail!("failed all attempts to provision server"); + } + + Ok(()) +} + +async fn get_vlan_ip( + ctx: &OperationContext, + crdb: &CrdbPool, + server_id: Uuid, + pool_type: backend::cluster::PoolType, +) -> GlobalResult { + // Find next available vlan index + let mut vlan_addr_range = match pool_type { + PoolType::Job => util::net::job::vlan_addr_range(), + PoolType::Gg => util::net::gg::vlan_addr_range(), + PoolType::Ats => util::net::ats::vlan_addr_range(), + }; + let max_idx = vlan_addr_range.count() as i64; + let (network_idx,) = sql_fetch_one!( + [ctx, (i64,), &crdb] + " + WITH + get_next_network_idx AS ( + SELECT mod(idx + $1, $2) AS idx + FROM generate_series(0, $2) AS s(idx) + WHERE NOT EXISTS ( + SELECT 1 + FROM db_cluster.servers + WHERE + pool_type = $3 AND + network_idx = mod(idx + $1, $2) + ) + LIMIT 1 + ), + update_network_idx AS ( + UPDATE db_cluster.servers + SET network_idx = (SELECT idx FROM get_next_network_idx) + WHERE server_id = $4 + RETURNING 1 + ) + SELECT idx FROM get_next_network_idx + ", + // Choose a random index to start from for better index spread + rand::thread_rng().gen_range(0i64..max_idx), + max_idx, + pool_type as i64, + server_id + ) + .await?; + + let vlan_ip = unwrap!(vlan_addr_range.nth(network_idx as usize)); + + Ok(vlan_ip.to_string()) +} + +// This function is used to destroy leftovers from a failed partial provision. 
+async fn cleanup( + ctx: &OperationContext, + server_id: Uuid, +) -> GlobalResult<()> { + // NOTE: Usually before publishing this message we would set `cloud_destroy_ts`. We do not set it here + // because this message will be retried with the same server id + + // Wait for server to complete destroying so we don't get a primary key conflict (the same server id + // will be used to try and provision the next hardware option) + msg!([ctx] cluster::msg::server_destroy(server_id) -> cluster::msg::server_destroy_complete { + server_id: Some(server_id.into()), + // We force destroy because the provision process failed + force: true, + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/src/workers/server_undrain.rs b/svc/pkg/cluster/worker/src/workers/server_undrain.rs new file mode 100644 index 0000000000..cd7c54f6b0 --- /dev/null +++ b/svc/pkg/cluster/worker/src/workers/server_undrain.rs @@ -0,0 +1,90 @@ +use chirp_worker::prelude::*; +use nomad_client::{ + apis::{configuration::Configuration, nodes_api}, + models, +}; +use proto::backend::{self, pkg::*}; + +lazy_static::lazy_static! 
{ + static ref NOMAD_CONFIG: Configuration = + nomad_util::new_config_from_env().unwrap(); +} + +#[derive(sqlx::FromRow)] +struct Server { + datacenter_id: Uuid, + pool_type: i64, + nomad_node_id: Option, +} + +#[worker(name = "cluster-server-undrain")] +async fn worker(ctx: &OperationContext) -> GlobalResult<()> { + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + + // NOTE: `drain_ts` will already be set to null before this worker is called + let server = sql_fetch_one!( + [ctx, Server] + " + SELECT + datacenter_id, pool_type, nomad_node_id + FROM db_cluster.servers + WHERE server_id = $1 + ", + server_id + ) + .await?; + + let pool_type = unwrap!(backend::cluster::PoolType::from_i32( + server.pool_type as i32 + )); + match pool_type { + backend::cluster::PoolType::Job => { + let Some(nomad_node_id) = server.nomad_node_id else { + tracing::error!("server does not have nomad running, cannot undrain"); + return Ok(()); + }; + + nodes_api::update_node_drain( + &NOMAD_CONFIG, + &nomad_node_id, + models::NodeUpdateDrainRequest { + drain_spec: None, + mark_eligible: Some(true), + meta: None, + node_id: Some(nomad_node_id.clone()), + }, + None, + None, + None, + None, + None, + None, + None, + None, + None, + ) + .await?; + + // Allow new matchmaker requests to the node running on this server + msg!([ctx] mm::msg::nomad_node_closed_set(&nomad_node_id) { + datacenter_id: Some(server.datacenter_id.into()), + nomad_node_id: nomad_node_id.clone(), + is_closed: false, + }) + .await?; + } + backend::cluster::PoolType::Gg => { + // Recreate DNS record + msg!([ctx] cluster::msg::server_dns_create(server_id) { + server_id: ctx.server_id, + }) + .await?; + } + _ => { + // Gracefully fail + tracing::error!("cannot undrain this pool type: {:?}", pool_type); + } + } + + Ok(()) +} diff --git a/svc/pkg/cluster/worker/tests/create.rs b/svc/pkg/cluster/worker/tests/create.rs new file mode 100644 index 0000000000..0ff58e2993 --- /dev/null +++ 
b/svc/pkg/cluster/worker/tests/create.rs @@ -0,0 +1,16 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn create(ctx: TestCtx) { + let cluster_id = Uuid::new_v4(); + let owner_team_id = Uuid::new_v4(); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + owner_team_id: Some(owner_team_id.into()), + name_id: util::faker::ident(), + }) + .await + .unwrap(); +} diff --git a/svc/pkg/cluster/worker/tests/datacenter_create.rs b/svc/pkg/cluster/worker/tests/datacenter_create.rs new file mode 100644 index 0000000000..e41447994d --- /dev/null +++ b/svc/pkg/cluster/worker/tests/datacenter_create.rs @@ -0,0 +1,37 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn datacenter_create(ctx: TestCtx) { + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: Vec::new(), + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); +} diff --git a/svc/pkg/cluster/worker/tests/datacenter_scale.rs b/svc/pkg/cluster/worker/tests/datacenter_scale.rs new file mode 100644 index 0000000000..cf1bb16cc0 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/datacenter_scale.rs @@ -0,0 +1,17 @@ +use 
chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn datacenter_scale(_ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + // msg!([ctx] cluster::msg::datacenter_scale() { + + // }) + // .await + // .unwrap(); + + todo!(); +} diff --git a/svc/pkg/cluster/worker/tests/datacenter_taint.rs b/svc/pkg/cluster/worker/tests/datacenter_taint.rs new file mode 100644 index 0000000000..ae07e5bb88 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/datacenter_taint.rs @@ -0,0 +1,178 @@ +use std::time::Duration; + +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn datacenter_taint(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + // Manually create a server + msg!([ctx] cluster::msg::server_provision(server_id) { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.servers + WHERE + server_id = $1 AND + public_ip IS NOT NULL + ) + ", + server_id, + ) + .await + .unwrap(); + + if exists { + break; + } + } + + // Increase desired count (this wont provision anything, we manually created a server) + msg!([ctx] cluster::msg::datacenter_update(datacenter_id) -> cluster::msg::datacenter_scale { + datacenter_id: Some(datacenter_id.into()), + pools: vec![cluster::msg::datacenter_update::PoolUpdate { + pool_type: backend::cluster::PoolType::Job as i32, + hardware: Vec::new(), + 
desired_count: Some(1), + max_count: Some(1), + }], + drain_timeout: None, + }) + .await + .unwrap(); + + // TODO: The servers brought up by this taint (and subsequent scale) aren't tagged as "test" so they wont + // be garbage collected if the test fails + // Taint datacenter + msg!([ctx] cluster::msg::datacenter_taint(datacenter_id) -> cluster::msg::datacenter_scale { + datacenter_id: Some(datacenter_id.into()), + }) + .await + .unwrap(); + + // Validate state + let (taint_ts,) = sql_fetch_one!( + [ctx, (Option,)] + " + SELECT taint_ts + FROM db_cluster.servers + WHERE server_id = $1 + ", + server_id, + ) + .await + .unwrap(); + + taint_ts.expect("did not taint server"); + + // Downscale datacenter (so it destroys the new server) + msg!([ctx] cluster::msg::datacenter_update(datacenter_id) -> cluster::msg::datacenter_scale { + datacenter_id: Some(datacenter_id.into()), + pools: vec![cluster::msg::datacenter_update::PoolUpdate { + pool_type: backend::cluster::PoolType::Job as i32, + hardware: Vec::new(), + desired_count: Some(0), + max_count: Some(0), + }], + drain_timeout: None, + }) + .await + .unwrap(); + + // Wait for datacenter scale to destroy servers + tokio::time::sleep(Duration::from_secs(2)).await; +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Job as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + 
hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/datacenter_taint_complete.rs b/svc/pkg/cluster/worker/tests/datacenter_taint_complete.rs new file mode 100644 index 0000000000..f48715a648 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/datacenter_taint_complete.rs @@ -0,0 +1,181 @@ +use std::time::Duration; + +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn datacenter_taint_complete(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + // Manually create a server + msg!([ctx] cluster::msg::server_provision(server_id) { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.servers + 
WHERE + server_id = $1 AND + public_ip IS NOT NULL + ) + ", + server_id, + ) + .await + .unwrap(); + + if exists { + break; + } + } + + // Increase desired count (this wont provision anything, we manually created a server) + msg!([ctx] cluster::msg::datacenter_update(datacenter_id) -> cluster::msg::datacenter_scale { + datacenter_id: Some(datacenter_id.into()), + pools: vec![cluster::msg::datacenter_update::PoolUpdate { + pool_type: backend::cluster::PoolType::Job as i32, + hardware: Vec::new(), + desired_count: Some(1), + max_count: Some(1), + }], + drain_timeout: None, + }) + .await + .unwrap(); + + // Taint datacenter + msg!([ctx] @notrace cluster::msg::datacenter_taint(datacenter_id) -> cluster::msg::server_destroy(server_id) { + datacenter_id: Some(datacenter_id.into()), + }).await.unwrap(); + + // Validate state + let server_rows = sql_fetch_all!( + [ctx, (Uuid, Option)] + " + SELECT server_id, cloud_destroy_ts + FROM db_cluster.servers + WHERE datacenter_id = $1 + ", + datacenter_id, + ) + .await + .unwrap(); + + assert_eq!(2, server_rows.len(), "did not provision new server"); + assert!( + server_rows.iter().any( + |(row_server_id, cloud_destroy_ts)| row_server_id == &server_id + && cloud_destroy_ts.is_some() + ), + "did not destroy old server" + ); + + // Downscale datacenter (so it destroys the new server) + msg!([ctx] cluster::msg::datacenter_update(datacenter_id) -> cluster::msg::datacenter_scale { + datacenter_id: Some(datacenter_id.into()), + pools: vec![cluster::msg::datacenter_update::PoolUpdate { + pool_type: backend::cluster::PoolType::Job as i32, + hardware: Vec::new(), + desired_count: Some(0), + max_count: Some(0), + }], + drain_timeout: None, + }) + .await + .unwrap(); + + // Wait for datacenter scale to destroy servers + tokio::time::sleep(Duration::from_secs(2)).await; +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = 
backend::cluster::PoolType::Job as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 3600, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/datacenter_update.rs b/svc/pkg/cluster/worker/tests/datacenter_update.rs new file mode 100644 index 0000000000..7b2d75c0b4 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/datacenter_update.rs @@ -0,0 +1,68 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn datacenter_update(ctx: TestCtx) { + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) 
+ .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type: backend::cluster::PoolType::Ats as i32, + hardware: Vec::new(), + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + msg!([ctx] cluster::msg::datacenter_update(datacenter_id) -> cluster::msg::datacenter_scale { + datacenter_id: Some(datacenter_id.into()), + pools: vec![cluster::msg::datacenter_update::PoolUpdate { + pool_type: backend::cluster::PoolType::Ats as i32, + hardware: Vec::new(), + desired_count: Some(1), + max_count: None, + }], + drain_timeout: None, + }) + .await + .unwrap(); + + let datacenter_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: vec![datacenter_id.into()], + }) + .await + .unwrap(); + let updated_dc = datacenter_res.datacenters.first().unwrap(); + + assert_ne!( + dc.pools.first().unwrap().desired_count, + updated_dc.pools.first().unwrap().desired_count, + "datacenter not updated" + ); +} diff --git a/svc/pkg/cluster/worker/tests/nomad_node_drain_complete.rs b/svc/pkg/cluster/worker/tests/nomad_node_drain_complete.rs new file mode 100644 index 0000000000..5272621243 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/nomad_node_drain_complete.rs @@ -0,0 +1,101 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn nomad_node_drain_complete(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let 
datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] @notrace cluster::msg::server_provision(server_id) -> nomad::msg::monitor_node_registered { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + msg!([ctx] @notrace cluster::msg::server_drain(server_id) -> cluster::msg::server_destroy { + server_id: Some(server_id.into()), + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Job as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, 
+ create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/nomad_node_registered.rs b/svc/pkg/cluster/worker/tests/nomad_node_registered.rs new file mode 100644 index 0000000000..00cad2be75 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/nomad_node_registered.rs @@ -0,0 +1,103 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn nomad_node_registered(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] @notrace cluster::msg::server_provision(server_id) -> nomad::msg::monitor_node_registered { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Clean up afterwards so we don't litter + msg!([ctx] @wait cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Job as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: 
backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/server_destroy.rs b/svc/pkg/cluster/worker/tests/server_destroy.rs new file mode 100644 index 0000000000..559d9cfa31 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/server_destroy.rs @@ -0,0 +1,127 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn server_destroy(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] cluster::msg::server_provision(server_id) { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT 
EXISTS ( + SELECT 1 + FROM db_cluster.servers + WHERE + server_id = $1 AND + public_ip IS NOT NULL + ) + ", + server_id, + ) + .await + .unwrap(); + + if exists { + break; + } + } + + msg!([ctx] cluster::msg::server_destroy(server_id) -> cluster::msg::server_destroy_complete { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Ats as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/server_dns_create.rs b/svc/pkg/cluster/worker/tests/server_dns_create.rs new file 
mode 100644 index 0000000000..23eed2d153 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/server_dns_create.rs @@ -0,0 +1,121 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn server_dns_create(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] cluster::msg::server_provision(server_id) -> cluster::msg::server_dns_create { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Clean up afterwards so we don't litter + msg!([ctx] @wait cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); + + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.cloudflare_misc + WHERE server_id = $1 + ) + ", + server_id, + ) + .await + .unwrap(); + + assert!(exists, "dns record not created"); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Gg as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + 
provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/server_dns_delete.rs b/svc/pkg/cluster/worker/tests/server_dns_delete.rs new file mode 100644 index 0000000000..9ae9739b31 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/server_dns_delete.rs @@ -0,0 +1,120 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn server_dns_delete(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] cluster::msg::server_provision(server_id) -> cluster::msg::server_dns_create { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + msg!([ctx] cluster::msg::server_destroy(server_id) -> cluster::msg::server_dns_delete { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); + 
+ tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.cloudflare_misc + WHERE server_id = $1 + ) + ", + server_id, + ) + .await + .unwrap(); + + assert!(!exists, "dns record not deleted"); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Gg as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/server_drain.rs b/svc/pkg/cluster/worker/tests/server_drain.rs new file mode 100644 index 0000000000..39fd272f7e --- /dev/null +++ 
b/svc/pkg/cluster/worker/tests/server_drain.rs @@ -0,0 +1,185 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn server_drain(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup( + &ctx, + backend::cluster::PoolType::Job, + server_id, + datacenter_id, + cluster_id, + ) + .await; + + msg!([ctx] @notrace cluster::msg::server_provision(server_id) -> nomad::msg::monitor_node_registered { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + msg!([ctx] @notrace cluster::msg::server_drain(server_id) -> nomad::msg::monitor_node_drain_complete { + server_id: Some(server_id.into()), + }) + .await + .unwrap(); + + // Clean up afterwards so we don't litter + msg!([ctx] @wait cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); +} + +#[worker_test] +async fn gg_server_drain(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup( + &ctx, + backend::cluster::PoolType::Gg, + server_id, + datacenter_id, + cluster_id, + ) + .await; + + msg!([ctx] cluster::msg::server_provision(server_id) -> cluster::msg::server_dns_create { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have a dns record + loop { + 
tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.cloudflare_misc + WHERE server_id = $1 + ) + ", + server_id, + ) + .await + .unwrap(); + + if exists { + break; + } + } + + msg!([ctx] cluster::msg::server_drain(server_id) -> cluster::msg::server_dns_delete { + server_id: Some(server_id.into()), + }) + .await + .unwrap(); + + // Clean up afterwards so we don't litter + msg!([ctx] @wait cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + pool_type: backend::cluster::PoolType, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = pool_type as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) 
+ VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/server_install.rs b/svc/pkg/cluster/worker/tests/server_install.rs new file mode 100644 index 0000000000..9d81fdd162 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/server_install.rs @@ -0,0 +1,132 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn server_install(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] cluster::msg::server_provision(server_id) { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + let public_ip = loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let row = sql_fetch_optional!( + [ctx, (String,)] + " + SELECT public_ip + FROM db_cluster.servers + WHERE + server_id = $1 AND + public_ip IS NOT NULL + ", + server_id, + ) + .await + .unwrap(); + + if let Some((public_ip,)) = row { + break public_ip; + } + }; + + // Wait for install to complete + let mut sub = subscribe!([ctx] cluster::msg::server_install_complete(public_ip)) + .await + .unwrap(); + sub.next().await.unwrap(); + + // Clean up afterwards so we don't litter + msg!([ctx] @wait cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = 
backend::cluster::PoolType::Ats as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/server_install_complete.rs b/svc/pkg/cluster/worker/tests/server_install_complete.rs new file mode 100644 index 0000000000..46597d8f6d --- /dev/null +++ b/svc/pkg/cluster/worker/tests/server_install_complete.rs @@ -0,0 +1,13 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn server_install_complete(ctx: TestCtx) { + // msg!([ctx] cluster::msg::server_install_complete() { + + // }) + // .await + // .unwrap(); + + todo!(); +} diff --git a/svc/pkg/cluster/worker/tests/server_provision.rs b/svc/pkg/cluster/worker/tests/server_provision.rs new file mode 
100644 index 0000000000..3440533720 --- /dev/null +++ b/svc/pkg/cluster/worker/tests/server_provision.rs @@ -0,0 +1,128 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn server_provision(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + + let dc = setup(&ctx, server_id, datacenter_id, cluster_id).await; + + msg!([ctx] cluster::msg::server_provision(server_id) { + cluster_id: Some(cluster_id.into()), + datacenter_id: Some(datacenter_id.into()), + server_id: Some(server_id.into()), + pool_type: dc.pools.first().unwrap().pool_type, + provider: dc.provider, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.servers + WHERE + server_id = $1 AND + public_ip IS NOT NULL + ) + ", + server_id, + ) + .await + .unwrap(); + + if exists { + break; + } + } + + // Clean up afterwards so we don't litter + msg!([ctx] @wait cluster::msg::server_destroy(server_id) { + server_id: Some(server_id.into()), + force: false, + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, +) -> backend::cluster::Datacenter { + let pool_type = backend::cluster::PoolType::Ats as i32; + + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + let dc = backend::cluster::Datacenter { + datacenter_id: Some(datacenter_id.into()), + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + display_name: util::faker::ident(), + + provider: backend::cluster::Provider::Linode as 
i32, + provider_datacenter_id: "us-southeast".to_string(), + + pools: vec![backend::cluster::Pool { + pool_type, + hardware: vec![backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }], + desired_count: 0, + max_count: 0, + }], + + build_delivery_method: backend::cluster::BuildDeliveryMethod::TrafficServer as i32, + drain_timeout: 0, + }; + + msg!([ctx] cluster::msg::datacenter_create(datacenter_id) -> cluster::msg::datacenter_scale { + config: Some(dc.clone()), + }) + .await + .unwrap(); + + // Write new server to db + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + dc +} diff --git a/svc/pkg/cluster/worker/tests/server_undrain.rs b/svc/pkg/cluster/worker/tests/server_undrain.rs new file mode 100644 index 0000000000..09a59dcbfd --- /dev/null +++ b/svc/pkg/cluster/worker/tests/server_undrain.rs @@ -0,0 +1,25 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn server_undrain(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + // Difficult test to write: + // 1. Create job server + // 2. Wait for nomad to register + // 3. Create lobby on node + // 4. Drain node + // 5. Before the drain ends (somehow manage to postpone it), undrain the node + // 6. Check that the node is not draining anymore + + // msg!([ctx] cluster::msg::server_undrain() { + + // }) + // .await + // .unwrap(); + + todo!(); +} diff --git a/svc/pkg/email/ops/send/src/lib.rs b/svc/pkg/email/ops/send/src/lib.rs index 6e381bc36f..ce99c0625d 100644 --- a/svc/pkg/email/ops/send/src/lib.rs +++ b/svc/pkg/email/ops/send/src/lib.rs @@ -6,7 +6,7 @@ use serde::{Deserialize, Serialize}; use serde_json::json; lazy_static::lazy_static! 
{ - static ref SENDGRID_KEY: String = std::env::var("SENDGRID_KEY").expect("no sendgrid key"); + static ref SENDGRID_KEY: String = util::env::var("SENDGRID_KEY").unwrap(); } #[derive(Serialize, Deserialize)] diff --git a/svc/pkg/faker/ops/job-template/templates/allocate.rs b/svc/pkg/faker/ops/job-template/templates/allocate.rs deleted file mode 100644 index 807bb3eff3..0000000000 --- a/svc/pkg/faker/ops/job-template/templates/allocate.rs +++ /dev/null @@ -1,35 +0,0 @@ -// cargo-deps: ctrlc="3.4.2" - -use std::{env, thread, time::Duration, sync::{Arc, atomic::{AtomicBool, Ordering}}}; - -fn main() { - // Parse the first command-line argument as the array size - let args: Vec = env::args().collect(); - if args.len() < 2 { - eprintln!("missing size arg"); - std::process::exit(1); - } - let size = args[1].parse::().expect("Error parsing size"); - - // Allocate the array - println!("allocating {size} bytes"); - let _array = vec![0u8; size]; - - handle_ctrl_c(); - - println!("exiting"); -} - -fn handle_ctrl_c() { - let running = Arc::new(AtomicBool::new(true)); - let r = running.clone(); - - ctrlc::set_handler(move || { - r.store(false, Ordering::SeqCst); - }).expect("Error setting Ctrl-C handler"); - - // Wait for ctrl + c - while running.load(Ordering::SeqCst) { - thread::sleep(Duration::from_secs(1)); - } -} diff --git a/svc/pkg/faker/ops/region/src/lib.rs b/svc/pkg/faker/ops/region/src/lib.rs index c453548101..c869eb0934 100644 --- a/svc/pkg/faker/ops/region/src/lib.rs +++ b/svc/pkg/faker/ops/region/src/lib.rs @@ -15,11 +15,7 @@ async fn handle( region_ids: region_list.region_ids.clone(), }) .await?; - let region = region_get - .regions - .iter() - .find(|x| x.name_id == util::env::primary_region()); - let region = unwrap!(region, "primary region not listed in region list"); + let region = unwrap!(region_get.regions.first()); Ok(faker::region::Response { region_id: region.region_id, diff --git a/svc/pkg/ip/ops/info/Service.toml b/svc/pkg/ip/ops/info/Service.toml 
index 8617e1695a..1a6ded9652 100644 --- a/svc/pkg/ip/ops/info/Service.toml +++ b/svc/pkg/ip/ops/info/Service.toml @@ -8,3 +8,6 @@ kind = "rust" [databases] db-ip-info = {} + +[secrets] +"ip_info/token" = { optional = true } diff --git a/svc/pkg/ip/ops/info/src/lib.rs b/svc/pkg/ip/ops/info/src/lib.rs index 46b0771e95..4bc9aabc24 100644 --- a/svc/pkg/ip/ops/info/src/lib.rs +++ b/svc/pkg/ip/ops/info/src/lib.rs @@ -1,8 +1,6 @@ use proto::backend::{self, pkg::*}; use rivet_operation::prelude::*; -const IP_INFO_TOKEN: &str = "1a0c0cea381431"; - /// Parsed response from ipinfo.io. We can't retrieve data from bogon or anycast /// addresses. #[derive(serde::Deserialize)] @@ -53,13 +51,16 @@ async fn fetch_ip_info_io( tracing::info!("found cached ip info"); ip_info_raw } else { + let api_url = + if let Some(ip_info_token) = util::env::read_secret_opt(&["ip_info", "token"]).await? { + format!("https://ipinfo.io/{}?token={}", ip_str, ip_info_token) + } else { + format!("https://ipinfo.io/{}", ip_str) + }; + // Fetch IP data from external service tracing::info!(?ip_str, "fetching fresh ip info"); - let ip_info_res = reqwest::get(format!( - "https://ipinfo.io/{}?token={}", - ip_str, IP_INFO_TOKEN - )) - .await?; + let ip_info_res = reqwest::get(api_url).await?; if !ip_info_res.status().is_success() { tracing::error!(status = ?ip_info_res.status(), "failed to fetch ip info, using fallback"); @@ -97,8 +98,10 @@ async fn fetch_ip_info_io( Some(backend::net::IpInfo { ip: ip_str.to_string(), - latitude, - longitude, + coords: Some(backend::net::Coordinates { + latitude, + longitude, + }), }) } IpInfoParsed::Bogon { .. 
} => { diff --git a/svc/pkg/job-run/ops/get/src/lib.rs b/svc/pkg/job-run/ops/get/src/lib.rs index 41710c6816..6d3bf3e54c 100644 --- a/svc/pkg/job-run/ops/get/src/lib.rs +++ b/svc/pkg/job-run/ops/get/src/lib.rs @@ -62,66 +62,61 @@ async fn handle( // Query the run data let (runs, run_networks, run_ports, run_meta_nomad, run_proxied_ports) = tokio::try_join!( // runs - async { - sql_fetch_all!( - [ctx, Run] - " - SELECT run_id, region_id, create_ts, start_ts, stop_ts, cleanup_ts - FROM db_job_state.runs - WHERE run_id = ANY($1) - ", - &run_ids, - ) - .await - }, + sql_fetch_all!( + [ctx, Run] + " + SELECT run_id, region_id, create_ts, start_ts, stop_ts, cleanup_ts + FROM db_job_state.runs + WHERE run_id = ANY($1) + ", + &run_ids, + ), // run_networks - async { - sql_fetch_all!( - [ctx, RunNetwork] - " - SELECT run_id, ip, mode - FROM db_job_state.run_networks - WHERE run_id = ANY($1) - ", - &run_ids, - ) - .await - }, + sql_fetch_all!( + [ctx, RunNetwork] + " + SELECT run_id, ip, mode + FROM db_job_state.run_networks + WHERE run_id = ANY($1) + ", + &run_ids, + ), // run_ports - async { - sql_fetch_all!( - [ctx, RunPort] - " - SELECT run_id, label, ip, source, target - FROM db_job_state.run_ports - WHERE run_id = ANY($1) - ", - &run_ids, - ) - .await - }, + sql_fetch_all!( + [ctx, RunPort] + " + SELECT run_id, label, ip, source, target + FROM db_job_state.run_ports + WHERE run_id = ANY($1) + ", + &run_ids, + ), // run_meta_nomad - async { - sql_fetch_all!( - [ctx, RunMetaNomad] - "SELECT run_id, dispatched_job_id, alloc_id, node_id, node_name, node_public_ipv4, node_vlan_ipv4, alloc_state FROM db_job_state.run_meta_nomad WHERE run_id = ANY($1)", - &run_ids, - ) - .await - }, + sql_fetch_all!( + [ctx, RunMetaNomad] + " + SELECT + run_id, dispatched_job_id, alloc_id, + node_id, node_name, node_public_ipv4, + node_vlan_ipv4, alloc_state + FROM db_job_state.run_meta_nomad + WHERE run_id = ANY($1) + ", + &run_ids, + ), // run_proxied_ports - async { - sql_fetch_all!( - 
[ctx, RunProxiedPort] - " - SELECT run_id, target_nomad_port_label, ingress_port, ingress_hostnames, proxy_protocol, ssl_domain_mode - FROM db_job_state.run_proxied_ports - WHERE run_id = ANY($1) - ", - &run_ids, - ) - .await - }, + sql_fetch_all!( + [ctx, RunProxiedPort] + " + SELECT + run_id, target_nomad_port_label, ingress_port, + ingress_hostnames, proxy_protocol, + ssl_domain_mode + FROM db_job_state.run_proxied_ports + WHERE run_id = ANY($1) + ", + &run_ids, + ), )?; // Build the runs diff --git a/svc/pkg/job-run/ops/metrics-log/Cargo.toml b/svc/pkg/job-run/ops/metrics-log/Cargo.toml index 6a5c2521b6..a2c26e5535 100644 --- a/svc/pkg/job-run/ops/metrics-log/Cargo.toml +++ b/svc/pkg/job-run/ops/metrics-log/Cargo.toml @@ -8,7 +8,7 @@ license = "Apache-2.0" [dependencies] chirp-client = { path = "../../../../../lib/chirp/client" } indoc = "1.0" -prost = "0.10" +lazy_static = "1.4" reqwest = "0.11" rivet-operation = { path = "../../../../../lib/operation/core" } serde = { version = "1.0", features = ["derive"] } diff --git a/svc/pkg/job-run/ops/metrics-log/src/lib.rs b/svc/pkg/job-run/ops/metrics-log/src/lib.rs index b50bf64842..283d3d22b1 100644 --- a/svc/pkg/job-run/ops/metrics-log/src/lib.rs +++ b/svc/pkg/job-run/ops/metrics-log/src/lib.rs @@ -4,12 +4,6 @@ use reqwest::StatusCode; use rivet_operation::prelude::*; use serde::Deserialize; -#[derive(Debug, thiserror::Error)] -enum Error { - #[error("prometheus error: {0}")] - PrometheusError(String), -} - #[derive(Debug, Deserialize)] struct PrometheusResponse { data: PrometheusData, @@ -41,12 +35,14 @@ impl QueryTiming { } } +lazy_static::lazy_static! 
{ + static ref PROMETHEUS_URL: String = util::env::var("PROMETHEUS_URL").unwrap(); +} + #[operation(name = "job-run-metrics-log")] async fn handle( ctx: OperationContext, ) -> GlobalResult { - let prometheus_url = std::env::var("PROMETHEUS_URL")?; - let mut metrics = Vec::new(); for metric in &ctx.metrics { @@ -58,7 +54,7 @@ async fn handle( // relabel action in the Kubernetes config. let (mem_allocated, cpu_usage, mem_usage) = tokio::try_join!( handle_request( - &prometheus_url, + &PROMETHEUS_URL, None, formatdoc!( " @@ -72,7 +68,7 @@ async fn handle( ) ), handle_request( - &prometheus_url, + &PROMETHEUS_URL, query_timing.as_ref(), formatdoc!( " @@ -86,7 +82,7 @@ async fn handle( ) ), handle_request( - &prometheus_url, + &PROMETHEUS_URL, query_timing.as_ref(), // Fall back to `nomad_client_allocs_memory_rss` since `nomad_client_allocs_memory_usage` is // not available in `raw_exec`. @@ -149,16 +145,15 @@ async fn handle_request( // Query prometheus let res = reqwest::Client::new().get(req_url).send().await?; - if res.status() != StatusCode::OK { + if !res.status().is_success() { let status = res.status(); let text = res.text().await?; - return Err(Error::PrometheusError(format!( - "failed prometheus request: ({}) {}", - status, text - )) - .into()); + bail!(format!("failed prometheus request: ({}) {}", status, text)); } - Ok(unwrap!(res.json::().await?.data.result.first()).clone()) + let body = res.json::().await?; + let data = unwrap!(body.data.result.first()).clone(); + + Ok(data) } diff --git a/svc/pkg/job-run/ops/metrics-log/tests/integration.rs b/svc/pkg/job-run/ops/metrics-log/tests/integration.rs index b2ddad5cce..11fcd713c7 100644 --- a/svc/pkg/job-run/ops/metrics-log/tests/integration.rs +++ b/svc/pkg/job-run/ops/metrics-log/tests/integration.rs @@ -89,7 +89,7 @@ async fn memory_stress(ctx: TestCtx) { kind: Some( faker::job_template::request::Kind::Stress( faker::job_template::request::Stress { - flags: "--vm 1 --vm-bytes 4K --vm-hang 0".into(), + flags: 
"--vm 1 --vm-bytes 40M --vm-hang 0".into(), }, ) ), @@ -140,7 +140,7 @@ async fn memory_stress(ctx: TestCtx) { let metrics = metrics_res.metrics.first().unwrap(); let memory = *metrics.memory.last().unwrap(); - if memory > 10000000000 { + if memory > 80_000_000 { tracing::info!(?memory, "received valid memory metrics"); break; } else { diff --git a/svc/pkg/job-run/standalone/nomad-monitor/README.md b/svc/pkg/job-run/standalone/nomad-monitor/README.md deleted file mode 100644 index 4ba47931b2..0000000000 --- a/svc/pkg/job-run/standalone/nomad-monitor/README.md +++ /dev/null @@ -1 +0,0 @@ -# job-run-alloc-plan-monitor diff --git a/svc/pkg/job-run/standalone/nomad-monitor/src/lib.rs b/svc/pkg/job-run/standalone/nomad-monitor/src/lib.rs deleted file mode 100644 index 5290b4b585..0000000000 --- a/svc/pkg/job-run/standalone/nomad-monitor/src/lib.rs +++ /dev/null @@ -1 +0,0 @@ -pub mod monitors; diff --git a/svc/pkg/job-run/standalone/nomad-monitor/src/main.rs b/svc/pkg/job-run/standalone/nomad-monitor/src/main.rs deleted file mode 100644 index 15e12b5458..0000000000 --- a/svc/pkg/job-run/standalone/nomad-monitor/src/main.rs +++ /dev/null @@ -1,40 +0,0 @@ -use rivet_operation::prelude::*; - -fn main() -> GlobalResult<()> { - rivet_runtime::run(start()).unwrap() -} - -async fn start() -> GlobalResult<()> { - let pools = rivet_pools::from_env("job-run-alloc-plan-monitor").await?; - let shared_client = chirp_client::SharedClient::from_env(pools.clone())?; - let redis_job = pools.redis("persistent")?; - - tokio::task::Builder::new() - .name("job_run_alloc_plan_monitor::health_checks") - .spawn(rivet_health_checks::run_standalone( - rivet_health_checks::Config { - pools: Some(pools.clone()), - }, - ))?; - - tokio::task::Builder::new() - .name("job_run_alloc_plan_monitor::metrics") - .spawn(rivet_metrics::run_standalone())?; - - tokio::try_join!( - job_run_nomad_monitor::monitors::alloc_plan::start( - shared_client.clone(), - redis_job.clone() - ), - 
job_run_nomad_monitor::monitors::alloc_update::start( - shared_client.clone(), - redis_job.clone() - ), - job_run_nomad_monitor::monitors::eval_update::start( - shared_client.clone(), - redis_job.clone() - ), - )?; - - Ok(()) -} diff --git a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/alloc_plan.rs b/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/alloc_plan.rs deleted file mode 100644 index ef635ab08d..0000000000 --- a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/alloc_plan.rs +++ /dev/null @@ -1,75 +0,0 @@ -use proto::backend::pkg::*; - -use rivet_operation::prelude::*; -use serde::Deserialize; - -#[derive(Debug, Deserialize)] -#[serde(rename_all = "PascalCase")] -struct PlanResult { - allocation: nomad_client::models::Allocation, -} - -#[tracing::instrument(skip_all)] -pub async fn start( - shared_client: chirp_client::SharedClientHandle, - redis_job: RedisPool, -) -> GlobalResult<()> { - let redis_index_key = "nomad:monitor_index:job_run_alloc_plan_monitor"; - - let configuration = nomad_util::config_from_env()?; - nomad_util::monitor::Monitor::run( - configuration, - redis_job.clone(), - redis_index_key, - &["Allocation"], - move |event| { - let client = shared_client.clone().wrap_new("job-run-alloc-plan-monitor"); - async move { - if let Some(payload) = event - .decode::("Allocation", "PlanResult") - .unwrap() - { - let spawn_res = tokio::task::Builder::new() - .name("job_run_alloc_plan_monitor::handle_event") - .spawn(handle(client, payload, event.payload.to_string())); - if let Err(err) = spawn_res { - tracing::error!(?err, "failed to spawn handle_event task"); - } - } - } - }, - ) - .await?; - - Ok(()) -} - -#[tracing::instrument(skip_all)] -async fn handle(client: chirp_client::Client, payload: PlanResult, payload_json: String) { - match handle_inner(client, &payload, payload_json).await { - Ok(_) => {} - Err(err) => { - tracing::error!(?err, ?payload, "error handling event"); - } - } -} -async fn handle_inner( - client: 
chirp_client::Client, - PlanResult { allocation: alloc }: &PlanResult, - payload_json: String, -) -> GlobalResult<()> { - let job_id = unwrap_ref!(alloc.job_id, "alloc has no job id"); - - if !util_job::is_nomad_job_run(job_id) { - tracing::info!(%job_id, "disregarding event"); - return Ok(()); - } - - msg!([client] job_run::msg::nomad_monitor_alloc_plan(job_id) { - dispatched_job_id: job_id.clone(), - payload_json: payload_json, - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/alloc_update.rs b/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/alloc_update.rs deleted file mode 100644 index 633daaa5c8..0000000000 --- a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/alloc_update.rs +++ /dev/null @@ -1,75 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; -use serde::Deserialize; - -#[derive(Debug, Clone, Deserialize)] -#[serde(rename_all = "PascalCase")] -struct AllocationUpdated { - allocation: nomad_client::models::Allocation, -} - -#[tracing::instrument(skip_all)] -pub async fn start( - shared_client: chirp_client::SharedClientHandle, - redis_job: RedisPool, -) -> GlobalResult<()> { - let redis_index_key = "nomad:monitor_index:job_run_alloc_update_monitor"; - - let configuration = nomad_util::config_from_env().unwrap(); - nomad_util::monitor::Monitor::run( - configuration, - redis_job, - redis_index_key, - &["Allocation"], - move |event| { - let client = shared_client.clone().wrap_new("job-alloc-updated-monitor"); - async move { - if let Some(payload) = event - .decode::("Allocation", "AllocationUpdated") - .unwrap() - { - let spawn_res = tokio::task::Builder::new() - .name("job_run_alloc_update_monitor::handle_event") - .spawn(handle(client, payload, event.payload.to_string())); - if let Err(err) = spawn_res { - tracing::error!(?err, "failed to spawn handle_event task"); - } - } - } - }, - ) - .await?; - - Ok(()) -} - -#[tracing::instrument(skip_all)] -async fn handle(client: 
chirp_client::Client, payload: AllocationUpdated, payload_json: String) { - match handle_inner(client, &payload, payload_json).await { - Ok(_) => {} - Err(err) => { - tracing::error!(?err, ?payload, "error handling event"); - } - } -} - -async fn handle_inner( - client: chirp_client::Client, - AllocationUpdated { allocation: alloc }: &AllocationUpdated, - payload_json: String, -) -> GlobalResult<()> { - let job_id = unwrap_ref!(alloc.job_id); - - if !util_job::is_nomad_job_run(job_id) { - tracing::info!(%job_id, "disregarding event"); - return Ok(()); - } - - msg!([client] job_run::msg::nomad_monitor_alloc_update(job_id) { - dispatched_job_id: job_id.clone(), - payload_json: payload_json, - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/eval_update.rs b/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/eval_update.rs deleted file mode 100644 index a7ef8e8919..0000000000 --- a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/eval_update.rs +++ /dev/null @@ -1,97 +0,0 @@ -use rivet_operation::prelude::*; -use serde::Deserialize; - -use proto::backend::pkg::*; - -lazy_static::lazy_static! 
{ - static ref NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = - nomad_util::config_from_env().unwrap(); -} - -#[derive(Debug, Deserialize)] -#[serde(rename_all = "PascalCase")] -struct PlanResult { - evaluation: nomad_client::models::Evaluation, -} - -#[tracing::instrument(skip_all)] -pub async fn start( - shared_client: chirp_client::SharedClientHandle, - redis_job: RedisPool, -) -> GlobalResult<()> { - let redis_index_key = "nomad:monitor_index:job_run_eval_update_monitor"; - - let configuration = nomad_util::config_from_env().unwrap(); - nomad_util::monitor::Monitor::run( - configuration, - redis_job, - redis_index_key, - &["Evaluation"], - move |event| { - let client = shared_client - .clone() - .wrap_new("job-run-eval-update-monitor"); - async move { - if let Some(payload) = event - .decode::("Evaluation", "EvaluationUpdated") - .unwrap() - { - // We can't decode this with serde, so manually deserialize the response - let spawn_res = tokio::task::Builder::new() - .name("job_run_eval_update_monitor::handle_event") - .spawn(handle(client, payload, event.payload.to_string())); - if let Err(err) = spawn_res { - tracing::error!(?err, "failed to spawn handle_event task"); - } - } - } - }, - ) - .await?; - - Ok(()) -} - -#[tracing::instrument(skip_all)] -async fn handle(client: chirp_client::Client, payload: PlanResult, payload_json: String) { - match handle_inner(client, &payload, payload_json).await { - Ok(_) => {} - Err(err) => { - tracing::error!(?err, ?payload, "error handling event"); - } - } -} - -async fn handle_inner( - client: chirp_client::Client, - PlanResult { evaluation: eval }: &PlanResult, - payload_json: String, -) -> GlobalResult<()> { - let job_id = unwrap_ref!(eval.job_id, "eval has no job id"); - let triggered_by = unwrap_ref!(eval.triggered_by).as_str(); - let eval_status_raw = unwrap_ref!(eval.status).as_str(); - - // Ignore jobs we don't care about - if !util_job::is_nomad_job_run(job_id) || triggered_by != "job-register" { - 
tracing::info!(%job_id, "disregarding event"); - return Ok(()); - } - - // Ignore statuses we don't care about - if eval_status_raw != "complete" { - tracing::info!( - %job_id, - ?eval_status_raw, - "ignoring status" - ); - return Ok(()); - } - - msg!([client] job_run::msg::nomad_monitor_eval_update(job_id) { - dispatched_job_id: job_id.clone(), - payload_json: payload_json, - }) - .await?; - - Ok(()) -} diff --git a/svc/pkg/job-run/types/msg/dns-record-create-complete.proto b/svc/pkg/job-run/types/msg/dns-record-create-complete.proto deleted file mode 100644 index 9e131eb54d..0000000000 --- a/svc/pkg/job-run/types/msg/dns-record-create-complete.proto +++ /dev/null @@ -1,15 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.job_run.msg.dns_record_create_complete; - -import "proto/common.proto"; - -/// name = "msg-job-run-dns-record-create-complete" -/// deduplicate = true -/// parameters = [ -/// { name = "run_id" }, -/// ] -message Message { - -} - diff --git a/svc/pkg/job-run/types/msg/nomad-dispatched-job.proto b/svc/pkg/job-run/types/msg/nomad-dispatched-job.proto deleted file mode 100644 index d1ea8aeaab..0000000000 --- a/svc/pkg/job-run/types/msg/nomad-dispatched-job.proto +++ /dev/null @@ -1,16 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.job_run.msg.nomad_dispatched_job; - -import "proto/common.proto"; - -/// name = "msg-job-run-nomad-dispatched-job" -/// parameters = [ -/// { name = "run_id" }, -/// { name = "dispatched_job_id" }, -/// ] -message Message { - rivet.common.Uuid run_id = 1; - string dispatched_job_id = 2; -} - diff --git a/svc/pkg/job-run/worker/Cargo.toml b/svc/pkg/job-run/worker/Cargo.toml index c99057a345..7eab2d4205 100644 --- a/svc/pkg/job-run/worker/Cargo.toml +++ b/svc/pkg/job-run/worker/Cargo.toml @@ -26,6 +26,11 @@ util-job = { package = "rivet-util-job", path = "../../job/util" } region-get = { path = "../../region/ops/get" } token-create = { path = "../../token/ops/create" } +[dependencies.nomad_client_new] 
+package = "nomad_client" +git = "https://github.com/rivet-gg/nomad-client" +rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret + [dependencies.sqlx] version = "0.7" default-features = false diff --git a/svc/pkg/job-run/worker/src/lib.rs b/svc/pkg/job-run/worker/src/lib.rs index 3719b10aa8..468098e10f 100644 --- a/svc/pkg/job-run/worker/src/lib.rs +++ b/svc/pkg/job-run/worker/src/lib.rs @@ -1 +1,6 @@ pub mod workers; + +lazy_static::lazy_static! { + pub static ref NEW_NOMAD_CONFIG: nomad_client_new::apis::configuration::Configuration = + nomad_util::new_config_from_env().unwrap(); +} diff --git a/svc/pkg/job-run/worker/src/workers/create/create_job.rs b/svc/pkg/job-run/worker/src/workers/create/create_job.rs index f27534ff05..bedf9e410d 100644 --- a/svc/pkg/job-run/worker/src/workers/create/create_job.rs +++ b/svc/pkg/job-run/worker/src/workers/create/create_job.rs @@ -5,7 +5,7 @@ use proto::backend; use serde_json::json; use sha2::{Digest, Sha256}; -use super::NOMAD_CONFIG; +use crate::NEW_NOMAD_CONFIG; // TODO: Only run create job if run job returns job not found @@ -21,7 +21,7 @@ pub async fn create_job( Ok(job_id) } -fn override_job_id(job_id: &str, job: &mut nomad_client::models::Job) { +fn override_job_id(job_id: &str, job: &mut nomad_client_new::models::Job) { job.ID = Some(job_id.into()); job.name = Some(job_id.into()); } @@ -29,8 +29,8 @@ fn override_job_id(job_id: &str, job: &mut nomad_client::models::Job) { fn build_job( base_job_json: &str, region: &backend::region::Region, -) -> GlobalResult<(String, nomad_client::models::Job)> { - let base_job = serde_json::from_str::(base_job_json)?; +) -> GlobalResult<(String, nomad_client_new::models::Job)> { + let base_job = serde_json::from_str::(base_job_json)?; // Modify the job spec let mut job = modify_job_spec(base_job, region)?; @@ -61,9 +61,9 @@ fn build_job( /// Modifies the provided job spec to be compatible with the Rivet job runtime. 
fn modify_job_spec( - mut job: nomad_client::models::Job, + mut job: nomad_client_new::models::Job, region: &backend::region::Region, -) -> GlobalResult { +) -> GlobalResult { // Replace all job IDs with a placeholder value in order to create a // deterministic job spec. override_job_id("__PLACEHOLDER__", &mut job); @@ -103,7 +103,7 @@ fn modify_job_spec( let main_task = unwrap!( task_group .tasks - .iter() + .iter_mut() .flatten() .find(|x| x.name.as_deref() == Some(util_job::RUN_MAIN_TASK_NAME)), "must have main task" @@ -116,12 +116,17 @@ fn modify_job_spec( "main task must not have a lifecycle hook" ); + // Disable logs + // if let Some(log_config) = main_task.log_config.as_mut() { + // log_config.disabled = Some(true); + // } + // Configure networks let networks = unwrap!(task_group.networks.as_mut()); ensure_eq!(1, networks.len(), "must have exactly 1 network"); let network = unwrap!(networks.first_mut()); // Disable IPv6 DNS since Docker doesn't support IPv6 yet - network.DNS = Some(Box::new(nomad_client::models::NetworkDns { + network.DNS = Some(Box::new(nomad_client_new::models::DnsConfig { servers: Some(vec![ // Google "8.8.8.8".into(), @@ -132,22 +137,22 @@ fn modify_job_spec( // Disable default search from the host searches: Some(Vec::new()), options: Some(vec!["rotate".into(), "edns0".into(), "attempts:2".into()]), - ..nomad_client::models::NetworkDns::new() + ..nomad_client_new::models::DnsConfig::new() })); // Disable rescheduling, since job-run doesn't support this at the moment - task_group.reschedule_policy = Some(Box::new(nomad_client::models::ReschedulePolicy { + task_group.reschedule_policy = Some(Box::new(nomad_client_new::models::ReschedulePolicy { attempts: Some(0), unlimited: Some(false), - ..nomad_client::models::ReschedulePolicy::new() + ..nomad_client_new::models::ReschedulePolicy::new() })); // Disable restarts. Our Nomad monitoring workflow doesn't support restarts // at the moment. 
- task_group.restart_policy = Some(Box::new(nomad_client::models::RestartPolicy { + task_group.restart_policy = Some(Box::new(nomad_client_new::models::RestartPolicy { attempts: Some(0), // unlimited: Some(false), - ..nomad_client::models::RestartPolicy::new() + ..nomad_client_new::models::RestartPolicy::new() })); // Add cleanup task @@ -157,8 +162,8 @@ fn modify_job_spec( Ok(job) } -fn gen_cleanup_task() -> nomad_client::models::Task { - use nomad_client::models::*; +fn gen_cleanup_task() -> nomad_client_new::models::Task { + use nomad_client_new::models::*; Task { name: Some(util_job::RUN_CLEANUP_TASK_NAME.into()), @@ -187,7 +192,7 @@ fn gen_cleanup_task() -> nomad_client::models::Task { embedded_tmpl: Some(formatdoc!( r#" import ssl - import urllib.request, json, os, mimetypes, sys + import urllib.request, json, os, mimetypes, sys, socket BEARER = '{{{{env "NOMAD_META_JOB_RUN_TOKEN"}}}}' @@ -240,6 +245,7 @@ fn gen_cleanup_task() -> nomad_client::models::Task { log_config: Some(Box::new(LogConfig { max_files: Some(4), max_file_size_mb: Some(2), + disabled: Some(false), })), ..Task::new() } @@ -248,24 +254,22 @@ fn gen_cleanup_task() -> nomad_client::models::Task { #[tracing::instrument] async fn submit_job( job_id: &str, - job: nomad_client::models::Job, + job: nomad_client_new::models::Job, region: &backend::region::Region, ) -> GlobalResult<()> { tracing::info!("submitting job"); - nomad_client::apis::jobs_api::update_job( - &NOMAD_CONFIG, + nomad_client_new::apis::jobs_api::post_job( + &NEW_NOMAD_CONFIG, job_id, - None, + nomad_client_new::models::JobRegisterRequest { + job: Some(Box::new(job)), + ..nomad_client_new::models::JobRegisterRequest::new() + }, Some(®ion.nomad_region), None, None, - Some(nomad_client::models::RegisterJobRequest { - job: Some(Box::new(job)), - enforce_index: None, - job_modify_index: None, - policy_override: None, - }), + None, ) .await?; @@ -317,8 +321,8 @@ mod tests { } } - fn gen_job(x: &str) -> nomad_client::models::Job { - use 
nomad_client::models::*; + fn gen_job(x: &str) -> nomad_client_new::models::Job { + use nomad_client_new::models::*; // This job ID will be overridden, so it should not matter what we put // here @@ -343,6 +347,7 @@ mod tests { // So we can access it from the test mode: Some("cni/rivet-job".into()), dynamic_ports: Some(vec![Port { + host_network: None, label: Some("http".into()), value: None, to: Some(80), @@ -351,7 +356,6 @@ mod tests { }]), services: Some(vec![Service { provider: Some("nomad".into()), - ID: Some("test-job".into()), name: Some("test-job".into()), tags: Some(vec!["test".into()]), ..Service::new() diff --git a/svc/pkg/job-run/worker/src/workers/create/mod.rs b/svc/pkg/job-run/worker/src/workers/create/mod.rs index 2621e7ff30..8957d8465e 100644 --- a/svc/pkg/job-run/worker/src/workers/create/mod.rs +++ b/svc/pkg/job-run/worker/src/workers/create/mod.rs @@ -4,6 +4,8 @@ use chirp_worker::prelude::*; use proto::backend::{self, pkg::*}; use tokio::time::Duration; +use crate::NEW_NOMAD_CONFIG; + mod create_job; // TODO: Reduce disk space for allocations @@ -18,11 +20,6 @@ const MAX_PARAMETER_VALUE_LEN: usize = 8_192; // 8 KB /// See also svc/pkg/mm/worker/src/workers/lobby_ready_set.rs @ TRAEFIK_GRACE_MS const TRAEFIK_GRACE: Duration = Duration::from_secs(1); -lazy_static::lazy_static! 
{ - static ref NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = - nomad_util::config_from_env().unwrap(); -} - #[tracing::instrument] async fn fail( client: &chirp_client::Client, @@ -141,12 +138,6 @@ async fn worker(ctx: &OperationContext) -> Global }) .await?; - msg!([ctx] job_run::msg::nomad_dispatched_job(run_id, &nomad_dispatched_job_id) { - run_id: Some(run_id.into()), - dispatched_job_id: nomad_dispatched_job_id.clone(), - }) - .await?; - Ok(()) } @@ -162,14 +153,11 @@ async fn run_job( ("job_run_id".into(), run_id.to_string()), ("job_run_token".into(), job_run_token), ]; - let dispatch_res = nomad_client::apis::jobs_api::dispatch_job( - &NOMAD_CONFIG, + let dispatch_res = nomad_client_new::apis::jobs_api::post_job_dispatch( + &NEW_NOMAD_CONFIG, nomad_job_id, - None, - Some(nomad_region), - None, - None, - Some(nomad_client::models::JobDispatchRequest { + nomad_client_new::models::JobDispatchRequest { + job_id: Some(nomad_job_id.to_string()), payload: None, meta: Some( req.parameters @@ -178,7 +166,11 @@ async fn run_job( .chain(job_params.into_iter()) .collect::>(), ), - }), + }, + Some(nomad_region), + None, + None, + None, ) .await; match dispatch_res { @@ -358,7 +350,7 @@ async fn choose_ingress_port( ctx, tx, proxied_port.proxy_protocol, - util_job::consts::MIN_INGRESS_PORT_TCP..=util_job::consts::MAX_INGRESS_PORT_TCP, + util::net::job::MIN_INGRESS_PORT_TCP..=util::net::job::MAX_INGRESS_PORT_TCP, ) .await? } @@ -367,7 +359,7 @@ async fn choose_ingress_port( ctx, tx, proxied_port.proxy_protocol, - util_job::consts::MIN_INGRESS_PORT_UDP..=util_job::consts::MAX_INGRESS_PORT_UDP, + util::net::job::MIN_INGRESS_PORT_UDP..=util::net::job::MAX_INGRESS_PORT_UDP, ) .await? 
} diff --git a/svc/pkg/job-run/worker/src/workers/mod.rs b/svc/pkg/job-run/worker/src/workers/mod.rs index 14c8a1d1f8..3aa1a4d606 100644 --- a/svc/pkg/job-run/worker/src/workers/mod.rs +++ b/svc/pkg/job-run/worker/src/workers/mod.rs @@ -8,8 +8,8 @@ mod stop; chirp_worker::workers![ cleanup, create, + stop, nomad_monitor_alloc_plan, nomad_monitor_alloc_update, nomad_monitor_eval_update, - stop, ]; diff --git a/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_plan.rs b/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_plan.rs index 391d3be767..7e774fbdba 100644 --- a/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_plan.rs +++ b/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_plan.rs @@ -3,15 +3,12 @@ use proto::backend::{self, pkg::*}; use redis::AsyncCommands; use serde::Deserialize; -lazy_static::lazy_static! { - static ref NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = - nomad_util::config_from_env().unwrap(); -} +use crate::NEW_NOMAD_CONFIG; #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct PlanResult { - allocation: nomad_client::models::Allocation, + allocation: nomad_client_new::models::Allocation, } #[derive(Debug, sqlx::FromRow)] @@ -44,7 +41,7 @@ struct RunData { #[worker(name = "job-run-nomad-monitor-alloc-plan")] async fn worker( - ctx: &OperationContext, + ctx: &OperationContext, ) -> GlobalResult<()> { let mut redis_job = ctx.redis_job().await?; @@ -61,9 +58,14 @@ async fn worker( } // Fetch node metadata - let node = nomad_client::apis::nodes_api::get_node( - &NOMAD_CONFIG, - nomad_node_id, + let node = nomad_client_new::apis::nodes_api::get_node( + &NEW_NOMAD_CONFIG, + &nomad_node_id, + None, + None, + None, + None, + None, None, None, None, @@ -207,7 +209,7 @@ struct DbOutput { /// Returns `None` if the run could not be found. 
#[tracing::instrument(skip_all)] async fn update_db( - ctx: OperationContext, + ctx: OperationContext, tx: &mut sqlx::Transaction<'_, sqlx::Postgres>, now: i64, RunData { diff --git a/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_update.rs b/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_update.rs index 0571da3dae..c7122f9901 100644 --- a/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_update.rs +++ b/svc/pkg/job-run/worker/src/workers/nomad_monitor_alloc_update.rs @@ -5,7 +5,7 @@ use serde::Deserialize; #[derive(Debug, Clone, Deserialize)] #[serde(rename_all = "PascalCase")] struct AllocationUpdated { - allocation: nomad_client::models::Allocation, + allocation: nomad_client_new::models::Allocation, } #[derive(Debug, Copy, Clone)] @@ -17,8 +17,10 @@ enum TaskState { #[worker(name = "job-run-nomad-monitor-alloc-update")] async fn worker( - ctx: &OperationContext, + ctx: &OperationContext, ) -> GlobalResult<()> { + let crdb = ctx.crdb().await?; + let AllocationUpdated { allocation: alloc } = serde_json::from_str(&ctx.payload_json)?; let alloc_state_json = serde_json::to_value(&alloc)?; diff --git a/svc/pkg/job-run/worker/src/workers/nomad_monitor_eval_update.rs b/svc/pkg/job-run/worker/src/workers/nomad_monitor_eval_update.rs index d2185d0261..27fe5b7855 100644 --- a/svc/pkg/job-run/worker/src/workers/nomad_monitor_eval_update.rs +++ b/svc/pkg/job-run/worker/src/workers/nomad_monitor_eval_update.rs @@ -2,15 +2,12 @@ use chirp_worker::prelude::*; use proto::backend::pkg::*; use serde::Deserialize; -lazy_static::lazy_static! 
{ - static ref NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = - nomad_util::config_from_env().unwrap(); -} +use crate::NEW_NOMAD_CONFIG; #[derive(Debug, Deserialize)] #[serde(rename_all = "PascalCase")] struct PlanResult { - evaluation: nomad_client::models::Evaluation, + evaluation: nomad_client_new::models::Evaluation, } #[derive(Debug, Copy, Clone)] @@ -28,8 +25,10 @@ struct RunRow { #[worker(name = "job-run-nomad-monitor-eval-update")] async fn worker( - ctx: &OperationContext, + ctx: &OperationContext, ) -> GlobalResult<()> { + let crdb = ctx.crdb().await?; + let payload_value = serde_json::from_str::(&ctx.payload_json)?; let PlanResult { evaluation: eval } = serde_json::from_str::(&ctx.payload_json)?; @@ -135,14 +134,15 @@ async fn worker( // Stop the job from attempting to run on another node. This will // be called in job-run-stop too, but we want to catch this earlier. - match nomad_client::apis::jobs_api::stop_job( - &NOMAD_CONFIG, + match nomad_client_new::apis::jobs_api::delete_job( + &NEW_NOMAD_CONFIG, job_id, None, Some(®ion.nomad_region), None, None, Some(false), + None, ) .await { diff --git a/svc/pkg/job-run/worker/src/workers/stop.rs b/svc/pkg/job-run/worker/src/workers/stop.rs index b3f4964189..ba96e36966 100644 --- a/svc/pkg/job-run/worker/src/workers/stop.rs +++ b/svc/pkg/job-run/worker/src/workers/stop.rs @@ -1,10 +1,6 @@ use chirp_worker::prelude::*; use proto::backend::pkg::*; - -lazy_static::lazy_static! { - static ref NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = - nomad_util::config_from_env().unwrap(); -} +use tokio::task; #[derive(Debug, sqlx::FromRow)] struct RunRow { @@ -15,9 +11,17 @@ struct RunRow { #[derive(Debug, sqlx::FromRow)] struct RunMetaNomadRow { + alloc_id: Option, dispatched_job_id: Option, } +use crate::NEW_NOMAD_CONFIG; + +lazy_static::lazy_static! 
{ + static ref NOMAD_CONFIG: nomad_client::apis::configuration::Configuration = + nomad_util::config_from_env().unwrap(); +} + #[worker(name = "job-run-stop")] async fn worker(ctx: &OperationContext) -> GlobalResult<()> { // NOTE: Idempotent @@ -61,22 +65,30 @@ async fn worker(ctx: &OperationContext) -> GlobalRe // functionality if the job dies immediately. You can set it to false to // debug lobbies, but it's preferred to extract metadata from the // job-run-stop lifecycle event. - if let Some(dispatched_job_id) = &run_meta_nomad_row - .as_ref() - .and_then(|x| x.dispatched_job_id.as_ref()) + if let Some(RunMetaNomadRow { + alloc_id, + dispatched_job_id: Some(dispatched_job_id), + }) = &run_meta_nomad_row { - match nomad_client::apis::jobs_api::stop_job( - &NOMAD_CONFIG, + match nomad_client_new::apis::jobs_api::delete_job( + &NEW_NOMAD_CONFIG, dispatched_job_id, None, Some(®ion.nomad_region), None, None, Some(false), // TODO: Maybe change back to true for performance? + None, ) .await { - Ok(_) => tracing::info!("job stopped"), + Ok(_) => { + tracing::info!("job stopped"); + + if let Some(alloc_id) = alloc_id { + kill_allocation(region.nomad_region.clone(), alloc_id.clone()); + } + } Err(err) => { tracing::warn!(?err, "error thrown while stopping job, probably a 404, will continue as if stopped normally"); } @@ -113,7 +125,7 @@ async fn update_db( let run_meta_nomad_row = sql_fetch_optional!( [ctx, RunMetaNomadRow, @tx tx] " - SELECT dispatched_job_id + SELECT alloc_id, dispatched_job_id FROM db_job_state.run_meta_nomad WHERE run_id = $1 FOR UPDATE @@ -154,3 +166,33 @@ async fn update_db( Ok(Some((run_row, run_meta_nomad_row))) } + +// Kills the allocation after 30 seconds +fn kill_allocation(nomad_region: String, alloc_id: String) { + task::spawn(async move { + tokio::time::sleep(util_job::JOB_STOP_TIMEOUT).await; + + tracing::info!(?alloc_id, "manually killing allocation"); + + if let Err(err) = nomad_client::apis::allocations_api::signal_allocation( + 
&NOMAD_CONFIG, + &alloc_id, + None, + Some(&nomad_region), + None, + None, + Some(nomad_client::models::AllocSignalRequest { + task: None, + signal: Some("SIGKILL".to_string()), + }), + ) + .await + { + tracing::warn!( + ?err, + ?alloc_id, + "error while trying to manually kill allocation" + ); + } + }); +} diff --git a/svc/pkg/job-run/worker/tests/create.rs b/svc/pkg/job-run/worker/tests/create.rs index 946ae3c4ff..2a70924252 100644 --- a/svc/pkg/job-run/worker/tests/create.rs +++ b/svc/pkg/job-run/worker/tests/create.rs @@ -17,7 +17,6 @@ async fn basic_http(ctx: TestCtx) { let region_res = op!([ctx] faker_region {}).await.unwrap(); let region_id = region_res.region_id.as_ref().unwrap().as_uuid(); - let region_name_id = ®ion_res.region.as_ref().unwrap().name_id; let template_res = op!([ctx] faker_job_template { kind: Some(faker::job_template::request::Kind::EchoServer(Default::default())), @@ -36,8 +35,8 @@ async fn basic_http(ctx: TestCtx) { let mut started_sub = subscribe!([ctx] job_run::msg::started(run_id)) .await .unwrap(); - let ingress_hostname_http = format!("test-{run_id}-http.lobby.{region_name_id}.{domain_job}"); - let ingress_hostname_https = format!("test-{run_id}-https.lobby.{region_name_id}.{domain_job}"); + let ingress_hostname_http = format!("test-{run_id}-http.lobby.{region_id}.{domain_job}"); + let ingress_hostname_https = format!("test-{run_id}-https.lobby.{region_id}.{domain_job}"); msg!([ctx] job_run::msg::create(run_id) { run_id: Some(run_id.into()), region_id: Some(region_id.into()), @@ -107,7 +106,6 @@ async fn basic_tcp(ctx: TestCtx) { let region_res = op!([ctx] faker_region {}).await.unwrap(); let region_id = region_res.region_id.as_ref().unwrap().as_uuid(); - let region_name_id = ®ion_res.region.as_ref().unwrap().name_id; let template_res = op!([ctx] faker_job_template { kind: Some(faker::job_template::request::Kind::EchoServerTcp(Default::default())), @@ -126,9 +124,8 @@ async fn basic_tcp(ctx: TestCtx) { let mut started_sub = 
subscribe!([ctx] job_run::msg::started(run_id)) .await .unwrap(); - let ingress_hostname_tcp = format!("test-{run_id}-tcp.lobby.{region_name_id}.{domain_job}"); - let ingress_hostname_tcp_tls = - format!("test-{run_id}-tcp-tls.lobby.{region_name_id}.{domain_job}"); + let ingress_hostname_tcp = format!("test-{run_id}-tcp.lobby.{region_id}.{domain_job}"); + let ingress_hostname_tcp_tls = format!("test-{run_id}-tcp-tls.lobby.{region_id}.{domain_job}"); msg!([ctx] job_run::msg::create(run_id) { run_id: Some(run_id.into()), region_id: Some(region_id.into()), @@ -217,7 +214,6 @@ async fn basic_udp(ctx: TestCtx) { let region_res = op!([ctx] faker_region {}).await.unwrap(); let region_id = region_res.region_id.as_ref().unwrap().as_uuid(); - let region_name_id = ®ion_res.region.as_ref().unwrap().name_id; let template_res = op!([ctx] faker_job_template { kind: Some(faker::job_template::request::Kind::EchoServerUdp(Default::default())), @@ -236,7 +232,7 @@ async fn basic_udp(ctx: TestCtx) { let mut started_sub = subscribe!([ctx] job_run::msg::started(run_id)) .await .unwrap(); - let ingress_hostname_udp = format!("test-{run_id}-udp.lobby.{region_name_id}.{domain_job}"); + let ingress_hostname_udp = format!("test-{run_id}-udp.lobby.{region_id}.{domain_job}"); msg!([ctx] job_run::msg::create(run_id) { run_id: Some(run_id.into()), region_id: Some(region_id.into()), diff --git a/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_plan.rs b/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_plan.rs index 564ad113a5..4d8730e5db 100644 --- a/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_plan.rs +++ b/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_plan.rs @@ -1,3 +1,5 @@ +use chirp_worker::prelude::*; + // #[worker_test] // async fn basic(_ctx: TestCtx) { // // TODO: diff --git a/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_update.rs b/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_update.rs index 564ad113a5..4d8730e5db 100644 --- 
a/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_update.rs +++ b/svc/pkg/job-run/worker/tests/nomad_monitor_alloc_update.rs @@ -1,3 +1,5 @@ +use chirp_worker::prelude::*; + // #[worker_test] // async fn basic(_ctx: TestCtx) { // // TODO: diff --git a/svc/pkg/job-run/worker/tests/nomad_monitor_eval_update.rs b/svc/pkg/job-run/worker/tests/nomad_monitor_eval_update.rs index 564ad113a5..4d8730e5db 100644 --- a/svc/pkg/job-run/worker/tests/nomad_monitor_eval_update.rs +++ b/svc/pkg/job-run/worker/tests/nomad_monitor_eval_update.rs @@ -1,3 +1,5 @@ +use chirp_worker::prelude::*; + // #[worker_test] // async fn basic(_ctx: TestCtx) { // // TODO: diff --git a/svc/pkg/job/util/src/consts.rs b/svc/pkg/job/util/src/consts.rs deleted file mode 100644 index ad79e49e04..0000000000 --- a/svc/pkg/job/util/src/consts.rs +++ /dev/null @@ -1,8 +0,0 @@ -// Port ranges for the load balancer hosts -// -// Also see lib/bolt/core/src/dep/terraform/pools.rs and -// lib/bolt/core/src/dep/terraform/install_scripts/mod.rs -pub const MIN_INGRESS_PORT_TCP: u16 = 20000; -pub const MAX_INGRESS_PORT_TCP: u16 = 31999; -pub const MIN_INGRESS_PORT_UDP: u16 = 20000; -pub const MAX_INGRESS_PORT_UDP: u16 = 31999; diff --git a/svc/pkg/job/util/src/lib.rs b/svc/pkg/job/util/src/lib.rs index 1f789be9bf..3636344426 100644 --- a/svc/pkg/job/util/src/lib.rs +++ b/svc/pkg/job/util/src/lib.rs @@ -1,4 +1,5 @@ -pub mod consts; +use std::time::Duration; + pub mod key; /// Determines if a Nomad job is dispatched from our run. 
@@ -9,6 +10,9 @@ pub fn is_nomad_job_run(job_id: &str) -> bool { job_id.starts_with("job-") && job_id.contains("/dispatch-") } +// Timeout from when `stop_job` is called and the kill signal is sent +pub const JOB_STOP_TIMEOUT: Duration = Duration::from_secs(30); + pub const TASK_CLEANUP_CPU: i32 = 50; // Query Prometheus with: diff --git a/svc/pkg/linode/ops/instance-type-get/Cargo.toml b/svc/pkg/linode/ops/instance-type-get/Cargo.toml new file mode 100644 index 0000000000..ba4fee3c81 --- /dev/null +++ b/svc/pkg/linode/ops/instance-type-get/Cargo.toml @@ -0,0 +1,19 @@ +[package] +name = "linode-instance-type-get" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +rivet-operation = { path = "../../../../../lib/operation/core" } +util-linode = { package = "rivet-util-linode", path = "../../util" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } +util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } diff --git a/svc/pkg/linode/ops/instance-type-get/Service.toml b/svc/pkg/linode/ops/instance-type-get/Service.toml new file mode 100644 index 0000000000..182cee9939 --- /dev/null +++ b/svc/pkg/linode/ops/instance-type-get/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "linode-instance-type-get" + +[runtime] +kind = "rust" + +[operation] + +[secrets] +"linode/token" = {} diff --git a/svc/pkg/linode/ops/instance-type-get/src/lib.rs b/svc/pkg/linode/ops/instance-type-get/src/lib.rs new file mode 100644 index 0000000000..baee9eac4b --- /dev/null +++ b/svc/pkg/linode/ops/instance-type-get/src/lib.rs @@ -0,0 +1,43 @@ +use proto::backend::pkg::*; +use rivet_operation::prelude::*; +use util_linode::api; + +#[operation(name = "linode-instance-type-get")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + // Build 
HTTP client + let client = util_linode::Client::new().await?; + + // Get hardware stats from linode and cache + let instance_types_res = ctx + .cache() + .ttl(util::duration::days(1)) + .fetch_one_proto("instance_type", "linode", { + let client = client.clone(); + move |mut cache, key| { + let client = client.clone(); + async move { + let api_res = api::list_instance_types(&client).await?; + + cache.resolve( + &key, + linode::instance_type_get::CacheInstanceTypes { + instance_types: api_res.into_iter().map(Into::into).collect::>(), + }, + ); + + Ok(cache) + } + } + }) + .await?; + + let instance_types = unwrap!(instance_types_res) + .instance_types + .into_iter() + .filter(|ty| ctx.hardware_ids.iter().any(|h| h == &ty.hardware_id)) + .collect::>(); + + Ok(linode::instance_type_get::Response { instance_types }) +} diff --git a/svc/pkg/linode/ops/instance-type-get/tests/integration.rs b/svc/pkg/linode/ops/instance-type-get/tests/integration.rs new file mode 100644 index 0000000000..dbbd485e90 --- /dev/null +++ b/svc/pkg/linode/ops/instance-type-get/tests/integration.rs @@ -0,0 +1,12 @@ +use chirp_worker::prelude::*; + +#[worker_test] +async fn basic(ctx: TestCtx) { + let res = op!([ctx] linode_instance_type_get { + hardware_ids: vec![util_cluster::test::HARDWARE.to_string()], + }) + .await + .unwrap(); + + tracing::info!(?res); +} diff --git a/svc/pkg/linode/ops/server-destroy/Cargo.toml b/svc/pkg/linode/ops/server-destroy/Cargo.toml new file mode 100644 index 0000000000..96fd64eed3 --- /dev/null +++ b/svc/pkg/linode/ops/server-destroy/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "linode-server-destroy" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +rivet-operation = { path = "../../../../../lib/operation/core" } +reqwest = { version = "0.11", features = ["json"] } +util-cluster = { package = "rivet-util-cluster", path = 
"../../../cluster/util" } +util-linode = { package = "rivet-util-linode", path = "../../util" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } + +linode-server-provision = { path = "../server-provision" } diff --git a/svc/pkg/linode/ops/server-destroy/Service.toml b/svc/pkg/linode/ops/server-destroy/Service.toml new file mode 100644 index 0000000000..ddc07bf2d3 --- /dev/null +++ b/svc/pkg/linode/ops/server-destroy/Service.toml @@ -0,0 +1,10 @@ +[service] +name = "linode-server-destroy" + +[runtime] +kind = "rust" + +[operation] + +[secrets] +"linode/token" = {} diff --git a/svc/pkg/linode/ops/server-destroy/src/lib.rs b/svc/pkg/linode/ops/server-destroy/src/lib.rs new file mode 100644 index 0000000000..e7616c0d16 --- /dev/null +++ b/svc/pkg/linode/ops/server-destroy/src/lib.rs @@ -0,0 +1,60 @@ +use proto::backend::pkg::*; +use rivet_operation::prelude::*; +use util_linode::api; + +#[derive(sqlx::FromRow)] +struct LinodeData { + ssh_key_id: i64, + linode_id: Option, + firewall_id: Option, +} + +#[operation(name = "linode-server-destroy")] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let crdb = ctx.crdb().await?; + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + + let data = sql_fetch_optional!( + [ctx, LinodeData, &crdb] + " + SELECT ssh_key_id, linode_id, firewall_id + FROM db_cluster.linode_misc + WHERE server_id = $1 + ", + server_id, + ) + .await?; + + let Some(data) = data else { + tracing::warn!("deleting server that doesn't exist"); + return Ok(linode::server_destroy::Response {}); + }; + + // Build HTTP client + let client = util_linode::Client::new().await?; + + if let Some(linode_id) = data.linode_id { + api::delete_instance(&client, linode_id).await?; + } + + api::delete_ssh_key(&client, data.ssh_key_id).await?; + + if let Some(firewall_id) = data.firewall_id { + api::delete_firewall(&client, firewall_id).await?; + } + + 
// Remove record + sql_execute!( + [ctx, &crdb] + " + DELETE FROM db_cluster.linode_misc + WHERE server_id = $1 + ", + server_id, + ) + .await?; + + Ok(linode::server_destroy::Response {}) +} diff --git a/svc/pkg/linode/ops/server-destroy/tests/integration.rs b/svc/pkg/linode/ops/server-destroy/tests/integration.rs new file mode 100644 index 0000000000..96e3262b70 --- /dev/null +++ b/svc/pkg/linode/ops/server-destroy/tests/integration.rs @@ -0,0 +1,99 @@ +use std::net::Ipv4Addr; + +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn basic(ctx: TestCtx) { + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + let pool_type = backend::cluster::PoolType::Job; + + let vlan_ip = setup(&ctx, server_id, datacenter_id, cluster_id, pool_type).await; + + // Create server + let res = op!([ctx] linode_server_provision { + server_id: Some(server_id.into()), + provider_datacenter_id: "us-southeast".to_string(), + hardware: Some(backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }), + pool_type: pool_type as i32, + vlan_ip: vlan_ip.to_string(), + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Set as provisioned + sql_execute!( + [ctx] + " + UPDATE db_cluster.servers + SET provider_server_id = $1 + WHERE server_id = $2 + ", + &res.provider_server_id, + server_id, + ) + .await + .unwrap(); + + op!([ctx] linode_server_destroy { + server_id: Some(server_id.into()), + }) + .await + .unwrap(); + + // Should do nothing + op!([ctx] linode_server_destroy { + server_id: Some(server_id.into()), + }) + .await + .unwrap(); +} + +async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, + pool_type: backend::cluster::PoolType, +) -> Ipv4Addr { + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + 
owner_team_id: None, + }) + .await + .unwrap(); + + // TODO: This might collide if the test fails, its static + let vlan_ip = util::net::job::vlan_addr_range().last().unwrap(); + + // Insert fake record to appease foreign key constraint (both sql calls in this test are normally done + // by `cluster-server-provision`) + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + vlan_ip +} diff --git a/svc/pkg/linode/ops/server-provision/Cargo.toml b/svc/pkg/linode/ops/server-provision/Cargo.toml new file mode 100644 index 0000000000..76f539e6a9 --- /dev/null +++ b/svc/pkg/linode/ops/server-provision/Cargo.toml @@ -0,0 +1,22 @@ +[package] +name = "linode-server-provision" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +rivet-operation = { path = "../../../../../lib/operation/core" } +reqwest = { version = "0.11", features = ["json"] } +util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } +util-linode = { package = "rivet-util-linode", path = "../../util" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } + +linode-server-destroy = { path = "../server-destroy" } diff --git a/svc/pkg/linode/ops/server-provision/Service.toml b/svc/pkg/linode/ops/server-provision/Service.toml new file mode 100644 index 0000000000..a0c82d8275 --- /dev/null +++ b/svc/pkg/linode/ops/server-provision/Service.toml @@ -0,0 +1,11 @@ +[service] +name = "linode-server-provision" + +[runtime] +kind = "rust" + +[operation] + +[secrets] +"linode/token" = {} +"ssh/server/private_key_openssh" = {} diff --git 
a/svc/pkg/linode/ops/server-provision/src/lib.rs b/svc/pkg/linode/ops/server-provision/src/lib.rs new file mode 100644 index 0000000000..a19a4147d1 --- /dev/null +++ b/svc/pkg/linode/ops/server-provision/src/lib.rs @@ -0,0 +1,224 @@ +use proto::backend::{self, cluster::PoolType, pkg::*}; +use rivet_operation::prelude::*; +use util_linode::api; + +#[operation(name = "linode-server-provision", timeout = 150)] +pub async fn handle( + ctx: OperationContext, +) -> GlobalResult { + let crdb = ctx.crdb().await?; + let server_id = unwrap_ref!(ctx.server_id).as_uuid(); + let provider_datacenter_id = ctx.provider_datacenter_id.clone(); + let pool_type = unwrap!(PoolType::from_i32(ctx.pool_type)); + let provider_hardware = unwrap_ref!(ctx.hardware).provider_hardware.clone(); + + let ns = util::env::namespace(); + let pool_type_str = match pool_type { + PoolType::Job => "job", + PoolType::Gg => "gg", + PoolType::Ats => "ats", + }; + // Linode label must be 3-64 characters, UUID's are 36 + let name = format!("{ns}-{server_id}"); + + let tags = ctx + .tags + .iter() + .cloned() + .chain([ + // HACK: Linode requires tags to be > 3 characters. We extend the namespace to make sure it + // meets the minimum length requirement. 
+ format!("rivet-{ns}"), + format!("{ns}-{provider_datacenter_id}"), + format!("{ns}-{pool_type_str}"), + format!("{ns}-{provider_datacenter_id}-{pool_type_str}"), + ]) + .collect::>(); + + let firewall_inbound = match pool_type { + PoolType::Job => util::net::job::firewall(), + PoolType::Gg => util::net::gg::firewall(), + PoolType::Ats => util::net::ats::firewall(), + }; + + // Build context + let server = api::ProvisionCtx { + datacenter: provider_datacenter_id, + name, + hardware: provider_hardware, + vlan_ip: Some(ctx.vlan_ip.clone()), + tags, + firewall_inbound, + }; + + // Build HTTP client + let client = util_linode::Client::new().await?; + + // Create SSH key + let ssh_key_res = api::create_ssh_key(&client, &server_id.to_string()).await?; + + // Write SSH key id + sql_execute!( + [ctx, &crdb] + " + INSERT INTO db_cluster.linode_misc ( + server_id, + ssh_key_id + ) + VALUES ($1, $2) + ", + server_id, + ssh_key_res.id as i64, + ) + .await?; + + let create_instance_res = + api::create_instance(&client, &server, &ssh_key_res.public_key).await?; + let linode_id = create_instance_res.id; + + // Write linode id + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.linode_misc + SET linode_id = $2 + WHERE server_id = $1 + ", + server_id, + linode_id as i64, + ) + .await?; + + api::wait_instance_ready(&client, linode_id).await?; + + let (create_disks_res, used_custom_image) = create_disks( + &ctx, + &crdb, + &client, + &server, + pool_type, + &ssh_key_res.public_key, + linode_id, + create_instance_res.specs.disk, + ) + .await?; + + api::create_instance_config(&client, &server, linode_id, &create_disks_res).await?; + + let firewall_res = api::create_firewall(&client, &server, linode_id).await?; + + // Write firewall id + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.linode_misc + SET firewall_id = $2 + WHERE server_id = $1 + ", + server_id, + firewall_res.id as i64, + ) + .await?; + + api::boot_instance(&client, linode_id).await?; + + let public_ip = 
api::get_public_ip(&client, linode_id).await?; + + Ok(linode::server_provision::Response { + provider_server_id: linode_id.to_string(), + public_ip: public_ip.to_string(), + already_installed: used_custom_image, + }) +} + +async fn create_disks( + ctx: &OperationContext, + crdb: &CrdbPool, + client: &util_linode::Client, + server: &api::ProvisionCtx, + pool_type: PoolType, + ssh_key: &str, + linode_id: u64, + server_disk_size: u64, +) -> GlobalResult<(api::CreateDisksResponse, bool)> { + // Try to get custom image (if exists) + let image_variant = util_cluster::image_variant( + backend::cluster::Provider::Linode, + &server.datacenter, + pool_type, + ); + let (custom_image, updated) = get_custom_image(ctx, crdb, &image_variant).await?; + + // Default image + let used_custom_image = custom_image.is_some(); + let image = if let Some(custom_image) = custom_image { + tracing::info!("using custom image {}", custom_image); + + custom_image + } else { + tracing::info!("custom image not ready yet, continuing normally"); + + "linode/debian11".to_string() + }; + + // Start custom image creation process + if updated { + msg!([ctx] linode::msg::prebake_provision(&image_variant) { + variant: image_variant, + provider_datacenter_id: server.datacenter.clone(), + pool_type: pool_type as i32, + tags: Vec::new(), + }) + .await?; + } + + let create_disks_res = + api::create_disks(client, ssh_key, linode_id, &image, server_disk_size).await?; + + Ok((create_disks_res, used_custom_image)) +} + +async fn get_custom_image( + ctx: &OperationContext, + crdb: &CrdbPool, + variant: &str, +) -> GlobalResult<(Option, bool)> { + // Get the custom image id for this server, or insert a record and start creating one + let (image_id, updated) = sql_fetch_one!( + [ctx, (Option, bool), &crdb] + " + WITH + updated AS ( + INSERT INTO db_cluster.server_images AS s ( + variant, create_ts + ) + VALUES ($1, $2) + ON CONFLICT (variant) DO UPDATE + SET + image_id = NULL, + create_ts = $2 + WHERE s.create_ts < 
$3 + RETURNING variant + ), + selected AS ( + SELECT variant, image_id + FROM db_cluster.server_images + WHERE variant = $1 + ) + SELECT + selected.image_id AS image_id, + (updated.variant IS NOT NULL) AS updated + FROM selected + FULL OUTER JOIN updated + ON selected.variant = updated.variant; + ", + variant, + util::timestamp::now(), + // 5 month expiration + util::timestamp::now() - util::duration::days(5 * 30), + ) + .await?; + + // Updated is true if this specific sql call either reset (if expired) or inserted the row + Ok((image_id, updated)) +} diff --git a/svc/pkg/linode/ops/server-provision/tests/integration.rs b/svc/pkg/linode/ops/server-provision/tests/integration.rs new file mode 100644 index 0000000000..e1871bca44 --- /dev/null +++ b/svc/pkg/linode/ops/server-provision/tests/integration.rs @@ -0,0 +1,93 @@ +use std::net::Ipv4Addr; + +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn basic(ctx: TestCtx) { + let server_id = Uuid::new_v4(); + let datacenter_id = Uuid::new_v4(); + let cluster_id = Uuid::new_v4(); + let pool_type = backend::cluster::PoolType::Job; + + let vlan_ip = setup(&ctx, server_id, datacenter_id, cluster_id, pool_type).await; + + // Create server + let res = op!([ctx] linode_server_provision { + server_id: Some(server_id.into()), + provider_datacenter_id: "us-southeast".to_string(), + hardware: Some(backend::cluster::Hardware { + provider_hardware: util_cluster::test::HARDWARE.to_string(), + }), + pool_type: pool_type as i32, + vlan_ip: vlan_ip.to_string(), + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Set as provisioned + sql_execute!( + [ctx] + " + UPDATE db_cluster.servers + SET provider_server_id = $1 + WHERE server_id = $2 + ", + &res.provider_server_id, + server_id, + ) + .await + .unwrap(); + + // Destroy server after test is complete so we don't litter + op!([ctx] linode_server_destroy { + server_id: Some(server_id.into()), + }) + .await + .unwrap(); +} + 
+async fn setup( + ctx: &TestCtx, + server_id: Uuid, + datacenter_id: Uuid, + cluster_id: Uuid, + pool_type: backend::cluster::PoolType, +) -> Ipv4Addr { + msg!([ctx] cluster::msg::create(cluster_id) -> cluster::msg::create_complete { + cluster_id: Some(cluster_id.into()), + name_id: util::faker::ident(), + owner_team_id: None, + }) + .await + .unwrap(); + + // TODO: This might collide if the test fails, its static + let vlan_ip = util::net::job::vlan_addr_range().last().unwrap(); + + // Insert fake record to appease foreign key constraint (both sql calls in this test are normally done + // by `cluster-server-provision`) + sql_execute!( + [ctx] + " + INSERT INTO db_cluster.servers ( + server_id, + datacenter_id, + cluster_id, + pool_type, + create_ts + ) + VALUES ($1, $2, $3, $4, $5) + ", + server_id, + datacenter_id, + cluster_id, + pool_type as i64, + util::timestamp::now(), + ) + .await + .unwrap(); + + vlan_ip +} diff --git a/svc/pkg/linode/standalone/gc/Cargo.toml b/svc/pkg/linode/standalone/gc/Cargo.toml new file mode 100644 index 0000000000..099e41ccc2 --- /dev/null +++ b/svc/pkg/linode/standalone/gc/Cargo.toml @@ -0,0 +1,29 @@ +[package] +name = "linode-gc" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chirp-client = { path = "../../../../../lib/chirp/client" } +chrono = "0.4" +reqwest = "0.11" +rivet-connection = { path = "../../../../../lib/connection" } +rivet-health-checks = { path = "../../../../../lib/health-checks" } +rivet-metrics = { path = "../../../../../lib/metrics" } +rivet-operation = { path = "../../../../../lib/operation/core" } +rivet-runtime = { path = "../../../../../lib/runtime" } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +tokio = { version = "1.29", features = ["full"] } +tracing = "0.1" +tracing-subscriber = { version = "0.3", default-features = false, features = ["fmt", "json", "ansi"] } +util-linode = { package = "rivet-util-linode", 
path = "../../util" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/linode/standalone/gc/Service.toml b/svc/pkg/linode/standalone/gc/Service.toml new file mode 100644 index 0000000000..b4beedb888 --- /dev/null +++ b/svc/pkg/linode/standalone/gc/Service.toml @@ -0,0 +1,11 @@ +[service] +name = "linode-gc" + +[runtime] +kind = "rust" + +[headless] +singleton = true + +[secrets] +"linode/token" = {} diff --git a/svc/pkg/linode/standalone/gc/src/lib.rs b/svc/pkg/linode/standalone/gc/src/lib.rs new file mode 100644 index 0000000000..4d2c980539 --- /dev/null +++ b/svc/pkg/linode/standalone/gc/src/lib.rs @@ -0,0 +1,159 @@ +use futures_util::{StreamExt, TryStreamExt}; +use reqwest::header; +use rivet_operation::prelude::*; +use serde_json::json; +use util_linode::api; + +#[derive(sqlx::FromRow)] +struct PrebakeServer { + variant: String, + ssh_key_id: i64, + linode_id: Option, + firewall_id: Option, +} + +#[tracing::instrument(skip_all)] +pub async fn run_from_env(pools: rivet_pools::Pools) -> GlobalResult<()> { + let client = chirp_client::SharedClient::from_env(pools.clone())?.wrap_new("linode-gc"); + let cache = rivet_cache::CacheInner::from_env(pools.clone())?; + let ctx = OperationContext::new( + "linode-gc".into(), + std::time::Duration::from_secs(60), + rivet_connection::Connection::new(client, pools, cache), + Uuid::new_v4(), + Uuid::new_v4(), + util::timestamp::now(), + util::timestamp::now(), + (), + Vec::new(), + ); + let crdb = ctx.crdb().await?; + + let filter = json!({ + "status": "available", + "type": "manual" + }); + let mut headers = header::HeaderMap::new(); + headers.insert( + "X-Filter", + header::HeaderValue::from_str(&serde_json::to_string(&filter)?)?, + ); + + // Build HTTP client + let client = util_linode::Client::new_with_headers(headers).await?; + + let complete_images = api::list_custom_images(&client).await?; + + 
delete_expired_images(&client, &complete_images).await?; + + // Get image ids + let image_ids = complete_images + .into_iter() + .map(|x| x.id.clone()) + .collect::>(); + if image_ids.len() == 100 { + tracing::warn!("page limit reached, new images may not be returned"); + } + + let prebake_servers = sql_fetch_all!( + [ctx, PrebakeServer, &crdb] + " + SELECT variant, ssh_key_id, linode_id, firewall_id + FROM db_cluster.server_images_linode_misc + WHERE image_id = ANY($1) + ", + image_ids, + ) + .await?; + + if prebake_servers.is_empty() { + return Ok(()); + } + + let variants = prebake_servers + .iter() + .map(|server| server.variant.clone()) + .collect::>(); + + // Update image id so it can now be used in provisioning + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.server_images AS i + SET image_id = m.image_id + FROM db_cluster.server_images_linode_misc AS m + WHERE + m.variant = ANY($1) AND + i.variant = m.variant + ", + &variants + ) + .await?; + + // Remove records + sql_execute!( + [ctx, &crdb] + " + DELETE FROM db_cluster.server_images_linode_misc + WHERE variant = ANY($1) + ", + variants, + ) + .await?; + + futures_util::stream::iter(prebake_servers.iter()) + .map(|server| { + let client = client.clone(); + + async move { destroy(&client, server).await } + }) + .buffer_unordered(8) + .try_collect::>() + .await?; + + Ok(()) +} + +async fn delete_expired_images( + client: &util_linode::Client, + complete_images: &[api::CustomImage], +) -> GlobalResult<()> { + let expiration = chrono::Utc::now() - chrono::Duration::days(6 * 30); + + let expired_images = complete_images + .iter() + .filter(|img| img.created < expiration); + + let expired_images_count = expired_images.clone().count(); + if expired_images_count != 0 { + tracing::info!(count=?expired_images_count, "deleting expired images"); + } + + futures_util::stream::iter(expired_images) + .map(|img| { + let client = client.clone(); + + async move { api::delete_custom_image(&client, &img.id).await } + }) 
+ .buffer_unordered(8) + .try_collect::>() + .await?; + + Ok(()) +} + +// NOTE: We do not use `cluster-server-destroy` here because this is a prebake server (only +// `cluster-server-install` works with both) +async fn destroy(client: &util_linode::Client, server: &PrebakeServer) -> GlobalResult<()> { + if let Some(linode_id) = server.linode_id { + api::delete_instance(client, linode_id).await?; + } + + api::delete_ssh_key(client, server.ssh_key_id).await?; + + if let Some(firewall_id) = server.firewall_id { + api::delete_firewall(client, firewall_id).await?; + } + + Ok(()) +} diff --git a/svc/pkg/linode/standalone/gc/src/main.rs b/svc/pkg/linode/standalone/gc/src/main.rs new file mode 100644 index 0000000000..ba458e3761 --- /dev/null +++ b/svc/pkg/linode/standalone/gc/src/main.rs @@ -0,0 +1,30 @@ +use std::time::Duration; + +use rivet_operation::prelude::*; + +fn main() -> GlobalResult<()> { + rivet_runtime::run(start()).unwrap() +} + +async fn start() -> GlobalResult<()> { + let pools = rivet_pools::from_env("linode-gc").await?; + + tokio::task::Builder::new() + .name("linode_gc::health_checks") + .spawn(rivet_health_checks::run_standalone( + rivet_health_checks::Config { + pools: Some(pools.clone()), + }, + ))?; + + tokio::task::Builder::new() + .name("linode_gc::metrics") + .spawn(rivet_metrics::run_standalone())?; + + let mut interval = tokio::time::interval(Duration::from_secs(15)); + loop { + interval.tick().await; + + linode_gc::run_from_env(pools.clone()).await?; + } +} diff --git a/svc/pkg/linode/standalone/gc/tests/integration.rs b/svc/pkg/linode/standalone/gc/tests/integration.rs new file mode 100644 index 0000000000..bb268b9d02 --- /dev/null +++ b/svc/pkg/linode/standalone/gc/tests/integration.rs @@ -0,0 +1,18 @@ +use chirp_worker::prelude::*; + +use ::linode_gc::run_from_env; + +#[tokio::test(flavor = "multi_thread")] +async fn basic() { + tracing_subscriber::fmt() + .json() + .with_max_level(tracing::Level::INFO) + 
.with_span_events(tracing_subscriber::fmt::format::FmtSpan::NONE) + .init(); + + let pools = rivet_pools::from_env("linode-gc-test").await.unwrap(); + + run_from_env(pools).await.unwrap(); + + // TODO: Check that image_id was set in `server_images` table +} diff --git a/svc/pkg/linode/types/instance-type-get.proto b/svc/pkg/linode/types/instance-type-get.proto new file mode 100644 index 0000000000..81d5694500 --- /dev/null +++ b/svc/pkg/linode/types/instance-type-get.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; + +package rivet.backend.pkg.linode.instance_type_get; + +import "proto/common.proto"; + +message Request { + repeated string hardware_ids = 1; +} + +message Response { + message InstanceType { + string hardware_id = 1; + uint64 memory = 2; + uint64 disk = 3; + uint64 vcpus = 4; + uint64 transfer = 5; + } + + repeated InstanceType instance_types = 1; +} + +message CacheInstanceTypes { + repeated Response.InstanceType instance_types = 1; +} + diff --git a/svc/pkg/linode/types/msg/prebake-install-complete.proto b/svc/pkg/linode/types/msg/prebake-install-complete.proto new file mode 100644 index 0000000000..0aec721148 --- /dev/null +++ b/svc/pkg/linode/types/msg/prebake-install-complete.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.linode.msg.prebake_install_complete; + +import "proto/common.proto"; + +/// name = "msg-linode-prebake-install-complete" +/// parameters = [ +/// { name = "ip" }, +/// ] +message Message { + string ip = 1; +} diff --git a/svc/pkg/linode/types/msg/prebake-provision.proto b/svc/pkg/linode/types/msg/prebake-provision.proto new file mode 100644 index 0000000000..0f7b27a1d7 --- /dev/null +++ b/svc/pkg/linode/types/msg/prebake-provision.proto @@ -0,0 +1,17 @@ +syntax = "proto3"; + +package rivet.backend.pkg.linode.msg.prebake_provision; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +/// name = "msg-linode-prebake-provision" +/// parameters = [ +/// { name = "variant" }, +/// ] +message 
Message { + string variant = 1; + string provider_datacenter_id = 2; + rivet.backend.cluster.PoolType pool_type = 3; + repeated string tags = 4; +} diff --git a/svc/pkg/linode/types/server-destroy.proto b/svc/pkg/linode/types/server-destroy.proto new file mode 100644 index 0000000000..400ec126ed --- /dev/null +++ b/svc/pkg/linode/types/server-destroy.proto @@ -0,0 +1,13 @@ +syntax = "proto3"; + +package rivet.backend.pkg.linode.server_destroy; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + rivet.common.Uuid server_id = 1; +} + +message Response { +} diff --git a/svc/pkg/linode/types/server-provision.proto b/svc/pkg/linode/types/server-provision.proto new file mode 100644 index 0000000000..c33b37aeb8 --- /dev/null +++ b/svc/pkg/linode/types/server-provision.proto @@ -0,0 +1,21 @@ +syntax = "proto3"; + +package rivet.backend.pkg.linode.server_provision; + +import "proto/common.proto"; +import "proto/backend/cluster.proto"; + +message Request { + rivet.common.Uuid server_id = 1; + string provider_datacenter_id = 2; + rivet.backend.cluster.Hardware hardware = 3; + rivet.backend.cluster.PoolType pool_type = 4; + string vlan_ip = 5; + repeated string tags = 6; +} + +message Response { + string provider_server_id = 1; + string public_ip = 2; + bool already_installed = 3; +} diff --git a/svc/pkg/linode/util/Cargo.toml b/svc/pkg/linode/util/Cargo.toml new file mode 100644 index 0000000000..4f385e77db --- /dev/null +++ b/svc/pkg/linode/util/Cargo.toml @@ -0,0 +1,15 @@ +[package] +name = "rivet-util-linode" +version = "0.1.0" +edition = "2021" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +chrono = "0.4" +rand = "0.8" +reqwest = { version = "0.11", features = ["json"] } +rivet-operation = { path = "../../../../lib/operation/core" } +serde = { version = "1.0", features = ["derive"] } +serde_json = "1.0" +ssh-key = "0.6.3" diff --git a/svc/pkg/linode/util/src/api.rs b/svc/pkg/linode/util/src/api.rs new 
file mode 100644 index 0000000000..8816817fa1 --- /dev/null +++ b/svc/pkg/linode/util/src/api.rs @@ -0,0 +1,500 @@ +use std::{net::Ipv4Addr, str, time::Duration}; + +use chrono::{DateTime, Utc}; +use proto::backend::pkg::*; +use rivet_operation::prelude::*; +use serde::{Deserialize, Deserializer}; +use serde_json::json; +use ssh_key::PrivateKey; + +use crate::{generate_password, ApiErrorResponse, Client}; + +pub struct ProvisionCtx { + pub datacenter: String, + pub name: String, + pub hardware: String, + pub vlan_ip: Option, + pub tags: Vec, + pub firewall_inbound: Vec, +} + +#[derive(Deserialize)] +struct CreateSshKeyResponse { + id: u64, +} + +pub struct SshKeyResponse { + pub id: u64, + pub public_key: String, +} + +pub async fn create_ssh_key( + client: &Client, + label: &str, +) -> GlobalResult { + tracing::info!("creating linode ssh key"); + + let private_key_openssh = + util::env::read_secret(&["ssh", "server", "private_key_openssh"]).await?; + let private_key = PrivateKey::from_openssh(private_key_openssh.as_bytes())?; + + // Extract the public key + let public_key = private_key.public_key().to_string(); + + let res = client + .post::( + "/profile/sshkeys", + json!({ + // Label must be < 64 characters for some stupid reason + "label": label, + "ssh_key": public_key, + }), + ) + .await?; + + Ok(SshKeyResponse { + id: res.id, + public_key, + }) +} + +#[derive(Deserialize)] +pub struct CreateInstanceResponse { + pub id: u64, + pub specs: InstanceSpec, +} + +#[derive(Deserialize)] +pub struct InstanceSpec { + pub disk: u64, +} + +pub async fn create_instance( + client: &Client, + server: &ProvisionCtx, + ssh_key: &str, +) -> GlobalResult { + let ns = util::env::namespace(); + + tracing::info!("creating linode instance"); + + client + .post( + "/linode/instances", + json!({ + "label": server.name, + "group": ns, + "region": server.datacenter, + "type": server.hardware, + "authorized_keys": vec![ssh_key], + "tags": server.tags, + "private_ip": true, + 
"backups_enabled": false, + }), + ) + .await +} + +#[derive(Deserialize)] +pub struct CreateDiskResponse { + pub id: u64, +} + +pub struct CreateDisksResponse { + pub boot_id: u64, + pub swap_id: u64, +} + +pub async fn create_disks( + client: &Client, + ssh_key: &str, + linode_id: u64, + image: &str, + server_disk_size: u64, +) -> GlobalResult { + tracing::info!("creating boot disk"); + + let boot_disk_res = client + .post::( + &format!("/linode/instances/{linode_id}/disks"), + json!({ + "label": "boot", + "size": server_disk_size - 512, + "authorized_keys": vec![ssh_key], + "root_pass": generate_password(16), + "image": image, + }), + ) + .await?; + + wait_disk_ready(client, linode_id, boot_disk_res.id).await?; + + tracing::info!("creating swap disk"); + + let swap_disk_res = client + .post::( + &format!("/linode/instances/{linode_id}/disks"), + json!({ + "label": "swap", + "size": 512, + "filesystem": "swap", + }), + ) + .await?; + + Ok(CreateDisksResponse { + boot_id: boot_disk_res.id, + swap_id: swap_disk_res.id, + }) +} + +pub async fn create_instance_config( + client: &Client, + server: &ProvisionCtx, + linode_id: u64, + disks: &CreateDisksResponse, +) -> GlobalResult<()> { + tracing::info!("creating instance config"); + + let ns = util::env::namespace(); + + let interfaces = if let Some(vlan_ip) = &server.vlan_ip { + let region_vlan = util::net::region::vlan_ip_net(); + let ipam_address = format!("{}/{}", vlan_ip, region_vlan.prefix_len()); + + json!([ + { + "purpose": "public", + }, + { + "purpose": "vlan", + "label": format!("{ns}-vlan"), + "ipam_address": ipam_address, + }, + ]) + } else { + json!([{ + "purpose": "public", + }]) + }; + + client + .post_no_res( + &format!("/linode/instances/{linode_id}/configs"), + json!({ + "label": "boot_config", + "booted": true, + "kernel": "linode/latest-64bit", + "root_device": "/dev/sda", + "devices": { + "sda": { + "disk_id": disks.boot_id, + }, + "sdb": { + "disk_id": disks.swap_id, + }, + }, + "interfaces": 
interfaces, + }), + ) + .await +} + +#[derive(Deserialize)] +pub struct CreateFirewallResponse { + pub id: u64, +} + +pub async fn create_firewall( + client: &Client, + server: &ProvisionCtx, + linode_id: u64, +) -> GlobalResult { + tracing::info!("creating firewall"); + + let ns = util::env::namespace(); + + let firewall_inbound = server + .firewall_inbound + .iter() + .map(|rule| { + json!({ + "label": rule.label, + "action": "ACCEPT", + "protocol": rule.protocol.to_uppercase(), + "ports": rule.ports, + "addresses": { + "ipv4": rule.inbound_ipv4_cidr, + "ipv6": rule.inbound_ipv6_cidr, + }, + + }) + }) + .collect::>(); + + client + .post( + "/networking/firewalls", + json!({ + // Label doesn't matter + "label": format!("{ns}-{}", generate_password(16)), + "rules": { + "inbound": firewall_inbound, + "inbound_policy": "DROP", + "outbound_policy": "ACCEPT", + }, + "devices": { + "linodes": [linode_id], + }, + "tags": server.tags, + }), + ) + .await +} + +pub async fn boot_instance(client: &Client, linode_id: u64) -> GlobalResult<()> { + tracing::info!("booting instance"); + + client + .post_no_res(&format!("/linode/instances/{linode_id}/boot"), json!({})) + .await +} + +#[derive(Deserialize)] +pub struct LinodeInstanceResponse { + status: String, +} + +// Helpful: https://www.linode.com/community/questions/11588/linodeerrorsapierror-400-linode-busy +/// Polls linode API until an instance is available. 
+pub async fn wait_instance_ready(client: &Client, linode_id: u64) -> GlobalResult<()> { + tracing::info!("waiting for instance to be ready"); + + loop { + let res = client + .get::(&format!("/linode/instances/{linode_id}")) + .await?; + + // Check if ready + match res.status.as_str() { + "booting" | "rebooting" | "shutting_down" | "provisioning" | "deleting" + | "migrating" | "rebuilding" | "cloning" | "restoring" => {} + _ => break, + } + + tokio::time::sleep(Duration::from_secs(1)).await; + } + + Ok(()) +} + +#[derive(Deserialize)] +pub struct LinodeDiskResponse { + status: String, +} + +/// Polls linode API until a linode disk is available. +pub async fn wait_disk_ready(client: &Client, linode_id: u64, disk_id: u64) -> GlobalResult<()> { + tracing::info!("waiting for linode disk to be ready"); + + loop { + let res = client + .inner() + .get(&format!( + "https://api.linode.com/v4/linode/instances/{linode_id}/disks/{disk_id}" + )) + .send() + .await?; + + // Manually handle the disk showing up as not found yet + if res.status() == reqwest::StatusCode::NOT_FOUND { + tracing::info!("disk not found yet"); + } else { + if !res.status().is_success() { + tracing::info!(status=?res.status(), "api request failed"); + bail_with!(ERROR, error = res.json::().await?); + } + + let res = res.json::().await?; + + // Check if ready + match res.status.as_str() { + "not ready" => {} + _ => break, + } + } + + tokio::time::sleep(Duration::from_secs(3)).await; + } + + Ok(()) +} + +#[derive(Deserialize)] +pub struct GetPublicIpResponse { + ipv4: LinodeIpv4, +} + +#[derive(Deserialize)] +pub struct LinodeIpv4 { + public: Vec, +} + +#[derive(Deserialize)] +pub struct LinodeIpv4Config { + address: Ipv4Addr, +} + +pub async fn get_public_ip(client: &Client, linode_id: u64) -> GlobalResult { + tracing::info!("getting ip"); + + let res = client + .get::(&format!("/linode/instances/{linode_id}/ips")) + .await?; + let public = unwrap!(res.ipv4.public.first()); + + Ok(public.address) +} + +pub 
async fn delete_ssh_key(client: &Client, ssh_key_id: i64) -> GlobalResult<()> { + tracing::info!("deleting linode ssh key"); + + client + .delete(&format!("/profile/sshkeys/{ssh_key_id}")) + .await +} + +pub async fn delete_instance(client: &Client, linode_id: i64) -> GlobalResult<()> { + tracing::info!(?linode_id, "deleting linode instance"); + + client + .delete(&format!("/linode/instances/{linode_id}")) + .await +} + +pub async fn delete_firewall(client: &Client, firewall_id: i64) -> GlobalResult<()> { + tracing::info!("deleting firewall"); + + client + .delete(&format!("/networking/firewalls/{firewall_id}")) + .await +} + +pub async fn shut_down(client: &Client, linode_id: i64) -> GlobalResult<()> { + tracing::info!("shutting down instance"); + + client + .post_no_res( + &format!("/linode/instances/{linode_id}/shutdown"), + json!({}), + ) + .await +} + +#[derive(Deserialize)] +pub struct CreateCustomImageResponse { + pub id: String, +} + +pub async fn create_custom_image( + client: &Client, + variant: &str, + disk_id: i64, +) -> GlobalResult { + tracing::info!("creating custom image"); + + client + .post( + "/images", + json!({ + "disk_id": disk_id, + "label": variant, + }), + ) + .await +} + +pub async fn delete_custom_image(client: &Client, image_id: &str) -> GlobalResult<()> { + tracing::info!(?image_id, "deleting custom image"); + + client.delete(&format!("/images/{image_id}")).await +} + +#[derive(Deserialize)] +pub struct ListCustomImagesResponse { + pub data: Vec, +} + +#[derive(Deserialize)] +pub struct CustomImage { + pub id: String, + pub created_by: Option, + #[serde(deserialize_with = "deserialize_date")] + pub created: DateTime, +} + +pub async fn list_custom_images(client: &Client) -> GlobalResult> { + tracing::info!("listing custom images"); + + let res = client.get::("/images").await?; + + Ok(res + .data + .into_iter() + .filter(|img| { + img.created_by + .as_ref() + .map(|created_by| created_by != "linode") + .unwrap_or_default() + }) + 
.collect::>()) +} + +#[derive(Deserialize)] +pub struct ListInstanceTypesResponse { + pub data: Vec, +} + +#[derive(Deserialize)] +pub struct InstanceType { + pub id: String, + pub memory: u64, + pub disk: u64, + pub vcpus: u64, + pub transfer: u64, + pub network_out: u64, +} + +impl From for linode::instance_type_get::response::InstanceType { + fn from(value: InstanceType) -> Self { + linode::instance_type_get::response::InstanceType { + hardware_id: value.id, + memory: value.memory, + disk: value.disk, + vcpus: value.vcpus, + transfer: value.transfer, + // network_out: value.network_out, + } + } +} + +pub async fn list_instance_types(client: &Client) -> GlobalResult> { + tracing::info!("listing instance types"); + + let res = client + .get::("/linode/types") + .await?; + + Ok(res.data) +} + +fn deserialize_date<'de, D>(deserializer: D) -> Result, D::Error> +where + D: Deserializer<'de>, +{ + // Add Z timezone specifier + let s = format!("{}Z", String::deserialize(deserializer)?); + DateTime::parse_from_rfc3339(&s) + .map_err(serde::de::Error::custom) + .map(|dt| dt.with_timezone(&Utc)) +} diff --git a/svc/pkg/linode/util/src/consts.rs b/svc/pkg/linode/util/src/consts.rs new file mode 100644 index 0000000000..e291fbca52 --- /dev/null +++ b/svc/pkg/linode/util/src/consts.rs @@ -0,0 +1 @@ +pub const PREBAKE_HARDWARE: &str = "g6-nanode-1"; diff --git a/svc/pkg/linode/util/src/lib.rs b/svc/pkg/linode/util/src/lib.rs new file mode 100644 index 0000000000..00b66a0c46 --- /dev/null +++ b/svc/pkg/linode/util/src/lib.rs @@ -0,0 +1,199 @@ +use std::{fmt, time::Duration}; + +use rand::{distributions::Alphanumeric, Rng}; +use reqwest::header; +use rivet_operation::prelude::*; +use serde::{de::DeserializeOwned, Deserialize}; + +pub mod consts; +pub mod api; + +#[derive(Clone)] +pub struct Client { + // Safe to clone, has inner Arc + inner: reqwest::Client, + max_retries: u8, +} + +impl Client { + pub async fn new() -> GlobalResult { + let api_token = 
util::env::read_secret(&["linode", "token"]).await?; + let auth = format!("Bearer {}", api_token); + let mut headers = header::HeaderMap::new(); + headers.insert(header::AUTHORIZATION, header::HeaderValue::from_str(&auth)?); + + let client = reqwest::Client::builder() + .default_headers(headers) + .build()?; + + Ok(Client { + inner: client, + max_retries: 8, + }) + } + + pub async fn new_with_headers(mut headers: header::HeaderMap) -> GlobalResult { + let api_token = util::env::read_secret(&["linode", "token"]).await?; + let auth = format!("Bearer {}", api_token); + headers.insert(header::AUTHORIZATION, header::HeaderValue::from_str(&auth)?); + + let client = reqwest::Client::builder() + .default_headers(headers) + .build()?; + + Ok(Client { + inner: client, + max_retries: 8, + }) + } + + pub fn inner(&self) -> &reqwest::Client { + &self.inner + } + + async fn request( + &self, + req: reqwest::RequestBuilder, + body: Option, + skip_404: bool, + ) -> GlobalResult { + let mut retries = 0; + + loop { + let req = if let Some(body) = &body { + unwrap!(req.try_clone()).json(body) + } else { + unwrap!(req.try_clone()) + }; + let res = req.send().await?; + + if !res.status().is_success() { + match res.status() { + reqwest::StatusCode::TOO_MANY_REQUESTS => { + if retries >= self.max_retries { + tracing::info!("all retry attempts failed"); + } else { + tracing::info!("being rate limited, retrying"); + + retries += 1; + + let retry_time = res + .headers() + .get("Retry-After") + .map(|x| x.to_str()) + .transpose()? + .map(|x| x.parse::()) + .transpose()? 
+ .unwrap_or(5); + tokio::time::sleep(Duration::from_secs(retry_time)).await; + + continue; + } + } + reqwest::StatusCode::NOT_FOUND => { + if skip_404 { + tracing::info!("resource not found"); + break Ok(res); + } + } + _ => {} + } + + tracing::info!(status=?res.status(), "api request failed"); + bail!(res.json::().await?.to_string()); + } + + break Ok(res); + } + } + + pub async fn get(&self, endpoint: &str) -> GlobalResult { + let res = self + .request( + self.inner + .get(&format!("https://api.linode.com/v4{endpoint}")), + None, + false, + ) + .await?; + + res.json::().await.map_err(|err| err.into()) + } + + pub async fn delete(&self, endpoint: &str) -> GlobalResult<()> { + self.request( + self.inner + .delete(&format!("https://api.linode.com/v4{endpoint}")), + None, + true, + ) + .await?; + + Ok(()) + } + + pub async fn post( + &self, + endpoint: &str, + body: serde_json::Value, + ) -> GlobalResult { + let res = self + .request( + self.inner + .post(&format!("https://api.linode.com/v4{endpoint}")) + .header("content-type", "application/json"), + Some(body), + false, + ) + .await?; + + res.json::().await.map_err(|err| err.into()) + } + + pub async fn post_no_res(&self, endpoint: &str, body: serde_json::Value) -> GlobalResult<()> { + self.request( + self.inner + .post(&format!("https://api.linode.com/v4{endpoint}")) + .header("content-type", "application/json"), + Some(body), + false, + ) + .await?; + + Ok(()) + } +} + +#[derive(Deserialize)] +pub struct ApiErrorResponse { + errors: Vec, +} + +impl fmt::Display for ApiErrorResponse { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + for error in &self.errors { + if let Some(field) = &error.field { + write!(f, "{:?}: ", field)?; + } + + writeln!(f, "{}", error.reason)?; + } + + Ok(()) + } +} + +#[derive(Deserialize)] +struct ApiError { + field: Option, + reason: String, +} + +/// Generates a random string for a secret. 
+pub(crate) fn generate_password(length: usize) -> String { + rand::thread_rng() + .sample_iter(&Alphanumeric) + .take(length) + .map(char::from) + .collect() +} diff --git a/svc/pkg/linode/worker/Cargo.toml b/svc/pkg/linode/worker/Cargo.toml new file mode 100644 index 0000000000..182a8b5122 --- /dev/null +++ b/svc/pkg/linode/worker/Cargo.toml @@ -0,0 +1,23 @@ +[package] +name = "linode-worker" +version = "0.0.1" +edition = "2018" +authors = ["Rivet Gaming, LLC "] +license = "Apache-2.0" + +[dependencies] +rivet-convert = { path = "../../../../lib/convert" } +chirp-client = { path = "../../../../lib/chirp/client" } +chirp-worker = { path = "../../../../lib/chirp/worker" } +rivet-health-checks = { path = "../../../../lib/health-checks" } +rivet-metrics = { path = "../../../../lib/metrics" } +rivet-runtime = { path = "../../../../lib/runtime" } +util-cluster = { package = "rivet-util-cluster", path = "../../cluster/util" } +util-linode = { package = "rivet-util-linode", path = "../util" } + +[dependencies.sqlx] +version = "0.7" +default-features = false + +[dev-dependencies] +chirp-worker = { path = "../../../../lib/chirp/worker" } diff --git a/svc/pkg/linode/worker/Service.toml b/svc/pkg/linode/worker/Service.toml new file mode 100644 index 0000000000..5671569f1c --- /dev/null +++ b/svc/pkg/linode/worker/Service.toml @@ -0,0 +1,11 @@ +[service] +name = "linode-worker" + +[runtime] +kind = "rust" + +[consumer] + +[secrets] +"linode/token" = {} +"ssh/server/private_key_openssh" = {} diff --git a/svc/pkg/linode/worker/src/lib.rs b/svc/pkg/linode/worker/src/lib.rs new file mode 100644 index 0000000000..3719b10aa8 --- /dev/null +++ b/svc/pkg/linode/worker/src/lib.rs @@ -0,0 +1 @@ +pub mod workers; diff --git a/svc/pkg/linode/worker/src/workers/mod.rs b/svc/pkg/linode/worker/src/workers/mod.rs new file mode 100644 index 0000000000..d54e70dc69 --- /dev/null +++ b/svc/pkg/linode/worker/src/workers/mod.rs @@ -0,0 +1,4 @@ +pub mod prebake_install_complete; +pub mod 
prebake_provision; + +chirp_worker::workers![prebake_install_complete, prebake_provision,]; diff --git a/svc/pkg/linode/worker/src/workers/prebake_install_complete.rs b/svc/pkg/linode/worker/src/workers/prebake_install_complete.rs new file mode 100644 index 0000000000..55b8d420a3 --- /dev/null +++ b/svc/pkg/linode/worker/src/workers/prebake_install_complete.rs @@ -0,0 +1,55 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; +use util_linode::api; + +#[derive(sqlx::FromRow)] +struct PrebakeServer { + variant: String, + linode_id: i64, + disk_id: i64, +} + +#[worker(name = "linode-prebake-install-complete")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let crdb = ctx.crdb().await?; + let prebake_server = sql_fetch_one!( + [ctx, PrebakeServer, &crdb] + " + SELECT variant, linode_id, disk_id + FROM db_cluster.server_images_linode_misc + WHERE public_ip = $1 + ", + &ctx.ip, + ) + .await?; + + // Build HTTP client + let client = util_linode::Client::new().await?; + + // Shut down server before creating custom image + api::shut_down(&client, prebake_server.linode_id).await?; + + // NOTE: Linode imposes a restriction of 50 characters on custom image labels, so unfortunately we cannot + // use the image variant as the name. All we need from the label is for it to be unique. 
Keep in mind that + // the UUID and hyphen take 37 characters, leaving us with 13 for the namespace name + let name = format!("{}-{}", util::env::namespace(), Uuid::new_v4()); + + let create_image_res = api::create_custom_image(&client, &name, prebake_server.disk_id).await?; + + // Write image id + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.server_images_linode_misc + SET image_id = $2 + WHERE variant = $1 + ", + &prebake_server.variant, + create_image_res.id, + ) + .await?; + + Ok(()) +} diff --git a/svc/pkg/linode/worker/src/workers/prebake_provision.rs b/svc/pkg/linode/worker/src/workers/prebake_provision.rs new file mode 100644 index 0000000000..bd18b68d5a --- /dev/null +++ b/svc/pkg/linode/worker/src/workers/prebake_provision.rs @@ -0,0 +1,214 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, cluster::PoolType, pkg::*}; +use util_linode::api; + +#[worker(name = "linode-prebake-provision")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let crdb = ctx.crdb().await?; + let pool_type = unwrap!(PoolType::from_i32(ctx.pool_type)); + + let ns = util::env::namespace(); + let pool_type_str = match pool_type { + PoolType::Job => "job", + PoolType::Gg => "gg", + PoolType::Ats => "ats", + }; + let provider_datacenter_id = &ctx.provider_datacenter_id; + + let name = util_cluster::simple_image_variant(provider_datacenter_id, pool_type); + + let tags = ctx + .tags + .iter() + .cloned() + .chain([ + "prebake".to_string(), + format!("rivet-{ns}"), + format!("{ns}-{provider_datacenter_id}"), + format!("{ns}-{pool_type_str}"), + format!("{ns}-{provider_datacenter_id}-{pool_type_str}"), + ]) + .collect::>(); + + // Build context + let prebake_server = api::ProvisionCtx { + datacenter: provider_datacenter_id.clone(), + name, + hardware: util_linode::consts::PREBAKE_HARDWARE.to_string(), + vlan_ip: None, + tags, + firewall_inbound: vec![util::net::default_firewall()], + }; + + // Build HTTP client + let client = 
util_linode::Client::new().await?; + + match provision(ctx, &crdb, &client, &prebake_server).await { + Ok(public_ip) => { + // Continue to install + msg!([ctx] cluster::msg::server_install(&public_ip) { + public_ip: public_ip, + pool_type: ctx.pool_type, + server_id: None, + provider: backend::cluster::Provider::Linode as i32, + initialize_immediately: false, + }) + .await?; + } + // Handle provisioning errors gracefully + Err(err) => { + tracing::error!(?err, "failed to provision server, destroying"); + destroy(ctx, &crdb, &client).await?; + + // NOTE: This will retry indefinitely to provision a prebake server + retry_bail!("failed to provision server"); + } + } + + Ok(()) +} + +async fn provision( + ctx: &OperationContext, + crdb: &CrdbPool, + client: &util_linode::Client, + server: &api::ProvisionCtx, +) -> GlobalResult { + // Create SSH key + let ssh_key_res = api::create_ssh_key(client, &Uuid::new_v4().to_string()).await?; + + // Write SSH key id + sql_execute!( + [ctx, &crdb] + " + INSERT INTO db_cluster.server_images_linode_misc ( + variant, + ssh_key_id + ) + VALUES ($1, $2) + ", + &ctx.variant, + ssh_key_res.id as i64, + ) + .await?; + + let create_instance_res = api::create_instance(client, server, &ssh_key_res.public_key).await?; + let linode_id = create_instance_res.id; + + // Write linode id + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.server_images_linode_misc + SET linode_id = $2 + WHERE variant = $1 + ", + &ctx.variant, + linode_id as i64, + ) + .await?; + + api::wait_instance_ready(client, linode_id).await?; + + let create_disks_res = api::create_disks( + client, + &ssh_key_res.public_key, + linode_id, + "linode/debian11", + create_instance_res.specs.disk, + ) + .await?; + + api::create_instance_config(client, server, linode_id, &create_disks_res).await?; + + let firewall_res = api::create_firewall(client, server, linode_id).await?; + + // Write firewall id + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.server_images_linode_misc + 
SET firewall_id = $2 + WHERE variant = $1 + ", + &ctx.variant, + firewall_res.id as i64, + ) + .await?; + + api::boot_instance(client, linode_id).await?; + + let public_ip = api::get_public_ip(client, linode_id).await?.to_string(); + + // Write SSH key id + sql_execute!( + [ctx, &crdb] + " + UPDATE db_cluster.server_images_linode_misc + SET + disk_id = $2, + public_ip = $3 + WHERE variant = $1 + ", + &ctx.variant, + create_disks_res.boot_id as i64, + &public_ip, + ) + .await?; + + Ok(public_ip) +} + +#[derive(sqlx::FromRow)] +struct LinodeData { + ssh_key_id: i64, + linode_id: Option, + firewall_id: Option, +} + +async fn destroy( + ctx: &OperationContext, + crdb: &CrdbPool, + client: &util_linode::Client, +) -> GlobalResult<()> { + let data = sql_fetch_optional!( + [ctx, LinodeData, &crdb] + " + SELECT ssh_key_id, linode_id, firewall_id + FROM db_cluster.server_images_linode_misc + WHERE variant = $1 + ", + &ctx.variant, + ) + .await?; + + let Some(data) = data else { + tracing::warn!("deleting server that doesn't exist"); + return Ok(()); + }; + + if let Some(linode_id) = data.linode_id { + api::delete_instance(client, linode_id).await?; + } + + api::delete_ssh_key(client, data.ssh_key_id).await?; + + if let Some(firewall_id) = data.firewall_id { + api::delete_firewall(client, firewall_id).await?; + } + + // Remove record + sql_execute!( + [ctx, &crdb] + " + DELETE FROM db_cluster.server_images_linode_misc + WHERE variant = $1 + ", + &ctx.variant, + ) + .await?; + + Ok(()) +} diff --git a/svc/pkg/linode/worker/tests/prebake_install_complete.rs b/svc/pkg/linode/worker/tests/prebake_install_complete.rs new file mode 100644 index 0000000000..18e529b1ae --- /dev/null +++ b/svc/pkg/linode/worker/tests/prebake_install_complete.rs @@ -0,0 +1,79 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn prebake_install_complete(ctx: TestCtx) { + if !util::feature::server_provision() { + return; + } + + let image_variant = 
util::faker::ident(); + let pool_type = backend::cluster::PoolType::Ats; + + msg!([ctx] linode::msg::prebake_provision(&image_variant) { + variant: image_variant.clone(), + provider_datacenter_id: "us-southeast".to_string(), + pool_type: pool_type as i32, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + let public_ip = loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let row = sql_fetch_optional!( + [ctx, (String,)] + " + SELECT public_ip + FROM db_cluster.server_images_linode_misc + WHERE + variant = $1 AND + public_ip IS NOT NULL + ", + &image_variant, + ) + .await + .unwrap(); + + if let Some((public_ip,)) = row { + break public_ip; + } + }; + + tokio::time::sleep(std::time::Duration::from_secs(500)).await; + + // Wait for install to complete + let mut sub = subscribe!([ctx] cluster::msg::server_install_complete(public_ip)) + .await + .unwrap(); + sub.next().await.unwrap(); + + // Wait for server to have an image id + loop { + tokio::time::sleep(std::time::Duration::from_secs(2)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.server_images_linode_misc + WHERE + variant = $1 AND + image_id IS NOT NULL + ) + ", + &image_variant, + ) + .await + .unwrap(); + + if exists { + break; + } + } + + todo!(""); +} diff --git a/svc/pkg/linode/worker/tests/prebake_provision.rs b/svc/pkg/linode/worker/tests/prebake_provision.rs new file mode 100644 index 0000000000..2a6d0fee5b --- /dev/null +++ b/svc/pkg/linode/worker/tests/prebake_provision.rs @@ -0,0 +1,42 @@ +use chirp_worker::prelude::*; +use proto::backend::{self, pkg::*}; + +#[worker_test] +async fn prebake_provision(ctx: TestCtx) { + let image_variant = util::faker::ident(); + let pool_type = backend::cluster::PoolType::Ats; + + msg!([ctx] linode::msg::prebake_provision(&image_variant) { + variant: image_variant.clone(), + provider_datacenter_id: "us-southeast".to_string(), + 
 pool_type: pool_type as i32, + tags: vec!["test".to_string()], + }) + .await + .unwrap(); + + // Wait for server to have an ip + loop { + tokio::time::sleep(std::time::Duration::from_secs(5)).await; + + let (exists,) = sql_fetch_one!( + [ctx, (bool,)] + " + SELECT EXISTS ( + SELECT 1 + FROM db_cluster.server_images_linode_misc + WHERE + variant = $1 AND + public_ip IS NOT NULL + ) + ", + &image_variant, + ) + .await + .unwrap(); + + if exists { + break; + } + } +} diff --git a/svc/pkg/load-test/standalone/mm-sustain/README.md new file mode 100644 index 0000000000..372000bf01 --- /dev/null +++ b/svc/pkg/load-test/standalone/mm-sustain/README.md @@ -0,0 +1,7 @@ +# mm-sustain + +This load test is meant to catch sporadic errors in the matchmaker that may not +show up during routine tests. +This works by spawning X workers in parallel that repeatedly create, connect +to, then destroy a lobby. It can be updated to pause on a lobby where a +problem is detected so it can be diagnosed manually. 
diff --git a/svc/pkg/mm-config/ops/version-prepare/Cargo.toml b/svc/pkg/mm-config/ops/version-prepare/Cargo.toml index 68c976b680..f88c053755 100644 --- a/svc/pkg/mm-config/ops/version-prepare/Cargo.toml +++ b/svc/pkg/mm-config/ops/version-prepare/Cargo.toml @@ -21,6 +21,10 @@ upload-get = { path = "../../../upload/ops/get" } region-get = { path = "../../../region/ops/get" } tier-list = { path = "../../../tier/ops/list" } +[dependencies.sqlx] +version = "0.7" +default-features = false + [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } util-mm = { package = "rivet-util-mm", path = "../../../mm/util" } diff --git a/svc/pkg/mm-config/ops/version-prepare/src/lib.rs b/svc/pkg/mm-config/ops/version-prepare/src/lib.rs index dc9dc1bb22..41dcdacb79 100644 --- a/svc/pkg/mm-config/ops/version-prepare/src/lib.rs +++ b/svc/pkg/mm-config/ops/version-prepare/src/lib.rs @@ -1,11 +1,14 @@ -mod prewarm_ats; +use std::collections::hash_map::DefaultHasher; +use std::hash::Hasher; use proto::backend::{self, pkg::*}; use rivet_operation::prelude::*; -use std::collections::HashSet; +use std::collections::{HashMap, HashSet}; use crate::prewarm_ats::PrewarmAtsContext; +mod prewarm_ats; + #[operation(name = "mm-config-version-prepare")] async fn handle( ctx: OperationContext, @@ -16,7 +19,7 @@ async fn handle( // List of build paths that will be used to prewarm the ATS cache let mut prewarm_ctx = PrewarmAtsContext { region_ids: HashSet::new(), - paths: HashSet::new(), + paths: HashMap::new(), total_size: 0, }; @@ -86,7 +89,7 @@ async fn handle( }); } - crate::prewarm_ats::prewarm_ats_cache(ctx.chirp(), prewarm_ctx).await?; + crate::prewarm_ats::prewarm_ats_cache(&ctx, prewarm_ctx).await?; Ok(mm_config::version_prepare::Response { config_ctx: Some(backend::matchmaker::VersionConfigCtx { @@ -198,7 +201,13 @@ async fn validate_build( namespace = util::env::namespace(), file_name = util_build::file_name(build_kind, build_compression), ); - if 
prewarm_ctx.paths.insert(path.clone()) { + if !prewarm_ctx.paths.contains_key(&path) { + // Hash build id + let mut hasher = DefaultHasher::new(); + hasher.write(build_id.as_bytes()); + let build_id_hash = hasher.finish(); + + prewarm_ctx.paths.insert(path.clone(), build_id_hash); prewarm_ctx.total_size += upload.content_length; } } diff --git a/svc/pkg/mm-config/ops/version-prepare/src/prewarm_ats.rs b/svc/pkg/mm-config/ops/version-prepare/src/prewarm_ats.rs index cf8b7f62c5..0a82a0a81b 100644 --- a/svc/pkg/mm-config/ops/version-prepare/src/prewarm_ats.rs +++ b/svc/pkg/mm-config/ops/version-prepare/src/prewarm_ats.rs @@ -1,4 +1,4 @@ -use proto::backend::pkg::*; +use proto::backend::{self, pkg::*}; use rivet_operation::prelude::*; use serde_json::json; use std::collections::{HashMap, HashSet}; @@ -7,11 +7,17 @@ use std::collections::{HashMap, HashSet}; #[derive(Debug)] pub struct PrewarmAtsContext { pub region_ids: HashSet, - pub paths: HashSet, + pub paths: HashMap, #[allow(unused)] pub total_size: u64, } +#[derive(sqlx::FromRow)] +struct VlanIp { + datacenter_id: Uuid, + vlan_ip: String, +} + /// Requests resources from the ATS cache to make sure any subsequent requests will be faster. /// /// This is important for games that (a) don't have idle lobbies and need the lobbies to start @@ -29,7 +35,7 @@ pub struct PrewarmAtsContext { /// already be in the cache. 
#[tracing::instrument] pub async fn prewarm_ats_cache( - client: &chirp_client::Client, + ctx: &OperationContext, prewarm_ctx: PrewarmAtsContext, ) -> GlobalResult<()> { if prewarm_ctx.paths.is_empty() { @@ -38,21 +44,52 @@ pub async fn prewarm_ats_cache( let job_spec_json = serde_json::to_string(&gen_prewarm_job(prewarm_ctx.paths.len())?)?; + // Get all vlan ips + let vlan_ips = sql_fetch_all!( + [ctx, VlanIp] + " + SELECT + datacenter_id, vlan_ip + FROM db_cluster.servers + WHERE + datacenter_id = ANY($1) AND + pool_type = $2 AND + vlan_ip IS NOT NULL AND + cloud_destroy_ts IS NULL + ", + // NOTE: region_id is just the old name for datacenter_id + prewarm_ctx.region_ids.iter().cloned().collect::>(), + backend::cluster::PoolType::Ats as i64, + ) + .await?; + for region_id in prewarm_ctx.region_ids { + let mut vlan_ips_in_region = vlan_ips.iter().filter(|row| row.datacenter_id == region_id); + let vlan_ip_count = vlan_ips_in_region.clone().count() as i64; + + ensure!(vlan_ip_count != 0, "no ats servers found"); + // Pass artifact URLs to the job let parameters = prewarm_ctx .paths .iter() .enumerate() - .map(|(i, path)| job_run::msg::create::Parameter { - key: format!("artifact_url_{i}"), - value: format!("http://127.0.0.1:8080{path}"), + .map(|(i, (path, build_id_hash))| { + // NOTE: The algorithm here for deterministically choosing the vlan ip should match the one + // used in the SQL statement in mm-lobby-create @ resolve_image_artifact_url + let idx = (*build_id_hash as i64 % vlan_ip_count).abs() as usize; + let vlan_ip = &unwrap!(vlan_ips_in_region.nth(idx), "no vlan ip").vlan_ip; + + Ok(job_run::msg::create::Parameter { + key: format!("artifact_url_{i}"), + value: format!("http://{vlan_ip}:8080{path}"), + }) }) - .collect::>(); + .collect::>>()?; // Run the job and forget about it let run_id = Uuid::new_v4(); - msg!([client] job_run::msg::create(run_id) { + msg!([ctx] job_run::msg::create(run_id) { run_id: Some(run_id.into()), region_id: 
Some(region_id.into()), parameters: parameters, diff --git a/svc/pkg/mm/standalone/gc/src/lib.rs b/svc/pkg/mm/standalone/gc/src/lib.rs index 75b41b728a..cd728135a4 100644 --- a/svc/pkg/mm/standalone/gc/src/lib.rs +++ b/svc/pkg/mm/standalone/gc/src/lib.rs @@ -2,8 +2,21 @@ use proto::backend::pkg::*; use redis::AsyncCommands; use rivet_operation::prelude::*; -#[tracing::instrument] -pub async fn run_from_env(ts: i64, ctx: OperationContext<()>) -> GlobalResult<()> { +#[tracing::instrument(skip_all)] +pub async fn run_from_env(ts: i64, pools: rivet_pools::Pools) -> GlobalResult<()> { + let client = chirp_client::SharedClient::from_env(pools.clone())?.wrap_new("mm-gc"); + let cache = rivet_cache::CacheInner::from_env(pools.clone())?; + let ctx = OperationContext::new( + "mm-gc".into(), + std::time::Duration::from_secs(60), + rivet_connection::Connection::new(client, pools.clone(), cache.clone()), + Uuid::new_v4(), + Uuid::new_v4(), + ts, + ts, + (), + Vec::new(), + ); let redis_mm = ctx.redis_mm().await?; let mut return_err: Option = None; diff --git a/svc/pkg/mm/standalone/gc/src/main.rs b/svc/pkg/mm/standalone/gc/src/main.rs index 3545a69eab..b9ec1a992a 100644 --- a/svc/pkg/mm/standalone/gc/src/main.rs +++ b/svc/pkg/mm/standalone/gc/src/main.rs @@ -1,6 +1,7 @@ -use rivet_operation::prelude::*; use std::time::Duration; +use rivet_operation::prelude::*; + fn main() -> GlobalResult<()> { rivet_runtime::run(start()).unwrap() } @@ -9,7 +10,6 @@ async fn start() -> GlobalResult<()> { // TODO: Handle ctrl-c let pools = rivet_pools::from_env("mm-gc").await?; - let cache = rivet_cache::CacheInner::from_env(pools.clone())?; tokio::task::Builder::new() .name("mm_gc::health_checks") @@ -28,19 +28,6 @@ async fn start() -> GlobalResult<()> { interval.tick().await; let ts = util::timestamp::now(); - let client = chirp_client::SharedClient::from_env(pools.clone())?.wrap_new("mm-gc"); - let ctx = OperationContext::new( - "mm-gc".into(), - std::time::Duration::from_secs(60), - 
rivet_connection::Connection::new(client, pools.clone(), cache.clone()), - Uuid::new_v4(), - Uuid::new_v4(), - ts, - ts, - (), - Vec::new(), - ); - - mm_gc::run_from_env(ts, ctx).await?; + mm_gc::run_from_env(ts, pools.clone()).await?; } } diff --git a/svc/pkg/mm/standalone/gc/tests/integration.rs b/svc/pkg/mm/standalone/gc/tests/integration.rs index b73840584b..6bf6f06a18 100644 --- a/svc/pkg/mm/standalone/gc/tests/integration.rs +++ b/svc/pkg/mm/standalone/gc/tests/integration.rs @@ -30,7 +30,7 @@ async fn all() { } async fn remove_unready_lobbies(ctx: TestCtx) { - let _pools = rivet_pools::from_env("mm-gc-test").await.unwrap(); + let pools = rivet_pools::from_env("mm-gc-test").await.unwrap(); let lobby = op!([ctx] faker_mm_lobby { skip_set_ready: true, @@ -42,7 +42,7 @@ async fn remove_unready_lobbies(ctx: TestCtx) { // Check that it didn't remove lobbies it shouldn't { - run_from_env(util::timestamp::now(), ctx.op_ctx().base()) + run_from_env(util::timestamp::now(), pools.clone()) .await .unwrap(); tokio::time::sleep(Duration::from_secs(1)).await; @@ -70,7 +70,7 @@ async fn remove_unready_lobbies(ctx: TestCtx) { util::timestamp::now() + util_mm::consts::LOBBY_READY_TIMEOUT + util::duration::seconds(1), - ctx.op_ctx().base(), + pools.clone(), ) .await .unwrap(); @@ -90,7 +90,7 @@ async fn remove_unready_lobbies(ctx: TestCtx) { } async fn remove_unregistered_players(ctx: TestCtx) { - let _pools = rivet_pools::from_env("mm-gc-test").await.unwrap(); + let pools = rivet_pools::from_env("mm-gc-test").await.unwrap(); let lobby = op!([ctx] faker_mm_lobby { ..Default::default() @@ -121,7 +121,7 @@ async fn remove_unregistered_players(ctx: TestCtx) { // Check that it didn't remove players it shouldn't { - run_from_env(util::timestamp::now(), ctx.op_ctx().base()) + run_from_env(util::timestamp::now(), pools.clone()) .await .unwrap(); tokio::time::sleep(Duration::from_secs(1)).await; @@ -153,7 +153,7 @@ async fn remove_unregistered_players(ctx: TestCtx) { 
util::timestamp::now() + util_mm::consts::PLAYER_READY_TIMEOUT + util::duration::seconds(1), - ctx.op_ctx().base(), + pools.clone(), ) .await .unwrap(); @@ -176,7 +176,7 @@ async fn remove_unregistered_players(ctx: TestCtx) { } async fn remove_auto_remove_players(ctx: TestCtx) { - let _pools = rivet_pools::from_env("mm-gc-test").await.unwrap(); + let pools = rivet_pools::from_env("mm-gc-test").await.unwrap(); let lobby = op!([ctx] faker_mm_lobby { ..Default::default() @@ -214,7 +214,7 @@ async fn remove_auto_remove_players(ctx: TestCtx) { // Check that it didn't remove players it shouldn't { - run_from_env(util::timestamp::now(), ctx.op_ctx().base()) + run_from_env(util::timestamp::now(), pools.clone()) .await .unwrap(); tokio::time::sleep(Duration::from_secs(1)).await; @@ -223,7 +223,7 @@ async fn remove_auto_remove_players(ctx: TestCtx) { util::timestamp::now() + util_mm::consts::PLAYER_READY_TIMEOUT + util::duration::seconds(1), - ctx.op_ctx().base(), + pools.clone(), ) .await .unwrap(); @@ -256,7 +256,7 @@ async fn remove_auto_remove_players(ctx: TestCtx) { util::timestamp::now() + util_mm::consts::PLAYER_AUTO_REMOVE_TIMEOUT + util::duration::seconds(1), - ctx.op_ctx().base(), + pools.clone(), ) .await .unwrap(); diff --git a/svc/pkg/mm/types/msg/nomad-node-closed-set.proto b/svc/pkg/mm/types/msg/nomad-node-closed-set.proto new file mode 100644 index 0000000000..2479f42000 --- /dev/null +++ b/svc/pkg/mm/types/msg/nomad-node-closed-set.proto @@ -0,0 +1,15 @@ +syntax = "proto3"; + +package rivet.backend.pkg.mm.msg.nomad_node_closed_set; + +import "proto/common.proto"; + +/// name = "msg-mm-nomad-node-closed-set" +/// parameters = [ +/// { name = "nomad_node_id" }, +/// ] +message Message { + rivet.common.Uuid datacenter_id = 1; + string nomad_node_id = 2; + bool is_closed = 3; +} diff --git a/svc/pkg/mm/util/src/key.rs b/svc/pkg/mm/util/src/key.rs index ef03e569af..d63055425c 100644 --- a/svc/pkg/mm/util/src/key.rs +++ b/svc/pkg/mm/util/src/key.rs @@ -62,6 +62,8 
@@ pub mod lobby_config { pub is_custom: bool, #[serde(rename = "st", skip_serializing_if = "Option::is_none")] pub state_json: Option, + #[serde(rename = "nc")] + pub is_node_closed: bool, } pub const NAMESPACE_ID: &str = "ns"; @@ -75,6 +77,7 @@ pub mod lobby_config { pub const IS_CLOSED: &str = "c"; pub const IS_CUSTOM: &str = "cu"; pub const STATE_JSON: &str = "st"; + pub const IS_NODE_CLOSED: &str = "nc"; } /// HASH @@ -203,6 +206,16 @@ pub fn player_auto_remove() -> String { "{global}:mm:player:auto_remove".to_string() } +/// is closed +pub fn node_is_closed( + node_id: &str, +) -> String { + format!( + "{{global}}:mm:node:{}:is_closed", + node_id, + ) +} + // Placeholder key pub fn empty() -> String { "{global}".to_string() diff --git a/svc/pkg/mm/util/src/verification.rs b/svc/pkg/mm/util/src/verification.rs index 1f9a7a7d28..54924b103c 100644 --- a/svc/pkg/mm/util/src/verification.rs +++ b/svc/pkg/mm/util/src/verification.rs @@ -317,7 +317,8 @@ pub async fn verify_config( .await?; let (latitude, longitude) = ip_res .ip_info - .map(|ip_info| (ip_info.latitude, ip_info.longitude)) + .and_then(|ip_info| ip_info.coords) + .map(|coords| (coords.latitude, coords.longitude)) .unzip(); GlobalResult::Ok(( diff --git a/svc/pkg/mm/worker/Cargo.toml b/svc/pkg/mm/worker/Cargo.toml index b3a9b4d705..8eebbaad0f 100644 --- a/svc/pkg/mm/worker/Cargo.toml +++ b/svc/pkg/mm/worker/Cargo.toml @@ -26,7 +26,7 @@ s3-util = { path = "../../../../lib/s3-util" } serde = { version = "1.0", features = ["derive"] } util-build = { package = "rivet-util-build", path = "../../build/util" } util-job = { package = "rivet-util-job", path = "../../job/util" } -util-mm = { package = "rivet-util-mm", path = "../../mm/util" } +util-mm = { package = "rivet-util-mm", path = "../util" } build-get = { path = "../../build/ops/get" } game-get = { path = "../../game/ops/get" } diff --git a/svc/pkg/mm/worker/redis-scripts/lobby_find.lua b/svc/pkg/mm/worker/redis-scripts/lobby_find.lua index 
5fb396aeba..d74590e978 100644 --- a/svc/pkg/mm/worker/redis-scripts/lobby_find.lua +++ b/svc/pkg/mm/worker/redis-scripts/lobby_find.lua @@ -32,11 +32,14 @@ if query.kind.direct ~= nil then return { 'err', 'LOBBY_NOT_FOUND' } end - -- Check lobby is not closed - -- local is_closed = redis.call('HGET', key_direct_lobby_config, 'c') == '1' - -- if is_closed then - -- return {'err', 'LOBBY_CLOSED'} - -- end + -- Check that lobby and node are not closed + local config_keys = redis.call('HMGET', key_direct_lobby_config, 'c', 'nc') + local is_closed = config_keys[1] == '1' + local is_node_closed = config_keys[2] == '1' + + if is_closed or is_node_closed then + return {'err', 'LOBBY_CLOSED'} + end -- Get max player count local max_player_count = nil diff --git a/svc/pkg/mm/worker/redis-scripts/nomad_node_closed_set.lua b/svc/pkg/mm/worker/redis-scripts/nomad_node_closed_set.lua new file mode 100644 index 0000000000..e5a0a51117 --- /dev/null +++ b/svc/pkg/mm/worker/redis-scripts/nomad_node_closed_set.lua @@ -0,0 +1,25 @@ +local lobby_count = ARGV[1] + +for i=1,lobby_count do + local real_i = i - 1 + local lobby_id = ARGV[real_i * 3 + 2] + local max_players_normal = tonumber(ARGV[real_i * 3 + 3]) + local max_players_party = tonumber(ARGV[real_i * 3 + 4]) + + local key_lobby_config = KEYS[real_i * 4 + 2] + + redis.call('HSET', key_lobby_config, 'nc', 0) + + local is_closed = redis.call('HGET', key_lobby_config, 'c') + + -- Don't modify closed lobbies + if is_closed ~= '1' then + local key_lobby_player_ids = KEYS[real_i * 4 + 3] + local key_lobby_available_spots_normal = KEYS[real_i * 4 + 4] + local key_lobby_available_spots_party = KEYS[real_i * 4 + 5] + + local player_count = redis.call('ZCARD', key_lobby_player_ids) + redis.call('ZADD', key_lobby_available_spots_normal, max_players_normal - player_count, lobby_id) + redis.call('ZADD', key_lobby_available_spots_party, max_players_party - player_count, lobby_id) + end +end diff --git 
a/svc/pkg/mm/worker/src/workers/lobby_create/mod.rs b/svc/pkg/mm/worker/src/workers/lobby_create/mod.rs index 6c874975e7..e2c891f8f4 100644 --- a/svc/pkg/mm/worker/src/workers/lobby_create/mod.rs +++ b/svc/pkg/mm/worker/src/workers/lobby_create/mod.rs @@ -1,8 +1,11 @@ +use std::collections::hash_map::DefaultHasher; +use std::hash::Hasher; +use std::ops::Deref; + use chirp_worker::prelude::*; use proto::backend::{self, pkg::*}; use redis::AsyncCommands; use serde_json::json; -use std::ops::Deref; mod nomad_job; mod oci_config; @@ -203,6 +206,7 @@ async fn worker(ctx: &OperationContext) -> Globa ready_ts: None, is_custom: ctx.is_custom, state_json: None, + is_node_closed: false, })?) .arg(ctx.ts() + util_mm::consts::LOBBY_READY_TIMEOUT) .key(key::lobby_config(lobby_id)) @@ -370,9 +374,7 @@ async fn fetch_version( }) .await?; - let version = unwrap_ref!(get_res.versions.first(), "version not found") - .deref() - .clone(); + let version = unwrap!(get_res.versions.first(), "version not found").clone(); Ok(version) } @@ -569,7 +571,7 @@ async fn update_db( opts.creator_user_id, opts.is_custom, opts.publicity - .unwrap_or(backend::matchmaker::lobby::Publicity::Public) as i32 as i64, + .unwrap_or(backend::matchmaker::lobby::Publicity::Public) as i64, ) .await?; @@ -639,13 +641,13 @@ async fn create_docker_job( && port.port_range.is_none() }) .flat_map(|port| { - let mut ports = vec![direct_proxied_port(lobby_id, region, port)]; + let mut ports = vec![direct_proxied_port(lobby_id, region_id, port)]; match backend::matchmaker::lobby_runtime::ProxyProtocol::from_i32(port.proxy_protocol) { Some( backend::matchmaker::lobby_runtime::ProxyProtocol::Http | backend::matchmaker::lobby_runtime::ProxyProtocol::Https, ) => { - ports.push(path_proxied_port(lobby_id, region, port)); + ports.push(path_proxied_port(lobby_id, region_id, port)); } Some( backend::matchmaker::lobby_runtime::ProxyProtocol::Udp @@ -795,10 +797,10 @@ async fn resolve_image_artifact_url( let build_compression = 
unwrap!(backend::build::BuildCompression::from_i32( build.compression )); - let upload_id_proto = unwrap_ref!(build.upload_id); + let upload_id_proto = unwrap!(build.upload_id); let upload_res = op!([ctx] upload_get { - upload_ids: vec![*upload_id_proto], + upload_ids: vec![upload_id_proto], }) .await?; let upload = unwrap!(upload_res.uploads.first()); @@ -817,11 +819,11 @@ async fn resolve_image_artifact_url( let file_name = util_build::file_name(build_kind, build_compression); let mm_lobby_delivery_method = unwrap!( - std::env::var("RIVET_DS_BUILD_DELIVERY_METHOD").ok(), - "missing RIVET_DS_BUILD_DELIVERY_METHOD" + backend::cluster::BuildDeliveryMethod::from_i32(region.build_delivery_method), + "invalid datacenter build delivery method" ); - match mm_lobby_delivery_method.as_str() { - "s3_direct" => { + match mm_lobby_delivery_method { + backend::cluster::BuildDeliveryMethod::S3Direct => { tracing::info!("using s3 direct delivery"); let bucket = "bucket-build"; @@ -850,12 +852,49 @@ async fn resolve_image_artifact_url( Ok(addr_str) } - "traffic_server" => { + backend::cluster::BuildDeliveryMethod::TrafficServer => { tracing::info!("using traffic server delivery"); + let region_id = unwrap_ref!(region.region_id).as_uuid(); + + // Hash build id + let build_id = unwrap_ref!(build.build_id).as_uuid(); + let mut hasher = DefaultHasher::new(); + hasher.write(build_id.as_bytes()); + let hash = hasher.finish() as i64; + + // Get vlan ip from build id hash for consistent routing + let (ats_vlan_ip,) = sql_fetch_one!( + [ctx, (String,)] + " + WITH sel AS ( + -- Select candidate vlan ips + SELECT + vlan_ip + FROM db_cluster.servers + WHERE + datacenter_id = $1 AND + pool_type = $2 AND + vlan_ip IS NOT NULL AND + cloud_destroy_ts IS NULL + ) + SELECT vlan_ip + FROM sel + -- Use mod to make sure the hash stays within bounds + OFFSET abs($3 % (SELECT COUNT(*) from sel)) + LIMIT 1 + ", + // NOTE: region_id is just the old name for datacenter_id + ®ion_id, + 
backend::cluster::PoolType::Ats as i64, + hash, + ) + .await?; + let upload_id = unwrap_ref!(upload.upload_id).as_uuid(); let addr = format!( - "http://127.0.0.1:8080/s3-cache/{provider}/{namespace}-bucket-build/{upload_id}/{file_name}", + "http://{vlan_ip}:8080/s3-cache/{provider}/{namespace}-bucket-build/{upload_id}/{file_name}", + vlan_ip = ats_vlan_ip, provider = heck::KebabCase::to_kebab_case(provider.as_str()), namespace = util::env::namespace(), upload_id = upload_id, @@ -865,15 +904,12 @@ async fn resolve_image_artifact_url( Ok(addr) } - _ => { - bail!("invalid RIVET_DS_BUILD_DELIVERY_METHOD") - } } } fn direct_proxied_port( lobby_id: Uuid, - region: &backend::region::Region, + region_id: Uuid, port: &backend::matchmaker::lobby_runtime::Port, ) -> GlobalResult { Ok(backend::job::ProxiedPortConfig { @@ -885,7 +921,7 @@ fn direct_proxied_port( "{}-{}.lobby.{}.{}", lobby_id, port.label, - region.name_id, + region_id, unwrap!(util::env::domain_job()), )], proxy_protocol: job_proxy_protocol(port.proxy_protocol)? as i32, @@ -895,7 +931,7 @@ fn direct_proxied_port( fn path_proxied_port( lobby_id: Uuid, - region: &backend::region::Region, + region_id: Uuid, port: &backend::matchmaker::lobby_runtime::Port, ) -> GlobalResult { Ok(backend::job::ProxiedPortConfig { @@ -906,7 +942,7 @@ fn path_proxied_port( // TODO: Not just for hostnames anymore, change name? 
ingress_hostnames: vec![format!( "lobby.{}.{}/{}-{}", - region.name_id, + region_id, unwrap!(util::env::domain_job()), lobby_id, port.label, diff --git a/svc/pkg/mm/worker/src/workers/lobby_find/find.rs b/svc/pkg/mm/worker/src/workers/lobby_find/find.rs index 4c9ea73570..2a00c795aa 100644 --- a/svc/pkg/mm/worker/src/workers/lobby_find/find.rs +++ b/svc/pkg/mm/worker/src/workers/lobby_find/find.rs @@ -203,6 +203,7 @@ pub async fn find( is_closed: false, is_custom: false, state_json: None, + is_node_closed: false, }, ready_expire_ts: ctx.ts() + util_mm::consts::LOBBY_READY_TIMEOUT, }) diff --git a/svc/pkg/mm/worker/src/workers/mod.rs b/svc/pkg/mm/worker/src/workers/mod.rs index 6ec09b02c2..05eb13a3a3 100644 --- a/svc/pkg/mm/worker/src/workers/mod.rs +++ b/svc/pkg/mm/worker/src/workers/mod.rs @@ -11,10 +11,12 @@ mod lobby_job_run_cleanup; mod lobby_ready_set; mod lobby_state_set; mod lobby_stop; +mod nomad_node_closed_set; mod player_register; mod player_remove; chirp_worker::workers![ + nomad_node_closed_set, lobby_cleanup, lobby_closed_set, lobby_create, diff --git a/svc/pkg/mm/worker/src/workers/nomad_node_closed_set.rs b/svc/pkg/mm/worker/src/workers/nomad_node_closed_set.rs new file mode 100644 index 0000000000..e7880ae4ed --- /dev/null +++ b/svc/pkg/mm/worker/src/workers/nomad_node_closed_set.rs @@ -0,0 +1,99 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +lazy_static::lazy_static! 
{ + static ref REDIS_SCRIPT: redis::Script = redis::Script::new(include_str!("../../redis-scripts/nomad_node_closed_set.lua")); +} + +#[derive(Debug, sqlx::FromRow)] +struct LobbyRow { + lobby_id: Uuid, + namespace_id: Uuid, + lobby_group_id: Uuid, + max_players_normal: i64, + max_players_party: i64, +} + +#[worker(name = "mm-nomad-node-closed-set")] +async fn worker( + ctx: &OperationContext, +) -> GlobalResult<()> { + let datacenter_id = unwrap_ref!(ctx.datacenter_id).as_uuid(); + + // Select all lobbies in the node + let lobby_rows = sql_fetch_all!( + [ctx, LobbyRow] + " + UPDATE db_mm_state.lobbies AS l + SET is_closed = $2 + FROM db_job_state.run_meta_nomad AS n + WHERE + l.run_id = n.run_id AND + n.node_id = $1 + RETURNING + lobby_id, namespace_id, lobby_group_id, max_players_normal, max_players_party + ", + &ctx.nomad_node_id, + ctx.is_closed, + ) + .await?; + + // Update matchmaking index + if ctx.is_closed { + let mut pipe = redis::pipe(); + pipe.atomic(); + + for lobby in lobby_rows { + pipe.zrem( + util_mm::key::lobby_available_spots( + lobby.namespace_id, + datacenter_id, + lobby.lobby_group_id, + util_mm::JoinKind::Normal, + ), + lobby.lobby_id.to_string(), + ) + .zrem( + util_mm::key::lobby_available_spots( + lobby.namespace_id, + datacenter_id, + lobby.lobby_group_id, + util_mm::JoinKind::Party, + ), + lobby.lobby_id.to_string(), + ) + .hset(util_mm::key::lobby_config(lobby.lobby_id), "nc", 1); + } + + pipe.query_async(&mut ctx.redis_mm().await?).await?; + } else { + let mut script = REDIS_SCRIPT.prepare_invoke(); + + script.arg(lobby_rows.len()); + + for lobby in lobby_rows { + script + .key(util_mm::key::lobby_config(lobby.lobby_id)) + .key(util_mm::key::lobby_player_ids(lobby.lobby_id)) + .key(util_mm::key::lobby_available_spots( + lobby.namespace_id, + datacenter_id, + lobby.lobby_group_id, + util_mm::JoinKind::Normal, + )) + .key(util_mm::key::lobby_available_spots( + lobby.namespace_id, + datacenter_id, + lobby.lobby_group_id, + 
util_mm::JoinKind::Party, + )) + .arg(lobby.lobby_id.to_string()) + .arg(lobby.max_players_normal) + .arg(lobby.max_players_party); + } + + script.invoke_async(&mut ctx.redis_mm().await?).await?; + } + + Ok(()) +} diff --git a/svc/pkg/mm/worker/tests/lobby_connectivity.rs b/svc/pkg/mm/worker/tests/lobby_connectivity.rs index 66c1e1692c..a85a5ffdc2 100644 --- a/svc/pkg/mm/worker/tests/lobby_connectivity.rs +++ b/svc/pkg/mm/worker/tests/lobby_connectivity.rs @@ -2,14 +2,14 @@ mod common; use chirp_worker::prelude::*; use common::*; -use proto::backend::{self}; +use proto::backend; use std::{ io::{BufRead, BufReader, Write}, net::{TcpStream, UdpSocket}, }; #[worker_test] -async fn lobby_connectivity_http(ctx: TestCtx) { +async fn lobby_connectivity_http_normal(ctx: TestCtx) { if !util::feature::job_run() { return; } @@ -202,7 +202,7 @@ async fn lobby_connectivity_udp(ctx: TestCtx) { let recv_len = socket.recv(&mut response).unwrap(); assert_eq!( - random_body.as_ref(), + random_body.as_bytes(), &response[..recv_len], "echoed wrong response" ); @@ -236,7 +236,7 @@ async fn lobby_connectivity_udp_host(ctx: TestCtx) { let recv_len = socket.recv(&mut response).unwrap(); assert_eq!( - random_body.as_ref(), + random_body.as_bytes(), &response[..recv_len], "echoed wrong response" ); @@ -255,7 +255,7 @@ async fn lobby_connectivity_udp_host(ctx: TestCtx) { let recv_len = socket.recv(&mut response).unwrap(); assert_eq!( - random_body.as_ref(), + random_body.as_bytes(), &response[..recv_len], "echoed wrong response" ); diff --git a/svc/pkg/mm/worker/tests/lobby_create.rs b/svc/pkg/mm/worker/tests/lobby_create.rs index 6aec813b6a..160853cdca 100644 --- a/svc/pkg/mm/worker/tests/lobby_create.rs +++ b/svc/pkg/mm/worker/tests/lobby_create.rs @@ -2,188 +2,8 @@ use chirp_worker::prelude::*; use proto::backend::{self, pkg::*}; use std::collections::HashMap; -struct Setup { - namespace_id: Uuid, - lobby_group_id: Uuid, - region_id: Uuid, -} - -async fn setup(ctx: &TestCtx) -> Setup { - 
let region_res = op!([ctx] faker_region {}).await.unwrap(); - let region_id = region_res.region_id.as_ref().unwrap().as_uuid(); - - let game_res = op!([ctx] faker_game { - ..Default::default() - }) - .await - .unwrap(); - let namespace_id = game_res.namespace_ids.first().unwrap().clone().as_uuid(); - - let build_res = op!([ctx] faker_build { - game_id: game_res.game_id, - image: backend::faker::Image::MmLobbyAutoReady as i32, - }) - .await - .unwrap(); - - let game_version_res = op!([ctx] faker_game_version { - game_id: game_res.game_id, - override_lobby_groups: Some(faker::game_version::request::OverrideLobbyGroups { - lobby_groups: vec![backend::matchmaker::LobbyGroup { - name_id: "test-1".into(), - - regions: vec![backend::matchmaker::lobby_group::Region { - region_id: Some(region_id.into()), - tier_name_id: util_mm::test::TIER_NAME_ID.to_owned(), - idle_lobbies: Some(backend::matchmaker::lobby_group::IdleLobbies { - min_idle_lobbies: 0, - // Don't auto-destroy lobbies from tests - max_idle_lobbies: 32, - }), - }], - max_players_normal: 8, - max_players_direct: 10, - max_players_party: 12, - listable: true, - taggable: false, - allow_dynamic_max_players: false, - - runtime: Some(backend::matchmaker::lobby_runtime::Docker { - build_id: build_res.build_id, - args: Vec::new(), - env_vars: vec![backend::matchmaker::lobby_runtime::EnvVar { - key: "HELLO".into(), - value: "world".into(), - }], - network_mode: backend::matchmaker::lobby_runtime::NetworkMode::Bridge as i32, - ports: vec![ - backend::matchmaker::lobby_runtime::Port { - label: "1234".into(), - target_port: Some(1234), - port_range: None, - proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Http as i32, - proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, - }, - backend::matchmaker::lobby_runtime::Port { - label: "1235".into(), - target_port: Some(1235), - port_range: None, - proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Https as i32, - 
proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, - }, - backend::matchmaker::lobby_runtime::Port { - label: "1236".into(), - target_port: Some(1236), - port_range: None, - proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Tcp as i32, - proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, - }, - backend::matchmaker::lobby_runtime::Port { - label: "1237".into(), - target_port: Some(1237), - port_range: None, - proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::TcpTls as i32, - proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, - }, - ], - - }.into()), - - actions: None, - }, - backend::matchmaker::LobbyGroup { - name_id: "test-2".into(), - - regions: vec![backend::matchmaker::lobby_group::Region { - region_id: Some(region_id.into()), - tier_name_id: util_mm::test::TIER_NAME_ID.to_owned(), - idle_lobbies: Some(backend::matchmaker::lobby_group::IdleLobbies { - min_idle_lobbies: 0, - // See above - max_idle_lobbies: 32, - }), - }], - max_players_normal: 8, - max_players_direct: 10, - max_players_party: 12, - listable: true, - taggable: false, - allow_dynamic_max_players: false, - - runtime: Some(backend::matchmaker::lobby_runtime::Docker { - build_id: build_res.build_id, - args: Vec::new(), - env_vars: vec![backend::matchmaker::lobby_runtime::EnvVar { - key: "HELLO".into(), - value: "world".into(), - }], - network_mode: backend::matchmaker::lobby_runtime::NetworkMode::Host as i32, - ports: vec![ - backend::matchmaker::lobby_runtime::Port { - label: "1234".into(), - target_port: Some(1234), - port_range: None, - proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Http as i32, - proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, - }, - backend::matchmaker::lobby_runtime::Port { - label: "26000-27000".into(), - target_port: None, - port_range: Some(backend::matchmaker::lobby_runtime::PortRange { - min: 26000, - max: 27000, 
- }), - proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Udp as i32, - proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::None as i32, - }, - ], - - }.into()), - - actions: None, - }], - }), - ..Default::default() - }) - .await - .unwrap(); - - let version_get_res = op!([ctx] mm_config_version_get { - version_ids: vec![game_version_res.version_id.unwrap()], - }) - .await - .unwrap(); - let lobby_group_id = version_get_res - .versions - .first() - .unwrap() - .config_meta - .as_ref() - .unwrap() - .lobby_groups - .first() - .unwrap() - .lobby_group_id - .as_ref() - .unwrap() - .as_uuid(); - - op!([ctx] game_namespace_version_set { - namespace_id: Some(namespace_id.into()), - version_id: game_version_res.version_id, - }) - .await - .unwrap(); - - Setup { - namespace_id, - lobby_group_id, - region_id, - } -} - #[worker_test] -async fn lobby_create(ctx: TestCtx) { +async fn single_lobby_create(ctx: TestCtx) { if !util::feature::job_run() { return; } @@ -266,7 +86,7 @@ async fn custom_private_lobby_create(ctx: TestCtx) { assert!(is_custom); assert_eq!( - backend::matchmaker::lobby::Publicity::Private as i32 as i64, + backend::matchmaker::lobby::Publicity::Private as i64, publicity ); } @@ -457,3 +277,183 @@ async fn lobby_create_reuse_job_id(ctx: TestCtx) { // tokio::time::sleep(std::time::Duration::from_secs(1)).await; // } // } + +struct Setup { + namespace_id: Uuid, + lobby_group_id: Uuid, + region_id: Uuid, +} + +async fn setup(ctx: &TestCtx) -> Setup { + let region_res = op!([ctx] faker_region {}).await.unwrap(); + let region_id = region_res.region_id.as_ref().unwrap().as_uuid(); + + let game_res = op!([ctx] faker_game { + ..Default::default() + }) + .await + .unwrap(); + let namespace_id = game_res.namespace_ids.first().unwrap().clone().as_uuid(); + + let build_res = op!([ctx] faker_build { + game_id: game_res.game_id, + image: backend::faker::Image::MmLobbyAutoReady as i32, + }) + .await + .unwrap(); + + let game_version_res = op!([ctx] 
faker_game_version { + game_id: game_res.game_id, + override_lobby_groups: Some(faker::game_version::request::OverrideLobbyGroups { + lobby_groups: vec![backend::matchmaker::LobbyGroup { + name_id: "test-1".into(), + + regions: vec![backend::matchmaker::lobby_group::Region { + region_id: Some(region_id.into()), + tier_name_id: util_mm::test::TIER_NAME_ID.to_owned(), + idle_lobbies: Some(backend::matchmaker::lobby_group::IdleLobbies { + min_idle_lobbies: 0, + // Don't auto-destroy lobbies from tests + max_idle_lobbies: 32, + }), + }], + max_players_normal: 8, + max_players_direct: 10, + max_players_party: 12, + listable: true, + taggable: false, + allow_dynamic_max_players: false, + + runtime: Some(backend::matchmaker::lobby_runtime::Docker { + build_id: build_res.build_id, + args: Vec::new(), + env_vars: vec![backend::matchmaker::lobby_runtime::EnvVar { + key: "HELLO".into(), + value: "world".into(), + }], + network_mode: backend::matchmaker::lobby_runtime::NetworkMode::Bridge as i32, + ports: vec![ + backend::matchmaker::lobby_runtime::Port { + label: "1234".into(), + target_port: Some(1234), + port_range: None, + proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Http as i32, + proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, + }, + backend::matchmaker::lobby_runtime::Port { + label: "1235".into(), + target_port: Some(1235), + port_range: None, + proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Https as i32, + proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, + }, + backend::matchmaker::lobby_runtime::Port { + label: "1236".into(), + target_port: Some(1236), + port_range: None, + proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Tcp as i32, + proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, + }, + backend::matchmaker::lobby_runtime::Port { + label: "1237".into(), + target_port: Some(1237), + port_range: None, + proxy_protocol: 
backend::matchmaker::lobby_runtime::ProxyProtocol::TcpTls as i32, + proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, + }, + ], + + }.into()), + + actions: None, + }, + backend::matchmaker::LobbyGroup { + name_id: "test-2".into(), + + regions: vec![backend::matchmaker::lobby_group::Region { + region_id: Some(region_id.into()), + tier_name_id: util_mm::test::TIER_NAME_ID.to_owned(), + idle_lobbies: Some(backend::matchmaker::lobby_group::IdleLobbies { + min_idle_lobbies: 0, + // See above + max_idle_lobbies: 32, + }), + }], + max_players_normal: 8, + max_players_direct: 10, + max_players_party: 12, + listable: true, + taggable: false, + allow_dynamic_max_players: false, + + runtime: Some(backend::matchmaker::lobby_runtime::Docker { + build_id: build_res.build_id, + args: Vec::new(), + env_vars: vec![backend::matchmaker::lobby_runtime::EnvVar { + key: "HELLO".into(), + value: "world".into(), + }], + network_mode: backend::matchmaker::lobby_runtime::NetworkMode::Host as i32, + ports: vec![ + backend::matchmaker::lobby_runtime::Port { + label: "1234".into(), + target_port: Some(1234), + port_range: None, + proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Http as i32, + proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::GameGuard as i32, + }, + backend::matchmaker::lobby_runtime::Port { + label: "26000-27000".into(), + target_port: None, + port_range: Some(backend::matchmaker::lobby_runtime::PortRange { + min: 26000, + max: 27000, + }), + proxy_protocol: backend::matchmaker::lobby_runtime::ProxyProtocol::Udp as i32, + proxy_kind: backend::matchmaker::lobby_runtime::ProxyKind::None as i32, + }, + ], + + }.into()), + + actions: None, + }], + }), + ..Default::default() + }) + .await + .unwrap(); + + let version_get_res = op!([ctx] mm_config_version_get { + version_ids: vec![game_version_res.version_id.unwrap()], + }) + .await + .unwrap(); + let lobby_group_id = version_get_res + .versions + .first() + .unwrap() + 
.config_meta + .as_ref() + .unwrap() + .lobby_groups + .first() + .unwrap() + .lobby_group_id + .as_ref() + .unwrap() + .as_uuid(); + + op!([ctx] game_namespace_version_set { + namespace_id: Some(namespace_id.into()), + version_id: game_version_res.version_id, + }) + .await + .unwrap(); + + Setup { + namespace_id, + lobby_group_id, + region_id, + } +} diff --git a/svc/pkg/mm/worker/tests/nomad_node_closed_set.rs b/svc/pkg/mm/worker/tests/nomad_node_closed_set.rs new file mode 100644 index 0000000000..763ead1745 --- /dev/null +++ b/svc/pkg/mm/worker/tests/nomad_node_closed_set.rs @@ -0,0 +1,13 @@ +use chirp_worker::prelude::*; +use proto::backend::pkg::*; + +#[worker_test] +async fn nomad_node_closed_set(ctx: TestCtx) { + // msg!([ctx] mm::msg::nomad_node_closed_set(nomad_node_id) { + + // }) + // .await + // .unwrap(); + + todo!(); +} diff --git a/svc/pkg/module/ops/game-version-publish/Cargo.toml b/svc/pkg/module/ops/game-version-publish/Cargo.toml index fdb0b7f12e..4424e8623b 100644 --- a/svc/pkg/module/ops/game-version-publish/Cargo.toml +++ b/svc/pkg/module/ops/game-version-publish/Cargo.toml @@ -9,7 +9,6 @@ license = "Apache-2.0" rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -unzip-n = "0.1.2" itertools = "0.10.5" [dependencies.sqlx] diff --git a/svc/pkg/module/worker/src/workers/instance_create.rs b/svc/pkg/module/worker/src/workers/instance_create.rs index 91fd45de0d..5eb26f01df 100644 --- a/svc/pkg/module/worker/src/workers/instance_create.rs +++ b/svc/pkg/module/worker/src/workers/instance_create.rs @@ -100,7 +100,7 @@ async fn worker( std::env::var("FLY_ORGANIZATION_ID"), std::env::var("FLY_REGION"), ) else { - bail!("fly not enabled") + bail!("fly not enabled"); }; let fly_auth_token = util::env::read_secret(&["fly", "auth_token"]).await?; diff --git a/svc/pkg/monolith/standalone/worker/Cargo.toml b/svc/pkg/monolith/standalone/worker/Cargo.toml index 
b357111d5d..b0d6e709d5 100644 --- a/svc/pkg/monolith/standalone/worker/Cargo.toml +++ b/svc/pkg/monolith/standalone/worker/Cargo.toml @@ -23,11 +23,13 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ cdn-worker = { path = "../../../cdn/worker" } cf-custom-hostname-worker = { path = "../../../cf-custom-hostname/worker" } cloud-worker = { path = "../../../cloud/worker" } +cluster-worker = { path = "../../../cluster/worker" } external-worker = { path = "../../../external/worker" } game-user-worker = { path = "../../../game-user/worker" } job-log-worker = { path = "../../../job-log/worker" } job-run-worker = { path = "../../../job-run/worker" } kv-worker = { path = "../../../kv/worker" } +linode-worker = { path = "../../../linode/worker" } mm-worker = { path = "../../../mm/worker" } module-worker = { path = "../../../module/worker" } push-notification-worker = { path = "../../../push-notification/worker" } diff --git a/svc/pkg/monolith/standalone/worker/src/lib.rs b/svc/pkg/monolith/standalone/worker/src/lib.rs index 008ec7ee70..68a61fe410 100644 --- a/svc/pkg/monolith/standalone/worker/src/lib.rs +++ b/svc/pkg/monolith/standalone/worker/src/lib.rs @@ -25,11 +25,13 @@ pub async fn run_from_env(pools: rivet_pools::Pools) -> GlobalResult<()> { cdn_worker, cf_custom_hostname_worker, cloud_worker, + cluster_worker, external_worker, game_user_worker, job_log_worker, job_run_worker, kv_worker, + linode_worker, mm_worker, module_worker, push_notification_worker, diff --git a/svc/pkg/job-run/standalone/nomad-monitor/Cargo.toml b/svc/pkg/nomad/standalone/monitor/Cargo.toml similarity index 86% rename from svc/pkg/job-run/standalone/nomad-monitor/Cargo.toml rename to svc/pkg/nomad/standalone/monitor/Cargo.toml index eec41f7efe..9f10765849 100644 --- a/svc/pkg/job-run/standalone/nomad-monitor/Cargo.toml +++ b/svc/pkg/nomad/standalone/monitor/Cargo.toml @@ -1,5 +1,5 @@ [package] -name = "job-run-nomad-monitor" +name = "nomad-monitor" version = 
"0.0.1" edition = "2021" authors = ["Rivet Gaming, LLC "] @@ -11,7 +11,6 @@ chrono = "0.4" futures-util = "0.3" indoc = "1.0" lazy_static = "1.4" -nomad-client = "0.0.9" nomad-util = { path = "../../../../../lib/nomad-util" } prost = "0.10" rivet-connection = { path = "../../../../../lib/connection" } @@ -31,5 +30,9 @@ tracing-subscriber = { version = "0.3", default-features = false, features = [ ] } util-job = { package = "rivet-util-job", path = "../../../job/util" } +[dependencies.nomad_client] +git = "https://github.com/rivet-gg/nomad-client" +rev = "abb66bf0c30c7ff5b0c695dae952481c33e538b5" # pragma: allowlist secret + [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } diff --git a/svc/pkg/nomad/standalone/monitor/README.md b/svc/pkg/nomad/standalone/monitor/README.md new file mode 100644 index 0000000000..a322b271e0 --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/README.md @@ -0,0 +1 @@ +# nomad-monitor diff --git a/svc/pkg/job-run/standalone/nomad-monitor/Service.toml b/svc/pkg/nomad/standalone/monitor/Service.toml similarity index 80% rename from svc/pkg/job-run/standalone/nomad-monitor/Service.toml rename to svc/pkg/nomad/standalone/monitor/Service.toml index d1cf4245ac..7ec28d3a2d 100644 --- a/svc/pkg/job-run/standalone/nomad-monitor/Service.toml +++ b/svc/pkg/nomad/standalone/monitor/Service.toml @@ -1,5 +1,5 @@ [service] -name = "job-run-nomad-monitor" +name = "nomad-monitor" essential = true priority = 70 diff --git a/svc/pkg/nomad/standalone/monitor/src/lib.rs b/svc/pkg/nomad/standalone/monitor/src/lib.rs new file mode 100644 index 0000000000..c17e5eefab --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/src/lib.rs @@ -0,0 +1,124 @@ +use std::sync::Arc; + +use rivet_operation::prelude::*; + +mod monitors; +use monitors::*; + +pub async fn run_from_env(pools: rivet_pools::Pools) -> GlobalResult<()> { + let shared_client = chirp_client::SharedClient::from_env(pools.clone())?; + let redis_job = 
pools.redis("persistent")?; + + // Start nomad event monitor + let redis_index_key = "nomad:monitor_index"; + let configuration = nomad_util::new_config_from_env().unwrap(); + + nomad_util::monitor::Monitor::run( + configuration, + redis_job, + redis_index_key, + &["Allocation", "Evaluation", "Node"], + |event| handle(shared_client.clone(), event), + ) + .await?; + + Ok(()) +} + +async fn handle( + shared_client: Arc, + event: nomad_util::monitor::NomadEvent, +) { + // TODO: Figure out how to abstract the branches + if let Some(payload) = event + .decode::("Allocation", "PlanResult") + .unwrap() + { + let client = shared_client.wrap_new("nomad-alloc-plan-monitor"); + let spawn_res = tokio::task::Builder::new() + .name("nomad_alloc_plan_monitor::handle_event") + .spawn(async move { + match alloc_plan::handle(client, &payload, event.payload.to_string()).await { + Ok(_) => {} + Err(err) => { + tracing::error!(?err, ?payload, "error handling event"); + } + } + }); + if let Err(err) = spawn_res { + tracing::error!(?err, "failed to spawn handle_event task"); + } + } else if let Some(payload) = event + .decode::("Allocation", "AllocationUpdated") + .unwrap() + { + let client = shared_client.wrap_new("nomad-alloc-updated-monitor"); + let spawn_res = tokio::task::Builder::new() + .name("nomad_alloc_update_monitor::handle_event") + .spawn(async move { + match alloc_update::handle(client, &payload, event.payload.to_string()).await { + Ok(_) => {} + Err(err) => { + tracing::error!(?err, ?payload, "error handling event"); + } + } + }); + if let Err(err) = spawn_res { + tracing::error!(?err, "failed to spawn handle_event task"); + } + } else if let Some(payload) = event + .decode::("Evaluation", "EvaluationUpdated") + .unwrap() + { + let client = shared_client.wrap_new("nomad-eval-update-monitor"); + let spawn_res = tokio::task::Builder::new() + .name("nomad_eval_update_monitor::handle_event") + .spawn(async move { + match eval_update::handle(client, &payload, 
event.payload.to_string()).await { + Ok(_) => {} + Err(err) => { + tracing::error!(?err, ?payload, "error handling event"); + } + } + }); + if let Err(err) = spawn_res { + tracing::error!(?err, "failed to spawn handle_event task"); + } + } else if let Some(payload) = event + .decode::("Node", "NodeRegistration") + .unwrap() + { + let client = shared_client.wrap_new("nomad-node-registration-monitor"); + let spawn_res = tokio::task::Builder::new() + .name("nomad_node_registration_monitor::handle") + .spawn(async move { + match node_registration::handle(client, &payload).await { + Ok(_) => {} + Err(err) => { + tracing::error!(?err, ?payload, "error handling event"); + } + } + }); + if let Err(err) = spawn_res { + tracing::error!(?err, "failed to spawn handle_event task"); + } + } else if let Some(payload) = event + .decode::("Node", "NodeDrain") + .unwrap() + { + let client = shared_client.wrap_new("nomad-node-drain-monitor"); + let spawn_res = tokio::task::Builder::new() + .name("nomad_node_drain_monitor::handle") + .spawn(async move { + match node_drain::handle(client, &payload).await { + Ok(_) => {} + Err(err) => { + tracing::error!(?err, ?payload, "error handling event"); + } + } + }); + if let Err(err) = spawn_res { + tracing::error!(?err, "failed to spawn handle_event task"); + } + } +} diff --git a/svc/pkg/nomad/standalone/monitor/src/main.rs b/svc/pkg/nomad/standalone/monitor/src/main.rs new file mode 100644 index 0000000000..8dada711e0 --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/src/main.rs @@ -0,0 +1,23 @@ +use rivet_operation::prelude::*; + +fn main() -> GlobalResult<()> { + rivet_runtime::run(start()).unwrap() +} + +async fn start() -> GlobalResult<()> { + let pools = rivet_pools::from_env("nomad-monitor").await?; + + tokio::task::Builder::new() + .name("nomad_monitor::health_checks") + .spawn(rivet_health_checks::run_standalone( + rivet_health_checks::Config { + pools: Some(pools.clone()), + }, + ))?; + + tokio::task::Builder::new() + 
.name("nomad_monitor::metrics") + .spawn(rivet_metrics::run_standalone())?; + + nomad_monitor::run_from_env(pools).await +} diff --git a/svc/pkg/nomad/standalone/monitor/src/monitors/alloc_plan.rs b/svc/pkg/nomad/standalone/monitor/src/monitors/alloc_plan.rs new file mode 100644 index 0000000000..78b86baaec --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/src/monitors/alloc_plan.rs @@ -0,0 +1,31 @@ +use proto::backend::pkg::*; + +use rivet_operation::prelude::*; +use serde::Deserialize; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct PlanResult { + allocation: nomad_client::models::Allocation, +} + +pub async fn handle( + client: chirp_client::Client, + PlanResult { allocation: alloc }: &PlanResult, + payload_json: String, +) -> GlobalResult<()> { + let job_id = unwrap_ref!(alloc.job_id, "alloc has no job id"); + + if !util_job::is_nomad_job_run(job_id) { + tracing::info!(%job_id, "disregarding event"); + return Ok(()); + } + + msg!([client] nomad::msg::monitor_alloc_plan(job_id) { + dispatched_job_id: job_id.clone(), + payload_json: payload_json, + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/nomad/standalone/monitor/src/monitors/alloc_update.rs b/svc/pkg/nomad/standalone/monitor/src/monitors/alloc_update.rs new file mode 100644 index 0000000000..d61bdbfbec --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/src/monitors/alloc_update.rs @@ -0,0 +1,30 @@ +use proto::backend::pkg::*; +use rivet_operation::prelude::*; +use serde::Deserialize; + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct AllocationUpdated { + allocation: nomad_client::models::Allocation, +} + +pub async fn handle( + client: chirp_client::Client, + AllocationUpdated { allocation: alloc }: &AllocationUpdated, + payload_json: String, +) -> GlobalResult<()> { + let job_id = unwrap_ref!(alloc.job_id); + + if !util_job::is_nomad_job_run(job_id) { + tracing::info!(%job_id, "disregarding event"); + return Ok(()); + } + + 
msg!([client] nomad::msg::monitor_alloc_update(job_id) { + dispatched_job_id: job_id.clone(), + payload_json: payload_json, + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/nomad/standalone/monitor/src/monitors/eval_update.rs b/svc/pkg/nomad/standalone/monitor/src/monitors/eval_update.rs new file mode 100644 index 0000000000..729d141a99 --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/src/monitors/eval_update.rs @@ -0,0 +1,44 @@ +use rivet_operation::prelude::*; +use serde::Deserialize; + +use proto::backend::pkg::*; + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct PlanResult { + evaluation: nomad_client::models::Evaluation, +} + +pub async fn handle( + client: chirp_client::Client, + PlanResult { evaluation: eval }: &PlanResult, + payload_json: String, +) -> GlobalResult<()> { + let job_id = unwrap_ref!(eval.job_id, "eval has no job id"); + let triggered_by = unwrap_ref!(eval.triggered_by).as_str(); + let eval_status_raw = unwrap_ref!(eval.status).as_str(); + + // Ignore jobs we don't care about + if !util_job::is_nomad_job_run(job_id) || triggered_by != "job-register" { + tracing::info!(%job_id, "disregarding event"); + return Ok(()); + } + + // Ignore statuses we don't care about + if eval_status_raw != "complete" { + tracing::info!( + %job_id, + ?eval_status_raw, + "ignoring status" + ); + return Ok(()); + } + + msg!([client] nomad::msg::monitor_eval_update(job_id) { + dispatched_job_id: job_id.clone(), + payload_json: payload_json, + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/mod.rs b/svc/pkg/nomad/standalone/monitor/src/monitors/mod.rs similarity index 57% rename from svc/pkg/job-run/standalone/nomad-monitor/src/monitors/mod.rs rename to svc/pkg/nomad/standalone/monitor/src/monitors/mod.rs index 25fa957f68..f8674be664 100644 --- a/svc/pkg/job-run/standalone/nomad-monitor/src/monitors/mod.rs +++ b/svc/pkg/nomad/standalone/monitor/src/monitors/mod.rs @@ -1,3 +1,5 @@ pub 
mod alloc_plan; pub mod alloc_update; pub mod eval_update; +pub mod node_drain; +pub mod node_registration; diff --git a/svc/pkg/nomad/standalone/monitor/src/monitors/node_drain.rs b/svc/pkg/nomad/standalone/monitor/src/monitors/node_drain.rs new file mode 100644 index 0000000000..802e427869 --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/src/monitors/node_drain.rs @@ -0,0 +1,38 @@ +use proto::backend::pkg::*; +use rivet_operation::prelude::*; +use serde::Deserialize; + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct NodeDrain { + node: nomad_client::models::Node, +} + +pub async fn handle( + client: chirp_client::Client, + NodeDrain { node }: &NodeDrain, +) -> GlobalResult<()> { + let node_id = unwrap_ref!(node.ID); + let meta = unwrap_ref!(node.meta, "no metadata on node"); + let server_id = util::uuid::parse(unwrap!(meta.get("server-id"), "no server-id in metadata"))?; + + if let Some(events) = &node.events { + // Check if the last message in the node events is a drain complete message + let is_last_drain_complete_message = events + .last() + .filter(|event| event.details.is_none()) + .and_then(|event| event.message.as_ref()) + .map(|msg| msg == "Node drain complete") + .unwrap_or_default(); + + if is_last_drain_complete_message { + msg!([client] nomad::msg::monitor_node_drain_complete(server_id) { + server_id: Some(server_id.into()), + node_id: node_id.to_owned(), + }) + .await?; + } + } + + Ok(()) +} diff --git a/svc/pkg/nomad/standalone/monitor/src/monitors/node_registration.rs b/svc/pkg/nomad/standalone/monitor/src/monitors/node_registration.rs new file mode 100644 index 0000000000..fe095faab0 --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/src/monitors/node_registration.rs @@ -0,0 +1,26 @@ +use proto::backend::pkg::*; +use rivet_operation::prelude::*; +use serde::Deserialize; + +#[derive(Debug, Clone, Deserialize)] +#[serde(rename_all = "PascalCase")] +pub struct NodeRegistration { + node: 
nomad_client::models::Node, +} + +pub async fn handle( + client: chirp_client::Client, + NodeRegistration { node }: &NodeRegistration, +) -> GlobalResult<()> { + let node_id = unwrap_ref!(node.ID); + let meta = unwrap_ref!(node.meta, "no metadata on node"); + let server_id = util::uuid::parse(unwrap!(meta.get("server-id"), "no server-id in metadata"))?; + + msg!([client] nomad::msg::monitor_node_registered(server_id) { + server_id: Some(server_id.into()), + node_id: node_id.to_owned(), + }) + .await?; + + Ok(()) +} diff --git a/svc/pkg/nomad/standalone/monitor/tests/integration.rs b/svc/pkg/nomad/standalone/monitor/tests/integration.rs new file mode 100644 index 0000000000..1468958f97 --- /dev/null +++ b/svc/pkg/nomad/standalone/monitor/tests/integration.rs @@ -0,0 +1,16 @@ +use chirp_worker::prelude::*; + +use ::nomad_monitor::run_from_env; + +#[tokio::test(flavor = "multi_thread")] +async fn basic() { + tracing_subscriber::fmt() + .json() + .with_max_level(tracing::Level::INFO) + .with_span_events(tracing_subscriber::fmt::format::FmtSpan::NONE) + .init(); + + let pools = rivet_pools::from_env("nomad-monitor-test").await.unwrap(); + + run_from_env(pools).await.unwrap(); +} diff --git a/svc/pkg/job-run/types/msg/nomad-monitor-alloc-plan.proto b/svc/pkg/nomad/types/msg/nomad-monitor-alloc-plan.proto similarity index 62% rename from svc/pkg/job-run/types/msg/nomad-monitor-alloc-plan.proto rename to svc/pkg/nomad/types/msg/nomad-monitor-alloc-plan.proto index 7e1df5b60d..b129c4e79a 100644 --- a/svc/pkg/job-run/types/msg/nomad-monitor-alloc-plan.proto +++ b/svc/pkg/nomad/types/msg/nomad-monitor-alloc-plan.proto @@ -1,10 +1,10 @@ syntax = "proto3"; -package rivet.backend.pkg.job_run.msg.nomad_monitor_alloc_plan; +package rivet.backend.pkg.nomad.msg.monitor_alloc_plan; import "proto/common.proto"; -/// name = "msg-job-run-nomad-monitor-alloc-plan" +/// name = "msg-nomad-monitor-alloc-plan" /// parameters = [ /// { name = "dispatched_job_id" }, /// ] diff --git 
a/svc/pkg/job-run/types/msg/nomad-monitor-alloc-update.proto b/svc/pkg/nomad/types/msg/nomad-monitor-alloc-update.proto similarity index 61% rename from svc/pkg/job-run/types/msg/nomad-monitor-alloc-update.proto rename to svc/pkg/nomad/types/msg/nomad-monitor-alloc-update.proto index a72c7ec204..1de554494a 100644 --- a/svc/pkg/job-run/types/msg/nomad-monitor-alloc-update.proto +++ b/svc/pkg/nomad/types/msg/nomad-monitor-alloc-update.proto @@ -1,10 +1,10 @@ syntax = "proto3"; -package rivet.backend.pkg.job_run.msg.nomad_monitor_alloc_update; +package rivet.backend.pkg.nomad.msg.monitor_alloc_update; import "proto/common.proto"; -/// name = "msg-job-run-nomad-monitor-alloc-update" +/// name = "msg-nomad-monitor-alloc-update" /// parameters = [ /// { name = "dispatched_job_id" }, /// ] diff --git a/svc/pkg/job-run/types/msg/nomad-monitor-eval-update.proto b/svc/pkg/nomad/types/msg/nomad-monitor-eval-update.proto similarity index 62% rename from svc/pkg/job-run/types/msg/nomad-monitor-eval-update.proto rename to svc/pkg/nomad/types/msg/nomad-monitor-eval-update.proto index 823fae9d8f..d4c2636a52 100644 --- a/svc/pkg/job-run/types/msg/nomad-monitor-eval-update.proto +++ b/svc/pkg/nomad/types/msg/nomad-monitor-eval-update.proto @@ -1,10 +1,10 @@ syntax = "proto3"; -package rivet.backend.pkg.job_run.msg.nomad_monitor_eval_update; +package rivet.backend.pkg.nomad.msg.monitor_eval_update; import "proto/common.proto"; -/// name = "msg-job-run-nomad-monitor-eval-update" +/// name = "msg-nomad-monitor-eval-update" /// parameters = [ /// { name = "dispatched_job_id" }, /// ] diff --git a/svc/pkg/nomad/types/msg/nomad-monitor-node-drain-complete.proto b/svc/pkg/nomad/types/msg/nomad-monitor-node-drain-complete.proto new file mode 100644 index 0000000000..92441e620e --- /dev/null +++ b/svc/pkg/nomad/types/msg/nomad-monitor-node-drain-complete.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package rivet.backend.pkg.nomad.msg.monitor_node_drain_complete; + +import 
"proto/common.proto"; + +/// name = "msg-nomad-monitor-node-drain-complete" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid server_id = 1; + string node_id = 2; +} diff --git a/svc/pkg/nomad/types/msg/nomad-monitor-node-registered.proto b/svc/pkg/nomad/types/msg/nomad-monitor-node-registered.proto new file mode 100644 index 0000000000..23b2ab4b76 --- /dev/null +++ b/svc/pkg/nomad/types/msg/nomad-monitor-node-registered.proto @@ -0,0 +1,14 @@ +syntax = "proto3"; + +package rivet.backend.pkg.nomad.msg.monitor_node_registered; + +import "proto/common.proto"; + +/// name = "msg-nomad-monitor-node-registered" +/// parameters = [ +/// { name = "server_id" }, +/// ] +message Message { + rivet.common.Uuid server_id = 1; + string node_id = 2; +} diff --git a/svc/pkg/region/ops/config-get/src/lib.rs b/svc/pkg/region/ops/config-get/src/lib.rs deleted file mode 100644 index 91f29c3f6c..0000000000 --- a/svc/pkg/region/ops/config-get/src/lib.rs +++ /dev/null @@ -1,56 +0,0 @@ -use proto::backend::pkg::*; -use rivet_operation::prelude::*; -use serde::{Deserialize, Serialize}; -use std::collections::HashMap; -use tokio::fs; - -#[operation(name = "region-config-get")] -pub async fn handle( - _ctx: OperationContext, -) -> GlobalResult { - Ok(region::config_get::Response { - regions: read().await, - }) -} - -#[derive(Debug, Clone, Serialize, Deserialize)] -struct Region { - pub id: Uuid, - pub provider: String, - pub provider_region: String, -} - -const READ_CONFIG_ONCE: tokio::sync::OnceCell> = - tokio::sync::OnceCell::const_new(); - -// TODO: Building the region config in to the binary is not clean. We should find a way to -// dynamically configure this. We can't use the env since the config is too large. -// This will be removed anyways once we have dynamically provisioned clusters. 
-pub async fn read() -> HashMap { - READ_CONFIG_ONCE - .get_or_init(|| async { - // Read config - let config_buf = fs::read("/etc/rivet/region_config.json") - .await - .expect("failed to read /region_config.json"); - let config = serde_json::from_slice::>(config_buf.as_slice()) - .expect("invalid region config"); - - // Convert to proto - config - .into_iter() - .map(|(k, v)| { - ( - k, - region::config_get::Region { - id: Some(v.id.into()), - provider: v.provider, - provider_region: v.provider_region, - }, - ) - }) - .collect::>() - }) - .await - .clone() -} diff --git a/svc/pkg/region/ops/get/Cargo.toml b/svc/pkg/region/ops/get/Cargo.toml index 76277e3f54..1f08ec5c4e 100644 --- a/svc/pkg/region/ops/get/Cargo.toml +++ b/svc/pkg/region/ops/get/Cargo.toml @@ -9,7 +9,9 @@ license = "Apache-2.0" rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -region-config-get = { path = "../config-get" } + +cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } +cluster-datacenter-location-get = { path = "../../../cluster/ops/datacenter-location-get" } [dependencies.sqlx] version = "0.7" diff --git a/svc/pkg/region/ops/get/src/lib.rs b/svc/pkg/region/ops/get/src/lib.rs index eadf8d51b2..bff9efdc1b 100644 --- a/svc/pkg/region/ops/get/src/lib.rs +++ b/svc/pkg/region/ops/get/src/lib.rs @@ -1,205 +1,39 @@ -use proto::backend::{ - self, - pkg::{region::config_get::Region, *}, -}; +use proto::backend::{self, pkg::*}; use rivet_operation::prelude::*; -fn universal_region(region: &Region) -> backend::region::UniversalRegion { - use backend::region::UniversalRegion; - - match region.provider.as_str() { - "local" => match region.provider_region.as_str() { - "lcl1" => UniversalRegion::Local, - _ => { - tracing::error!(provider_region = ?region.provider_region, "unknown local region"); - UniversalRegion::Unknown - } - }, - "digitalocean" => match region.provider_region.as_str() { - 
"ams1" | "ams2" | "ams3" => UniversalRegion::Amsterdam, - "blr1" => UniversalRegion::Bangalore, - "fra1" => UniversalRegion::Frankfurt, - "lon1" => UniversalRegion::London, - "nyc1" | "nyc2" | "nyc3" => UniversalRegion::NewYorkCity, - "sfo1" | "sfo2" | "sfo3" => UniversalRegion::SanFrancisco, - "sgp1" => UniversalRegion::Singapore, - "tor1" => UniversalRegion::Toronto, - _ => { - tracing::error!(provider_region = ?region.provider_region, "unknown digitalocean region"); - UniversalRegion::Unknown - } - }, - "linode" => match region.provider_region.as_str() { - "nl-ams" => UniversalRegion::Amsterdam, - "ap-west" => UniversalRegion::Mumbai, - "ca-central" => UniversalRegion::Toronto, - "ap-southeast" => UniversalRegion::Sydney, - "us-central" => UniversalRegion::Dallas, - "us-west" => UniversalRegion::SanFrancisco, - "us-southeast" => UniversalRegion::Atlanta, - "us-east" => UniversalRegion::NewYorkCity, - "us-iad" => UniversalRegion::WashingtonDc, - "eu-west" => UniversalRegion::London, - "ap-south" => UniversalRegion::Singapore, - "eu-central" => UniversalRegion::Frankfurt, - "ap-northeast" => UniversalRegion::Tokyo, - "us-ord" => UniversalRegion::Chicago, - "fr-par" => UniversalRegion::Paris, - "us-sea" => UniversalRegion::Seattle, - "br-gru" => UniversalRegion::SaoPaulo, - "se-sto" => UniversalRegion::Stockholm, - "in-maa" => UniversalRegion::Chennai, - "jp-osa" => UniversalRegion::Osaka, - "it-mil" => UniversalRegion::Milan, - "us-mia" => UniversalRegion::Miami, - "id-cgk" => UniversalRegion::Jakarta, - "us-lax" => UniversalRegion::LosAngeles, - _ => { - tracing::error!(provider_region = ?region.provider_region, "unknown linode region"); - UniversalRegion::Unknown - } - }, - _ => { - tracing::error!(provider = ?region.provider, provider_region = ?region.provider_region, "unknown provider"); - UniversalRegion::Unknown - } - } -} - -fn provider_display_name(region: &Region) -> &'static str { - match region.provider.as_str() { - "local" => "Local", - "digitalocean" 
=> "DigitalOcean", - "linode" => "Linode", - _ => "Unknown", - } -} - -/// See corresponding values in `region-resolve`. -// fn universal_region_short(universal_region: &backend::region::UniversalRegion) -> &'static str { -// use backend::region::UniversalRegion; - -// match universal_region { -// UniversalRegion::Unknown => "ukn", -// UniversalRegion::Local => "lcl", - -// UniversalRegion::Amsterdam => "ams", -// UniversalRegion::Atlanta => "atl", -// UniversalRegion::Bangalore => "blr", -// UniversalRegion::Dallas => "dfw", -// UniversalRegion::Frankfurt => "fra", -// UniversalRegion::London => "lon", -// UniversalRegion::Mumbai => "mba", -// UniversalRegion::Newark => "ewr", -// UniversalRegion::NewYorkCity => "nyc", -// UniversalRegion::SanFrancisco => "sfo", -// UniversalRegion::Singapore => "sgp", -// UniversalRegion::Sydney => "syd", -// UniversalRegion::Tokyo => "tok", -// UniversalRegion::Toronto => "tor", -// UniversalRegion::WashingtonDc => "dca", -// } -// } - -fn universal_region_display_name( - universal_region: &backend::region::UniversalRegion, -) -> &'static str { - use backend::region::UniversalRegion; - - match universal_region { - UniversalRegion::Unknown => "Unknown", - UniversalRegion::Local => "Local", - - UniversalRegion::Amsterdam => "Amsterdam", - UniversalRegion::Atlanta => "Atlanta", - UniversalRegion::Bangalore => "Bangalore", - UniversalRegion::Dallas => "Dallas", - UniversalRegion::Frankfurt => "Frankfurt", - UniversalRegion::London => "London", - UniversalRegion::Mumbai => "Mumbai", - UniversalRegion::Newark => "Newark", - UniversalRegion::NewYorkCity => "New York City", - UniversalRegion::SanFrancisco => "San Francisco", - UniversalRegion::Singapore => "Singapore", - UniversalRegion::Sydney => "Sydney", - UniversalRegion::Tokyo => "Tokyo", - UniversalRegion::Toronto => "Toronto", - UniversalRegion::WashingtonDc => "Washington, DC", - UniversalRegion::Chicago => "Chicago", - UniversalRegion::Paris => "Paris", - 
UniversalRegion::Seattle => "Seattle", - UniversalRegion::SaoPaulo => "São Paulo", - UniversalRegion::Stockholm => "Stockholm", - UniversalRegion::Chennai => "Chennai", - UniversalRegion::Osaka => "Osaka", - UniversalRegion::Milan => "Milan", - UniversalRegion::Miami => "Miami", - UniversalRegion::Jakarta => "Jakarta", - UniversalRegion::LosAngeles => "Los Angeles", - } -} - -fn universal_region_coords(universal_region: &backend::region::UniversalRegion) -> (f64, f64) { - use backend::region::UniversalRegion; - - match universal_region { - UniversalRegion::Unknown => (0.0, 0.0), - UniversalRegion::Local => (32.23239, -110.96132), - - UniversalRegion::Amsterdam => (52.36730, 4.89982), - UniversalRegion::Atlanta => (33.74819, -84.39086), - UniversalRegion::Bangalore => (12.97740, 77.57423), - UniversalRegion::Dallas => (32.77557, -96.79560), - UniversalRegion::Frankfurt => (50.11044, 8.68183), - UniversalRegion::London => (51.50335, -0.07940), - UniversalRegion::Mumbai => (18.94010, 72.83466), - UniversalRegion::Newark => (40.735717094562006, -74.1724228101556), - UniversalRegion::NewYorkCity => (40.71298, -74.00720), - UniversalRegion::SanFrancisco => (37.77938, -122.41843), - UniversalRegion::Singapore => (1.27980, 103.83728), - UniversalRegion::Sydney => (-33.87271, 151.20569), - UniversalRegion::Tokyo => (35.68951, 139.69170), - UniversalRegion::Toronto => (43.65161, -79.38313), - UniversalRegion::WashingtonDc => (38.89212213251763, -77.00908542245845), - UniversalRegion::Chicago => (41.8781, -87.6298), - UniversalRegion::Paris => (48.8566, 2.3522), - UniversalRegion::Seattle => (47.6062, -122.3321), - UniversalRegion::SaoPaulo => (-23.5505, -46.6333), - UniversalRegion::Stockholm => (59.3293, 18.0686), - UniversalRegion::Chennai => (13.0827, 80.2707), - UniversalRegion::Osaka => (34.6937, 135.5023), - UniversalRegion::Milan => (45.4642, 9.1900), - UniversalRegion::Miami => (25.7617, -80.1918), - UniversalRegion::Jakarta => (-6.2088, 106.8456), - 
UniversalRegion::LosAngeles => (34.0522, -118.2437), - } -} - -fn convert_region( - name_id: &str, - region: &Region, - primary_region_id: Uuid, +fn convert_datacenter( + datacenter: &backend::cluster::Datacenter, + locations: &[cluster::datacenter_location_get::response::Datacenter], ) -> GlobalResult { - let universal_region = universal_region(region); - let provider_display_name = provider_display_name(region).to_owned(); + let datacenter_id = unwrap_ref!(datacenter.datacenter_id).as_uuid(); + let provider = unwrap!(backend::cluster::Provider::from_i32(datacenter.provider)); + + let coords = locations + .iter() + .find(|location| location.datacenter_id == datacenter.datacenter_id) + .and_then(|dc| dc.coords.clone()) + .unwrap_or(backend::net::Coordinates { + latitude: 0.0, + longitude: 0.0, + }); - let region_display_name = universal_region_display_name(&universal_region).to_owned(); - let (latitude, longitude) = universal_region_coords(&universal_region); Ok(backend::region::Region { - region_id: region.id, + region_id: datacenter.datacenter_id, enabled: true, nomad_region: "global".into(), - nomad_datacenter: name_id.to_owned(), - provider: region.provider.clone(), - provider_region: region.provider_region.clone(), - // TODO: Replace with more intelligent method of determining the CDN region - cdn_region_id: Some(primary_region_id.into()), - universal_region: universal_region as i32, - provider_display_name, - region_display_name, - name_id: name_id.to_owned(), - latitude, - longitude, + nomad_datacenter: datacenter_id.to_string(), + provider: match provider { + backend::cluster::Provider::Linode => "linode".to_string(), + }, + provider_region: datacenter.provider_datacenter_id.clone(), + provider_display_name: match provider { + backend::cluster::Provider::Linode => "Linode".to_string(), + }, + region_display_name: datacenter.display_name.clone(), + name_id: datacenter.name_id.clone(), + coords: Some(coords), + + build_delivery_method: 
datacenter.build_delivery_method, }) } @@ -207,23 +41,20 @@ fn convert_region( async fn handle( ctx: OperationContext, ) -> GlobalResult { - let res = op!([ctx] region_config_get {}).await?; - let regions = &res.regions; - let primary_region = unwrap!( - regions.get(util::env::primary_region()), - "missing primary region" - ); - - let regions = regions + let (datacenters_res, locations_res) = tokio::try_join!( + op!([ctx] cluster_datacenter_get { + datacenter_ids: ctx.region_ids.clone(), + }), + op!([ctx] cluster_datacenter_location_get { + datacenter_ids: ctx.region_ids.clone(), + }), + )?; + + let regions = datacenters_res + .datacenters .iter() - .filter(|(_, x)| { - x.id.as_ref() - .map_or(false, |id| ctx.region_ids.contains(id)) - }) - .map(|(name_id, region)| { - convert_region(name_id, region, unwrap_ref!(primary_region.id).as_uuid()) - }) - .collect::>>()?; + .map(|dc| convert_datacenter(dc, &locations_res.datacenters)) + .collect::>>()?; Ok(region::get::Response { regions }) } diff --git a/svc/pkg/region/ops/list/Cargo.toml b/svc/pkg/region/ops/list/Cargo.toml index 3848bc105f..454b938830 100644 --- a/svc/pkg/region/ops/list/Cargo.toml +++ b/svc/pkg/region/ops/list/Cargo.toml @@ -9,7 +9,8 @@ license = "Apache-2.0" rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -region-config-get = { path = "../config-get" } + +cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" } [dependencies.sqlx] version = "0.7" diff --git a/svc/pkg/region/ops/list/src/lib.rs b/svc/pkg/region/ops/list/src/lib.rs index 30a57ce724..2947ce7c82 100644 --- a/svc/pkg/region/ops/list/src/lib.rs +++ b/svc/pkg/region/ops/list/src/lib.rs @@ -5,9 +5,16 @@ use rivet_operation::prelude::*; async fn handle( ctx: OperationContext, ) -> GlobalResult { - let res = op!([ctx] region_config_get {}).await?; - let mut region_ids = res.regions.values().flat_map(|x| x.id).collect::>(); - 
region_ids.sort_by_cached_key(|x| x.as_uuid()); + let datacenter_list_res = op!([ctx] cluster_datacenter_list { + cluster_ids: vec![util::env::default_cluster_id().into()], + }) + .await?; + let cluster = unwrap!( + datacenter_list_res.clusters.first(), + "default cluster doesn't exist" + ); - Ok(region::list::Response { region_ids }) + Ok(region::list::Response { + region_ids: cluster.datacenter_ids.clone(), + }) } diff --git a/svc/pkg/region/ops/recommend/src/lib.rs b/svc/pkg/region/ops/recommend/src/lib.rs index 0f30c9c846..ed323493a3 100644 --- a/svc/pkg/region/ops/recommend/src/lib.rs +++ b/svc/pkg/region/ops/recommend/src/lib.rs @@ -1,7 +1,7 @@ use futures_util::TryFutureExt; use std::cmp::{Ordering, PartialOrd}; -use proto::backend::pkg::*; +use proto::backend::{self, pkg::*}; use rivet_operation::prelude::*; #[derive(Debug, Clone)] @@ -34,10 +34,11 @@ async fn handle( .iter() .map(common::Uuid::as_uuid) .collect::>(); + let coords = unwrap_ref!(ctx.coords); #[allow(deprecated)] - let origin = if let (Some(lat), Some(long)) = (ctx.latitude, ctx.longitude) { - OriginKind::Coords(lat, long) + let origin = if let Some(coords) = &ctx.coords { + OriginKind::Coords(coords.latitude, coords.longitude) } else if let Some(origin_ip) = &ctx.origin_ip { OriginKind::Ip(origin_ip.clone()) } else { @@ -65,9 +66,12 @@ async fn list_regions( ip: origin_ip.to_owned(), }) .await?; + let ip_info = unwrap_ref!(res.ip_info, "cannot recommend regions to a bogon ip"); - GlobalResult::Ok((ip_info.latitude, ip_info.longitude)) + let coords = unwrap_ref!(ip_info.coords); + + GlobalResult::Ok((coords.latitude, coords.longitude)) } } }, @@ -92,10 +96,12 @@ async fn list_regions( .regions .iter() .map(|region| { + let coords = unwrap_ref!(region.coords); + Ok(( - (region.latitude, region.longitude), + (coords.latitude, coords.longitude), unwrap!( - geoutils::Location::new(region.latitude, region.longitude) + geoutils::Location::new(coords.latitude, coords.longitude) 
.distance_to(&origin_location) .ok(), "failed to calculate distance to region" @@ -115,8 +121,10 @@ async fn list_regions( .map(|((latitude, longitude), distance_meters, region_id)| { region::recommend::response::Region { region_id: Some(region_id.into()), - latitude, - longitude, + coords: Some(backend::net::Coordinates { + latitude, + longitude, + }), distance_meters, } }) diff --git a/svc/pkg/region/ops/recommend/tests/integration.rs b/svc/pkg/region/ops/recommend/tests/integration.rs index 6256b1bd5d..48dcaf24b9 100644 --- a/svc/pkg/region/ops/recommend/tests/integration.rs +++ b/svc/pkg/region/ops/recommend/tests/integration.rs @@ -1,4 +1,5 @@ use chirp_worker::prelude::*; +use proto::backend; #[worker_test] async fn empty(ctx: TestCtx) { @@ -17,9 +18,11 @@ async fn empty(ctx: TestCtx) { // .unwrap(); op!([ctx] region_recommend { - latitude: Some(100.0), - longitude: Some(200.0), region_ids: regions_res.region_ids.clone(), + coords: Some(backend::net::Coordinates { + latitude: 100.0, + longitude: 200.0, + }), ..Default::default() }) .await diff --git a/svc/pkg/region/ops/resolve/Cargo.toml b/svc/pkg/region/ops/resolve/Cargo.toml index b3906bac78..378cdf9edd 100644 --- a/svc/pkg/region/ops/resolve/Cargo.toml +++ b/svc/pkg/region/ops/resolve/Cargo.toml @@ -9,7 +9,9 @@ license = "Apache-2.0" rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" -region-config-get = { path = "../config-get" } + +cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } +cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" } [dependencies.sqlx] version = "0.7" diff --git a/svc/pkg/region/ops/resolve/src/lib.rs b/svc/pkg/region/ops/resolve/src/lib.rs index 6ad9a3914e..33f094aa70 100644 --- a/svc/pkg/region/ops/resolve/src/lib.rs +++ b/svc/pkg/region/ops/resolve/src/lib.rs @@ -5,14 +5,27 @@ use rivet_operation::prelude::*; async fn handle( ctx: OperationContext, 
) -> GlobalResult { - let res = op!([ctx] region_config_get {}).await?; - let regions = res - .regions + let datacenter_list_res = op!([ctx] cluster_datacenter_list { + cluster_ids: vec![util::env::default_cluster_id().into()], + }) + .await?; + let cluster = unwrap!( + datacenter_list_res.clusters.first(), + "default cluster doesn't exist" + ); + + let datacenters_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: cluster.datacenter_ids.clone(), + }) + .await?; + + let regions = datacenters_res + .datacenters .iter() - .filter(|(x, _)| ctx.name_ids.contains(x)) - .map(|(name_id, region)| region::resolve::response::Region { - region_id: region.id, - name_id: name_id.clone(), + .filter(|dc| ctx.name_ids.contains(&dc.name_id)) + .map(|dc| region::resolve::response::Region { + region_id: dc.datacenter_id, + name_id: dc.name_id.clone(), }) .collect::>(); diff --git a/svc/pkg/region/types/config-get.proto b/svc/pkg/region/types/config-get.proto deleted file mode 100644 index e7a2d96766..0000000000 --- a/svc/pkg/region/types/config-get.proto +++ /dev/null @@ -1,19 +0,0 @@ -syntax = "proto3"; - -package rivet.backend.pkg.region.config_get; - -import "proto/common.proto"; - -message Region { - rivet.common.Uuid id = 1; - string provider = 2; - string provider_region = 3; -} - -message Request { - -} - -message Response { - map regions = 1; -} diff --git a/svc/pkg/region/types/recommend.proto b/svc/pkg/region/types/recommend.proto index fe1a90159e..e2de692664 100644 --- a/svc/pkg/region/types/recommend.proto +++ b/svc/pkg/region/types/recommend.proto @@ -3,19 +3,20 @@ syntax = "proto3"; package rivet.backend.pkg.region.recommend; import "proto/common.proto"; +import "proto/backend/net.proto"; message Request { + reserved 3, 4; optional string origin_ip = 1 [deprecated = true]; - optional double latitude = 3; - optional double longitude = 4; + optional rivet.backend.net.Coordinates coords = 5; repeated rivet.common.Uuid region_ids = 2; } message Response { message 
Region { + reserved 2, 3; rivet.common.Uuid region_id = 1; - double latitude = 2; - double longitude = 3; + rivet.backend.net.Coordinates coords = 5; double distance_meters = 4; } diff --git a/svc/pkg/tier/ops/list/Cargo.toml b/svc/pkg/tier/ops/list/Cargo.toml index 68e7a16369..c335edf0bd 100644 --- a/svc/pkg/tier/ops/list/Cargo.toml +++ b/svc/pkg/tier/ops/list/Cargo.toml @@ -9,6 +9,12 @@ license = "Apache-2.0" rivet-operation = { path = "../../../../../lib/operation/core" } chirp-client = { path = "../../../../../lib/chirp/client" } prost = "0.10" +util-cluster = { package = "rivet-util-cluster", path = "../../../cluster/util" } + +cluster-datacenter-get = { path = "../../../cluster/ops/datacenter-get" } +linode-instance-type-get = { path = "../../../linode/ops/instance-type-get" } [dev-dependencies] chirp-worker = { path = "../../../../../lib/chirp/worker" } + +cluster-datacenter-list = { path = "../../../cluster/ops/datacenter-list" } diff --git a/svc/pkg/tier/ops/list/Service.toml b/svc/pkg/tier/ops/list/Service.toml index 3828cc537c..0c163e632c 100644 --- a/svc/pkg/tier/ops/list/Service.toml +++ b/svc/pkg/tier/ops/list/Service.toml @@ -5,3 +5,6 @@ name = "tier-list" kind = "rust" [operation] + +[secrets] +"linode/token" = {} diff --git a/svc/pkg/tier/ops/list/src/lib.rs b/svc/pkg/tier/ops/list/src/lib.rs index 0d329c4c19..ac2152fb02 100644 --- a/svc/pkg/tier/ops/list/src/lib.rs +++ b/svc/pkg/tier/ops/list/src/lib.rs @@ -1,101 +1,87 @@ use proto::backend::{self, pkg::*}; use rivet_operation::prelude::*; - -// See pkr/static/nomad-config.hcl.tpl client.reserved -const RESERVE_SYSTEM_CPU: u64 = 500; -const RESERVE_SYSTEM_MEMORY: u64 = 512; - -// See module.traefik_job resources -const RESERVE_LB_CPU: u64 = 1500; -const RESERVE_LB_MEMORY: u64 = 512; - -const RESERVE_CPU: u64 = RESERVE_SYSTEM_CPU + RESERVE_LB_CPU; -const RESERVE_MEMORY: u64 = RESERVE_SYSTEM_MEMORY + RESERVE_LB_MEMORY; - -struct GameNodeConfig { - cpu_cores: u64, - cpu: u64, - memory: u64, - disk: 
u64, - bandwidth: u64, -} - -impl GameNodeConfig { - fn cpu_per_core(&self) -> u64 { - self.cpu / self.cpu_cores - } - - fn memory_per_core(&self) -> u64 { - self.memory / self.cpu_cores - } - - fn disk_per_core(&self) -> u64 { - self.disk / self.cpu_cores - } - - fn bandwidth_per_core(&self) -> u64 { - self.bandwidth / self.cpu_cores - } -} - -/// Returns the default game node config. -fn get_game_node_config() -> GameNodeConfig { - // TODO: CPU should be different based on the provider. For now, we use the - // minimum value from tf/prod/config.tf - - // Multiply config for 2 core, 4 GB to scale up to the 4 core, 8 GB - // plan - let mut config = GameNodeConfig { - cpu_cores: 4, - // DigitalOcean: 7,984 - // Linode: 7,996 - cpu: 7900, - // DigitalOcean: 7,957 - // Linode: 7,934 - memory: 7900, - disk: 64_000, - bandwidth: 2_000_000, - }; - - // Remove reserved resources - config.cpu -= RESERVE_CPU; - config.memory -= RESERVE_MEMORY; - - config -} +use util_cluster::JobNodeConfig; #[operation(name = "tier-list")] async fn handle(ctx: OperationContext) -> GlobalResult { - let region_ids = ctx - .region_ids - .iter() - .map(common::Uuid::as_uuid) - .collect::>(); - - let tiers = vec![ - generate_tier("basic-4d1", 4, 1), - generate_tier("basic-2d1", 2, 1), - generate_tier("basic-1d1", 1, 1), - generate_tier("basic-1d2", 1, 2), - generate_tier("basic-1d4", 1, 4), - generate_tier("basic-1d8", 1, 8), - generate_tier("basic-1d16", 1, 16), - ]; + let datacenters_res = op!([ctx] cluster_datacenter_get { + datacenter_ids: ctx.region_ids.clone(), + }) + .await?; - Ok(tier::list::Response { - regions: region_ids - .into_iter() - .map(|region_id| tier::list::response::Region { - region_id: Some(region_id.into()), - tiers: tiers.clone(), - }) + let hardware = datacenters_res + .datacenters + .iter() + .map(|dc| { + let job_pool = unwrap!( + dc.pools + .iter() + .find(|pool| pool.pool_type == backend::cluster::PoolType::Job as i32), + "no job pool" + ); + + // Choose the first 
hardware in the list + let hardware = unwrap!(job_pool.hardware.first(), "no hardware") + .provider_hardware + .clone(); + + Ok((dc.datacenter_id, hardware)) + }) + .collect::>>()?; + + let instance_types_res = op!([ctx] linode_instance_type_get { + hardware_ids: hardware + .iter() + .map(|(_, hardware)| hardware.clone()) .collect::>(), }) -} + .await?; + + let regions = hardware + .into_iter() + .map(|(datacenter_id, hardware)| { + let instance_type = unwrap!( + instance_types_res + .instance_types + .iter() + .find(|it| it.hardware_id == hardware), + "datacenter hardware stats not found" + ); + let config = JobNodeConfig::from_linode(instance_type); + + let config = + JobNodeConfig::from_linode(&linode::instance_type_get::response::InstanceType { + hardware_id: "".to_string(), + vcpus: 8, + memory: 2u64.pow(14), + disk: 2u64.pow(15) * 10, + transfer: 6_000, + }); + + Ok(tier::list::response::Region { + region_id: datacenter_id, + tiers: vec![ + generate_tier(&config, "basic-4d1", 4, 1), + generate_tier(&config, "basic-2d1", 2, 1), + generate_tier(&config, "basic-1d1", 1, 1), + generate_tier(&config, "basic-1d2", 1, 2), + generate_tier(&config, "basic-1d4", 1, 4), + generate_tier(&config, "basic-1d8", 1, 8), + generate_tier(&config, "basic-1d16", 1, 16), + ], + }) + }) + .collect::>>()?; -fn generate_tier(name: &str, numerator: u64, denominator: u64) -> backend::region::Tier { - let c = get_game_node_config(); + Ok(tier::list::Response { regions }) +} +fn generate_tier( + c: &JobNodeConfig, + name: &str, + numerator: u64, + denominator: u64, +) -> backend::region::Tier { backend::region::Tier { tier_name_id: name.into(), rivet_cores_numerator: numerator as u32, diff --git a/svc/pkg/tier/ops/list/tests/integration.rs b/svc/pkg/tier/ops/list/tests/integration.rs index f3336ea97e..5342833a62 100644 --- a/svc/pkg/tier/ops/list/tests/integration.rs +++ b/svc/pkg/tier/ops/list/tests/integration.rs @@ -2,9 +2,18 @@ use chirp_worker::prelude::*; #[worker_test] async fn 
empty(ctx: TestCtx) { - op!([ctx] tier_list { - region_ids: vec![Uuid::new_v4().into()] + let datacenters_res = op!([ctx] cluster_datacenter_list { + cluster_ids: vec![util::env::default_cluster_id().into()], }) .await .unwrap(); + let cluster = datacenters_res.clusters.first().unwrap(); + + let res = op!([ctx] tier_list { + region_ids: cluster.datacenter_ids.clone(), + }) + .await + .unwrap(); + + tracing::info!(?res); } diff --git a/svc/pkg/token/ops/create/src/lib.rs b/svc/pkg/token/ops/create/src/lib.rs index 76bd75d176..120492dd61 100644 --- a/svc/pkg/token/ops/create/src/lib.rs +++ b/svc/pkg/token/ops/create/src/lib.rs @@ -7,7 +7,7 @@ use rivet_operation::prelude::*; lazy_static::lazy_static! { /// The private EdDSA key in a PEM format. Corresponds to /// `rivet_claims::Config::jwt_key_public`. - static ref JWT_KEY_PRIVATE: String = std::env::var("RIVET_JWT_KEY_PRIVATE").unwrap(); + static ref JWT_KEY_PRIVATE: String = util::env::var("RIVET_JWT_KEY_PRIVATE").unwrap(); } #[operation(name = "token-create")] diff --git a/svc/pkg/upload/ops/prepare/src/lib.rs b/svc/pkg/upload/ops/prepare/src/lib.rs index a2b4f2fee3..d45b47ec09 100644 --- a/svc/pkg/upload/ops/prepare/src/lib.rs +++ b/svc/pkg/upload/ops/prepare/src/lib.rs @@ -134,7 +134,7 @@ async fn handle( total_content_length as i64, &ctx.bucket, user_id, - proto_provider as i32 as i64, + proto_provider as i64, paths, mimes, content_lengths, diff --git a/svc/pkg/upload/standalone/provider-fill/src/lib.rs b/svc/pkg/upload/standalone/provider-fill/src/lib.rs index c629a44d40..230638b7b4 100644 --- a/svc/pkg/upload/standalone/provider-fill/src/lib.rs +++ b/svc/pkg/upload/standalone/provider-fill/src/lib.rs @@ -39,7 +39,7 @@ pub async fn run_from_env() -> GlobalResult<()> { SET provider = $1 WHERE provider IS NULL ", - proto_provider as i32 as i64, + proto_provider as i64, ) .await?; diff --git a/svc/pkg/user/worker/src/workers/create.rs b/svc/pkg/user/worker/src/workers/create.rs index f64b5b6a86..371f34cf3a 100644 
--- a/svc/pkg/user/worker/src/workers/create.rs +++ b/svc/pkg/user/worker/src/workers/create.rs @@ -25,6 +25,7 @@ async fn worker(ctx: &OperationContext) -> GlobalRes namespace_ids: vec![namespace_id], }) .await?; + let version_id = unwrap!(unwrap!(namespace_res.namespaces.first()).version_id); let identity_config_res = op!([ctx] identity_config_version_get { diff --git a/svc/templates/api/tests/basic.rs b/svc/templates/api/tests/basic.rs index 21891ab4fd..2f3f541ed8 100644 --- a/svc/templates/api/tests/basic.rs +++ b/svc/templates/api/tests/basic.rs @@ -21,7 +21,7 @@ impl Ctx { let pools = rivet_pools::from_env("api-{{name}}-test").await.unwrap(); let cache = rivet_cache::CacheInner::new( "api-{{name}}-test".to_string(), - std::env::var("RIVET_SOURCE_HASH").unwrap(), + util::env::var("RIVET_SOURCE_HASH").unwrap(), pools.redis_cache().unwrap(), ); let client = chirp_client::SharedClient::from_env(pools.clone()) diff --git a/svc/templates/operation/src/lib.rs b/svc/templates/operation/src/lib.rs index ca5c0d2719..a012f438ff 100644 --- a/svc/templates/operation/src/lib.rs +++ b/svc/templates/operation/src/lib.rs @@ -7,7 +7,7 @@ pub async fn handle( ) -> GlobalResult<{{snake pkg}}::{{snake name}}::Response> { todo!(); - // Ok({{snake pkg}}::{{snake name}}::Response { + Ok({{snake pkg}}::{{snake name}}::Response { - // }) + }) } diff --git a/svc/templates/standalone/src/lib.rs b/svc/templates/standalone/src/lib.rs index 827b2cd711..a46d1d1d6b 100644 --- a/svc/templates/standalone/src/lib.rs +++ b/svc/templates/standalone/src/lib.rs @@ -1,6 +1,3 @@ -use std::collections::HashSet; - -use futures_util::StreamExt; use rivet_operation::prelude::*; #[tracing::instrument(skip_all)] diff --git a/svc/templates/worker/src/workers/mod.rs b/svc/templates/worker/src/workers/mod.rs index 9ca3ba1cd8..a723ed444b 100644 --- a/svc/templates/worker/src/workers/mod.rs +++ b/svc/templates/worker/src/workers/mod.rs @@ -1 +1,5 @@ pub mod {{snake name}}; + +chirp_worker::workers![ + {{snake 
name}}, +]; diff --git a/watches b/watches new file mode 100644 index 0000000000..e69de29bb2 diff --git a/yarn.lock b/yarn.lock new file mode 100644 index 0000000000..fb57ccd13a --- /dev/null +++ b/yarn.lock @@ -0,0 +1,4 @@ +# THIS IS AN AUTOGENERATED FILE. DO NOT EDIT THIS FILE DIRECTLY. +# yarn lockfile v1 + +